diff --git a/ovn_octavia_provider/agent.py b/ovn_octavia_provider/agent.py index 95613a2e..0d2e5dde 100644 --- a/ovn_octavia_provider/agent.py +++ b/ovn_octavia_provider/agent.py @@ -14,9 +14,13 @@ from oslo_log import log as logging +from ovn_octavia_provider.common import config as ovn_conf from ovn_octavia_provider import driver +from ovn_octavia_provider import helper as ovn_helper from ovn_octavia_provider.ovsdb import impl_idl_ovn +ovn_conf.register_opts() + LOG = logging.getLogger(__name__) OVN_EVENT_LOCK_NAME = "neutron_ovn_octavia_event_lock" @@ -24,7 +28,7 @@ OVN_EVENT_LOCK_NAME = "neutron_ovn_octavia_event_lock" def OvnProviderAgent(exit_event): - helper = driver.OvnProviderHelper() + helper = ovn_helper.OvnProviderHelper() events = [driver.LogicalRouterPortEvent(helper), driver.LogicalSwitchPortUpdateEvent(helper)] diff --git a/ovn_octavia_provider/common/config.py b/ovn_octavia_provider/common/config.py index 0109e1ca..807a08f7 100644 --- a/ovn_octavia_provider/common/config.py +++ b/ovn_octavia_provider/common/config.py @@ -58,14 +58,36 @@ ovn_opts = [ 'to 60 seconds.')), ] -cfg.CONF.register_opts(ovn_opts, group='ovn') -ks_loading.register_auth_conf_options(cfg.CONF, 'service_auth') -ks_loading.register_session_conf_options(cfg.CONF, 'service_auth') +neutron_opts = [ + cfg.StrOpt('service_name', + help=_('The name of the neutron service in the ' + 'keystone catalog')), + cfg.StrOpt('endpoint', help=_('A new endpoint to override the endpoint ' + 'in the keystone catalog.')), + cfg.StrOpt('region_name', + help=_('Region in Identity service catalog to use for ' + 'communication with the OpenStack services.')), + cfg.StrOpt('endpoint_type', default='publicURL', + help=_('Endpoint interface in identity service to use')), + cfg.StrOpt('ca_certificates_file', + help=_('CA certificates file path')), + cfg.BoolOpt('insecure', + default=False, + help=_('Disable certificate validation on SSL connections ')), +] + + +def register_opts(): + cfg.CONF.register_opts(ovn_opts, group='ovn') + cfg.CONF.register_opts(neutron_opts, group='neutron') + ks_loading.register_auth_conf_options(cfg.CONF, 'service_auth') + ks_loading.register_session_conf_options(cfg.CONF, 'service_auth') def list_opts(): return [ ('ovn', ovn_opts), + ('neutron', neutron_opts), ] diff --git a/ovn_octavia_provider/driver.py b/ovn_octavia_provider/driver.py index c2dd078a..38a26cc2 100644 --- a/ovn_octavia_provider/driver.py +++ b/ovn_octavia_provider/driver.py @@ -13,36 +13,25 @@ # under the License. import copy -import queue import re -import threading import netaddr -from neutronclient.common import exceptions as n_exc from octavia_lib.api.drivers import data_models as o_datamodels -from octavia_lib.api.drivers import driver_lib as o_driver_lib from octavia_lib.api.drivers import exceptions as driver_exceptions from octavia_lib.api.drivers import provider_base as driver_base from octavia_lib.common import constants -from oslo_config import cfg from oslo_log import log as logging -from oslo_serialization import jsonutils -from ovs.stream import Stream from ovsdbapp.backend.ovs_idl import event as row_event -from ovsdbapp.backend.ovs_idl import idlutils -import tenacity -from ovn_octavia_provider.common import clients from ovn_octavia_provider.common import config as ovn_conf # TODO(mjozefcz): Start consuming const and utils # from neutron-lib once released. 
 from ovn_octavia_provider.common import constants as ovn_const
 from ovn_octavia_provider.common import exceptions as ovn_exc
-from ovn_octavia_provider.common import utils
+from ovn_octavia_provider import helper as ovn_helper
 from ovn_octavia_provider.i18n import _
-from ovn_octavia_provider.ovsdb import impl_idl_ovn
 
-CONF = cfg.CONF  # Gets Octavia Conf as it runs under o-api domain
+ovn_conf.register_opts()
 
 LOG = logging.getLogger(__name__)
 
@@ -95,1756 +84,11 @@ class LogicalSwitchPortUpdateEvent(row_event.RowEvent):
         self.driver.vip_port_update_handler(row)
 
 
-class OvnProviderHelper(object):
-
-    def __init__(self):
-        self.requests = queue.Queue()
-        self.helper_thread = threading.Thread(target=self.request_handler)
-        self.helper_thread.daemon = True
-        self._octavia_driver_lib = o_driver_lib.DriverLibrary()
-        self._check_and_set_ssl_files()
-        self._init_lb_actions()
-
-        # NOTE(mjozefcz): This API is only for handling octavia API requests.
-        self.ovn_nbdb = impl_idl_ovn.OvnNbIdlForLb()
-        self.ovn_nbdb_api = self.ovn_nbdb.start()
-
-        self.helper_thread.start()
-
-    def _init_lb_actions(self):
-        self._lb_request_func_maps = {
-            ovn_const.REQ_TYPE_LB_CREATE: self.lb_create,
-            ovn_const.REQ_TYPE_LB_DELETE: self.lb_delete,
-            ovn_const.REQ_TYPE_LB_UPDATE: self.lb_update,
-            ovn_const.REQ_TYPE_LB_FAILOVER: self.lb_failover,
-            ovn_const.REQ_TYPE_LISTENER_CREATE: self.listener_create,
-            ovn_const.REQ_TYPE_LISTENER_DELETE: self.listener_delete,
-            ovn_const.REQ_TYPE_LISTENER_UPDATE: self.listener_update,
-            ovn_const.REQ_TYPE_POOL_CREATE: self.pool_create,
-            ovn_const.REQ_TYPE_POOL_DELETE: self.pool_delete,
-            ovn_const.REQ_TYPE_POOL_UPDATE: self.pool_update,
-            ovn_const.REQ_TYPE_MEMBER_CREATE: self.member_create,
-            ovn_const.REQ_TYPE_MEMBER_DELETE: self.member_delete,
-            ovn_const.REQ_TYPE_MEMBER_UPDATE: self.member_update,
-            ovn_const.REQ_TYPE_LB_CREATE_LRP_ASSOC: self.lb_create_lrp_assoc,
-            ovn_const.REQ_TYPE_LB_DELETE_LRP_ASSOC: self.lb_delete_lrp_assoc,
-            ovn_const.REQ_TYPE_HANDLE_VIP_FIP: self.handle_vip_fip,
-            ovn_const.REQ_TYPE_HANDLE_MEMBER_DVR: self.handle_member_dvr,
-        }
-
-    @staticmethod
-    def _is_lb_empty(external_ids):
-        """Check if there is no pool or listener defined."""
-        return not any([k.startswith('listener') or k.startswith('pool')
-                        for k in external_ids])
-
-    @staticmethod
-    def _delete_disabled_from_status(status):
-        d_regex = ':%s$' % ovn_const.DISABLED_RESOURCE_SUFFIX
-        return {
-            k: [{c: re.sub(d_regex, '', d) for c, d in i.items()}
-                for i in v]
-            for k, v in status.items()}
-
-    def _check_and_set_ssl_files(self):
-        # TODO(reedip): Make ovsdb_monitor's _check_and_set_ssl_files() public
-        # This is a copy of ovsdb_monitor._check_and_set_ssl_files
-        priv_key_file = ovn_conf.get_ovn_nb_private_key()
-        cert_file = ovn_conf.get_ovn_nb_certificate()
-        ca_cert_file = ovn_conf.get_ovn_nb_ca_cert()
-        if priv_key_file:
-            Stream.ssl_set_private_key_file(priv_key_file)
-
-        if cert_file:
-            Stream.ssl_set_certificate_file(cert_file)
-
-        if ca_cert_file:
-            Stream.ssl_set_ca_cert_file(ca_cert_file)
-
-    def shutdown(self):
-        self.requests.put({'type': ovn_const.REQ_TYPE_EXIT})
-        self.helper_thread.join()
-        self.ovn_nbdb.stop()
-
-    @staticmethod
-    def _map_val(row, col, key):
-        # If the row doesn't exist, RowNotFound is raised by _map_val
-        # and is expected to be caught by the caller.
-        try:
-            return getattr(row, col)[key]
-        except KeyError:
-            raise idlutils.RowNotFound(table=row._table.name,
-                                       col=col, match=key)
-
-    def _get_nw_router_info_on_interface_event(self, lrp):
-        """Get the Router and Network information on an interface event
-
-        This function is called when a new interface between a router and
-        a network is added or deleted.
-        Input: Logical Router Port row which is coming from
-               LogicalRouterPortEvent.
-        Output: A row from router table and network table matching the router
-                and network for which the event was generated.
-        Exception: RowNotFound exception can be generated.
-        """
-        router = self.ovn_nbdb_api.lookup(
-            'Logical_Router', utils.ovn_name(self._map_val(
-                lrp, 'external_ids', ovn_const.OVN_ROUTER_NAME_EXT_ID_KEY)))
-        network = self.ovn_nbdb_api.lookup(
-            'Logical_Switch',
-            self._map_val(lrp, 'external_ids',
-                          ovn_const.OVN_NETWORK_NAME_EXT_ID_KEY))
-        return router, network
-
-    def _clean_lb_if_empty(self, ovn_lb, lb_id, external_ids):
-        commands = []
-        lb_to_delete = False
-        if OvnProviderHelper._is_lb_empty(external_ids):
-            # Verify whether this is the only OVN LB defined. If so, leave
-            # it with an undefined protocol. If there is another one for a
-            # different protocol, remove this one.
-            try:
-                defined_ovn_lbs = self._find_ovn_lbs(lb_id)
-            except idlutils.RowNotFound:
-                defined_ovn_lbs = []
-            if len(defined_ovn_lbs) == 1:
-                commands.append(
-                    self.ovn_nbdb_api.db_set(
-                        'Load_Balancer', ovn_lb.uuid, ('protocol', [])))
-            elif len(defined_ovn_lbs) > 1:
-                # Delete the lb.
-                commands.append(self.ovn_nbdb_api.lb_del(ovn_lb.uuid))
-                lb_to_delete = True
-        return (commands, lb_to_delete)
-
-    def lb_delete_lrp_assoc_handler(self, row):
-        try:
-            router, network = self._get_nw_router_info_on_interface_event(row)
-        except idlutils.RowNotFound:
-            LOG.debug("Router or network information not found")
-            return
-        request_info = {'network': network,
-                        'router': router}
-        self.add_request({'type': ovn_const.REQ_TYPE_LB_DELETE_LRP_ASSOC,
-                          'info': request_info})
-
-    def lb_delete_lrp_assoc(self, info):
-        # TODO(reedip): When OVS>=2.12, LB can be deleted without removing
-        # Network and Router references as pushed in the patch
-        # https://github.com/openvswitch/ovs/commit
-        # /612f80fa8ebf88dad2e204364c6c02b451dca36c
-        commands = []
-        network = info['network']
-        router = info['router']
-
-        # Find all loadbalancers which have a reference with the network
-        nw_lb = self._find_lb_in_ls(network=network)
-        # Find all loadbalancers which have a reference with the router
-        r_lb = set(router.load_balancer) - nw_lb
-        # Delete all LB on N/W from Router
-        for nlb in nw_lb:
-            commands.extend(self._update_lb_to_lr_association(nlb, router,
-                                                              delete=True))
-        # Delete all LB on Router from N/W
-        for rlb in r_lb:
-            commands.append(self.ovn_nbdb_api.ls_lb_del(
-                network.uuid, rlb.uuid))
-        if commands:
-            self._execute_commands(commands)
-
-    def lb_create_lrp_assoc_handler(self, row):
-        try:
-            router, network = self._get_nw_router_info_on_interface_event(row)
-        except idlutils.RowNotFound:
-            LOG.debug("Router or network information not found")
-            return
-        request_info = {'network': network,
-                        'router': router}
-        self.add_request({'type': ovn_const.REQ_TYPE_LB_CREATE_LRP_ASSOC,
-                          'info': request_info})
-
-    def lb_create_lrp_assoc(self, info):
-        commands = []
-
-        router_lb = set(info['router'].load_balancer)
-        network_lb = set(info['network'].load_balancer)
-        # Add only those lb to routers which are unique to the network
-        for lb in (network_lb - router_lb):
-            commands.extend(self._update_lb_to_lr_association(
-                lb, info['router']))
-
-        # Add those lb to the network which are unique to the router
-        for lb in (router_lb - network_lb):
-            commands.append(self.ovn_nbdb_api.ls_lb_add(
-                info['network'].uuid, lb.uuid, may_exist=True))
-        if commands:
-            self._execute_commands(commands)
-
-    def vip_port_update_handler(self, vip_lp):
-        """Handler for VirtualIP port updates.
-
-        If a floating ip is associated to a vip port, then networking-ovn
-        sets the fip in the external_ids column of the logical port as:
-        Logical_Switch_Port.external_ids:port_fip = <FIP>.
-        Then, in the Load_Balancer table for the vip, networking-ovn creates
-        another vip entry for the FIP.
-        If a floating ip is disassociated from the vip, then it deletes
-        the vip entry for the FIP.
-        """
-
-        port_name = vip_lp.external_ids.get(ovn_const.OVN_PORT_NAME_EXT_ID_KEY)
-        lb_id = port_name[len(ovn_const.LB_VIP_PORT_PREFIX):]
-        try:
-            ovn_lbs = self._find_ovn_lbs_with_retry(lb_id)
-        except idlutils.RowNotFound:
-            LOG.debug("Loadbalancer %s not found!", lb_id)
-            return
-
-        # Loop over all defined LBs with the given ID, because it is possible
-        # that there is more than one (for more than 1 L4 protocol).
-        for lb in ovn_lbs:
-            fip = vip_lp.external_ids.get(ovn_const.OVN_PORT_FIP_EXT_ID_KEY)
-            lb_vip_fip = lb.external_ids.get(ovn_const.LB_EXT_IDS_VIP_FIP_KEY)
-            request_info = {'ovn_lb': lb,
-                            'vip_fip': fip}
-            if fip and fip != lb_vip_fip:
-                request_info['action'] = ovn_const.REQ_INFO_ACTION_ASSOCIATE
-            elif fip is None and fip != lb_vip_fip:
-                request_info['action'] = ovn_const.REQ_INFO_ACTION_DISASSOCIATE
-            else:
-                continue
-            self.add_request({'type': ovn_const.REQ_TYPE_HANDLE_VIP_FIP,
-                              'info': request_info})
-
-    def _find_lb_in_ls(self, network):
-        """Find LB associated to a Network using Network information
-
-        This function retrieves those loadbalancers whose ls_ref
-        column in the OVN northbound database's load_balancer table
-        has the network's name. Though different networks can be
-        associated with a loadbalancer, the ls_ref of a loadbalancer
-        points to the network where it was actually created, and this
-        function tries to retrieve all those loadbalancers created on this
-        network.
-        Input : row of type Logical_Switch
-        Output: set of rows of type Load_Balancer or empty set
-        """
-        return {lb for lb in network.load_balancer
-                if network.name in lb.external_ids.get(
-                    ovn_const.LB_EXT_IDS_LS_REFS_KEY,
-                    [])}
-
-    def _find_lb_in_table(self, lb, table):
-        return [item for item in self.ovn_nbdb_api.tables[table].rows.values()
-                if lb in item.load_balancer]
-
-    def request_handler(self):
-        while True:
-            try:
-                request = self.requests.get()
-                request_type = request['type']
-                if request_type == ovn_const.REQ_TYPE_EXIT:
-                    break
-
-                request_handler = self._lb_request_func_maps.get(request_type)
-                if request_handler:
-                    status = request_handler(request['info'])
-                    if status:
-                        self._update_status_to_octavia(status)
-                self.requests.task_done()
-            except Exception:
-                # If any unexpected exception happens we don't want the
-                # notify_loop to exit.
-                LOG.exception('Unexpected exception in request_handler')
-
-    def add_request(self, req):
-        self.requests.put(req)
-
-    def _update_status_to_octavia(self, status):
-        try:
-            status = OvnProviderHelper._delete_disabled_from_status(status)
-            LOG.debug('Updating status to octavia: %s', status)
-            self._octavia_driver_lib.update_loadbalancer_status(status)
-        except driver_exceptions.UpdateStatusError as e:
-            msg = ("Error while updating the load balancer "
-                   "status: %s") % e.fault_string
-            LOG.error(msg)
-            raise driver_exceptions.UpdateStatusError(msg)
-
-    @tenacity.retry(
-        retry=tenacity.retry_if_exception_type(idlutils.RowNotFound),
-        wait=tenacity.wait_exponential(),
-        stop=tenacity.stop_after_delay(10),
-        reraise=True)
-    def _find_ovn_lbs_with_retry(self, lb_id, protocol=None):
-        return self._find_ovn_lbs(lb_id, protocol=protocol)
-
-    def _find_ovn_lbs(self, lb_id, protocol=None):
-        """Find the Loadbalancers in OVN with the given lb_id as its name
-
-        This function searches for the LoadBalancers whose name matches the
-        pattern passed in lb_id.
-        @param lb_id: LoadBalancer ID provided by Octavia in its API
-               request. Note that OVN saves the above ID in the 'name' column.
-        @type lb_id: str
-        @param protocol: Loadbalancer protocol.
-        @type protocol: str or None if not defined.
-
-        :returns: LoadBalancer row if protocol specified
-                  or list of rows matching the lb_id.
-        :raises: RowNotFound can be generated if the LoadBalancer is not
-                 found.
-        """
-        lbs = self.ovn_nbdb_api.db_find_rows(
-            'Load_Balancer', ('name', '=', lb_id)).execute()
-        if not protocol:
-            if lbs:
-                return lbs
-            raise idlutils.RowNotFound(table='Load_Balancer',
-                                       col='name', match=lb_id)
-        # If there is only one LB and it has no protocol defined, it is a
-        # 'clean' LB record without any listener.
-        if len(lbs) == 1 and not lbs[0].protocol:
-            return lbs[0]
-        # Search for other lbs.
-        for lb in lbs:
-            if lb.protocol[0].upper() == protocol.upper():
-                return lb
-        raise idlutils.RowNotFound(table='Load_Balancer',
-                                   col='name', match=lb_id)
-
-    def _get_or_create_ovn_lb(self, lb_id, protocol, admin_state_up):
-        """Find or create an ovn lb with the given protocol
-
-        Find the loadbalancer configured with the given protocol or
-        create one if not found.
-        """
-        # Make sure that it's lowercase - OVN NBDB stores lowercase
-        # values for this field.
-        protocol = protocol.lower()
-        ovn_lbs = self._find_ovn_lbs(lb_id)
-        lbs_with_required_protocol = [
-            ovn_lb for ovn_lb in ovn_lbs
-            if protocol in ovn_lb.protocol]
-        lbs_with_no_protocol = [ovn_lb for ovn_lb in ovn_lbs
-                                if not ovn_lb.protocol]
-        if lbs_with_required_protocol:
-            # We found an existing LB with the required
-            # protocol, just return it.
-            return lbs_with_required_protocol[0]
-        elif lbs_with_no_protocol:
-            ovn_lb = lbs_with_no_protocol[0]
-            # Set required protocol here.
-            self.ovn_nbdb_api.db_set(
-                'Load_Balancer', ovn_lb.uuid,
-                ('protocol', protocol)).execute(check_error=True)
-        else:
-            # NOTE(mjozefcz): Looks like a loadbalancer with the given
-            # protocol doesn't exist. Try to add it with the required
-            # protocol by copying the existing one's data.
-            lb_info = {
-                'id': lb_id,
-                'protocol': protocol,
-                'vip_address': ovn_lbs[0].external_ids.get(
-                    ovn_const.LB_EXT_IDS_VIP_KEY),
-                'vip_port_id':
-                    ovn_lbs[0].external_ids.get(
-                        ovn_const.LB_EXT_IDS_VIP_PORT_ID_KEY),
-                ovn_const.LB_EXT_IDS_LR_REF_KEY:
-                    ovn_lbs[0].external_ids.get(
-                        ovn_const.LB_EXT_IDS_LR_REF_KEY),
-                ovn_const.LB_EXT_IDS_LS_REFS_KEY:
-                    ovn_lbs[0].external_ids.get(
-                        ovn_const.LB_EXT_IDS_LS_REFS_KEY),
-                'admin_state_up': admin_state_up}
-            # NOTE(mjozefcz): Handle vip_fip info if it exists.
-            vip_fip = ovn_lbs[0].external_ids.get(
-                ovn_const.LB_EXT_IDS_VIP_FIP_KEY)
-            if vip_fip:
-                lb_info.update({ovn_const.LB_EXT_IDS_VIP_FIP_KEY: vip_fip})
-            self.lb_create(lb_info, protocol=protocol)
-        # Looks like we've just added a new LB
-        # or updated an existing, empty one.
-        return self._find_ovn_lbs(lb_id, protocol=protocol)
-
-    def _find_ovn_lb_with_pool_key(self, pool_key):
-        lbs = self.ovn_nbdb_api.db_list_rows(
-            'Load_Balancer').execute(check_error=True)
-        for lb in lbs:
-            if pool_key in lb.external_ids:
-                return lb
-
-    def _find_ovn_lb_by_pool_id(self, pool_id):
-        pool_key = self._get_pool_key(pool_id)
-        ovn_lb = self._find_ovn_lb_with_pool_key(pool_key)
-        if not ovn_lb:
-            pool_key = self._get_pool_key(pool_id, is_enabled=False)
-            ovn_lb = self._find_ovn_lb_with_pool_key(pool_key)
-        return pool_key, ovn_lb
-
-    def _execute_commands(self, commands):
-        with self.ovn_nbdb_api.transaction(check_error=True) as txn:
-            for command in commands:
-                txn.add(command)
-
-    def _update_lb_to_ls_association(self, ovn_lb, network_id=None,
-                                     subnet_id=None, associate=True):
-        """Update LB association with Logical Switch
-
-        This function deals with updating the References of Logical Switch
-        in LB and addition of LB to LS.
-        """
-        commands = []
-        if not network_id and not subnet_id:
-            return commands
-
-        if network_id:
-            ls_name = utils.ovn_name(network_id)
-        else:
-            neutron_client = clients.get_neutron_client()
-            try:
-                subnet = neutron_client.show_subnet(subnet_id)
-                ls_name = utils.ovn_name(subnet['subnet']['network_id'])
-            except n_exc.NotFound:
-                LOG.warning('Subnet %s not found while trying to '
-                            'fetch its data.', subnet_id)
-                ls_name = None
-        ovn_ls = None
-
-        if ls_name:
-            try:
-                ovn_ls = self.ovn_nbdb_api.ls_get(ls_name).execute(
-                    check_error=True)
-            except idlutils.RowNotFound:
-                LOG.warning("LogicalSwitch %s could not be found.", ls_name)
-                if associate:
-                    LOG.warning('Cannot associate LB %(lb)s to '
-                                'LS %(ls)s because LS row '
-                                'not found in OVN NBDB. Exiting.',
-                                {'ls': ls_name, 'lb': ovn_lb.name})
-                    return commands
-                ovn_ls = None
-
-        ls_refs = ovn_lb.external_ids.get(ovn_const.LB_EXT_IDS_LS_REFS_KEY)
-        if ls_refs:
-            try:
-                ls_refs = jsonutils.loads(ls_refs)
-            except ValueError:
-                ls_refs = {}
-        else:
-            ls_refs = {}
-
-        if associate and ls_name:
-            if ls_name in ls_refs:
-                ref_ct = ls_refs[ls_name]
-                ls_refs[ls_name] = ref_ct + 1
-            else:
-                ls_refs[ls_name] = 1
-                if ovn_ls:
-                    commands.append(self.ovn_nbdb_api.ls_lb_add(
-                        ovn_ls.uuid, ovn_lb.uuid, may_exist=True))
-        else:
-            if ls_name not in ls_refs:
-                # Nothing to be done.
- return commands - - ref_ct = ls_refs[ls_name] - if ref_ct == 1: - del ls_refs[ls_name] - if ovn_ls: - commands.append(self.ovn_nbdb_api.ls_lb_del( - ovn_ls.uuid, ovn_lb.uuid, if_exists=True)) - else: - ls_refs[ls_name] = ref_ct - 1 - - ls_refs = {ovn_const.LB_EXT_IDS_LS_REFS_KEY: jsonutils.dumps(ls_refs)} - commands.append(self.ovn_nbdb_api.db_set( - 'Load_Balancer', ovn_lb.uuid, - ('external_ids', ls_refs))) - - return commands - - def _del_lb_to_lr_association(self, ovn_lb, ovn_lr, lr_ref): - commands = [] - if lr_ref: - try: - lr_ref = [r for r in - [lr.strip() for lr in lr_ref.split(',')] - if r != ovn_lr.name] - except ValueError: - msg = ('The loadbalancer %(lb)s is not associated with ' - 'the router %(router)s' % - {'lb': ovn_lb.name, - 'router': ovn_lr.name}) - LOG.warning(msg) - if lr_ref: - commands.append( - self.ovn_nbdb_api.db_set( - 'Load_Balancer', ovn_lb.uuid, - ('external_ids', - {ovn_const.LB_EXT_IDS_LR_REF_KEY: ','.join(lr_ref)}))) - else: - commands.append( - self.ovn_nbdb_api.db_remove( - 'Load_Balancer', ovn_lb.uuid, 'external_ids', - (ovn_const.LB_EXT_IDS_LR_REF_KEY))) - commands.append( - self.ovn_nbdb_api.lr_lb_del(ovn_lr.uuid, ovn_lb.uuid, - if_exists=True)) - for net in self._find_ls_for_lr(ovn_lr): - commands.append(self.ovn_nbdb_api.ls_lb_del( - net, ovn_lb.uuid, if_exists=True)) - return commands - - def _add_lb_to_lr_association(self, ovn_lb, ovn_lr, lr_rf): - commands = [] - commands.append( - self.ovn_nbdb_api.lr_lb_add(ovn_lr.uuid, ovn_lb.uuid, - may_exist=True)) - for net in self._find_ls_for_lr(ovn_lr): - commands.append(self.ovn_nbdb_api.ls_lb_add( - net, ovn_lb.uuid, may_exist=True)) - - if ovn_lr.name not in str(lr_rf): - # Multiple routers in lr_rf are separated with ',' - if lr_rf: - lr_rf = {ovn_const.LB_EXT_IDS_LR_REF_KEY: - "%s,%s" % (lr_rf, ovn_lr.name)} - else: - lr_rf = {ovn_const.LB_EXT_IDS_LR_REF_KEY: ovn_lr.name} - commands.append( - self.ovn_nbdb_api.db_set('Load_Balancer', ovn_lb.uuid, - ('external_ids', lr_rf))) - return commands - - def _update_lb_to_lr_association(self, ovn_lb, ovn_lr, delete=False): - lr_ref = ovn_lb.external_ids.get(ovn_const.LB_EXT_IDS_LR_REF_KEY) - if delete: - return self._del_lb_to_lr_association(ovn_lb, ovn_lr, lr_ref) - else: - return self._add_lb_to_lr_association(ovn_lb, ovn_lr, lr_ref) - - def _find_ls_for_lr(self, router): - neutron_client = clients.get_neutron_client() - ls = [] - for port in router.ports: - if port.gateway_chassis: - continue - sids = port.external_ids.get( - ovn_const.OVN_SUBNET_EXT_IDS_KEY, '').split(' ') - for sid in sids: - try: - subnet = neutron_client.show_subnet(sid) - ls.append(utils.ovn_name(subnet['subnet']['network_id'])) - except n_exc.NotFound: - LOG.exception('Subnet %s not found while trying to ' - 'fetch its data.', sid) - return ls - - def _find_lr_of_ls(self, ovn_ls): - lsp_router_port = None - for port in ovn_ls.ports or []: - if port.type == 'router': - lsp_router_port = port - break - else: - return - - lrp_name = lsp_router_port.options.get('router-port') - if not lrp_name: - return - - for lr in self.ovn_nbdb_api.tables['Logical_Router'].rows.values(): - for lrp in lr.ports: - if lrp.name == lrp_name: - return lr - # Handles networks with only gateway port in the router - if (utils.ovn_lrouter_port_name( - lr.external_ids.get(ovn_const.OVN_GW_PORT_EXT_ID_KEY)) == - lrp_name): - return lr - - def _get_listener_key(self, listener_id, is_enabled=True): - listener_key = ovn_const.LB_EXT_IDS_LISTENER_PREFIX + str(listener_id) - if not is_enabled: - listener_key += 
':' + ovn_const.DISABLED_RESOURCE_SUFFIX - return listener_key - - def _get_pool_key(self, pool_id, is_enabled=True): - pool_key = ovn_const.LB_EXT_IDS_POOL_PREFIX + str(pool_id) - if not is_enabled: - pool_key += ':' + ovn_const.DISABLED_RESOURCE_SUFFIX - return pool_key - - def _extract_member_info(self, member): - mem_info = [] - if member: - for mem in member.split(','): - mem_ip_port = mem.split('_')[2] - mem_info.append(tuple(mem_ip_port.rsplit(':', 1))) - return mem_info - - def _get_member_key(self, member, old_convention=False): - member_info = '' - if isinstance(member, dict): - member_info = '%s%s_%s:%s' % ( - ovn_const.LB_EXT_IDS_MEMBER_PREFIX, - member[constants.ID], - member[constants.ADDRESS], - member[constants.PROTOCOL_PORT]) - if not old_convention and member.get(constants.SUBNET_ID): - member_info += "_" + member[constants.SUBNET_ID] - elif isinstance(member, o_datamodels.Member): - member_info = '%s%s_%s:%s' % ( - ovn_const.LB_EXT_IDS_MEMBER_PREFIX, - member.member_id, - member.address, - member.protocol_port) - if not old_convention and member.subnet_id: - member_info += "_" + member.subnet_id - return member_info - - def _make_listener_key_value(self, listener_port, pool_id): - return str(listener_port) + ':' + pool_id - - def _extract_listener_key_value(self, listener_value): - v = listener_value.split(':') - if len(v) == 2: - return (v[0], v[1]) - else: - return (None, None) - - def _is_listener_disabled(self, listener_key): - v = listener_key.split(':') - if len(v) == 2 and v[1] == ovn_const.DISABLED_RESOURCE_SUFFIX: - return True - - return False - - def _get_pool_listeners(self, ovn_lb, pool_key): - pool_listeners = [] - for k, v in ovn_lb.external_ids.items(): - if ovn_const.LB_EXT_IDS_LISTENER_PREFIX not in k: - continue - vip_port, p_key = self._extract_listener_key_value(v) - if pool_key == p_key: - pool_listeners.append( - k[len(ovn_const.LB_EXT_IDS_LISTENER_PREFIX):]) - return pool_listeners - - def _frame_vip_ips(self, lb_external_ids): - vip_ips = {} - # If load balancer is disabled, return - if lb_external_ids.get('enabled') == 'False': - return vip_ips - - lb_vip = lb_external_ids[ovn_const.LB_EXT_IDS_VIP_KEY] - vip_fip = lb_external_ids.get(ovn_const.LB_EXT_IDS_VIP_FIP_KEY) - - for k, v in lb_external_ids.items(): - if (ovn_const.LB_EXT_IDS_LISTENER_PREFIX not in k or - self._is_listener_disabled(k)): - continue - - vip_port, pool_id = self._extract_listener_key_value(v) - if not vip_port or not pool_id: - continue - - if pool_id not in lb_external_ids or not lb_external_ids[pool_id]: - continue - - ips = [] - for member_ip, member_port in self._extract_member_info( - lb_external_ids[pool_id]): - if netaddr.IPNetwork(member_ip).version == 6: - ips.append('[%s]:%s' % (member_ip, member_port)) - else: - ips.append('%s:%s' % (member_ip, member_port)) - - if netaddr.IPNetwork(lb_vip).version == 6: - lb_vip = '[%s]' % lb_vip - vip_ips[lb_vip + ':' + vip_port] = ','.join(ips) - - if vip_fip: - if netaddr.IPNetwork(vip_fip).version == 6: - vip_fip = '[%s]' % vip_fip - vip_ips[vip_fip + ':' + vip_port] = ','.join(ips) - - return vip_ips - - def _refresh_lb_vips(self, ovn_lb_uuid, lb_external_ids): - vip_ips = self._frame_vip_ips(lb_external_ids) - return [self.ovn_nbdb_api.db_clear('Load_Balancer', ovn_lb_uuid, - 'vips'), - self.ovn_nbdb_api.db_set('Load_Balancer', ovn_lb_uuid, - ('vips', vip_ips))] - - def _is_listener_in_lb(self, lb): - for key in list(lb.external_ids): - if key.startswith(ovn_const.LB_EXT_IDS_LISTENER_PREFIX): - return True - return False - 
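
All of the key helpers above implement one encoding: listeners, pools and
members are flattened into the OVN Load_Balancer row's external_ids map as
'listener_<id>', 'pool_<id>' and comma-separated
'member_<id>_<ip>:<port>[_<subnet_id>]' values, and the vips column is then
re-framed from those entries. A minimal standalone sketch of the member
convention, mirroring the parsing steps used above (illustrative only, not
part of the patch; extract_member_info here is a hypothetical free function):

    # Parse a pool's external_ids value into (ip, port) tuples, using the
    # same split(',') / split('_')[2] / rsplit(':', 1) steps as the helper.
    def extract_member_info(pool_value):
        info = []
        for mem in pool_value.split(','):
            ip_port = mem.split('_')[2]
            info.append(tuple(ip_port.rsplit(':', 1)))
        return info

    pool_value = ('member_aaaa_192.168.2.149:80,'
                  'member_bbbb_192.168.2.150:80')
    assert extract_member_info(pool_value) == [('192.168.2.149', '80'),
                                               ('192.168.2.150', '80')]
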
-    def check_lb_protocol(self, lb_id, listener_protocol):
-        ovn_lb = self._find_ovn_lbs(lb_id, protocol=listener_protocol)
-        if not ovn_lb:
-            return False
-        elif not self._is_listener_in_lb(ovn_lb):
-            return True
-        else:
-            return str(listener_protocol).lower() in ovn_lb.protocol
-
-    def lb_create(self, loadbalancer, protocol=None):
-        port = None
-        neutron_client = clients.get_neutron_client()
-        if loadbalancer.get(constants.VIP_PORT_ID):
-            # In case we don't have vip_network_id
-            port = neutron_client.show_port(
-                loadbalancer[constants.VIP_PORT_ID])['port']
-        elif (loadbalancer.get(constants.VIP_NETWORK_ID) and
-              loadbalancer.get(constants.VIP_ADDRESS)):
-            ports = neutron_client.list_ports(
-                network_id=loadbalancer[constants.VIP_NETWORK_ID])
-            for p in ports['ports']:
-                for ip in p['fixed_ips']:
-                    if ip['ip_address'] == loadbalancer[constants.VIP_ADDRESS]:
-                        port = p
-                        break
-
-        # If protocol is set, make sure it's lowercase.
-        protocol = protocol.lower() if protocol else None
-        # In case port is not found for the vip_address we will see an
-        # exception when port['id'] is accessed.
-        external_ids = {
-            ovn_const.LB_EXT_IDS_VIP_KEY: loadbalancer[constants.VIP_ADDRESS],
-            ovn_const.LB_EXT_IDS_VIP_PORT_ID_KEY:
-                loadbalancer.get(constants.VIP_PORT_ID) or port['id'],
-            'enabled': str(loadbalancer[constants.ADMIN_STATE_UP])}
-        # In case vip_fip was passed - use it.
-        vip_fip = loadbalancer.get(ovn_const.LB_EXT_IDS_VIP_FIP_KEY)
-        if vip_fip:
-            external_ids[ovn_const.LB_EXT_IDS_VIP_FIP_KEY] = vip_fip
-        # In case of lr_ref passed - use it.
-        lr_ref = loadbalancer.get(ovn_const.LB_EXT_IDS_LR_REF_KEY)
-        if lr_ref:
-            external_ids[ovn_const.LB_EXT_IDS_LR_REF_KEY] = lr_ref
-
-        try:
-            self.ovn_nbdb_api.db_create(
-                'Load_Balancer', name=loadbalancer[constants.ID],
-                protocol=protocol,
-                external_ids=external_ids).execute(check_error=True)
-            ovn_lb = self._find_ovn_lbs(
-                loadbalancer[constants.ID],
-                protocol=protocol)
-            ovn_lb = ovn_lb if protocol else ovn_lb[0]
-            commands = self._update_lb_to_ls_association(
-                ovn_lb, network_id=port['network_id'],
-                associate=True)
-            ls_name = utils.ovn_name(port['network_id'])
-            ovn_ls = self.ovn_nbdb_api.ls_get(ls_name).execute(
-                check_error=True)
-            ovn_lr = self._find_lr_of_ls(ovn_ls)
-            if ovn_lr:
-                commands.extend(self._update_lb_to_lr_association(
-                    ovn_lb, ovn_lr))
-
-            # NOTE(mjozefcz): In case LS references were passed -
-            # apply them to the new LB. That could happen in case we
-            # need another loadbalancer for another L4 protocol.
-            ls_refs = loadbalancer.get(ovn_const.LB_EXT_IDS_LS_REFS_KEY)
-            if ls_refs:
-                try:
-                    ls_refs = jsonutils.loads(ls_refs)
-                except ValueError:
-                    ls_refs = {}
-                for ls in ls_refs:
-                    # Skip previously added LS because we don't want
-                    # to duplicate.
-                    if ls == ovn_ls.name:
-                        continue
-                    commands.extend(self._update_lb_to_ls_association(
-                        ovn_lb, network_id=ls.replace('neutron-', ''),
-                        associate=True))
-
-            self._execute_commands(commands)
-            operating_status = constants.ONLINE
-            # The issue is that since OVN doesn't support any HMs,
-            # we ideally should never put the status as 'ONLINE'.
-            if not loadbalancer.get(constants.ADMIN_STATE_UP, True):
-                operating_status = constants.OFFLINE
-            status = {
-                constants.LOADBALANCERS: [
-                    {constants.ID: loadbalancer[constants.ID],
-                     constants.PROVISIONING_STATUS: constants.ACTIVE,
-                     constants.OPERATING_STATUS: operating_status}]}
-        # If the connection with the OVN NB db server is broken, then
-        # ovsdbapp will throw either TimeoutException or RuntimeError.
-        # Maybe we can catch these specific exceptions.
-        # It is important to report the status to octavia. We can report
-        # immediately or reschedule the lb_create request later.
-        # For now let's report immediately.
-        except Exception:
-            LOG.exception(ovn_const.EXCEPTION_MSG, "creation of loadbalancer")
-            # Any Exception sets the status to ERROR.
-            if isinstance(port, dict):
-                self.delete_vip_port(port.get('id'))
-                LOG.warning("Deleting the VIP port %s since LB went into "
-                            "ERROR state", str(port.get('id')))
-            status = {
-                constants.LOADBALANCERS: [
-                    {constants.ID: loadbalancer[constants.ID],
-                     constants.PROVISIONING_STATUS: constants.ERROR,
-                     constants.OPERATING_STATUS: constants.ERROR}]}
-        return status
-
-    def lb_delete(self, loadbalancer):
-        port_id = None
-        lbalancer_status = {
-            constants.ID: loadbalancer[constants.ID],
-            constants.PROVISIONING_STATUS: constants.DELETED,
-            constants.OPERATING_STATUS: constants.OFFLINE}
-        status = {
-            constants.LOADBALANCERS: [lbalancer_status],
-            constants.LISTENERS: [],
-            constants.POOLS: [],
-            constants.MEMBERS: []}
-
-        ovn_lbs = None
-        try:
-            ovn_lbs = self._find_ovn_lbs(loadbalancer[constants.ID])
-        except idlutils.RowNotFound:
-            LOG.warning("Loadbalancer %s not found in OVN Northbound DB. "
-                        "Setting the Loadbalancer status to DELETED "
-                        "in Octavia", str(loadbalancer[constants.ID]))
-            return status
-
-        try:
-            port_id = ovn_lbs[0].external_ids[
-                ovn_const.LB_EXT_IDS_VIP_PORT_ID_KEY]
-            for ovn_lb in ovn_lbs:
-                status = self._lb_delete(loadbalancer, ovn_lb, status)
-            # Clear the status dict of any key having [] value.
-            # Python 3.6 doesn't allow deleting an element in a
-            # dict while iterating over it. So first get a list of keys.
-            # https://cito.github.io/blog/never-iterate-a-changing-dict/
-            status = {key: value for key, value in status.items() if value}
-        except Exception:
-            LOG.exception(ovn_const.EXCEPTION_MSG, "deletion of loadbalancer")
-            lbalancer_status[constants.PROVISIONING_STATUS] = constants.ERROR
-            lbalancer_status[constants.OPERATING_STATUS] = constants.ERROR
-        # Delete VIP port from neutron.
-        self.delete_vip_port(port_id)
-        return status
-
-    def _lb_delete(self, loadbalancer, ovn_lb, status):
-        commands = []
-        if loadbalancer['cascade']:
-            # Delete all pools
-            for key, value in ovn_lb.external_ids.items():
-                if key.startswith(ovn_const.LB_EXT_IDS_POOL_PREFIX):
-                    pool_id = key.split('_')[1]
-                    # Delete all members in the pool
-                    if value and len(value.split(',')) > 0:
-                        for mem_info in value.split(','):
-                            status[constants.MEMBERS].append({
-                                constants.ID: mem_info.split('_')[1],
-                                constants.PROVISIONING_STATUS:
-                                    constants.DELETED})
-                    status[constants.POOLS].append(
-                        {constants.ID: pool_id,
-                         constants.PROVISIONING_STATUS: constants.DELETED})
-
-                if key.startswith(ovn_const.LB_EXT_IDS_LISTENER_PREFIX):
-                    status[constants.LISTENERS].append({
-                        constants.ID: key.split('_')[1],
-                        constants.PROVISIONING_STATUS: constants.DELETED,
-                        constants.OPERATING_STATUS: constants.OFFLINE})
-        ls_refs = ovn_lb.external_ids.get(ovn_const.LB_EXT_IDS_LS_REFS_KEY, {})
-        if ls_refs:
-            try:
-                ls_refs = jsonutils.loads(ls_refs)
-            except ValueError:
-                ls_refs = {}
-        for ls_name in ls_refs.keys():
-            try:
-                ovn_ls = self.ovn_nbdb_api.ls_get(ls_name).execute(
-                    check_error=True)
-                commands.append(
-                    self.ovn_nbdb_api.ls_lb_del(ovn_ls.uuid, ovn_lb.uuid))
-            except idlutils.RowNotFound:
-                LOG.warning("LogicalSwitch %s could not be found. Cannot "
Cannot " - "delete Load Balancer from it", ls_name) - # Delete LB from all Networks the LB is indirectly associated - for ls in self._find_lb_in_table(ovn_lb, 'Logical_Switch'): - commands.append( - self.ovn_nbdb_api.ls_lb_del(ls.uuid, ovn_lb.uuid, - if_exists=True)) - lr_ref = ovn_lb.external_ids.get(ovn_const.LB_EXT_IDS_LR_REF_KEY, {}) - if lr_ref: - for lr in self.ovn_nbdb_api.tables[ - 'Logical_Router'].rows.values(): - if lr.name == lr_ref: - commands.append(self.ovn_nbdb_api.lr_lb_del( - lr.uuid, ovn_lb.uuid)) - break - # Delete LB from all Routers the LB is indirectly associated - for lr in self._find_lb_in_table(ovn_lb, 'Logical_Router'): - commands.append( - self.ovn_nbdb_api.lr_lb_del(lr.uuid, ovn_lb.uuid, - if_exists=True)) - commands.append(self.ovn_nbdb_api.lb_del(ovn_lb.uuid)) - self._execute_commands(commands) - return status - - def lb_failover(self, loadbalancer): - status = { - constants.LOADBALANCERS: [ - {constants.ID: loadbalancer[constants.ID], - constants.PROVISIONING_STATUS: constants.ACTIVE}]} - return status - - def lb_update(self, loadbalancer): - lb_status = {constants.ID: loadbalancer[constants.ID], - constants.PROVISIONING_STATUS: constants.ACTIVE} - status = {constants.LOADBALANCERS: [lb_status]} - if constants.ADMIN_STATE_UP not in loadbalancer: - return status - lb_enabled = loadbalancer[constants.ADMIN_STATE_UP] - - try: - ovn_lbs = self._find_ovn_lbs(loadbalancer[constants.ID]) - # It should be unique for all the LBS for all protocols, - # so we could just easly loop over all defined for given - # Octavia LB. - for ovn_lb in ovn_lbs: - if str(ovn_lb.external_ids['enabled']) != str(lb_enabled): - commands = [] - enable_info = {'enabled': str(lb_enabled)} - ovn_lb.external_ids['enabled'] = str(lb_enabled) - commands.append( - self.ovn_nbdb_api.db_set('Load_Balancer', ovn_lb.uuid, - ('external_ids', enable_info)) - ) - commands.extend( - self._refresh_lb_vips(ovn_lb.uuid, - ovn_lb.external_ids)) - self._execute_commands(commands) - if lb_enabled: - operating_status = constants.ONLINE - else: - operating_status = constants.OFFLINE - lb_status[constants.OPERATING_STATUS] = operating_status - except Exception: - LOG.exception(ovn_const.EXCEPTION_MSG, "update of loadbalancer") - lb_status[constants.PROVISIONING_STATUS] = constants.ERROR - lb_status[constants.OPERATING_STATUS] = constants.ERROR - return status - - def listener_create(self, listener): - ovn_lb = self._get_or_create_ovn_lb( - listener[constants.LOADBALANCER_ID], - listener[constants.PROTOCOL], - listener[constants.ADMIN_STATE_UP]) - - external_ids = copy.deepcopy(ovn_lb.external_ids) - listener_key = self._get_listener_key( - listener[constants.ID], - is_enabled=listener[constants.ADMIN_STATE_UP]) - - if listener.get(constants.DEFAULT_POOL_ID): - pool_key = self._get_pool_key(listener[constants.DEFAULT_POOL_ID]) - else: - pool_key = '' - external_ids[listener_key] = self._make_listener_key_value( - listener[constants.PROTOCOL_PORT], pool_key) - - listener_info = {listener_key: external_ids[listener_key]} - try: - commands = [] - commands.append( - self.ovn_nbdb_api.db_set('Load_Balancer', ovn_lb.uuid, - ('external_ids', listener_info))) - if not self._is_listener_in_lb(ovn_lb): - commands.append( - self.ovn_nbdb_api.db_set( - 'Load_Balancer', ovn_lb.uuid, - ('protocol', - str(listener[constants.PROTOCOL]).lower()))) - commands.extend(self._refresh_lb_vips(ovn_lb.uuid, external_ids)) - self._execute_commands(commands) - - operating_status = constants.ONLINE - if not 
-            if not listener.get(constants.ADMIN_STATE_UP, True):
-                operating_status = constants.OFFLINE
-            status = {
-                constants.LISTENERS: [
-                    {constants.ID: listener[constants.ID],
-                     constants.PROVISIONING_STATUS: constants.ACTIVE,
-                     constants.OPERATING_STATUS: operating_status}],
-                constants.LOADBALANCERS: [
-                    {constants.ID: listener[constants.LOADBALANCER_ID],
-                     constants.PROVISIONING_STATUS: constants.ACTIVE}]}
-        except Exception:
-            LOG.exception(ovn_const.EXCEPTION_MSG, "creation of listener")
-            status = {
-                constants.LISTENERS: [
-                    {constants.ID: listener[constants.ID],
-                     constants.PROVISIONING_STATUS: constants.ERROR,
-                     constants.OPERATING_STATUS: constants.ERROR}],
-                constants.LOADBALANCERS: [
-                    {constants.ID: listener[constants.LOADBALANCER_ID],
-                     constants.PROVISIONING_STATUS: constants.ACTIVE}]}
-        return status
-
-    def listener_delete(self, listener):
-        status = {
-            constants.LISTENERS: [
-                {constants.ID: listener[constants.ID],
-                 constants.PROVISIONING_STATUS: constants.DELETED,
-                 constants.OPERATING_STATUS: constants.OFFLINE}],
-            constants.LOADBALANCERS: [
-                {constants.ID: listener[constants.LOADBALANCER_ID],
-                 constants.PROVISIONING_STATUS: constants.ACTIVE}]}
-        try:
-            ovn_lb = self._find_ovn_lbs(
-                listener[constants.LOADBALANCER_ID],
-                protocol=listener[constants.PROTOCOL])
-        except idlutils.RowNotFound:
-            # Listener already deleted.
-            return status
-
-        external_ids = copy.deepcopy(ovn_lb.external_ids)
-        listener_key = self._get_listener_key(listener[constants.ID])
-        if listener_key in external_ids:
-            try:
-                commands = []
-                commands.append(
-                    self.ovn_nbdb_api.db_remove(
-                        'Load_Balancer', ovn_lb.uuid, 'external_ids',
-                        (listener_key)))
-                # Drop current listener from LB.
-                del external_ids[listener_key]
-
-                # Set LB protocol to undefined only if there are no more
-                # listeners and pools defined in the LB.
-                cmds, lb_to_delete = self._clean_lb_if_empty(
-                    ovn_lb, listener[constants.LOADBALANCER_ID], external_ids)
-                commands.extend(cmds)
-                # Do not refresh vips if OVN LB for given protocol
-                # has pending delete operation.
-                if not lb_to_delete:
-                    commands.extend(
-                        self._refresh_lb_vips(ovn_lb.uuid, external_ids))
-                self._execute_commands(commands)
-            except Exception:
-                LOG.exception(ovn_const.EXCEPTION_MSG, "deletion of listener")
-                status = {
-                    constants.LISTENERS: [
-                        {constants.ID: listener[constants.ID],
-                         constants.PROVISIONING_STATUS: constants.ERROR,
-                         constants.OPERATING_STATUS: constants.ERROR}],
-                    constants.LOADBALANCERS: [
-                        {constants.ID: listener[constants.LOADBALANCER_ID],
-                         constants.PROVISIONING_STATUS: constants.ACTIVE}]}
-        return status
-
-    def listener_update(self, listener):
-        # NOTE(mjozefcz): Based on
-        # https://docs.openstack.org/api-ref/load-balancer/v2/?expanded=update-a-listener-detail
-        # there is no possibility to update listener protocol or port.
-        listener_status = {constants.ID: listener[constants.ID],
-                           constants.PROVISIONING_STATUS: constants.ACTIVE}
-        lbalancer_status = {
-            constants.ID: listener[constants.LOADBALANCER_ID],
-            constants.PROVISIONING_STATUS: constants.ACTIVE}
-        pool_status = []
-        status = {
-            constants.LISTENERS: [listener_status],
-            constants.LOADBALANCERS: [lbalancer_status],
-            constants.POOLS: pool_status}
-
-        try:
-            ovn_lb = self._find_ovn_lbs(
-                listener[constants.LOADBALANCER_ID],
-                protocol=listener[constants.PROTOCOL])
-        except idlutils.RowNotFound:
-            LOG.exception(ovn_const.EXCEPTION_MSG, "update of listener")
-            # LB row not found during update of a listener. That is a problem.
- listener_status[constants.PROVISIONING_STATUS] = constants.ERROR - lbalancer_status[constants.PROVISIONING_STATUS] = constants.ERROR - return status - - l_key_when_enabled = self._get_listener_key(listener[constants.ID]) - l_key_when_disabled = self._get_listener_key( - listener[constants.ID], is_enabled=False) - - external_ids = copy.deepcopy(ovn_lb.external_ids) - if constants.ADMIN_STATE_UP not in listener and ( - constants.DEFAULT_POOL_ID not in listener): - return status - - l_key_to_add = {} - if l_key_when_enabled in external_ids: - present_l_key = l_key_when_enabled - elif l_key_when_disabled in external_ids: - present_l_key = l_key_when_disabled - else: - # Something is terribly wrong. This cannot happen. - return status - - try: - commands = [] - new_l_key = None - l_key_to_remove = None - if constants.ADMIN_STATE_UP in listener: - if listener[constants.ADMIN_STATE_UP]: - # We need to enable the listener - new_l_key = l_key_when_enabled - listener_status[constants.OPERATING_STATUS] = ( - constants.ONLINE) - else: - # We need to disable the listener - new_l_key = l_key_when_disabled - listener_status[constants.OPERATING_STATUS] = ( - constants.OFFLINE) - - if present_l_key != new_l_key: - external_ids[new_l_key] = external_ids[present_l_key] - l_key_to_add[new_l_key] = external_ids[present_l_key] - del external_ids[present_l_key] - l_key_to_remove = present_l_key - - if l_key_to_remove: - commands.append( - self.ovn_nbdb_api.db_remove( - 'Load_Balancer', ovn_lb.uuid, 'external_ids', - (l_key_to_remove))) - else: - new_l_key = present_l_key - - if constants.DEFAULT_POOL_ID in listener: - pool_key = self._get_pool_key( - listener[constants.DEFAULT_POOL_ID]) - l_key_value = self._make_listener_key_value( - listener[constants.PROTOCOL_PORT], pool_key) - l_key_to_add[new_l_key] = l_key_value - external_ids[new_l_key] = l_key_value - pool_status.append( - {constants.ID: listener[constants.DEFAULT_POOL_ID], - constants.PROVISIONING_STATUS: constants.ACTIVE}) - - if l_key_to_add: - commands.append( - self.ovn_nbdb_api.db_set( - 'Load_Balancer', ovn_lb.uuid, - ('external_ids', l_key_to_add))) - - commands.extend( - self._refresh_lb_vips(ovn_lb.uuid, external_ids)) - self._execute_commands(commands) - except Exception: - LOG.exception(ovn_const.EXCEPTION_MSG, "update of listener") - status = { - constants.LISTENERS: [ - {constants.ID: listener[constants.ID], - constants.PROVISIONING_STATUS: constants.ERROR}], - constants.LOADBALANCERS: [ - {constants.ID: listener[constants.LOADBALANCER_ID], - constants.PROVISIONING_STATUS: constants.ACTIVE}]} - return status - - def pool_create(self, pool): - ovn_lb = self._get_or_create_ovn_lb( - pool[constants.LOADBALANCER_ID], - pool[constants.PROTOCOL], - pool[constants.ADMIN_STATE_UP]) - external_ids = copy.deepcopy(ovn_lb.external_ids) - pool_key = self._get_pool_key( - pool[constants.ID], is_enabled=pool[constants.ADMIN_STATE_UP]) - external_ids[pool_key] = '' - if pool[constants.LISTENER_ID]: - listener_key = self._get_listener_key(pool[constants.LISTENER_ID]) - if listener_key in ovn_lb.external_ids: - external_ids[listener_key] = str( - external_ids[listener_key]) + str(pool_key) - try: - self.ovn_nbdb_api.db_set( - 'Load_Balancer', ovn_lb.uuid, - ('external_ids', external_ids)).execute(check_error=True) - # Pool status will be set to Online after a member is added to it. 
- operating_status = constants.OFFLINE - - status = { - constants.POOLS: [ - {constants.ID: pool[constants.ID], - constants.PROVISIONING_STATUS: constants.ACTIVE, - constants.OPERATING_STATUS: operating_status}], - constants.LOADBALANCERS: [ - {constants.ID: pool[constants.LOADBALANCER_ID], - constants.PROVISIONING_STATUS: constants.ACTIVE}]} - if pool[constants.LISTENER_ID]: - listener_status = [ - {constants.ID: pool[constants.LISTENER_ID], - constants.PROVISIONING_STATUS: constants.ACTIVE}] - status[constants.LISTENERS] = listener_status - except Exception: - LOG.exception(ovn_const.EXCEPTION_MSG, "creation of pool") - status = { - constants.POOLS: [ - {constants.ID: pool[constants.ID], - constants.PROVISIONING_STATUS: constants.ERROR}], - constants.LOADBALANCERS: [ - {constants.ID: pool[constants.LOADBALANCER_ID], - constants.PROVISIONING_STATUS: constants.ACTIVE}]} - if pool[constants.LISTENER_ID]: - listener_status = [ - {constants.ID: pool[constants.LISTENER_ID], - constants.PROVISIONING_STATUS: constants.ACTIVE}] - status[constants.LISTENERS] = listener_status - - return status - - def pool_delete(self, pool): - status = { - constants.POOLS: [ - {constants.ID: pool[constants.ID], - constants.PROVISIONING_STATUS: constants.DELETED}], - constants.LOADBALANCERS: [ - {constants.ID: pool[constants.LOADBALANCER_ID], - constants.PROVISIONING_STATUS: constants.ACTIVE}]} - try: - ovn_lb = self._find_ovn_lbs( - pool[constants.LOADBALANCER_ID], - pool[constants.PROTOCOL]) - except idlutils.RowNotFound: - # LB row not found that means pool is deleted. - return status - - pool_key = self._get_pool_key(pool[constants.ID]) - commands = [] - external_ids = copy.deepcopy(ovn_lb.external_ids) - try: - if pool_key in ovn_lb.external_ids: - commands.append( - self.ovn_nbdb_api.db_remove('Load_Balancer', ovn_lb.uuid, - 'external_ids', (pool_key))) - del external_ids[pool_key] - commands.extend( - self._refresh_lb_vips(ovn_lb.uuid, external_ids)) - # Remove Pool from Listener if it is associated - listener_id = None - for key, value in ovn_lb.external_ids.items(): - if (key.startswith(ovn_const.LB_EXT_IDS_LISTENER_PREFIX) and - pool_key in value): - external_ids[key] = value.split(':')[0] + ':' - commands.append( - self.ovn_nbdb_api.db_set( - 'Load_Balancer', ovn_lb.uuid, - ('external_ids', external_ids))) - listener_id = key.split('_')[1] - - pool_key_when_disabled = self._get_pool_key(pool[constants.ID], - is_enabled=False) - if pool_key_when_disabled in ovn_lb.external_ids: - commands.append( - self.ovn_nbdb_api.db_remove( - 'Load_Balancer', ovn_lb.uuid, - 'external_ids', (pool_key_when_disabled))) - - commands.extend( - self._clean_lb_if_empty( - ovn_lb, pool[constants.LOADBALANCER_ID], external_ids)[0]) - self._execute_commands(commands) - - if listener_id: - status[constants.LISTENERS] = [ - {constants.ID: listener_id, - constants.PROVISIONING_STATUS: constants.ACTIVE}] - except Exception: - LOG.exception(ovn_const.EXCEPTION_MSG, "deletion of pool") - status = { - constants.POOLS: [ - {constants.ID: pool[constants.ID], - constants.PROVISIONING_STATUS: constants.ERROR}], - constants.LOADBALANCERS: [ - {constants.ID: pool[constants.LOADBALANCER_ID], - constants.PROVISIONING_STATUS: constants.ACTIVE}]} - - return status - - def pool_update(self, pool): - pool_status = {constants.ID: pool[constants.ID], - constants.PROVISIONING_STATUS: constants.ACTIVE} - lbalancer_status = {constants.ID: pool[constants.LOADBALANCER_ID], - constants.PROVISIONING_STATUS: constants.ACTIVE} - status = { - 
-            constants.POOLS: [pool_status],
-            constants.LOADBALANCERS: [lbalancer_status]}
-        if constants.ADMIN_STATE_UP not in pool:
-            return status
-        try:
-            ovn_lb = self._find_ovn_lbs(
-                pool[constants.LOADBALANCER_ID],
-                protocol=pool[constants.PROTOCOL])
-        except idlutils.RowNotFound:
-            LOG.exception(ovn_const.EXCEPTION_MSG, "update of pool")
-            # LB row not found during update of a pool. That is a problem.
-            pool_status[constants.PROVISIONING_STATUS] = constants.ERROR
-            lbalancer_status[constants.PROVISIONING_STATUS] = constants.ERROR
-            return status
-
-        pool_key = self._get_pool_key(pool[constants.ID])
-        p_key_when_disabled = self._get_pool_key(pool[constants.ID],
-                                                 is_enabled=False)
-        external_ids = copy.deepcopy(ovn_lb.external_ids)
-        p_key_to_remove = None
-        p_key_to_add = {}
-
-        try:
-            if pool[constants.ADMIN_STATE_UP]:
-                if p_key_when_disabled in external_ids:
-                    p_key_to_add[pool_key] = external_ids[p_key_when_disabled]
-                    external_ids[pool_key] = external_ids[p_key_when_disabled]
-                    del external_ids[p_key_when_disabled]
-                    p_key_to_remove = p_key_when_disabled
-            else:
-                if pool_key in external_ids:
-                    p_key_to_add[p_key_when_disabled] = external_ids[pool_key]
-                    external_ids[p_key_when_disabled] = external_ids[pool_key]
-                    del external_ids[pool_key]
-                    p_key_to_remove = pool_key
-
-            if p_key_to_remove:
-                commands = []
-                commands.append(
-                    self.ovn_nbdb_api.db_remove(
-                        'Load_Balancer', ovn_lb.uuid, 'external_ids',
-                        (p_key_to_remove)))
-
-                commands.append(
-                    self.ovn_nbdb_api.db_set(
-                        'Load_Balancer', ovn_lb.uuid,
-                        ('external_ids', p_key_to_add)))
-
-                commands.extend(
-                    self._refresh_lb_vips(ovn_lb.uuid, external_ids))
-                self._execute_commands(commands)
-            if pool[constants.ADMIN_STATE_UP]:
-                operating_status = constants.ONLINE
-            else:
-                operating_status = constants.OFFLINE
-            pool_status[constants.OPERATING_STATUS] = operating_status
-
-            pool_listeners = self._get_pool_listeners(ovn_lb, pool_key)
-            listener_status = []
-            for listener in pool_listeners:
-                listener_status.append(
-                    {constants.ID: listener,
-                     constants.PROVISIONING_STATUS: constants.ACTIVE})
-            status[constants.LISTENERS] = listener_status
-        except Exception:
-            LOG.exception(ovn_const.EXCEPTION_MSG, "update of pool")
-            status = {
-                constants.POOLS: [
-                    {constants.ID: pool[constants.ID],
-                     constants.PROVISIONING_STATUS: constants.ERROR}],
-                constants.LOADBALANCERS: [
-                    {constants.ID: pool[constants.LOADBALANCER_ID],
-                     constants.PROVISIONING_STATUS: constants.ACTIVE}]}
-
-        return status
-
-    def _add_member(self, member, ovn_lb, pool_key):
-        external_ids = copy.deepcopy(ovn_lb.external_ids)
-        existing_members = external_ids[pool_key]
-        if existing_members:
-            existing_members = existing_members.split(",")
-        member_info = self._get_member_key(member)
-        # TODO(mjozefcz): Remove this workaround in W release.
- member_info_old = self._get_member_key(member, old_convention=True) - member_found = [x for x in existing_members - if re.match(member_info_old, x)] - if member_found: - # Member already present - return - if existing_members: - existing_members.append(member_info) - pool_data = {pool_key: ",".join(existing_members)} - else: - pool_data = {pool_key: member_info} - - commands = [] - commands.append( - self.ovn_nbdb_api.db_set('Load_Balancer', ovn_lb.uuid, - ('external_ids', pool_data))) - - external_ids[pool_key] = pool_data[pool_key] - commands.extend(self._refresh_lb_vips(ovn_lb.uuid, external_ids)) - commands.extend( - self._update_lb_to_ls_association( - ovn_lb, subnet_id=member[constants.SUBNET_ID], associate=True)) - self._execute_commands(commands) - - def member_create(self, member): - try: - pool_key, ovn_lb = self._find_ovn_lb_by_pool_id( - member[constants.POOL_ID]) - self._add_member(member, ovn_lb, pool_key) - pool = {constants.ID: member[constants.POOL_ID], - constants.PROVISIONING_STATUS: constants.ACTIVE, - constants.OPERATING_STATUS: constants.ONLINE} - status = { - constants.POOLS: [pool], - constants.MEMBERS: [ - {constants.ID: member[constants.ID], - constants.PROVISIONING_STATUS: constants.ACTIVE}], - constants.LOADBALANCERS: [ - {constants.ID: ovn_lb.name, - constants.PROVISIONING_STATUS: constants.ACTIVE}]} - pool_listeners = self._get_pool_listeners(ovn_lb, pool_key) - listener_status = [] - for listener in pool_listeners: - listener_status.append( - {constants.ID: listener, - constants.PROVISIONING_STATUS: constants.ACTIVE}) - status[constants.LISTENERS] = listener_status - except Exception: - LOG.exception(ovn_const.EXCEPTION_MSG, "creation of member") - status = { - constants.POOLS: [ - {constants.ID: member[constants.POOL_ID], - constants.PROVISIONING_STATUS: constants.ERROR}], - constants.MEMBERS: [ - {constants.ID: member[constants.ID], - constants.PROVISIONING_STATUS: constants.ACTIVE}], - constants.LOADBALANCERS: [ - {constants.ID: ovn_lb.name, - constants.PROVISIONING_STATUS: constants.ACTIVE}]} - - return status - - def _remove_member(self, member, ovn_lb, pool_key): - external_ids = copy.deepcopy(ovn_lb.external_ids) - existing_members = external_ids[pool_key].split(",") - # TODO(mjozefcz): Delete this workaround in W release. 
-        # To support backward compatibility, a member
-        # could be defined as `member`_`id`_`ip`:`port`_`subnet_id`
-        # or as `member`_`id`_`ip`:`port`
-        member_info_old = self._get_member_key(member, old_convention=True)
-
-        member_found = [x for x in existing_members
-                        if re.match(member_info_old, x)]
-        if member_found:
-            commands = []
-            existing_members.remove(member_found[0])
-
-            if not existing_members:
-                pool_status = constants.OFFLINE
-            else:
-                pool_status = constants.ONLINE
-            pool_data = {pool_key: ",".join(existing_members)}
-            commands.append(
-                self.ovn_nbdb_api.db_set('Load_Balancer', ovn_lb.uuid,
-                                         ('external_ids', pool_data)))
-            external_ids[pool_key] = ",".join(existing_members)
-            commands.extend(
-                self._refresh_lb_vips(ovn_lb.uuid, external_ids))
-            commands.extend(
-                self._update_lb_to_ls_association(
-                    ovn_lb, subnet_id=member.get(constants.SUBNET_ID),
-                    associate=False))
-            self._execute_commands(commands)
-            return pool_status
-        else:
-            msg = "Member %s not found in the pool" % member[constants.ID]
-            raise driver_exceptions.DriverError(
-                user_fault_string=msg,
-                operator_fault_string=msg)
-
-    def member_delete(self, member):
-        try:
-            pool_key, ovn_lb = self._find_ovn_lb_by_pool_id(
-                member[constants.POOL_ID])
-            pool_status = self._remove_member(member, ovn_lb, pool_key)
-            pool = {constants.ID: member[constants.POOL_ID],
-                    constants.PROVISIONING_STATUS: constants.ACTIVE,
-                    constants.OPERATING_STATUS: pool_status}
-            status = {
-                constants.POOLS: [pool],
-                constants.MEMBERS: [
-                    {constants.ID: member[constants.ID],
-                     constants.PROVISIONING_STATUS: constants.DELETED}],
-                constants.LOADBALANCERS: [
-                    {constants.ID: ovn_lb.name,
-                     constants.PROVISIONING_STATUS: constants.ACTIVE}]}
-            pool_listeners = self._get_pool_listeners(ovn_lb, pool_key)
-            listener_status = []
-            for listener in pool_listeners:
-                listener_status.append(
-                    {constants.ID: listener,
-                     constants.PROVISIONING_STATUS: constants.ACTIVE})
-            status[constants.LISTENERS] = listener_status
-        except Exception:
-            LOG.exception(ovn_const.EXCEPTION_MSG, "deletion of member")
-            status = {
-                constants.POOLS: [
-                    {constants.ID: member[constants.POOL_ID],
-                     constants.PROVISIONING_STATUS: constants.ACTIVE}],
-                constants.MEMBERS: [
-                    {constants.ID: member[constants.ID],
-                     constants.PROVISIONING_STATUS: constants.ERROR}],
-                constants.LOADBALANCERS: [
-                    {constants.ID: ovn_lb.name,
-                     constants.PROVISIONING_STATUS: constants.ACTIVE}]}
-
-        return status
-
-    def _update_member(self, member, ovn_lb, pool_key):
-        commands = []
-        external_ids = copy.deepcopy(ovn_lb.external_ids)
-        existing_members = external_ids[pool_key].split(",")
-        member_info = self._get_member_key(member)
-        for mem in existing_members:
-            if (member_info.split('_')[1] == mem.split('_')[1] and
-                    mem != member_info):
-                existing_members.remove(mem)
-                existing_members.append(member_info)
-        pool_data = {pool_key: ",".join(existing_members)}
-        commands.append(
-            self.ovn_nbdb_api.db_set('Load_Balancer', ovn_lb.uuid,
-                                     ('external_ids', pool_data)))
-        external_ids[pool_key] = ",".join(existing_members)
-        commands.extend(
-            self._refresh_lb_vips(ovn_lb.uuid, external_ids))
-        self._execute_commands(commands)
-
-    def member_update(self, member):
-        try:
-            pool_key, ovn_lb = self._find_ovn_lb_by_pool_id(
-                member[constants.POOL_ID])
-            member_status = {constants.ID: member[constants.ID],
-                             constants.PROVISIONING_STATUS: constants.ACTIVE}
-            status = {
-                constants.POOLS: [
-                    {constants.ID: member[constants.POOL_ID],
-                     constants.PROVISIONING_STATUS: constants.ACTIVE}],
-                constants.MEMBERS: [member_status],
-                constants.LOADBALANCERS: [
-                    {constants.ID: ovn_lb.name,
-                     constants.PROVISIONING_STATUS: constants.ACTIVE}]}
-            self._update_member(member, ovn_lb, pool_key)
-            if constants.ADMIN_STATE_UP in member:
-                if member[constants.ADMIN_STATE_UP]:
-                    member_status[constants.OPERATING_STATUS] = (
-                        constants.ONLINE)
-                else:
-                    member_status[constants.OPERATING_STATUS] = (
-                        constants.OFFLINE)
-
-            pool_listeners = self._get_pool_listeners(ovn_lb, pool_key)
-            listener_status = []
-            for listener in pool_listeners:
-                listener_status.append(
-                    {constants.ID: listener,
-                     constants.PROVISIONING_STATUS: constants.ACTIVE})
-            status[constants.LISTENERS] = listener_status
-        except Exception:
-            LOG.exception(ovn_const.EXCEPTION_MSG, "update of member")
-            status = {
-                constants.POOLS: [
-                    {constants.ID: member[constants.POOL_ID],
-                     constants.PROVISIONING_STATUS: constants.ACTIVE}],
-                constants.MEMBERS: [
-                    {constants.ID: member[constants.ID],
-                     constants.PROVISIONING_STATUS: constants.ERROR}],
-                constants.LOADBALANCERS: [
-                    {constants.ID: ovn_lb.name,
-                     constants.PROVISIONING_STATUS: constants.ACTIVE}]}
-        return status
-
-    def _get_existing_pool_members(self, pool_id):
-        pool_key, ovn_lb = self._find_ovn_lb_by_pool_id(pool_id)
-        if not ovn_lb:
-            msg = _("Loadbalancer with pool %s does not exist") % pool_key
-            raise driver_exceptions.DriverError(msg)
-        external_ids = dict(ovn_lb.external_ids)
-        return external_ids[pool_key]
-
-    def get_pool_member_id(self, pool_id, mem_addr_port=None):
-        '''Gets Member information
-
-        :param pool_id: ID of the Pool whose member information is required.
-        :param mem_addr_port: Combination of Member Address+Port. Default=None
-        :returns: UUID -- ID of the Member if member exists in pool.
-        :returns: None -- if no member exists in the pool
-        :raises: Exception if Loadbalancer is not found for a Pool ID
-        '''
-        existing_members = self._get_existing_pool_members(pool_id)
-        # Members are saved in OVN in the form of
-        # member1_UUID_IP:Port, member2_UUID_IP:Port
-        # Match the IP:Port for all members with the mem_addr_port
-        # information and return the UUID.
-        for meminf in existing_members.split(','):
-            if mem_addr_port == meminf.split('_')[2]:
-                return meminf.split('_')[1]
-
-    def create_vip_port(self, project_id, lb_id, vip_d):
-        port = {'port': {'name': ovn_const.LB_VIP_PORT_PREFIX + str(lb_id),
-                         'network_id': vip_d[constants.VIP_NETWORK_ID],
-                         'fixed_ips': [{'subnet_id': vip_d['vip_subnet_id']}],
-                         'admin_state_up': True,
-                         'project_id': project_id}}
-        try:
-            port['port']['fixed_ips'][0]['ip_address'] = (
-                vip_d[constants.VIP_ADDRESS])
-        except KeyError:
-            pass
-        neutron_client = clients.get_neutron_client()
-        try:
-            return neutron_client.create_port(port)
-        except n_exc.IpAddressAlreadyAllocatedClient:
-            # Sometimes the VIP is already created (race-conditions).
-            # Let's fetch it from the Neutron API.
- ports = neutron_client.list_ports( - network_id=vip_d[constants.VIP_NETWORK_ID], - name='%s%s' % (ovn_const.LB_VIP_PORT_PREFIX, lb_id)) - if not ports['ports']: - LOG.error('Cannot create/get LoadBalancer VIP port with ' - 'fixed IP: %s', vip_d[constants.VIP_ADDRESS]) - status = { - constants.LOADBALANCERS: [{ - constants.ID: lb_id, - constants.PROVISIONING_STATUS: constants.ERROR, - constants.OPERATING_STATUS: constants.ERROR}]} - self._update_status_to_octavia(status) - return - # there should only be one port returned - port = ports['ports'][0] - LOG.debug('VIP Port already exists, uuid: %s', port['id']) - return {'port': port} - - def delete_vip_port(self, port_id): - neutron_client = clients.get_neutron_client() - try: - neutron_client.delete_port(port_id) - except n_exc.PortNotFoundClient: - LOG.warning("Port %s could not be found. Please " - "check Neutron logs. Perhaps port " - "was already deleted.", port_id) - - def handle_vip_fip(self, fip_info): - ovn_lb = fip_info['ovn_lb'] - external_ids = copy.deepcopy(ovn_lb.external_ids) - commands = [] - - if fip_info['action'] == ovn_const.REQ_INFO_ACTION_ASSOCIATE: - external_ids[ovn_const.LB_EXT_IDS_VIP_FIP_KEY] = ( - fip_info['vip_fip']) - vip_fip_info = { - ovn_const.LB_EXT_IDS_VIP_FIP_KEY: fip_info['vip_fip']} - commands.append( - self.ovn_nbdb_api.db_set('Load_Balancer', ovn_lb.uuid, - ('external_ids', vip_fip_info))) - else: - external_ids.pop(ovn_const.LB_EXT_IDS_VIP_FIP_KEY) - commands.append( - self.ovn_nbdb_api.db_remove( - 'Load_Balancer', ovn_lb.uuid, 'external_ids', - (ovn_const.LB_EXT_IDS_VIP_FIP_KEY))) - - commands.extend(self._refresh_lb_vips(ovn_lb.uuid, external_ids)) - self._execute_commands(commands) - - def handle_member_dvr(self, info): - pool_key, ovn_lb = self._find_ovn_lb_by_pool_id(info['pool_id']) - if not ovn_lb.external_ids.get(ovn_const.LB_EXT_IDS_VIP_FIP_KEY): - LOG.debug("LB %(lb)s has no FIP on VIP configured. " - "There is no need to centralize member %(member)s " - "traffic.", - {'lb': ovn_lb.uuid, 'member': info['id']}) - return - - # Find out if member has FIP assigned. - neutron_client = clients.get_neutron_client() - try: - subnet = neutron_client.show_subnet(info['subnet_id']) - ls_name = utils.ovn_name(subnet['subnet']['network_id']) - except n_exc.NotFound: - LOG.exception('Subnet %s not found while trying to ' - 'fetch its data.', info['subnet_id']) - return - - try: - ls = self.ovn_nbdb_api.lookup('Logical_Switch', ls_name) - except idlutils.RowNotFound: - LOG.warning("Logical Switch %s not found. " - "Cannot verify member FIP configuration.", - ls_name) - return - - fip = None - f = utils.remove_macs_from_lsp_addresses - for port in ls.ports: - if info['address'] in f(port.addresses): - # We found particular port - fip = self.ovn_nbdb_api.db_find_rows( - 'NAT', ('external_ids', '=', { - ovn_const.OVN_FIP_PORT_EXT_ID_KEY: port.name}) - ).execute(check_error=True) - fip = fip[0] if fip else fip - break - - if not fip: - LOG.debug('Member %s has no FIP assigned. ' - 'There is no need to modify its NAT.', - info['id']) - return - - if info['action'] == ovn_const.REQ_INFO_MEMBER_ADDED: - LOG.info('Member %(member)s is added to Load Balancer %(lb)s ' - 'and both have FIP assigned. Member FIP %(fip)s ' - 'needs to be centralized in those conditions. 
' - 'Deleting external_mac/logical_port from it.', - {'member': info['id'], - 'lb': ovn_lb.uuid, - 'fip': fip.external_ip}) - self.ovn_nbdb_api.db_clear( - 'NAT', fip.uuid, 'external_mac').execute(check_error=True) - self.ovn_nbdb_api.db_clear( - 'NAT', fip.uuid, 'logical_port').execute(check_error=True) - else: - LOG.info('Member %(member)s is deleted from Load Balancer ' - '%(lb)s and both have FIP assigned. Member FIP %(fip)s ' - 'can be decentralized now if environment has DVR ' - 'enabled. Updating FIP object for recomputation.', - {'member': info['id'], - 'lb': ovn_lb.uuid, - 'fip': fip.external_ip}) - # NOTE(mjozefcz): We don't know if this env is DVR or not. - # We should call neutron API to do 'empty' update of the FIP. - # It will bump revision number and do recomputation of the FIP. - try: - fip_info = neutron_client.show_floatingip( - fip.external_ids[ovn_const.OVN_FIP_EXT_ID_KEY]) - empty_update = { - "floatingip": { - 'description': fip_info['floatingip']['description']}} - neutron_client.update_floatingip( - fip.external_ids[ovn_const.OVN_FIP_EXT_ID_KEY], - empty_update) - except n_exc.NotFound: - LOG.warning('Members %(member)s FIP %(fip)s not found in ' - 'Neutron. Cannot update it.', - {'member': info['id'], - 'fip': fip.external_ip}) - - class OvnProviderDriver(driver_base.ProviderDriver): def __init__(self): super(OvnProviderDriver, self).__init__() - self._ovn_helper = OvnProviderHelper() + self._ovn_helper = ovn_helper.OvnProviderHelper() def __del__(self): self._ovn_helper.shutdown() diff --git a/ovn_octavia_provider/helper.py b/ovn_octavia_provider/helper.py new file mode 100644 index 00000000..f3177c8c --- /dev/null +++ b/ovn_octavia_provider/helper.py @@ -0,0 +1,1790 @@ +# Copyright 2020 Red Hat, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import copy +import queue +import re +import threading + +import netaddr +from neutronclient.common import exceptions as n_exc +from octavia_lib.api.drivers import data_models as o_datamodels +from octavia_lib.api.drivers import driver_lib as o_driver_lib +from octavia_lib.api.drivers import exceptions as driver_exceptions +from octavia_lib.common import constants +from oslo_config import cfg +from oslo_log import log as logging +from oslo_serialization import jsonutils +from ovs.stream import Stream +from ovsdbapp.backend.ovs_idl import idlutils +import tenacity + +from ovn_octavia_provider.common import clients +from ovn_octavia_provider.common import config as ovn_conf +# TODO(mjozefcz): Start consuming const and utils +# from neutron-lib once released. 
+from ovn_octavia_provider.common import constants as ovn_const +from ovn_octavia_provider.common import utils +from ovn_octavia_provider.i18n import _ +from ovn_octavia_provider.ovsdb import impl_idl_ovn + +CONF = cfg.CONF # Gets Octavia Conf as it runs under o-api domain +ovn_conf.register_opts() + +LOG = logging.getLogger(__name__) + + +class OvnProviderHelper(object): + + def __init__(self): + self.requests = queue.Queue() + self.helper_thread = threading.Thread(target=self.request_handler) + self.helper_thread.daemon = True + self._octavia_driver_lib = o_driver_lib.DriverLibrary() + self._check_and_set_ssl_files() + self._init_lb_actions() + + # NOTE(mjozefcz): This API is only for handling octavia API requests. + self.ovn_nbdb = impl_idl_ovn.OvnNbIdlForLb() + self.ovn_nbdb_api = self.ovn_nbdb.start() + + self.helper_thread.start() + + def _init_lb_actions(self): + self._lb_request_func_maps = { + ovn_const.REQ_TYPE_LB_CREATE: self.lb_create, + ovn_const.REQ_TYPE_LB_DELETE: self.lb_delete, + ovn_const.REQ_TYPE_LB_UPDATE: self.lb_update, + ovn_const.REQ_TYPE_LB_FAILOVER: self.lb_failover, + ovn_const.REQ_TYPE_LISTENER_CREATE: self.listener_create, + ovn_const.REQ_TYPE_LISTENER_DELETE: self.listener_delete, + ovn_const.REQ_TYPE_LISTENER_UPDATE: self.listener_update, + ovn_const.REQ_TYPE_POOL_CREATE: self.pool_create, + ovn_const.REQ_TYPE_POOL_DELETE: self.pool_delete, + ovn_const.REQ_TYPE_POOL_UPDATE: self.pool_update, + ovn_const.REQ_TYPE_MEMBER_CREATE: self.member_create, + ovn_const.REQ_TYPE_MEMBER_DELETE: self.member_delete, + ovn_const.REQ_TYPE_MEMBER_UPDATE: self.member_update, + ovn_const.REQ_TYPE_LB_CREATE_LRP_ASSOC: self.lb_create_lrp_assoc, + ovn_const.REQ_TYPE_LB_DELETE_LRP_ASSOC: self.lb_delete_lrp_assoc, + ovn_const.REQ_TYPE_HANDLE_VIP_FIP: self.handle_vip_fip, + ovn_const.REQ_TYPE_HANDLE_MEMBER_DVR: self.handle_member_dvr, + } + + @staticmethod + def _is_lb_empty(external_ids): + """Check if there is no pool or listener defined.""" + return not any([k.startswith('listener') or k.startswith('pool') + for k in external_ids]) + + @staticmethod + def _delete_disabled_from_status(status): + d_regex = ':%s$' % ovn_const.DISABLED_RESOURCE_SUFFIX + return { + k: [{c: re.sub(d_regex, '', d) for c, d in i.items()} + for i in v] + for k, v in status.items()} + + def _check_and_set_ssl_files(self): + # TODO(reedip): Make ovsdb_monitor's _check_and_set_ssl_files() public + # This is a copy of ovsdb_monitor._check_and_set_ssl_files + priv_key_file = ovn_conf.get_ovn_nb_private_key() + cert_file = ovn_conf.get_ovn_nb_certificate() + ca_cert_file = ovn_conf.get_ovn_nb_ca_cert() + if priv_key_file: + Stream.ssl_set_private_key_file(priv_key_file) + + if cert_file: + Stream.ssl_set_certificate_file(cert_file) + + if ca_cert_file: + Stream.ssl_set_ca_cert_file(ca_cert_file) + + def shutdown(self): + self.requests.put({'type': ovn_const.REQ_TYPE_EXIT}) + self.helper_thread.join() + self.ovn_nbdb.stop() + + @staticmethod + def _map_val(row, col, key): + # If the row doesnt exist, RowNotFound is raised by the _map_val + # and is expected to be caught by the caller. + try: + return getattr(row, col)[key] + except KeyError: + raise idlutils.RowNotFound(table=row._table.name, + col=col, match=key) + + def _get_nw_router_info_on_interface_event(self, lrp): + """Get the Router and Network information on an interface event + + This function is called when a new interface between a router and + a network is added or deleted. 
+ Input: Logical Router Port row which is coming from + LogicalRouterPortEvent. + Output: A row from router table and network table matching the router + and network for which the event was generated. + Exception: RowNotFound exception can be generated. + """ + router = self.ovn_nbdb_api.lookup( + 'Logical_Router', utils.ovn_name(self._map_val( + lrp, 'external_ids', ovn_const.OVN_ROUTER_NAME_EXT_ID_KEY))) + network = self.ovn_nbdb_api.lookup( + 'Logical_Switch', + self._map_val(lrp, 'external_ids', + ovn_const.OVN_NETWORK_NAME_EXT_ID_KEY)) + return router, network + + def _clean_lb_if_empty(self, ovn_lb, lb_id, external_ids): + commands = [] + lb_to_delete = False + if OvnProviderHelper._is_lb_empty(external_ids): + # Verify if its only OVN LB defined. If so - leave with + # undefined protocol. If there is different for other protocol + # remove this one. + try: + defined_ovn_lbs = self._find_ovn_lbs(lb_id) + except idlutils.RowNotFound: + defined_ovn_lbs = [] + if len(defined_ovn_lbs) == 1: + commands.append( + self.ovn_nbdb_api.db_set( + 'Load_Balancer', ovn_lb.uuid, ('protocol', []))) + elif len(defined_ovn_lbs) > 1: + # Delete the lb. + commands.append(self.ovn_nbdb_api.lb_del(ovn_lb.uuid)) + lb_to_delete = True + return (commands, lb_to_delete) + + def lb_delete_lrp_assoc_handler(self, row): + try: + router, network = self._get_nw_router_info_on_interface_event(row) + except idlutils.RowNotFound: + LOG.debug("Router or network information not found") + return + request_info = {'network': network, + 'router': router} + self.add_request({'type': ovn_const.REQ_TYPE_LB_DELETE_LRP_ASSOC, + 'info': request_info}) + + def lb_delete_lrp_assoc(self, info): + # TODO(reedip): When OVS>=2.12, LB can be deleted without removing + # Network and Router references as pushed in the patch + # https://github.com/openvswitch/ovs/commit + # /612f80fa8ebf88dad2e204364c6c02b451dca36c + commands = [] + network = info['network'] + router = info['router'] + + # Find all loadbalancers which have a reference with the network + nw_lb = self._find_lb_in_ls(network=network) + # Find all loadbalancers which have a reference with the router + r_lb = set(router.load_balancer) - nw_lb + # Delete all LB on N/W from Router + for nlb in nw_lb: + commands.extend(self._update_lb_to_lr_association(nlb, router, + delete=True)) + # Delete all LB on Router from N/W + for rlb in r_lb: + commands.append(self.ovn_nbdb_api.ls_lb_del( + network.uuid, rlb.uuid)) + if commands: + self._execute_commands(commands) + + def lb_create_lrp_assoc_handler(self, row): + try: + router, network = self._get_nw_router_info_on_interface_event(row) + except idlutils.RowNotFound: + LOG.debug("Router or network information not found") + return + request_info = {'network': network, + 'router': router} + self.add_request({'type': ovn_const.REQ_TYPE_LB_CREATE_LRP_ASSOC, + 'info': request_info}) + + def lb_create_lrp_assoc(self, info): + commands = [] + + router_lb = set(info['router'].load_balancer) + network_lb = set(info['network'].load_balancer) + # Add only those lb to routers which are unique to the network + for lb in (network_lb - router_lb): + commands.extend(self._update_lb_to_lr_association( + lb, info['router'])) + + # Add those lb to the network which are unique to the router + for lb in (router_lb - network_lb): + commands.append(self.ovn_nbdb_api.ls_lb_add( + info['network'].uuid, lb.uuid, may_exist=True)) + if commands: + self._execute_commands(commands) + + def vip_port_update_handler(self, vip_lp): + """Handler for VirtualIP port 
updates.
+
+ If a floating ip is associated with a vip port, then networking-ovn sets
+ the fip in the external_ids column of the logical port as:
+ Logical_Switch_Port.external_ids:port_fip = <FIP address>.
+ Then, in the Load_Balancer table for the vip, networking-ovn creates
+ another vip entry for the FIP.
+ If a floating ip is disassociated from the vip, then it deletes
+ the vip entry for the FIP.
+ """
+
+ port_name = vip_lp.external_ids.get(ovn_const.OVN_PORT_NAME_EXT_ID_KEY)
+ lb_id = port_name[len(ovn_const.LB_VIP_PORT_PREFIX):]
+ try:
+ ovn_lbs = self._find_ovn_lbs_with_retry(lb_id)
+ except idlutils.RowNotFound:
+ LOG.debug("Loadbalancer %s not found!", lb_id)
+ return
+
+ # Loop over all defined LBs with the given ID, because it is
+ # possible that there is more than one (one per L4 protocol).
+ for lb in ovn_lbs:
+ fip = vip_lp.external_ids.get(ovn_const.OVN_PORT_FIP_EXT_ID_KEY)
+ lb_vip_fip = lb.external_ids.get(ovn_const.LB_EXT_IDS_VIP_FIP_KEY)
+ request_info = {'ovn_lb': lb,
+ 'vip_fip': fip}
+ if fip and fip != lb_vip_fip:
+ request_info['action'] = ovn_const.REQ_INFO_ACTION_ASSOCIATE
+ elif fip is None and fip != lb_vip_fip:
+ request_info['action'] = ovn_const.REQ_INFO_ACTION_DISASSOCIATE
+ else:
+ continue
+ self.add_request({'type': ovn_const.REQ_TYPE_HANDLE_VIP_FIP,
+ 'info': request_info})
+
+ def _find_lb_in_ls(self, network):
+ """Find LBs associated with a Network using Network information
+
+ This function retrieves those loadbalancers whose ls_ref
+ column in the OVN northbound database's load_balancer table
+ has the network's name. Though different networks can be
+ associated with a loadbalancer, the ls_ref of a loadbalancer
+ points to the network where it was actually created, and this
+ function retrieves all those loadbalancers created on this
+ network.
+ Input: row of type Logical_Switch
+ Output: set of rows of type Load_Balancer or empty set
+ """
+ return {lb for lb in network.load_balancer
+ if network.name in lb.external_ids.get(
+ ovn_const.LB_EXT_IDS_LS_REFS_KEY,
+ [])}
+
+ def _find_lb_in_table(self, lb, table):
+ return [item for item in self.ovn_nbdb_api.tables[table].rows.values()
+ if lb in item.load_balancer]
+
+ def request_handler(self):
+ while True:
+ try:
+ request = self.requests.get()
+ request_type = request['type']
+ if request_type == ovn_const.REQ_TYPE_EXIT:
+ break
+
+ request_handler = self._lb_request_func_maps.get(request_type)
+ if request_handler:
+ status = request_handler(request['info'])
+ if status:
+ self._update_status_to_octavia(status)
+ self.requests.task_done()
+ except Exception:
+ # If any unexpected exception happens we don't want the
+ # notify_loop to exit.
+ LOG.exception('Unexpected exception in request_handler')
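+ # Illustrative only: a queued request is a plain dict dispatched
+ # through _lb_request_func_maps; with hypothetical values, a
+ # member-create request looks roughly like
+ #   {'type': ovn_const.REQ_TYPE_MEMBER_CREATE,
+ #    'info': {'id': '<member-uuid>',
+ #             'address': '10.0.0.10',
+ #             'protocol_port': 80,
+ #             'pool_id': '<pool-uuid>',
+ #             'subnet_id': '<subnet-uuid>'}}
+ # and whatever status dict the handler returns is pushed back to
+ # Octavia via _update_status_to_octavia().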
+
+ def add_request(self, req):
+ self.requests.put(req)
+
+ def _update_status_to_octavia(self, status):
+ try:
+ status = OvnProviderHelper._delete_disabled_from_status(status)
+ LOG.debug('Updating status to octavia: %s', status)
+ self._octavia_driver_lib.update_loadbalancer_status(status)
+ except driver_exceptions.UpdateStatusError as e:
+ msg = ("Error while updating the load balancer "
+ "status: %s") % e.fault_string
+ LOG.error(msg)
+ raise driver_exceptions.UpdateStatusError(msg)
+
+ @tenacity.retry(
+ retry=tenacity.retry_if_exception_type(idlutils.RowNotFound),
+ wait=tenacity.wait_exponential(),
+ stop=tenacity.stop_after_delay(10),
+ reraise=True)
+ def _find_ovn_lbs_with_retry(self, lb_id, protocol=None):
+ return self._find_ovn_lbs(lb_id, protocol=protocol)
+
+ def _find_ovn_lbs(self, lb_id, protocol=None):
+ """Find the Loadbalancers in OVN with the given lb_id as its name
+
+ This function searches for the LoadBalancers whose name matches
+ the given lb_id.
+ @param lb_id: LoadBalancer ID provided by Octavia in its API
+ request. Note that OVN saves the above ID in the 'name' column.
+ @type lb_id: str
+ @param protocol: Loadbalancer protocol.
+ @type protocol: str or None if not defined.
+
+ :returns: LoadBalancer row if protocol specified
+ or list of rows matching the lb_id.
+ :raises: RowNotFound can be generated if the LoadBalancer is not
+ found.
+ """
+ lbs = self.ovn_nbdb_api.db_find_rows(
+ 'Load_Balancer', ('name', '=', lb_id)).execute()
+ if not protocol:
+ if lbs:
+ return lbs
+ raise idlutils.RowNotFound(table='Load_Balancer',
+ col='name', match=lb_id)
+ # If there is only one LB and it has no protocol defined, it is
+ # a 'clean' LB record without any listener.
+ if len(lbs) == 1 and not lbs[0].protocol:
+ return lbs[0]
+ # Search for other lbs.
+ for lb in lbs:
+ if lb.protocol[0].upper() == protocol.upper():
+ return lb
+ raise idlutils.RowNotFound(table='Load_Balancer',
+ col='name', match=lb_id)
+
+ def _get_or_create_ovn_lb(self, lb_id, protocol, admin_state_up):
+ """Find or create an ovn lb with the given protocol
+
+ Find the loadbalancer configured with the given protocol, or
+ create one if not found.
+ """
+ # Make sure that it is lowercase - OVN NBDB stores lowercase
+ # for this field.
+ protocol = protocol.lower()
+ ovn_lbs = self._find_ovn_lbs(lb_id)
+ lbs_with_required_protocol = [
+ ovn_lb for ovn_lb in ovn_lbs
+ if protocol in ovn_lb.protocol]
+ lbs_with_no_protocol = [ovn_lb for ovn_lb in ovn_lbs
+ if not ovn_lb.protocol]
+ if lbs_with_required_protocol:
+ # We found an existing LB with the required
+ # protocol, just return it.
+ return lbs_with_required_protocol[0]
+ elif lbs_with_no_protocol:
+ ovn_lb = lbs_with_no_protocol[0]
+ # Set the required protocol here.
+ self.ovn_nbdb_api.db_set(
+ 'Load_Balancer', ovn_lb.uuid,
+ ('protocol', protocol)).execute(check_error=True)
+ else:
+ # NOTE(mjozefcz): Looks like a loadbalancer with the given
+ # protocol doesn't exist. Try to add it with the required
+ # protocol by copying the existing one's data.
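+ # For orientation (illustrative, hypothetical values): a single
+ # Octavia LB <lb-uuid> serving TCP and UDP listeners ends up as
+ # two OVN Load_Balancer rows sharing the same name,
+ #   name=<lb-uuid> protocol=tcp
+ #   name=<lb-uuid> protocol=udp
+ # both carrying the same VIP data in external_ids.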
+ lb_info = { + 'id': lb_id, + 'protocol': protocol, + 'vip_address': ovn_lbs[0].external_ids.get( + ovn_const.LB_EXT_IDS_VIP_KEY), + 'vip_port_id': + ovn_lbs[0].external_ids.get( + ovn_const.LB_EXT_IDS_VIP_PORT_ID_KEY), + ovn_const.LB_EXT_IDS_LR_REF_KEY: + ovn_lbs[0].external_ids.get( + ovn_const.LB_EXT_IDS_LR_REF_KEY), + ovn_const.LB_EXT_IDS_LS_REFS_KEY: + ovn_lbs[0].external_ids.get( + ovn_const.LB_EXT_IDS_LS_REFS_KEY), + 'admin_state_up': admin_state_up} + # NOTE(mjozefcz): Handle vip_fip info if exists. + vip_fip = ovn_lbs[0].external_ids.get( + ovn_const.LB_EXT_IDS_VIP_FIP_KEY) + if vip_fip: + lb_info.update({ovn_const.LB_EXT_IDS_VIP_FIP_KEY: vip_fip}) + self.lb_create(lb_info, protocol=protocol) + # Looks like we've just added new LB + # or updated exising, empty one. + return self._find_ovn_lbs(lb_id, protocol=protocol) + + def _find_ovn_lb_with_pool_key(self, pool_key): + lbs = self.ovn_nbdb_api.db_list_rows( + 'Load_Balancer').execute(check_error=True) + for lb in lbs: + if pool_key in lb.external_ids: + return lb + + def _find_ovn_lb_by_pool_id(self, pool_id): + pool_key = self._get_pool_key(pool_id) + ovn_lb = self._find_ovn_lb_with_pool_key(pool_key) + if not ovn_lb: + pool_key = self._get_pool_key(pool_id, is_enabled=False) + ovn_lb = self._find_ovn_lb_with_pool_key(pool_key) + return pool_key, ovn_lb + + def _execute_commands(self, commands): + with self.ovn_nbdb_api.transaction(check_error=True) as txn: + for command in commands: + txn.add(command) + + def _update_lb_to_ls_association(self, ovn_lb, network_id=None, + subnet_id=None, associate=True): + """Update LB association with Logical Switch + + This function deals with updating the References of Logical Switch + in LB and addition of LB to LS. + """ + commands = [] + if not network_id and not subnet_id: + return commands + + if network_id: + ls_name = utils.ovn_name(network_id) + else: + neutron_client = clients.get_neutron_client() + try: + subnet = neutron_client.show_subnet(subnet_id) + ls_name = utils.ovn_name(subnet['subnet']['network_id']) + except n_exc.NotFound: + LOG.warning('Subnet %s not found while trying to ' + 'fetch its data.', subnet_id) + ls_name = None + ovn_ls = None + + if ls_name: + try: + ovn_ls = self.ovn_nbdb_api.ls_get(ls_name).execute( + check_error=True) + except idlutils.RowNotFound: + LOG.warning("LogicalSwitch %s could not be found.", ls_name) + if associate: + LOG.warning('Cannot associate LB %(lb)s to ' + 'LS %(ls)s because LS row ' + 'not found in OVN NBDB. Exiting.', + {'ls': ls_name, 'lb': ovn_lb.name}) + return commands + ovn_ls = None + + ls_refs = ovn_lb.external_ids.get(ovn_const.LB_EXT_IDS_LS_REFS_KEY) + if ls_refs: + try: + ls_refs = jsonutils.loads(ls_refs) + except ValueError: + ls_refs = {} + else: + ls_refs = {} + + if associate and ls_name: + if ls_name in ls_refs: + ref_ct = ls_refs[ls_name] + ls_refs[ls_name] = ref_ct + 1 + else: + ls_refs[ls_name] = 1 + if ovn_ls: + commands.append(self.ovn_nbdb_api.ls_lb_add( + ovn_ls.uuid, ovn_lb.uuid, may_exist=True)) + else: + if ls_name not in ls_refs: + # Nothing to be done. 
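+ # Illustrative ls_refs life cycle (hypothetical values):
+ # external_ids stores a JSON map such as
+ #   ls_refs = '{"neutron-<net-uuid>": 2}'
+ # each associate bumps the counter, each disassociate
+ # decrements it, and ls_lb_del is only issued once the counter
+ # for that switch drops to zero.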
+ return commands + + ref_ct = ls_refs[ls_name] + if ref_ct == 1: + del ls_refs[ls_name] + if ovn_ls: + commands.append(self.ovn_nbdb_api.ls_lb_del( + ovn_ls.uuid, ovn_lb.uuid, if_exists=True)) + else: + ls_refs[ls_name] = ref_ct - 1 + + ls_refs = {ovn_const.LB_EXT_IDS_LS_REFS_KEY: jsonutils.dumps(ls_refs)} + commands.append(self.ovn_nbdb_api.db_set( + 'Load_Balancer', ovn_lb.uuid, + ('external_ids', ls_refs))) + + return commands + + def _del_lb_to_lr_association(self, ovn_lb, ovn_lr, lr_ref): + commands = [] + if lr_ref: + try: + lr_ref = [r for r in + [lr.strip() for lr in lr_ref.split(',')] + if r != ovn_lr.name] + except ValueError: + msg = ('The loadbalancer %(lb)s is not associated with ' + 'the router %(router)s' % + {'lb': ovn_lb.name, + 'router': ovn_lr.name}) + LOG.warning(msg) + if lr_ref: + commands.append( + self.ovn_nbdb_api.db_set( + 'Load_Balancer', ovn_lb.uuid, + ('external_ids', + {ovn_const.LB_EXT_IDS_LR_REF_KEY: ','.join(lr_ref)}))) + else: + commands.append( + self.ovn_nbdb_api.db_remove( + 'Load_Balancer', ovn_lb.uuid, 'external_ids', + (ovn_const.LB_EXT_IDS_LR_REF_KEY))) + commands.append( + self.ovn_nbdb_api.lr_lb_del(ovn_lr.uuid, ovn_lb.uuid, + if_exists=True)) + for net in self._find_ls_for_lr(ovn_lr): + commands.append(self.ovn_nbdb_api.ls_lb_del( + net, ovn_lb.uuid, if_exists=True)) + return commands + + def _add_lb_to_lr_association(self, ovn_lb, ovn_lr, lr_rf): + commands = [] + commands.append( + self.ovn_nbdb_api.lr_lb_add(ovn_lr.uuid, ovn_lb.uuid, + may_exist=True)) + for net in self._find_ls_for_lr(ovn_lr): + commands.append(self.ovn_nbdb_api.ls_lb_add( + net, ovn_lb.uuid, may_exist=True)) + + if ovn_lr.name not in str(lr_rf): + # Multiple routers in lr_rf are separated with ',' + if lr_rf: + lr_rf = {ovn_const.LB_EXT_IDS_LR_REF_KEY: + "%s,%s" % (lr_rf, ovn_lr.name)} + else: + lr_rf = {ovn_const.LB_EXT_IDS_LR_REF_KEY: ovn_lr.name} + commands.append( + self.ovn_nbdb_api.db_set('Load_Balancer', ovn_lb.uuid, + ('external_ids', lr_rf))) + return commands + + def _update_lb_to_lr_association(self, ovn_lb, ovn_lr, delete=False): + lr_ref = ovn_lb.external_ids.get(ovn_const.LB_EXT_IDS_LR_REF_KEY) + if delete: + return self._del_lb_to_lr_association(ovn_lb, ovn_lr, lr_ref) + else: + return self._add_lb_to_lr_association(ovn_lb, ovn_lr, lr_ref) + + def _find_ls_for_lr(self, router): + neutron_client = clients.get_neutron_client() + ls = [] + for port in router.ports: + if port.gateway_chassis: + continue + sids = port.external_ids.get( + ovn_const.OVN_SUBNET_EXT_IDS_KEY, '').split(' ') + for sid in sids: + try: + subnet = neutron_client.show_subnet(sid) + ls.append(utils.ovn_name(subnet['subnet']['network_id'])) + except n_exc.NotFound: + LOG.exception('Subnet %s not found while trying to ' + 'fetch its data.', sid) + return ls + + def _find_lr_of_ls(self, ovn_ls): + lsp_router_port = None + for port in ovn_ls.ports or []: + if port.type == 'router': + lsp_router_port = port + break + else: + return + + lrp_name = lsp_router_port.options.get('router-port') + if not lrp_name: + return + + for lr in self.ovn_nbdb_api.tables['Logical_Router'].rows.values(): + for lrp in lr.ports: + if lrp.name == lrp_name: + return lr + # Handles networks with only gateway port in the router + if (utils.ovn_lrouter_port_name( + lr.external_ids.get(ovn_const.OVN_GW_PORT_EXT_ID_KEY)) == + lrp_name): + return lr + + def _get_listener_key(self, listener_id, is_enabled=True): + listener_key = ovn_const.LB_EXT_IDS_LISTENER_PREFIX + str(listener_id) + if not is_enabled: + listener_key += 
':' + ovn_const.DISABLED_RESOURCE_SUFFIX + return listener_key + + def _get_pool_key(self, pool_id, is_enabled=True): + pool_key = ovn_const.LB_EXT_IDS_POOL_PREFIX + str(pool_id) + if not is_enabled: + pool_key += ':' + ovn_const.DISABLED_RESOURCE_SUFFIX + return pool_key + + def _extract_member_info(self, member): + mem_info = [] + if member: + for mem in member.split(','): + mem_ip_port = mem.split('_')[2] + mem_info.append(tuple(mem_ip_port.rsplit(':', 1))) + return mem_info + + def _get_member_key(self, member, old_convention=False): + member_info = '' + if isinstance(member, dict): + member_info = '%s%s_%s:%s' % ( + ovn_const.LB_EXT_IDS_MEMBER_PREFIX, + member[constants.ID], + member[constants.ADDRESS], + member[constants.PROTOCOL_PORT]) + if not old_convention and member.get(constants.SUBNET_ID): + member_info += "_" + member[constants.SUBNET_ID] + elif isinstance(member, o_datamodels.Member): + member_info = '%s%s_%s:%s' % ( + ovn_const.LB_EXT_IDS_MEMBER_PREFIX, + member.member_id, + member.address, + member.protocol_port) + if not old_convention and member.subnet_id: + member_info += "_" + member.subnet_id + return member_info + + def _make_listener_key_value(self, listener_port, pool_id): + return str(listener_port) + ':' + pool_id + + def _extract_listener_key_value(self, listener_value): + v = listener_value.split(':') + if len(v) == 2: + return (v[0], v[1]) + else: + return (None, None) + + def _is_listener_disabled(self, listener_key): + v = listener_key.split(':') + if len(v) == 2 and v[1] == ovn_const.DISABLED_RESOURCE_SUFFIX: + return True + + return False + + def _get_pool_listeners(self, ovn_lb, pool_key): + pool_listeners = [] + for k, v in ovn_lb.external_ids.items(): + if ovn_const.LB_EXT_IDS_LISTENER_PREFIX not in k: + continue + vip_port, p_key = self._extract_listener_key_value(v) + if pool_key == p_key: + pool_listeners.append( + k[len(ovn_const.LB_EXT_IDS_LISTENER_PREFIX):]) + return pool_listeners + + def _frame_vip_ips(self, lb_external_ids): + vip_ips = {} + # If load balancer is disabled, return + if lb_external_ids.get('enabled') == 'False': + return vip_ips + + lb_vip = lb_external_ids[ovn_const.LB_EXT_IDS_VIP_KEY] + vip_fip = lb_external_ids.get(ovn_const.LB_EXT_IDS_VIP_FIP_KEY) + + for k, v in lb_external_ids.items(): + if (ovn_const.LB_EXT_IDS_LISTENER_PREFIX not in k or + self._is_listener_disabled(k)): + continue + + vip_port, pool_id = self._extract_listener_key_value(v) + if not vip_port or not pool_id: + continue + + if pool_id not in lb_external_ids or not lb_external_ids[pool_id]: + continue + + ips = [] + for member_ip, member_port in self._extract_member_info( + lb_external_ids[pool_id]): + if netaddr.IPNetwork(member_ip).version == 6: + ips.append('[%s]:%s' % (member_ip, member_port)) + else: + ips.append('%s:%s' % (member_ip, member_port)) + + if netaddr.IPNetwork(lb_vip).version == 6: + lb_vip = '[%s]' % lb_vip + vip_ips[lb_vip + ':' + vip_port] = ','.join(ips) + + if vip_fip: + if netaddr.IPNetwork(vip_fip).version == 6: + vip_fip = '[%s]' % vip_fip + vip_ips[vip_fip + ':' + vip_port] = ','.join(ips) + + return vip_ips + + def _refresh_lb_vips(self, ovn_lb_uuid, lb_external_ids): + vip_ips = self._frame_vip_ips(lb_external_ids) + return [self.ovn_nbdb_api.db_clear('Load_Balancer', ovn_lb_uuid, + 'vips'), + self.ovn_nbdb_api.db_set('Load_Balancer', ovn_lb_uuid, + ('vips', vip_ips))] + + def _is_listener_in_lb(self, lb): + for key in list(lb.external_ids): + if key.startswith(ovn_const.LB_EXT_IDS_LISTENER_PREFIX): + return True + return False + 
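+ # For orientation, a populated Load_Balancer row's external_ids
+ # ends up shaped roughly like this (hypothetical ids, key names
+ # abbreviated to their prefixes):
+ #   listener_<listener-id> = '80:pool_<pool-id>'
+ #   pool_<pool-id> = 'member_<member-id>_10.0.0.10:8080_<subnet-id>'
+ # from which _frame_vip_ips() computes the OVN vips column, e.g.
+ #   {'172.24.4.5:80': '10.0.0.10:8080,10.0.0.11:8080'}
+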
+ def check_lb_protocol(self, lb_id, listener_protocol):
+ ovn_lb = self._find_ovn_lbs(lb_id, protocol=listener_protocol)
+ if not ovn_lb:
+ return False
+ elif not self._is_listener_in_lb(ovn_lb):
+ return True
+ else:
+ return str(listener_protocol).lower() in ovn_lb.protocol
+
+ def lb_create(self, loadbalancer, protocol=None):
+ port = None
+ neutron_client = clients.get_neutron_client()
+ if loadbalancer.get(constants.VIP_PORT_ID):
+ # In case we don't have vip_network_id
+ port = neutron_client.show_port(
+ loadbalancer[constants.VIP_PORT_ID])['port']
+ elif (loadbalancer.get(constants.VIP_NETWORK_ID) and
+ loadbalancer.get(constants.VIP_ADDRESS)):
+ ports = neutron_client.list_ports(
+ network_id=loadbalancer[constants.VIP_NETWORK_ID])
+ for p in ports['ports']:
+ for ip in p['fixed_ips']:
+ if ip['ip_address'] == loadbalancer[constants.VIP_ADDRESS]:
+ port = p
+ break
+
+ # If protocol is set, make sure it is lowercase.
+ protocol = protocol.lower() if protocol else None
+ # In case the port is not found for the vip_address we will see an
+ # exception when port['id'] is accessed.
+ external_ids = {
+ ovn_const.LB_EXT_IDS_VIP_KEY: loadbalancer[constants.VIP_ADDRESS],
+ ovn_const.LB_EXT_IDS_VIP_PORT_ID_KEY:
+ loadbalancer.get(constants.VIP_PORT_ID) or port['id'],
+ 'enabled': str(loadbalancer[constants.ADMIN_STATE_UP])}
+ # In case vip_fip was passed - use it.
+ vip_fip = loadbalancer.get(ovn_const.LB_EXT_IDS_VIP_FIP_KEY)
+ if vip_fip:
+ external_ids[ovn_const.LB_EXT_IDS_VIP_FIP_KEY] = vip_fip
+ # In case lr_ref was passed - use it.
+ lr_ref = loadbalancer.get(ovn_const.LB_EXT_IDS_LR_REF_KEY)
+ if lr_ref:
+ external_ids[ovn_const.LB_EXT_IDS_LR_REF_KEY] = lr_ref
+
+ try:
+ self.ovn_nbdb_api.db_create(
+ 'Load_Balancer', name=loadbalancer[constants.ID],
+ protocol=protocol,
+ external_ids=external_ids).execute(check_error=True)
+ ovn_lb = self._find_ovn_lbs(
+ loadbalancer[constants.ID],
+ protocol=protocol)
+ ovn_lb = ovn_lb if protocol else ovn_lb[0]
+ commands = self._update_lb_to_ls_association(
+ ovn_lb, network_id=port['network_id'],
+ associate=True)
+ ls_name = utils.ovn_name(port['network_id'])
+ ovn_ls = self.ovn_nbdb_api.ls_get(ls_name).execute(
+ check_error=True)
+ ovn_lr = self._find_lr_of_ls(ovn_ls)
+ if ovn_lr:
+ commands.extend(self._update_lb_to_lr_association(
+ ovn_lb, ovn_lr))
+
+ # NOTE(mjozefcz): In case LS references were passed -
+ # apply them to the new LB. That can happen when we
+ # need another loadbalancer for another L4 protocol.
+ ls_refs = loadbalancer.get(ovn_const.LB_EXT_IDS_LS_REFS_KEY)
+ if ls_refs:
+ try:
+ ls_refs = jsonutils.loads(ls_refs)
+ except ValueError:
+ ls_refs = {}
+ for ls in ls_refs:
+ # Skip the previously added LS because we don't want
+ # to duplicate it.
+ if ls == ovn_ls.name:
+ continue
+ commands.extend(self._update_lb_to_ls_association(
+ ovn_lb, network_id=ls.replace('neutron-', ''),
+ associate=True))
+
+ self._execute_commands(commands)
+ operating_status = constants.ONLINE
+ # Since OVN doesn't support any health monitors (HMs),
+ # we ideally should never put the status as 'ONLINE'.
+ if not loadbalancer.get(constants.ADMIN_STATE_UP, True):
+ operating_status = constants.OFFLINE
+ status = {
+ constants.LOADBALANCERS: [
+ {constants.ID: loadbalancer[constants.ID],
+ constants.PROVISIONING_STATUS: constants.ACTIVE,
+ constants.OPERATING_STATUS: operating_status}]}
+ # If the connection with the OVN NB db server is broken, then
+ # ovsdbapp will throw either TimeOutException or RunTimeError.
+ # Maybe we can catch these specific exceptions.
+ # It is important to report the status to octavia. We can report
+ # immediately or reschedule the lb_create request later.
+ # For now let's report immediately.
+ except Exception:
+ LOG.exception(ovn_const.EXCEPTION_MSG, "creation of loadbalancer")
+ # Any exception sets the status to ERROR.
+ if isinstance(port, dict):
+ self.delete_vip_port(port.get('id'))
+ LOG.warning("Deleting the VIP port %s since LB went into "
+ "ERROR state", str(port.get('id')))
+ status = {
+ constants.LOADBALANCERS: [
+ {constants.ID: loadbalancer[constants.ID],
+ constants.PROVISIONING_STATUS: constants.ERROR,
+ constants.OPERATING_STATUS: constants.ERROR}]}
+ return status
+
+ def lb_delete(self, loadbalancer):
+ port_id = None
+ lbalancer_status = {
+ constants.ID: loadbalancer[constants.ID],
+ constants.PROVISIONING_STATUS: constants.DELETED,
+ constants.OPERATING_STATUS: constants.OFFLINE}
+ status = {
+ constants.LOADBALANCERS: [lbalancer_status],
+ constants.LISTENERS: [],
+ constants.POOLS: [],
+ constants.MEMBERS: []}
+
+ ovn_lbs = None
+ try:
+ ovn_lbs = self._find_ovn_lbs(loadbalancer[constants.ID])
+ except idlutils.RowNotFound:
+ LOG.warning("Loadbalancer %s not found in OVN Northbound DB. "
+ "Setting the Loadbalancer status to DELETED "
+ "in Octavia", str(loadbalancer[constants.ID]))
+ return status
+
+ try:
+ port_id = ovn_lbs[0].external_ids[
+ ovn_const.LB_EXT_IDS_VIP_PORT_ID_KEY]
+ for ovn_lb in ovn_lbs:
+ status = self._lb_delete(loadbalancer, ovn_lb, status)
+ # Clear the status dict of any key having [] as its value.
+ # Python 3.6 doesn't allow deleting an element in a
+ # dict while iterating over it, so build a filtered copy.
+ # https://cito.github.io/blog/never-iterate-a-changing-dict/
+ status = {key: value for key, value in status.items() if value}
+ except Exception:
+ LOG.exception(ovn_const.EXCEPTION_MSG, "deletion of loadbalancer")
+ lbalancer_status[constants.PROVISIONING_STATUS] = constants.ERROR
+ lbalancer_status[constants.OPERATING_STATUS] = constants.ERROR
+ # Delete the VIP port from neutron.
+ self.delete_vip_port(port_id)
+ return status
+
+ def _lb_delete(self, loadbalancer, ovn_lb, status):
+ commands = []
+ if loadbalancer['cascade']:
+ # Delete all pools
+ for key, value in ovn_lb.external_ids.items():
+ if key.startswith(ovn_const.LB_EXT_IDS_POOL_PREFIX):
+ pool_id = key.split('_')[1]
+ # Delete all members in the pool
+ if value and len(value.split(',')) > 0:
+ for mem_info in value.split(','):
+ status[constants.MEMBERS].append({
+ constants.ID: mem_info.split('_')[1],
+ constants.PROVISIONING_STATUS:
+ constants.DELETED})
+ status[constants.POOLS].append(
+ {constants.ID: pool_id,
+ constants.PROVISIONING_STATUS: constants.DELETED})
+
+ if key.startswith(ovn_const.LB_EXT_IDS_LISTENER_PREFIX):
+ status[constants.LISTENERS].append({
+ constants.ID: key.split('_')[1],
+ constants.PROVISIONING_STATUS: constants.DELETED,
+ constants.OPERATING_STATUS: constants.OFFLINE})
+ ls_refs = ovn_lb.external_ids.get(ovn_const.LB_EXT_IDS_LS_REFS_KEY, {})
+ if ls_refs:
+ try:
+ ls_refs = jsonutils.loads(ls_refs)
+ except ValueError:
+ ls_refs = {}
+ for ls_name in ls_refs.keys():
+ try:
+ ovn_ls = self.ovn_nbdb_api.ls_get(ls_name).execute(
+ check_error=True)
+ commands.append(
+ self.ovn_nbdb_api.ls_lb_del(ovn_ls.uuid, ovn_lb.uuid))
+ except idlutils.RowNotFound:
+ LOG.warning("LogicalSwitch %s could not be found. Cannot "
Cannot " + "delete Load Balancer from it", ls_name) + # Delete LB from all Networks the LB is indirectly associated + for ls in self._find_lb_in_table(ovn_lb, 'Logical_Switch'): + commands.append( + self.ovn_nbdb_api.ls_lb_del(ls.uuid, ovn_lb.uuid, + if_exists=True)) + lr_ref = ovn_lb.external_ids.get(ovn_const.LB_EXT_IDS_LR_REF_KEY, {}) + if lr_ref: + for lr in self.ovn_nbdb_api.tables[ + 'Logical_Router'].rows.values(): + if lr.name == lr_ref: + commands.append(self.ovn_nbdb_api.lr_lb_del( + lr.uuid, ovn_lb.uuid)) + break + # Delete LB from all Routers the LB is indirectly associated + for lr in self._find_lb_in_table(ovn_lb, 'Logical_Router'): + commands.append( + self.ovn_nbdb_api.lr_lb_del(lr.uuid, ovn_lb.uuid, + if_exists=True)) + commands.append(self.ovn_nbdb_api.lb_del(ovn_lb.uuid)) + self._execute_commands(commands) + return status + + def lb_failover(self, loadbalancer): + status = { + constants.LOADBALANCERS: [ + {constants.ID: loadbalancer[constants.ID], + constants.PROVISIONING_STATUS: constants.ACTIVE}]} + return status + + def lb_update(self, loadbalancer): + lb_status = {constants.ID: loadbalancer[constants.ID], + constants.PROVISIONING_STATUS: constants.ACTIVE} + status = {constants.LOADBALANCERS: [lb_status]} + if constants.ADMIN_STATE_UP not in loadbalancer: + return status + lb_enabled = loadbalancer[constants.ADMIN_STATE_UP] + + try: + ovn_lbs = self._find_ovn_lbs(loadbalancer[constants.ID]) + # It should be unique for all the LBS for all protocols, + # so we could just easly loop over all defined for given + # Octavia LB. + for ovn_lb in ovn_lbs: + if str(ovn_lb.external_ids['enabled']) != str(lb_enabled): + commands = [] + enable_info = {'enabled': str(lb_enabled)} + ovn_lb.external_ids['enabled'] = str(lb_enabled) + commands.append( + self.ovn_nbdb_api.db_set('Load_Balancer', ovn_lb.uuid, + ('external_ids', enable_info)) + ) + commands.extend( + self._refresh_lb_vips(ovn_lb.uuid, + ovn_lb.external_ids)) + self._execute_commands(commands) + if lb_enabled: + operating_status = constants.ONLINE + else: + operating_status = constants.OFFLINE + lb_status[constants.OPERATING_STATUS] = operating_status + except Exception: + LOG.exception(ovn_const.EXCEPTION_MSG, "update of loadbalancer") + lb_status[constants.PROVISIONING_STATUS] = constants.ERROR + lb_status[constants.OPERATING_STATUS] = constants.ERROR + return status + + def listener_create(self, listener): + ovn_lb = self._get_or_create_ovn_lb( + listener[constants.LOADBALANCER_ID], + listener[constants.PROTOCOL], + listener[constants.ADMIN_STATE_UP]) + + external_ids = copy.deepcopy(ovn_lb.external_ids) + listener_key = self._get_listener_key( + listener[constants.ID], + is_enabled=listener[constants.ADMIN_STATE_UP]) + + if listener.get(constants.DEFAULT_POOL_ID): + pool_key = self._get_pool_key(listener[constants.DEFAULT_POOL_ID]) + else: + pool_key = '' + external_ids[listener_key] = self._make_listener_key_value( + listener[constants.PROTOCOL_PORT], pool_key) + + listener_info = {listener_key: external_ids[listener_key]} + try: + commands = [] + commands.append( + self.ovn_nbdb_api.db_set('Load_Balancer', ovn_lb.uuid, + ('external_ids', listener_info))) + if not self._is_listener_in_lb(ovn_lb): + commands.append( + self.ovn_nbdb_api.db_set( + 'Load_Balancer', ovn_lb.uuid, + ('protocol', + str(listener[constants.PROTOCOL]).lower()))) + commands.extend(self._refresh_lb_vips(ovn_lb.uuid, external_ids)) + self._execute_commands(commands) + + operating_status = constants.ONLINE + if not 
listener.get(constants.ADMIN_STATE_UP, True): + operating_status = constants.OFFLINE + status = { + constants.LISTENERS: [ + {constants.ID: listener[constants.ID], + constants.PROVISIONING_STATUS: constants.ACTIVE, + constants.OPERATING_STATUS: operating_status}], + constants.LOADBALANCERS: [ + {constants.ID: listener[constants.LOADBALANCER_ID], + constants.PROVISIONING_STATUS: constants.ACTIVE}]} + except Exception: + LOG.exception(ovn_const.EXCEPTION_MSG, "creation of listener") + status = { + constants.LISTENERS: [ + {constants.ID: listener[constants.ID], + constants.PROVISIONING_STATUS: constants.ERROR, + constants.OPERATING_STATUS: constants.ERROR}], + constants.LOADBALANCERS: [ + {constants.ID: listener[constants.LOADBALANCER_ID], + constants.PROVISIONING_STATUS: constants.ACTIVE}]} + return status + + def listener_delete(self, listener): + status = { + constants.LISTENERS: [ + {constants.ID: listener[constants.ID], + constants.PROVISIONING_STATUS: constants.DELETED, + constants.OPERATING_STATUS: constants.OFFLINE}], + constants.LOADBALANCERS: [ + {constants.ID: listener[constants.LOADBALANCER_ID], + constants.PROVISIONING_STATUS: constants.ACTIVE}]} + try: + ovn_lb = self._find_ovn_lbs( + listener[constants.LOADBALANCER_ID], + protocol=listener[constants.PROTOCOL]) + except idlutils.RowNotFound: + # Listener already deleted. + return status + + external_ids = copy.deepcopy(ovn_lb.external_ids) + listener_key = self._get_listener_key(listener[constants.ID]) + if listener_key in external_ids: + try: + commands = [] + commands.append( + self.ovn_nbdb_api.db_remove( + 'Load_Balancer', ovn_lb.uuid, 'external_ids', + (listener_key))) + # Drop current listener from LB. + del external_ids[listener_key] + + # Set LB protocol to undefined only if there are no more + # listeners and pools defined in the LB. + cmds, lb_to_delete = self._clean_lb_if_empty( + ovn_lb, listener[constants.LOADBALANCER_ID], external_ids) + commands.extend(cmds) + # Do not refresh vips if OVN LB for given protocol + # has pending delete operation. + if not lb_to_delete: + commands.extend( + self._refresh_lb_vips(ovn_lb.uuid, external_ids)) + self._execute_commands(commands) + except Exception: + LOG.exception(ovn_const.EXCEPTION_MSG, "deletion of listener") + status = { + constants.LISTENERS: [ + {constants.ID: listener[constants.ID], + constants.PROVISIONING_STATUS: constants.ERROR, + constants.OPERATING_STATUS: constants.ERROR}], + constants.LOADBALANCERS: [ + {constants.ID: listener[constants.LOADBALANCER_ID], + constants.PROVISIONING_STATUS: constants.ACTIVE}]} + return status + + def listener_update(self, listener): + # NOTE(mjozefcz): Based on + # https://docs.openstack.org/api-ref/load-balancer/v2/?expanded=update-a-listener-detail + # there is no possibility to update listener protocol or port. + listener_status = {constants.ID: listener[constants.ID], + constants.PROVISIONING_STATUS: constants.ACTIVE} + lbalancer_status = { + constants.ID: listener[constants.LOADBALANCER_ID], + constants.PROVISIONING_STATUS: constants.ACTIVE} + pool_status = [] + status = { + constants.LISTENERS: [listener_status], + constants.LOADBALANCERS: [lbalancer_status], + constants.POOLS: pool_status} + + try: + ovn_lb = self._find_ovn_lbs( + listener[constants.LOADBALANCER_ID], + protocol=listener[constants.PROTOCOL]) + except idlutils.RowNotFound: + LOG.exception(ovn_const.EXCEPTION_MSG, "update of listener") + # LB row not found during update of a listener. That is a problem. 
+ listener_status[constants.PROVISIONING_STATUS] = constants.ERROR + lbalancer_status[constants.PROVISIONING_STATUS] = constants.ERROR + return status + + l_key_when_enabled = self._get_listener_key(listener[constants.ID]) + l_key_when_disabled = self._get_listener_key( + listener[constants.ID], is_enabled=False) + + external_ids = copy.deepcopy(ovn_lb.external_ids) + if constants.ADMIN_STATE_UP not in listener and ( + constants.DEFAULT_POOL_ID not in listener): + return status + + l_key_to_add = {} + if l_key_when_enabled in external_ids: + present_l_key = l_key_when_enabled + elif l_key_when_disabled in external_ids: + present_l_key = l_key_when_disabled + else: + # Something is terribly wrong. This cannot happen. + return status + + try: + commands = [] + new_l_key = None + l_key_to_remove = None + if constants.ADMIN_STATE_UP in listener: + if listener[constants.ADMIN_STATE_UP]: + # We need to enable the listener + new_l_key = l_key_when_enabled + listener_status[constants.OPERATING_STATUS] = ( + constants.ONLINE) + else: + # We need to disable the listener + new_l_key = l_key_when_disabled + listener_status[constants.OPERATING_STATUS] = ( + constants.OFFLINE) + + if present_l_key != new_l_key: + external_ids[new_l_key] = external_ids[present_l_key] + l_key_to_add[new_l_key] = external_ids[present_l_key] + del external_ids[present_l_key] + l_key_to_remove = present_l_key + + if l_key_to_remove: + commands.append( + self.ovn_nbdb_api.db_remove( + 'Load_Balancer', ovn_lb.uuid, 'external_ids', + (l_key_to_remove))) + else: + new_l_key = present_l_key + + if constants.DEFAULT_POOL_ID in listener: + pool_key = self._get_pool_key( + listener[constants.DEFAULT_POOL_ID]) + l_key_value = self._make_listener_key_value( + listener[constants.PROTOCOL_PORT], pool_key) + l_key_to_add[new_l_key] = l_key_value + external_ids[new_l_key] = l_key_value + pool_status.append( + {constants.ID: listener[constants.DEFAULT_POOL_ID], + constants.PROVISIONING_STATUS: constants.ACTIVE}) + + if l_key_to_add: + commands.append( + self.ovn_nbdb_api.db_set( + 'Load_Balancer', ovn_lb.uuid, + ('external_ids', l_key_to_add))) + + commands.extend( + self._refresh_lb_vips(ovn_lb.uuid, external_ids)) + self._execute_commands(commands) + except Exception: + LOG.exception(ovn_const.EXCEPTION_MSG, "update of listener") + status = { + constants.LISTENERS: [ + {constants.ID: listener[constants.ID], + constants.PROVISIONING_STATUS: constants.ERROR}], + constants.LOADBALANCERS: [ + {constants.ID: listener[constants.LOADBALANCER_ID], + constants.PROVISIONING_STATUS: constants.ACTIVE}]} + return status + + def pool_create(self, pool): + ovn_lb = self._get_or_create_ovn_lb( + pool[constants.LOADBALANCER_ID], + pool[constants.PROTOCOL], + pool[constants.ADMIN_STATE_UP]) + external_ids = copy.deepcopy(ovn_lb.external_ids) + pool_key = self._get_pool_key( + pool[constants.ID], is_enabled=pool[constants.ADMIN_STATE_UP]) + external_ids[pool_key] = '' + if pool[constants.LISTENER_ID]: + listener_key = self._get_listener_key(pool[constants.LISTENER_ID]) + if listener_key in ovn_lb.external_ids: + external_ids[listener_key] = str( + external_ids[listener_key]) + str(pool_key) + try: + self.ovn_nbdb_api.db_set( + 'Load_Balancer', ovn_lb.uuid, + ('external_ids', external_ids)).execute(check_error=True) + # Pool status will be set to Online after a member is added to it. 
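+ # E.g. (hypothetical ids) creating pool_<pool-id> attached to
+ # listener_<listener-id> rewrites external_ids roughly from
+ #   listener_<listener-id> = '80:'          (no pool key yet)
+ # to
+ #   listener_<listener-id> = '80:pool_<pool-id>'
+ #   pool_<pool-id> = ''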
+ operating_status = constants.OFFLINE + + status = { + constants.POOLS: [ + {constants.ID: pool[constants.ID], + constants.PROVISIONING_STATUS: constants.ACTIVE, + constants.OPERATING_STATUS: operating_status}], + constants.LOADBALANCERS: [ + {constants.ID: pool[constants.LOADBALANCER_ID], + constants.PROVISIONING_STATUS: constants.ACTIVE}]} + if pool[constants.LISTENER_ID]: + listener_status = [ + {constants.ID: pool[constants.LISTENER_ID], + constants.PROVISIONING_STATUS: constants.ACTIVE}] + status[constants.LISTENERS] = listener_status + except Exception: + LOG.exception(ovn_const.EXCEPTION_MSG, "creation of pool") + status = { + constants.POOLS: [ + {constants.ID: pool[constants.ID], + constants.PROVISIONING_STATUS: constants.ERROR}], + constants.LOADBALANCERS: [ + {constants.ID: pool[constants.LOADBALANCER_ID], + constants.PROVISIONING_STATUS: constants.ACTIVE}]} + if pool[constants.LISTENER_ID]: + listener_status = [ + {constants.ID: pool[constants.LISTENER_ID], + constants.PROVISIONING_STATUS: constants.ACTIVE}] + status[constants.LISTENERS] = listener_status + + return status + + def pool_delete(self, pool): + status = { + constants.POOLS: [ + {constants.ID: pool[constants.ID], + constants.PROVISIONING_STATUS: constants.DELETED}], + constants.LOADBALANCERS: [ + {constants.ID: pool[constants.LOADBALANCER_ID], + constants.PROVISIONING_STATUS: constants.ACTIVE}]} + try: + ovn_lb = self._find_ovn_lbs( + pool[constants.LOADBALANCER_ID], + pool[constants.PROTOCOL]) + except idlutils.RowNotFound: + # LB row not found that means pool is deleted. + return status + + pool_key = self._get_pool_key(pool[constants.ID]) + commands = [] + external_ids = copy.deepcopy(ovn_lb.external_ids) + try: + if pool_key in ovn_lb.external_ids: + commands.append( + self.ovn_nbdb_api.db_remove('Load_Balancer', ovn_lb.uuid, + 'external_ids', (pool_key))) + del external_ids[pool_key] + commands.extend( + self._refresh_lb_vips(ovn_lb.uuid, external_ids)) + # Remove Pool from Listener if it is associated + listener_id = None + for key, value in ovn_lb.external_ids.items(): + if (key.startswith(ovn_const.LB_EXT_IDS_LISTENER_PREFIX) and + pool_key in value): + external_ids[key] = value.split(':')[0] + ':' + commands.append( + self.ovn_nbdb_api.db_set( + 'Load_Balancer', ovn_lb.uuid, + ('external_ids', external_ids))) + listener_id = key.split('_')[1] + + pool_key_when_disabled = self._get_pool_key(pool[constants.ID], + is_enabled=False) + if pool_key_when_disabled in ovn_lb.external_ids: + commands.append( + self.ovn_nbdb_api.db_remove( + 'Load_Balancer', ovn_lb.uuid, + 'external_ids', (pool_key_when_disabled))) + + commands.extend( + self._clean_lb_if_empty( + ovn_lb, pool[constants.LOADBALANCER_ID], external_ids)[0]) + self._execute_commands(commands) + + if listener_id: + status[constants.LISTENERS] = [ + {constants.ID: listener_id, + constants.PROVISIONING_STATUS: constants.ACTIVE}] + except Exception: + LOG.exception(ovn_const.EXCEPTION_MSG, "deletion of pool") + status = { + constants.POOLS: [ + {constants.ID: pool[constants.ID], + constants.PROVISIONING_STATUS: constants.ERROR}], + constants.LOADBALANCERS: [ + {constants.ID: pool[constants.LOADBALANCER_ID], + constants.PROVISIONING_STATUS: constants.ACTIVE}]} + + return status + + def pool_update(self, pool): + pool_status = {constants.ID: pool[constants.ID], + constants.PROVISIONING_STATUS: constants.ACTIVE} + lbalancer_status = {constants.ID: pool[constants.LOADBALANCER_ID], + constants.PROVISIONING_STATUS: constants.ACTIVE} + status = { + 
constants.POOLS: [pool_status],
+ constants.LOADBALANCERS: [lbalancer_status]}
+ if constants.ADMIN_STATE_UP not in pool:
+ return status
+ try:
+ ovn_lb = self._find_ovn_lbs(
+ pool[constants.LOADBALANCER_ID],
+ protocol=pool[constants.PROTOCOL])
+ except idlutils.RowNotFound:
+ LOG.exception(ovn_const.EXCEPTION_MSG, "update of pool")
+ # LB row not found during update of a pool. That is a problem.
+ pool_status[constants.PROVISIONING_STATUS] = constants.ERROR
+ lbalancer_status[constants.PROVISIONING_STATUS] = constants.ERROR
+ return status
+
+ pool_key = self._get_pool_key(pool[constants.ID])
+ p_key_when_disabled = self._get_pool_key(pool[constants.ID],
+ is_enabled=False)
+ external_ids = copy.deepcopy(ovn_lb.external_ids)
+ p_key_to_remove = None
+ p_key_to_add = {}
+
+ try:
+ if pool[constants.ADMIN_STATE_UP]:
+ if p_key_when_disabled in external_ids:
+ p_key_to_add[pool_key] = external_ids[p_key_when_disabled]
+ external_ids[pool_key] = external_ids[p_key_when_disabled]
+ del external_ids[p_key_when_disabled]
+ p_key_to_remove = p_key_when_disabled
+ else:
+ if pool_key in external_ids:
+ p_key_to_add[p_key_when_disabled] = external_ids[pool_key]
+ external_ids[p_key_when_disabled] = external_ids[pool_key]
+ del external_ids[pool_key]
+ p_key_to_remove = pool_key
+
+ if p_key_to_remove:
+ commands = []
+ commands.append(
+ self.ovn_nbdb_api.db_remove(
+ 'Load_Balancer', ovn_lb.uuid, 'external_ids',
+ (p_key_to_remove)))
+
+ commands.append(
+ self.ovn_nbdb_api.db_set(
+ 'Load_Balancer', ovn_lb.uuid,
+ ('external_ids', p_key_to_add)))
+
+ commands.extend(
+ self._refresh_lb_vips(ovn_lb.uuid, external_ids))
+ self._execute_commands(commands)
+ if pool[constants.ADMIN_STATE_UP]:
+ operating_status = constants.ONLINE
+ else:
+ operating_status = constants.OFFLINE
+ pool_status[constants.OPERATING_STATUS] = operating_status
+
+ pool_listeners = self._get_pool_listeners(ovn_lb, pool_key)
+ listener_status = []
+ for listener in pool_listeners:
+ listener_status.append(
+ {constants.ID: listener,
+ constants.PROVISIONING_STATUS: constants.ACTIVE})
+ status[constants.LISTENERS] = listener_status
+ except Exception:
+ LOG.exception(ovn_const.EXCEPTION_MSG, "update of pool")
+ status = {
+ constants.POOLS: [
+ {constants.ID: pool[constants.ID],
+ constants.PROVISIONING_STATUS: constants.ERROR}],
+ constants.LOADBALANCERS: [
+ {constants.ID: pool[constants.LOADBALANCER_ID],
+ constants.PROVISIONING_STATUS: constants.ACTIVE}]}
+
+ return status
+
+ def _add_member(self, member, ovn_lb, pool_key):
+ external_ids = copy.deepcopy(ovn_lb.external_ids)
+ existing_members = external_ids[pool_key]
+ if existing_members:
+ existing_members = existing_members.split(",")
+ member_info = self._get_member_key(member)
+ # TODO(mjozefcz): Remove this workaround in W release.
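+ # Old naming convention (no subnet id), matched as a prefix of
+ # the new one, e.g. with hypothetical values:
+ #   member_<member-uuid>_10.0.0.10:8080
+ # New convention with the subnet id appended:
+ #   member_<member-uuid>_10.0.0.10:8080_<subnet-uuid>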
+ member_info_old = self._get_member_key(member, old_convention=True) + member_found = [x for x in existing_members + if re.match(member_info_old, x)] + if member_found: + # Member already present + return + if existing_members: + existing_members.append(member_info) + pool_data = {pool_key: ",".join(existing_members)} + else: + pool_data = {pool_key: member_info} + + commands = [] + commands.append( + self.ovn_nbdb_api.db_set('Load_Balancer', ovn_lb.uuid, + ('external_ids', pool_data))) + + external_ids[pool_key] = pool_data[pool_key] + commands.extend(self._refresh_lb_vips(ovn_lb.uuid, external_ids)) + commands.extend( + self._update_lb_to_ls_association( + ovn_lb, subnet_id=member[constants.SUBNET_ID], associate=True)) + self._execute_commands(commands) + + def member_create(self, member): + try: + pool_key, ovn_lb = self._find_ovn_lb_by_pool_id( + member[constants.POOL_ID]) + self._add_member(member, ovn_lb, pool_key) + pool = {constants.ID: member[constants.POOL_ID], + constants.PROVISIONING_STATUS: constants.ACTIVE, + constants.OPERATING_STATUS: constants.ONLINE} + status = { + constants.POOLS: [pool], + constants.MEMBERS: [ + {constants.ID: member[constants.ID], + constants.PROVISIONING_STATUS: constants.ACTIVE}], + constants.LOADBALANCERS: [ + {constants.ID: ovn_lb.name, + constants.PROVISIONING_STATUS: constants.ACTIVE}]} + pool_listeners = self._get_pool_listeners(ovn_lb, pool_key) + listener_status = [] + for listener in pool_listeners: + listener_status.append( + {constants.ID: listener, + constants.PROVISIONING_STATUS: constants.ACTIVE}) + status[constants.LISTENERS] = listener_status + except Exception: + LOG.exception(ovn_const.EXCEPTION_MSG, "creation of member") + status = { + constants.POOLS: [ + {constants.ID: member[constants.POOL_ID], + constants.PROVISIONING_STATUS: constants.ERROR}], + constants.MEMBERS: [ + {constants.ID: member[constants.ID], + constants.PROVISIONING_STATUS: constants.ACTIVE}], + constants.LOADBALANCERS: [ + {constants.ID: ovn_lb.name, + constants.PROVISIONING_STATUS: constants.ACTIVE}]} + + return status + + def _remove_member(self, member, ovn_lb, pool_key): + external_ids = copy.deepcopy(ovn_lb.external_ids) + existing_members = external_ids[pool_key].split(",") + # TODO(mjozefcz): Delete this workaround in W release. 
+ # To support backward compatibility member + # could be defined as `member`_`id`_`ip`:`port`_`subnet_id` + # or defined as `member`_`id`_`ip`:`port + member_info_old = self._get_member_key(member, old_convention=True) + + member_found = [x for x in existing_members + if re.match(member_info_old, x)] + if member_found: + commands = [] + existing_members.remove(member_found[0]) + + if not existing_members: + pool_status = constants.OFFLINE + else: + pool_status = constants.ONLINE + pool_data = {pool_key: ",".join(existing_members)} + commands.append( + self.ovn_nbdb_api.db_set('Load_Balancer', ovn_lb.uuid, + ('external_ids', pool_data))) + external_ids[pool_key] = ",".join(existing_members) + commands.extend( + self._refresh_lb_vips(ovn_lb.uuid, external_ids)) + commands.extend( + self._update_lb_to_ls_association( + ovn_lb, subnet_id=member.get(constants.SUBNET_ID), + associate=False)) + self._execute_commands(commands) + return pool_status + else: + msg = "Member %s not found in the pool" % member[constants.ID] + raise driver_exceptions.DriverError( + user_fault_string=msg, + operator_fault_string=msg) + + def member_delete(self, member): + try: + pool_key, ovn_lb = self._find_ovn_lb_by_pool_id( + member[constants.POOL_ID]) + pool_status = self._remove_member(member, ovn_lb, pool_key) + pool = {constants.ID: member[constants.POOL_ID], + constants.PROVISIONING_STATUS: constants.ACTIVE, + constants.OPERATING_STATUS: pool_status} + status = { + constants.POOLS: [pool], + constants.MEMBERS: [ + {constants.ID: member[constants.ID], + constants.PROVISIONING_STATUS: constants.DELETED}], + constants.LOADBALANCERS: [ + {constants.ID: ovn_lb.name, + constants.PROVISIONING_STATUS: constants.ACTIVE}]} + pool_listeners = self._get_pool_listeners(ovn_lb, pool_key) + listener_status = [] + for listener in pool_listeners: + listener_status.append( + {constants.ID: listener, + constants.PROVISIONING_STATUS: constants.ACTIVE}) + status[constants.LISTENERS] = listener_status + except Exception: + LOG.exception(ovn_const.EXCEPTION_MSG, "deletion of member") + status = { + constants.POOLS: [ + {constants.ID: member[constants.POOL_ID], + constants.PROVISIONING_STATUS: constants.ACTIVE}], + constants.MEMBERS: [ + {constants.ID: member[constants.ID], + constants.PROVISIONING_STATUS: constants.ERROR}], + constants.LOADBALANCERS: [ + {constants.ID: ovn_lb.name, + constants.PROVISIONING_STATUS: constants.ACTIVE}]} + + return status + + def _update_member(self, member, ovn_lb, pool_key): + commands = [] + external_ids = copy.deepcopy(ovn_lb.external_ids) + existing_members = external_ids[pool_key].split(",") + member_info = self._get_member_key(member) + for mem in existing_members: + if (member_info.split('_')[1] == mem.split('_')[1] and + mem != member_info): + existing_members.remove(mem) + existing_members.append(member_info) + pool_data = {pool_key: ",".join(existing_members)} + commands.append( + self.ovn_nbdb_api.db_set('Load_Balancer', ovn_lb.uuid, + ('external_ids', pool_data))) + external_ids[pool_key] = ",".join(existing_members) + commands.extend( + self._refresh_lb_vips(ovn_lb.uuid, external_ids)) + self._execute_commands(commands) + + def member_update(self, member): + try: + pool_key, ovn_lb = self._find_ovn_lb_by_pool_id( + member[constants.POOL_ID]) + member_status = {constants.ID: member[constants.ID], + constants.PROVISIONING_STATUS: constants.ACTIVE} + status = { + constants.POOLS: [ + {constants.ID: member[constants.POOL_ID], + constants.PROVISIONING_STATUS: constants.ACTIVE}], + 
+                constants.MEMBERS: [member_status],
+                constants.LOADBALANCERS: [
+                    {constants.ID: ovn_lb.name,
+                     constants.PROVISIONING_STATUS: constants.ACTIVE}]}
+            self._update_member(member, ovn_lb, pool_key)
+            if constants.ADMIN_STATE_UP in member:
+                if member[constants.ADMIN_STATE_UP]:
+                    member_status[constants.OPERATING_STATUS] = (
+                        constants.ONLINE)
+                else:
+                    member_status[constants.OPERATING_STATUS] = (
+                        constants.OFFLINE)
+
+            pool_listeners = self._get_pool_listeners(ovn_lb, pool_key)
+            listener_status = []
+            for listener in pool_listeners:
+                listener_status.append(
+                    {constants.ID: listener,
+                     constants.PROVISIONING_STATUS: constants.ACTIVE})
+            status[constants.LISTENERS] = listener_status
+        except Exception:
+            LOG.exception(ovn_const.EXCEPTION_MSG, "update of member")
+            status = {
+                constants.POOLS: [
+                    {constants.ID: member[constants.POOL_ID],
+                     constants.PROVISIONING_STATUS: constants.ACTIVE}],
+                constants.MEMBERS: [
+                    {constants.ID: member[constants.ID],
+                     constants.PROVISIONING_STATUS: constants.ERROR}],
+                constants.LOADBALANCERS: [
+                    {constants.ID: ovn_lb.name,
+                     constants.PROVISIONING_STATUS: constants.ACTIVE}]}
+        return status
+
+    def _get_existing_pool_members(self, pool_id):
+        pool_key, ovn_lb = self._find_ovn_lb_by_pool_id(pool_id)
+        if not ovn_lb:
+            msg = _("Loadbalancer with pool %s does not exist") % pool_key
+            raise driver_exceptions.DriverError(msg)
+        external_ids = dict(ovn_lb.external_ids)
+        return external_ids[pool_key]
+
+    def get_pool_member_id(self, pool_id, mem_addr_port=None):
+        '''Gets Member information
+
+        :param pool_id: ID of the Pool whose member information is required.
+        :param mem_addr_port: Combination of Member Address+Port. Default=None
+        :returns: UUID -- ID of the Member if member exists in pool.
+        :returns: None -- if no member exists in the pool
+        :raises: Exception if Loadbalancer is not found for a Pool ID
+        '''
+        existing_members = self._get_existing_pool_members(pool_id)
+        # Members are saved in OVN in the form of
+        # member1_UUID_IP:Port, member2_UUID_IP:Port
+        # Match the IP:Port for all members with the mem_addr_port
+        # information and return the UUID.
+        for meminf in existing_members.split(','):
+            if mem_addr_port == meminf.split('_')[2]:
+                return meminf.split('_')[1]
+
+    def create_vip_port(self, project_id, lb_id, vip_d):
+        port = {'port': {'name': ovn_const.LB_VIP_PORT_PREFIX + str(lb_id),
+                         'network_id': vip_d[constants.VIP_NETWORK_ID],
+                         'fixed_ips': [{'subnet_id': vip_d['vip_subnet_id']}],
+                         'admin_state_up': True,
+                         'project_id': project_id}}
+        try:
+            port['port']['fixed_ips'][0]['ip_address'] = (
+                vip_d[constants.VIP_ADDRESS])
+        except KeyError:
+            pass
+        neutron_client = clients.get_neutron_client()
+        try:
+            return neutron_client.create_port(port)
+        except n_exc.IpAddressAlreadyAllocatedClient:
+            # Sometimes the VIP is already created (race conditions).
+            # Let's fetch it from the Neutron API.
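+            # The VIP port name is deterministic (LB_VIP_PORT_PREFIX plus
+            # the LB id, as built above), so filtering by network_id and
+            # name should match at most the one port the racing request
+            # created.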
+            ports = neutron_client.list_ports(
+                network_id=vip_d[constants.VIP_NETWORK_ID],
+                name='%s%s' % (ovn_const.LB_VIP_PORT_PREFIX, lb_id))
+            if not ports['ports']:
+                LOG.error('Cannot create/get LoadBalancer VIP port with '
+                          'fixed IP: %s', vip_d[constants.VIP_ADDRESS])
+                status = {
+                    constants.LOADBALANCERS: [{
+                        constants.ID: lb_id,
+                        constants.PROVISIONING_STATUS: constants.ERROR,
+                        constants.OPERATING_STATUS: constants.ERROR}]}
+                self._update_status_to_octavia(status)
+                return
+            # there should only be one port returned
+            port = ports['ports'][0]
+            LOG.debug('VIP Port already exists, uuid: %s', port['id'])
+            return {'port': port}
+
+    def delete_vip_port(self, port_id):
+        neutron_client = clients.get_neutron_client()
+        try:
+            neutron_client.delete_port(port_id)
+        except n_exc.PortNotFoundClient:
+            LOG.warning("Port %s could not be found. Please "
+                        "check Neutron logs. Perhaps port "
+                        "was already deleted.", port_id)
+
+    def handle_vip_fip(self, fip_info):
+        ovn_lb = fip_info['ovn_lb']
+        external_ids = copy.deepcopy(ovn_lb.external_ids)
+        commands = []
+
+        if fip_info['action'] == ovn_const.REQ_INFO_ACTION_ASSOCIATE:
+            external_ids[ovn_const.LB_EXT_IDS_VIP_FIP_KEY] = (
+                fip_info['vip_fip'])
+            vip_fip_info = {
+                ovn_const.LB_EXT_IDS_VIP_FIP_KEY: fip_info['vip_fip']}
+            commands.append(
+                self.ovn_nbdb_api.db_set('Load_Balancer', ovn_lb.uuid,
+                                         ('external_ids', vip_fip_info)))
+        else:
+            external_ids.pop(ovn_const.LB_EXT_IDS_VIP_FIP_KEY)
+            commands.append(
+                self.ovn_nbdb_api.db_remove(
+                    'Load_Balancer', ovn_lb.uuid, 'external_ids',
+                    (ovn_const.LB_EXT_IDS_VIP_FIP_KEY)))
+
+        commands.extend(self._refresh_lb_vips(ovn_lb.uuid, external_ids))
+        self._execute_commands(commands)
+
+    def handle_member_dvr(self, info):
+        pool_key, ovn_lb = self._find_ovn_lb_by_pool_id(info['pool_id'])
+        if not ovn_lb.external_ids.get(ovn_const.LB_EXT_IDS_VIP_FIP_KEY):
+            LOG.debug("LB %(lb)s has no FIP on VIP configured. "
+                      "There is no need to centralize member %(member)s "
+                      "traffic.",
+                      {'lb': ovn_lb.uuid, 'member': info['id']})
+            return
+
+        # Find out if the member has a FIP assigned.
+        neutron_client = clients.get_neutron_client()
+        try:
+            subnet = neutron_client.show_subnet(info['subnet_id'])
+            ls_name = utils.ovn_name(subnet['subnet']['network_id'])
+        except n_exc.NotFound:
+            LOG.exception('Subnet %s not found while trying to '
+                          'fetch its data.', info['subnet_id'])
+            return
+
+        try:
+            ls = self.ovn_nbdb_api.lookup('Logical_Switch', ls_name)
+        except idlutils.RowNotFound:
+            LOG.warning("Logical Switch %s not found. "
+                        "Cannot verify member FIP configuration.",
+                        ls_name)
+            return
+
+        fip = None
+        f = utils.remove_macs_from_lsp_addresses
+        for port in ls.ports:
+            if info['address'] in f(port.addresses):
+                # We found the member port.
+                fip = self.ovn_nbdb_api.db_find_rows(
+                    'NAT', ('external_ids', '=', {
+                        ovn_const.OVN_FIP_PORT_EXT_ID_KEY: port.name})
+                ).execute(check_error=True)
+                fip = fip[0] if fip else fip
+                break
+
+        if not fip:
+            LOG.debug('Member %s has no FIP assigned. '
+                      'There is no need to modify its NAT.',
+                      info['id'])
+            return
+
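+        # NOTE: external_mac/logical_port on a NAT row is what makes OVN
+        # handle a FIP in a distributed (DVR) fashion; clearing them below
+        # forces the member FIP through the gateway chassis, keeping it on
+        # the same path as the load balancer VIP traffic.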
+        if info['action'] == ovn_const.REQ_INFO_MEMBER_ADDED:
+            LOG.info('Member %(member)s is added to Load Balancer %(lb)s '
+                     'and both have FIP assigned. Member FIP %(fip)s '
+                     'needs to be centralized in those conditions. '
+                     'Deleting external_mac/logical_port from it.',
+                     {'member': info['id'],
+                      'lb': ovn_lb.uuid,
+                      'fip': fip.external_ip})
+            self.ovn_nbdb_api.db_clear(
+                'NAT', fip.uuid, 'external_mac').execute(check_error=True)
+            self.ovn_nbdb_api.db_clear(
+                'NAT', fip.uuid, 'logical_port').execute(check_error=True)
+        else:
+            LOG.info('Member %(member)s is deleted from Load Balancer '
+                     '%(lb)s and both have FIP assigned. Member FIP %(fip)s '
+                     'can be decentralized now if the environment has DVR '
+                     'enabled. Updating FIP object for recomputation.',
+                     {'member': info['id'],
+                      'lb': ovn_lb.uuid,
+                      'fip': fip.external_ip})
+            # NOTE(mjozefcz): We don't know if this env is DVR or not.
+            # We should call the Neutron API to do an 'empty' update of
+            # the FIP. It will bump the revision number and trigger
+            # recomputation of the FIP.
+            try:
+                fip_info = neutron_client.show_floatingip(
+                    fip.external_ids[ovn_const.OVN_FIP_EXT_ID_KEY])
+                empty_update = {
+                    "floatingip": {
+                        'description': fip_info['floatingip']['description']}}
+                neutron_client.update_floatingip(
+                    fip.external_ids[ovn_const.OVN_FIP_EXT_ID_KEY],
+                    empty_update)
+            except n_exc.NotFound:
+                LOG.warning('Member %(member)s FIP %(fip)s not found in '
+                            'Neutron. Cannot update it.',
+                            {'member': info['id'],
+                             'fip': fip.external_ip})
diff --git a/ovn_octavia_provider/tests/functional/test_driver.py b/ovn_octavia_provider/tests/functional/test_driver.py
index 3cc9613f..449a1ebd 100644
--- a/ovn_octavia_provider/tests/functional/test_driver.py
+++ b/ovn_octavia_provider/tests/functional/test_driver.py
@@ -35,6 +35,7 @@
 from ovn_octavia_provider import agent as ovn_agent
 from ovn_octavia_provider.common import clients
 from ovn_octavia_provider.common import constants as ovn_const
 from ovn_octavia_provider import driver as ovn_driver
+from ovn_octavia_provider import helper as ovn_helper
 from ovn_octavia_provider.ovsdb import impl_idl_ovn
 
 LR_REF_KEY_HEADER = 'neutron-'
@@ -1179,7 +1180,7 @@
         # with IDL running, but to make it easier for now
         # we can initialize this IDL here instead spawning
         # another process.
-        da_helper = ovn_driver.OvnProviderHelper()
+        da_helper = ovn_helper.OvnProviderHelper()
         events = [ovn_driver.LogicalRouterPortEvent(da_helper),
                   ovn_driver.LogicalSwitchPortUpdateEvent(da_helper)]
         ovn_nb_idl_for_events = impl_idl_ovn.OvnNbIdlForLb(
diff --git a/ovn_octavia_provider/tests/unit/test_driver.py b/ovn_octavia_provider/tests/unit/test_driver.py
index 18e9dccd..0ae54bbc 100644
--- a/ovn_octavia_provider/tests/unit/test_driver.py
+++ b/ovn_octavia_provider/tests/unit/test_driver.py
@@ -27,6 +27,7 @@
 from ovn_octavia_provider import agent as ovn_agent
 from ovn_octavia_provider.common import clients
 from ovn_octavia_provider.common import constants as ovn_const
 from ovn_octavia_provider import driver as ovn_driver
+from ovn_octavia_provider import helper as ovn_helper
 from ovn_octavia_provider.tests.unit import fakes
@@ -68,7 +69,7 @@
     def setUp(self):
         super(TestOvnProviderDriver, self).setUp()
         self.driver = ovn_driver.OvnProviderDriver()
-        add_req_thread = mock.patch.object(ovn_driver.OvnProviderHelper,
+        add_req_thread = mock.patch.object(ovn_helper.OvnProviderHelper,
                                            'add_request')
         self.member_line = (
             'member_%s_%s:%s_%s' %
@@ -207,11 +208,11 @@
             vip_address=self.vip_address,
             vip_network_id=self.vip_network_id)
         mock.patch.object(
-            ovn_driver.OvnProviderHelper, '_find_ovn_lbs',
+            ovn_helper.OvnProviderHelper, '_find_ovn_lbs',
             side_effect=lambda x, protocol=None:
                 self.ovn_lb if protocol else [self.ovn_lb]).start()
         self.mock_find_lb_pool_key = mock.patch.object(
-            ovn_driver.OvnProviderHelper,
+            ovn_helper.OvnProviderHelper,
             '_find_ovn_lb_with_pool_key',
             return_value=self.ovn_lb).start()
@@ -612,7 +613,7 @@
     def setUp(self):
         super(TestOvnProviderHelper, self).setUp()
-        self.helper = ovn_driver.OvnProviderHelper()
+        self.helper = ovn_helper.OvnProviderHelper()
         mock.patch.object(self.helper, '_update_status_to_octavia').start()
         self.listener = {'id': self.listener_id,
                          'loadbalancer_id': self.loadbalancer_id,
@@ -642,7 +643,7 @@
                        'admin_state_up': True}
         self.ovn_nbdb_api = mock.patch.object(self.helper, 'ovn_nbdb_api')
         self.ovn_nbdb_api.start()
-        add_req_thread = mock.patch.object(ovn_driver.OvnProviderHelper,
+        add_req_thread = mock.patch.object(ovn_helper.OvnProviderHelper,
                                            'add_request')
         self.mock_add_request = add_req_thread.start()
         self.ovn_lb = mock.MagicMock()
@@ -669,7 +670,7 @@
             return_value=self.ovn_lb).start()
 
         self.mock_find_ovn_lbs = mock.patch.object(
-            ovn_driver.OvnProviderHelper, '_find_ovn_lbs',
+            ovn_helper.OvnProviderHelper, '_find_ovn_lbs',
             side_effect=lambda x, protocol=None:
                 self.ovn_lb if protocol else [self.ovn_lb])
         self.mock_find_ovn_lbs.start()
@@ -810,7 +811,7 @@
             protocol='TCP',
             admin_state_up='True')
 
-    @mock.patch.object(ovn_driver.OvnProviderHelper, 'lb_create')
+    @mock.patch.object(ovn_helper.OvnProviderHelper, 'lb_create')
     def test__get_or_create_ovn_lb_required_proto_not_found(self, lbc):
         udp_lb = copy.copy(self.ovn_lb)
         udp_lb.protocol = ['udp']
@@ -929,7 +930,7 @@
             mock.call(self.ovn_lb, associate=True, network_id='foo')])
 
     @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client')
-    @mock.patch.object(ovn_driver.OvnProviderHelper, 'delete_vip_port')
+    @mock.patch.object(ovn_helper.OvnProviderHelper, 'delete_vip_port')
     def test_lb_create_exception(self, del_port, net_cli):
         self.helper._find_ovn_lbs.side_effect = [RuntimeError]
         net_cli.return_value.list_ports.return_value = self.ports
@@ -941,7 +942,7 @@
         del_port.assert_called_once_with(self.ports.get('ports')[0]['id'])
 
     @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client')
-    @mock.patch.object(ovn_driver.OvnProviderHelper, 'delete_vip_port')
+    @mock.patch.object(ovn_helper.OvnProviderHelper, 'delete_vip_port')
     def test_lb_delete(self, del_port, net_cli):
         net_cli.return_value.delete_port.return_value = None
         status = self.helper.lb_delete(self.ovn_lb)
@@ -953,7 +954,7 @@
             self.ovn_lb.uuid)
         del_port.assert_called_once_with('foo_port')
 
-    @mock.patch.object(ovn_driver.OvnProviderHelper, 'delete_vip_port')
+    @mock.patch.object(ovn_helper.OvnProviderHelper, 'delete_vip_port')
     def test_lb_delete_row_not_found(self, del_port):
         self.helper._find_ovn_lbs.side_effect = [idlutils.RowNotFound]
         status = self.helper.lb_delete(self.lb)
@@ -964,7 +965,7 @@
         self.helper.ovn_nbdb_api.lb_del.assert_not_called()
         del_port.assert_not_called()
 
-    @mock.patch.object(ovn_driver.OvnProviderHelper, 'delete_vip_port')
+    @mock.patch.object(ovn_helper.OvnProviderHelper, 'delete_vip_port')
     def test_lb_delete_exception(self, del_port):
         self.helper.ovn_nbdb_api.lb_del.side_effect = [RuntimeError]
         status = self.helper.lb_delete(self.lb)
@@ -977,7 +978,7 @@
         del_port.assert_called_once_with('foo_port')
 
     @mock.patch('ovn_octavia_provider.common.clients.get_neutron_client')
-    @mock.patch.object(ovn_driver.OvnProviderHelper, 'delete_vip_port')
+    @mock.patch.object(ovn_helper.OvnProviderHelper, 'delete_vip_port')
     def test_lb_delete_port_not_found(self, del_port, net_cli):
         net_cli.return_value.delete_port.side_effect = (
             [n_exc.PortNotFoundClient])
@@ -1044,7 +1045,7 @@
         self.assertEqual(status['loadbalancers'][0]['provisioning_status'],
                          constants.ACTIVE)
 
-    @mock.patch.object(ovn_driver.OvnProviderHelper, '_refresh_lb_vips')
+    @mock.patch.object(ovn_helper.OvnProviderHelper, '_refresh_lb_vips')
     def test_lb_update_disabled(self, refresh_vips):
         self.lb['admin_state_up'] = False
         status = self.helper.lb_update(self.lb)
@@ -1058,7 +1059,7 @@
             'Load_Balancer', self.ovn_lb.uuid,
             ('external_ids', {'enabled': 'False'}))
 
-    @mock.patch.object(ovn_driver.OvnProviderHelper, '_refresh_lb_vips')
+    @mock.patch.object(ovn_helper.OvnProviderHelper, '_refresh_lb_vips')
     def test_lb_update_enabled(self, refresh_vips):
         # Change the mock, its enabled by default.
         self.ovn_lb.external_ids.update({'enabled': False})
@@ -1074,7 +1075,7 @@
             'Load_Balancer', self.ovn_lb.uuid,
             ('external_ids', {'enabled': 'True'}))
 
-    @mock.patch.object(ovn_driver.OvnProviderHelper, '_refresh_lb_vips')
+    @mock.patch.object(ovn_helper.OvnProviderHelper, '_refresh_lb_vips')
     def test_lb_update_enabled_multiple_protocols(self, refresh_vips):
         self.mock_find_ovn_lbs.stop()
         self.ovn_lb.external_ids.update({'enabled': 'False'})
@@ -1116,7 +1117,7 @@
                          constants.ACTIVE)
         self.helper._find_ovn_lbs.assert_not_called()
 
-    @mock.patch.object(ovn_driver.OvnProviderHelper, '_refresh_lb_vips')
+    @mock.patch.object(ovn_helper.OvnProviderHelper, '_refresh_lb_vips')
     def test_listener_create_disabled(self, refresh_vips):
         self.ovn_lb.external_ids.pop('listener_%s' % self.listener_id)
         status = self.helper.listener_create(self.listener)
@@ -1143,7 +1144,7 @@
         self.assertEqual(status['listeners'][0]['operating_status'],
                          constants.OFFLINE)
 
-    @mock.patch.object(ovn_driver.OvnProviderHelper, '_refresh_lb_vips')
+    @mock.patch.object(ovn_helper.OvnProviderHelper, '_refresh_lb_vips')
     def test_listener_create_enabled(self, refresh_vips):
         self.listener['admin_state_up'] = True
         status = self.helper.listener_create(self.listener)
@@ -1221,7 +1222,7 @@
                          constants.ERROR)
         self.helper.ovn_nbdb_api.db_set.assert_not_called()
 
-    @mock.patch.object(ovn_driver.OvnProviderHelper, '_refresh_lb_vips')
+    @mock.patch.object(ovn_helper.OvnProviderHelper, '_refresh_lb_vips')
     def test_listener_update_exception(self, refresh_vips):
         refresh_vips.side_effect = [RuntimeError]
         status = self.helper.listener_update(self.listener)
@@ -1230,7 +1231,7 @@
         self.assertEqual(status['listeners'][0]['provisioning_status'],
                          constants.ERROR)
 
-    @mock.patch.object(ovn_driver.OvnProviderHelper, '_refresh_lb_vips')
+    @mock.patch.object(ovn_helper.OvnProviderHelper, '_refresh_lb_vips')
     def test_listener_update_listener_enabled(self, refresh_vips):
         self.listener['admin_state_up'] = True
         # Update the listener port.
@@ -1256,7 +1257,7 @@
         refresh_vips.assert_called_once_with(
             self.ovn_lb.uuid, self.ovn_lb.external_ids)
 
-    @mock.patch.object(ovn_driver.OvnProviderHelper, '_refresh_lb_vips')
+    @mock.patch.object(ovn_helper.OvnProviderHelper, '_refresh_lb_vips')
     def test_listener_update_listener_disabled(self, refresh_vips):
         self.listener['admin_state_up'] = False
         status = self.helper.listener_update(self.listener)
@@ -1278,7 +1279,7 @@
         refresh_vips.assert_called_once_with(
             self.ovn_lb.uuid, self.ovn_lb.external_ids)
 
-    @mock.patch.object(ovn_driver.OvnProviderHelper, '_refresh_lb_vips')
+    @mock.patch.object(ovn_helper.OvnProviderHelper, '_refresh_lb_vips')
     def test_listener_update_no_admin_state_up(self, refresh_vips):
         self.listener.pop('admin_state_up')
         status = self.helper.listener_update(self.listener)
@@ -1292,7 +1293,7 @@
         refresh_vips.assert_called_once_with(
             self.ovn_lb.uuid, self.ovn_lb.external_ids)
 
-    @mock.patch.object(ovn_driver.OvnProviderHelper, '_refresh_lb_vips')
+    @mock.patch.object(ovn_helper.OvnProviderHelper, '_refresh_lb_vips')
     def test_listener_update_no_admin_state_up_or_default_pool_id(
             self, refresh_vips):
         self.listener.pop('admin_state_up')
@@ -1336,7 +1337,7 @@
         self.assertEqual(status['listeners'][0]['operating_status'],
                          constants.ERROR)
 
-    @mock.patch.object(ovn_driver.OvnProviderHelper, '_refresh_lb_vips')
+    @mock.patch.object(ovn_helper.OvnProviderHelper, '_refresh_lb_vips')
     def test_listener_delete_external_id(self, refresh_vips):
         status = self.helper.listener_delete(self.listener)
         self.assertEqual(status['loadbalancers'][0]['provisioning_status'],
@@ -1352,7 +1353,7 @@
         refresh_vips.assert_called_once_with(
             self.ovn_lb.uuid, self.ovn_lb.external_ids)
 
-    @mock.patch.object(ovn_driver.OvnProviderHelper, '_is_lb_empty')
+    @mock.patch.object(ovn_helper.OvnProviderHelper, '_is_lb_empty')
     def test_listener_delete_ovn_lb_not_empty(self, lb_empty):
         lb_empty.return_value = False
         self.helper.listener_delete(self.listener)
@@ -1361,7 +1362,7 @@
             'external_ids', 'listener_%s' % self.listener_id)
         self.helper.ovn_nbdb_api.lb_del.assert_not_called()
 
-    @mock.patch.object(ovn_driver.OvnProviderHelper, '_is_lb_empty')
+    @mock.patch.object(ovn_helper.OvnProviderHelper, '_is_lb_empty')
     def test_listener_delete_ovn_lb_empty_octavia_lb_empty(self, lb_empty):
         """That test situation when the OVN and Octavia LBs are empty.
@@ -1379,7 +1380,7 @@
             mock.call('Load_Balancer', self.ovn_lb.uuid,
                       ('protocol', []))])
 
-    @mock.patch.object(ovn_driver.OvnProviderHelper, '_is_lb_empty')
+    @mock.patch.object(ovn_helper.OvnProviderHelper, '_is_lb_empty')
     def test_listener_delete_ovn_lb_empty_octavia_lb_not_empty(self,
                                                                lb_empty):
         """We test if we can remove one LB with not used protocol"""
         ovn_lb_udp = copy.copy(self.ovn_lb)
@@ -1586,7 +1587,7 @@
             'Load_Balancer', self.ovn_lb.uuid,
             'external_ids', 'pool_%s:D' % self.pool_id)
 
-    @mock.patch.object(ovn_driver.OvnProviderHelper, '_is_lb_empty')
+    @mock.patch.object(ovn_helper.OvnProviderHelper, '_is_lb_empty')
     def test_pool_delete_ovn_lb_not_empty(self, lb_empty):
         lb_empty.return_value = False
         self.helper.pool_delete(self.pool)
@@ -1595,7 +1596,7 @@
             'external_ids', 'pool_%s' % self.pool_id)
         self.helper.ovn_nbdb_api.lb_del.assert_not_called()
 
-    @mock.patch.object(ovn_driver.OvnProviderHelper, '_is_lb_empty')
+    @mock.patch.object(ovn_helper.OvnProviderHelper, '_is_lb_empty')
     def test_pool_delete_ovn_lb_empty_lb_empty(self, lb_empty):
         lb_empty.return_value = True
         self.helper.pool_delete(self.pool)
@@ -1608,7 +1609,7 @@
             'Load_Balancer', self.ovn_lb.uuid,
             ('protocol', []))
 
-    @mock.patch.object(ovn_driver.OvnProviderHelper, '_is_lb_empty')
+    @mock.patch.object(ovn_helper.OvnProviderHelper, '_is_lb_empty')
    def test_pool_delete_ovn_lb_empty_lb_not_empty(self, lb_empty):
         ovn_lb_udp = copy.copy(self.ovn_lb)
         self.mock_find_ovn_lbs.stop()
@@ -1640,7 +1641,7 @@
         self.assertEqual(status['members'][0]['provisioning_status'],
                          constants.ACTIVE)
 
-    @mock.patch.object(ovn_driver.OvnProviderHelper, '_add_member')
+    @mock.patch.object(ovn_helper.OvnProviderHelper, '_add_member')
     def test_member_create_exception(self, mock_add_member):
         mock_add_member.side_effect = [RuntimeError]
         status = self.helper.member_create(self.member)
@@ -1762,7 +1763,7 @@
         self.assertEqual(status['listeners'][0]['id'],
                          'listener1')
 
-    @mock.patch.object(ovn_driver.OvnProviderHelper, '_update_member')
+    @mock.patch.object(ovn_helper.OvnProviderHelper, '_update_member')
     def test_member_update_exception(self, mock_update_member):
         mock_update_member.side_effect = [RuntimeError]
         status = self.helper.member_update(self.member)
@@ -1810,7 +1811,7 @@
         self.helper.ovn_nbdb_api.db_set.assert_has_calls(
             expected_calls)
 
-    @mock.patch('ovn_octavia_provider.driver.OvnProviderHelper.'
+    @mock.patch('ovn_octavia_provider.helper.OvnProviderHelper.'
                '_refresh_lb_vips')
     def test_member_delete(self, mock_vip_command):
         status = self.helper.member_delete(self.member)
@@ -1851,7 +1852,7 @@
             ('vips', {}))]
         self.helper.ovn_nbdb_api.db_set.has_calls(expected_calls)
 
-    @mock.patch.object(ovn_driver.OvnProviderHelper, '_remove_member')
+    @mock.patch.object(ovn_helper.OvnProviderHelper, '_remove_member')
     def test_member_delete_exception(self, mock_remove_member):
         mock_remove_member.side_effect = [RuntimeError]
         status = self.helper.member_delete(self.member)
@@ -1962,8 +1963,7 @@
         self.helper.lb_delete_lrp_assoc_handler(lrp)
         self.mock_add_request.assert_not_called()
 
-    @mock.patch.object(ovn_driver.OvnProviderHelper,
-                       '_execute_commands')
+    @mock.patch.object(ovn_helper.OvnProviderHelper, '_execute_commands')
     def test_lb_delete_lrp_assoc_no_net_lb_no_r_lb(self, mock_execute):
         info = {
             'network': self.network,
@@ -1975,8 +1975,7 @@
         self.helper._update_lb_to_lr_association.assert_not_called()
         mock_execute.assert_not_called()
 
-    @mock.patch.object(ovn_driver.OvnProviderHelper,
-                       '_execute_commands')
+    @mock.patch.object(ovn_helper.OvnProviderHelper, '_execute_commands')
     def test_lb_delete_lrp_assoc_no_net_lb_r_lb(self, mock_execute):
         info = {
             'network': self.network,
@@ -1993,8 +1992,7 @@
         self.helper._update_lb_to_lr_association.assert_not_called()
         mock_execute.assert_called_once_with(expected)
 
-    @mock.patch.object(ovn_driver.OvnProviderHelper,
-                       '_execute_commands')
+    @mock.patch.object(ovn_helper.OvnProviderHelper, '_execute_commands')
     def test_lb_delete_lrp_assoc_net_lb_no_r_lb(self, mock_execute):
         info = {
             'network': self.network,
@@ -2007,8 +2005,7 @@
             self.network.load_balancer[0], self.router, delete=True
         )
 
-    @mock.patch.object(ovn_driver.OvnProviderHelper,
-                       '_execute_commands')
+    @mock.patch.object(ovn_helper.OvnProviderHelper, '_execute_commands')
     def test_lb_delete_lrp_assoc(self, mock_execute):
         info = {
             'network': self.network,
@@ -2047,8 +2044,7 @@
         self.helper.lb_create_lrp_assoc_handler(lrp)
         self.mock_add_request.assert_not_called()
 
-    @mock.patch.object(ovn_driver.OvnProviderHelper,
-                       '_execute_commands')
+    @mock.patch.object(ovn_helper.OvnProviderHelper, '_execute_commands')
     def test_lb_create_lrp_assoc(self, mock_execute):
         info = {
             'network': self.network,
@@ -2066,8 +2062,7 @@
         ]
         mock_execute.assert_called_once_with(expected)
 
-    @mock.patch.object(ovn_driver.OvnProviderHelper,
-                       '_execute_commands')
+    @mock.patch.object(ovn_helper.OvnProviderHelper, '_execute_commands')
     def test_lb_create_lrp_assoc_uniq_lb(self, mock_execute):
         info = {
             'network': self.network,
@@ -2142,9 +2137,9 @@
         self.assertListEqual([], result)
 
     @mock.patch.object(
-        ovn_driver.OvnProviderHelper, '_del_lb_to_lr_association')
+        ovn_helper.OvnProviderHelper, '_del_lb_to_lr_association')
     @mock.patch.object(
-        ovn_driver.OvnProviderHelper, '_add_lb_to_lr_association')
+        ovn_helper.OvnProviderHelper, '_add_lb_to_lr_association')
     def test__update_lb_to_lr_association(self, add, delete):
         self._update_lb_to_lr_association.stop()
         self.helper._update_lb_to_lr_association(self.ref_lb1, self.router)
@@ -2154,9 +2149,9 @@
         delete.assert_not_called()
     @mock.patch.object(
-        ovn_driver.OvnProviderHelper, '_del_lb_to_lr_association')
+        ovn_helper.OvnProviderHelper, '_del_lb_to_lr_association')
     @mock.patch.object(
-        ovn_driver.OvnProviderHelper, '_add_lb_to_lr_association')
+        ovn_helper.OvnProviderHelper, '_add_lb_to_lr_association')
     def test__update_lb_to_lr_association_delete(self, add, delete):
         self._update_lb_to_lr_association.stop()
         self.helper._update_lb_to_lr_association(
@@ -2206,7 +2201,7 @@
             self.router.uuid, self.ref_lb1.uuid, if_exists=True)
         self.helper.ovn_nbdb_api.db_set.assert_not_called()
 
-    @mock.patch.object(ovn_driver.OvnProviderHelper, '_find_ls_for_lr')
+    @mock.patch.object(ovn_helper.OvnProviderHelper, '_find_ls_for_lr')
     def test__del_lb_to_lr_association_from_ls(self, f_ls):
         # This test if LB is deleted from Logical_Router_Port
         # Logical_Switch.
@@ -2216,7 +2211,7 @@
             (mock.call('neutron-xyz', self.ref_lb1.uuid, if_exists=True)),
             (mock.call('neutron-qwr', self.ref_lb1.uuid, if_exists=True))])
 
-    @mock.patch.object(ovn_driver.OvnProviderHelper, '_find_ls_for_lr')
+    @mock.patch.object(ovn_helper.OvnProviderHelper, '_find_ls_for_lr')
     def test__add_lb_to_lr_association(self, f_ls):
         lr_ref = 'foo'
         f_ls.return_value = ['neutron-xyz', 'neutron-qwr']
@@ -2488,7 +2483,7 @@
         self.switch_port_event.run(mock.ANY, row, mock.ANY)
         self.mock_add_request.assert_not_called()
 
-    @mock.patch('ovn_octavia_provider.driver.OvnProviderHelper.'
+    @mock.patch('ovn_octavia_provider.helper.OvnProviderHelper.'
                 '_find_ovn_lbs')
     def test_vip_port_update_handler_lb_not_found(self, lb):
         lb.side_effect = [idlutils.RowNotFound for _ in range(5)]
@@ -2502,7 +2497,7 @@
         self.switch_port_event.run(mock.ANY, row, mock.ANY)
         self.mock_add_request.assert_not_called()
 
-    @mock.patch('ovn_octavia_provider.driver.OvnProviderHelper.'
+    @mock.patch('ovn_octavia_provider.helper.OvnProviderHelper.'
                 '_find_ovn_lbs')
     def test_vip_port_update_handler_multiple_lbs(self, lb):
         lb1 = mock.MagicMock()
@@ -2528,7 +2523,7 @@
             mock.call(expected_call(lb1)),
             mock.call(expected_call(lb2))])
 
-    @mock.patch('ovn_octavia_provider.driver.OvnProviderHelper.'
+    @mock.patch('ovn_octavia_provider.helper.OvnProviderHelper.'
                 '_find_ovn_lbs')
     def test_handle_vip_fip_disassociate(self, flb):
         lb = mock.MagicMock()
@@ -2545,7 +2540,7 @@
             mock.call.db_set('Load_Balancer', lb.uuid, ('vips', {}))]
         self.helper.ovn_nbdb_api.assert_has_calls(calls)
 
-    @mock.patch('ovn_octavia_provider.driver.OvnProviderHelper.'
+    @mock.patch('ovn_octavia_provider.helper.OvnProviderHelper.'
                '_find_ovn_lbs')
     def test_handle_vip_fip_associate(self, fb):
         lb = mock.MagicMock()
@@ -2713,10 +2708,10 @@
             net_cli, action=ovn_const.REQ_INFO_MEMBER_DELETED)
 
     def test_ovsdb_connections(self):
-        ovn_driver.OvnProviderHelper.ovn_nbdb_api = None
-        ovn_driver.OvnProviderHelper.ovn_nbdb_api_for_events = None
-        prov_helper1 = ovn_driver.OvnProviderHelper()
-        prov_helper2 = ovn_driver.OvnProviderHelper()
+        ovn_helper.OvnProviderHelper.ovn_nbdb_api = None
+        ovn_helper.OvnProviderHelper.ovn_nbdb_api_for_events = None
+        prov_helper1 = ovn_helper.OvnProviderHelper()
+        prov_helper2 = ovn_helper.OvnProviderHelper()
         # One connection for API requests
         self.assertIs(prov_helper1.ovn_nbdb_api,
                       prov_helper2.ovn_nbdb_api)
@@ -2822,7 +2817,7 @@
         ret = self.helper._get_existing_pool_members(self.pool_id)
         self.assertEqual(ret, self.member_line)
 
-    @mock.patch('ovn_octavia_provider.driver.OvnProviderHelper.'
+    @mock.patch('ovn_octavia_provider.helper.OvnProviderHelper.'
                 '_find_ovn_lb_by_pool_id')
     def test__get_existing_pool_members_exception(self, folbpi):
         folbpi.return_value = (None, None)
@@ -2874,7 +2869,7 @@
         ret = self.helper.check_lb_protocol(self.listener_id, 'TCP')
         self.assertTrue(ret)
 
-    @mock.patch('ovn_octavia_provider.driver.OvnProviderHelper.'
+    @mock.patch('ovn_octavia_provider.helper.OvnProviderHelper.'
                 '_find_ovn_lbs')
     def test_check_lb_protocol_no_lb(self, fol):
         fol.return_value = None