From 3c07d6bc364dd743508545f7f89e00cf8f0e493a Mon Sep 17 00:00:00 2001 From: Kobi Samoray Date: Mon, 29 Jun 2020 17:18:32 +0300 Subject: [PATCH] NSXP: Update Octavia with object statuses Send object updates about statuses: loadbalancers, listeners, pools, and members. Change-Id: Ifd893818c2ddb1325f3bed9f618b72754ed0689f --- vmware_nsx/plugins/nsx_p/plugin.py | 6 +- vmware_nsx/services/lbaas/lb_const.py | 3 + .../nsx_p/implementation/loadbalancer_mgr.py | 114 ++++++++++++++---- .../services/lbaas/octavia/octavia_driver.py | 16 +++ .../lbaas/octavia/octavia_listener.py | 20 ++- 5 files changed, 127 insertions(+), 32 deletions(-) diff --git a/vmware_nsx/plugins/nsx_p/plugin.py b/vmware_nsx/plugins/nsx_p/plugin.py index ff72f81aff..d6e8fabddf 100644 --- a/vmware_nsx/plugins/nsx_p/plugin.py +++ b/vmware_nsx/plugins/nsx_p/plugin.py @@ -527,6 +527,9 @@ class NsxPolicyPlugin(nsx_plugin_common.NsxPluginV3Base): def _get_octavia_stats_getter(self): return listener_mgr.stats_getter + def _get_octavia_status_getter(self): + return loadbalancer_mgr.status_getter + def _init_lb_profiles(self): ssl_profile_client = self.nsxpolicy.load_balancer.client_ssl_profile with locking.LockManager.get_lock('nsxp_lb_profiles_init'): @@ -553,7 +556,8 @@ class NsxPolicyPlugin(nsx_plugin_common.NsxPluginV3Base): self.octavia_stats_collector = ( octavia_listener.NSXOctaviaStatisticsCollector( self, - self._get_octavia_stats_getter())) + self._get_octavia_stats_getter(), + self._get_octavia_status_getter())) def _init_octavia(self): octavia_objects = self._get_octavia_objects() diff --git a/vmware_nsx/services/lbaas/lb_const.py b/vmware_nsx/services/lbaas/lb_const.py index 0e3d7cc471..5f7dc5d838 100644 --- a/vmware_nsx/services/lbaas/lb_const.py +++ b/vmware_nsx/services/lbaas/lb_const.py @@ -132,5 +132,8 @@ OFFLINE = 'OFFLINE' DEGRADED = 'DEGRADED' ENABLED = 'ENABLED' DISABLED = 'DISABLED' +ACTIVE = 'ACTIVE' +ERROR = 'ERROR' +UNKNOWN = 'UNKNOWN' VMWARE_LB_VIP_OWNER = 'vmware-lb-vip' diff --git 
a/vmware_nsx/services/lbaas/nsx_p/implementation/loadbalancer_mgr.py b/vmware_nsx/services/lbaas/nsx_p/implementation/loadbalancer_mgr.py index 754a8bb472..cfb8d027d9 100644 --- a/vmware_nsx/services/lbaas/nsx_p/implementation/loadbalancer_mgr.py +++ b/vmware_nsx/services/lbaas/nsx_p/implementation/loadbalancer_mgr.py @@ -276,20 +276,11 @@ class EdgeLoadBalancerManagerFromDict(base_mgr.NsxpLoadbalancerBaseManager): return {} # get the loadbalancer status from the LB service - lb_status = lb_const.ONLINE lb_status_results = service_status.get('results') + lb_status = lb_const.ONLINE if lb_status_results: result = lb_status_results[0] - if result.get('service_status'): - # Use backend service_status - lb_status = self._nsx_status_to_lb_status( - result['service_status']) - elif result.get('alarm'): - # No status, but has alarms -> ERROR - lb_status = lb_const.OFFLINE - else: - # Unknown - assume it is ok - lb_status = lb_const.ONLINE + lb_status = _get_octavia_lb_status(result) statuses = {lb_const.LOADBALANCERS: [{'id': id, 'status': lb_status}], lb_const.LISTENERS: [], @@ -300,21 +291,92 @@ class EdgeLoadBalancerManagerFromDict(base_mgr.NsxpLoadbalancerBaseManager): # to add the listeners statuses from the virtual servers statuses return statuses - def _nsx_status_to_lb_status(self, nsx_status): - if not nsx_status: - # default fallback - return lb_const.ONLINE - - # Statuses that are considered ONLINE: - if nsx_status.upper() in ['UP', 'UNKNOWN', 'PARTIALLY_UP', - 'NO_STANDBY']: - return lb_const.ONLINE - # Statuses that are considered OFFLINE: - if nsx_status.upper() in ['PRIMARY_DOWN', 'DETACHED', 'DOWN', 'ERROR']: - return lb_const.OFFLINE - if nsx_status.upper() == 'DISABLED': - return lb_const.DISABLED +def _nsx_status_to_lb_status(nsx_status): + if not nsx_status: # default fallback - LOG.debug("NSX LB status %s - interpreted as ONLINE", nsx_status) return lb_const.ONLINE + + # Statuses that are considered ONLINE: + if nsx_status.upper() in ['UP', 'UNKNOWN', 
'PARTIALLY_UP', + 'NO_STANDBY']: + return lb_const.ONLINE + # Statuses that are considered OFFLINE: + if nsx_status.upper() in ['PRIMARY_DOWN', 'DETACHED', 'DOWN', 'ERROR']: + return lb_const.OFFLINE + if nsx_status.upper() == 'DISABLED': + return lb_const.DISABLED + + # default fallback + LOG.debug("NSX LB status %s - interpreted as ONLINE", nsx_status) + return lb_const.ONLINE + + +def _get_octavia_lb_status(result): + if result.get('service_status'): + # Use backend service_status + lb_status = _nsx_status_to_lb_status( + result['service_status']) + elif result.get('alarm'): + # No status, but has alarms -> OFFLINE + lb_status = lb_const.OFFLINE + else: + # Unknown - assume it is ok + lb_status = lb_const.ONLINE + return lb_status + + +def status_getter(context, core_plugin): + nsxlib_lb = core_plugin.nsxpolicy.load_balancer + lb_client = nsxlib_lb.lb_service + lbs = lb_client.list() + lb_statuses = [] + lsn_statuses = [] + pool_statuses = [] + member_statuses = [] + for lb in lbs: + try: + service_status = lb_client.get_status(lb['id']) + if not isinstance(service_status, dict): + service_status = {} + except nsxlib_exc.ManagerError: + LOG.warning("LB service %(lbs)s is not found", + {'lbs': lb['id']}) + service_status = {} + lb_status_results = service_status.get('results') + if lb_status_results: + result = lb_status_results[0] + lb_operating_status = _get_octavia_lb_status(result) + for vs_status in result.get('virtual_servers', []): + vs_id = lib_p_utils.path_to_id( + vs_status['virtual_server_path']) + lsn_statuses.append({ + 'id': vs_id, + 'operating_status': _nsx_status_to_lb_status( + vs_status['status'])}) + for pool_status in result.get('pools', []): + pool_id = lib_p_utils.path_to_id(pool_status['pool_path']) + pool_statuses.append({ + 'id': pool_id, + 'operating_status': _nsx_status_to_lb_status( + pool_status['status'])}) + for member in pool_status.get('members', []): + member_statuses.append({ + 'pool_id': pool_id, + 'member_ip': 
member.get('ip_address'), + 'operating_status': _nsx_status_to_lb_status( + member['status'])}) + + else: + lb_operating_status = lb_const.OFFLINE + + for tag in lb['tags']: + if tag['scope'] == 'loadbalancer_id': + lb_statuses.append( + {'id': tag['tag'], + 'operating_status': lb_operating_status}) + + return {lb_const.LOADBALANCERS: lb_statuses, + lb_const.LISTENERS: lsn_statuses, + lb_const.POOLS: pool_statuses, + lb_const.MEMBERS: member_statuses} diff --git a/vmware_nsx/services/lbaas/octavia/octavia_driver.py b/vmware_nsx/services/lbaas/octavia/octavia_driver.py index e1a86bdf64..e6264d94c9 100644 --- a/vmware_nsx/services/lbaas/octavia/octavia_driver.py +++ b/vmware_nsx/services/lbaas/octavia/octavia_driver.py @@ -545,10 +545,26 @@ class NSXOctaviaDriver(driver_base.ProviderDriver): class NSXOctaviaDriverEndpoint(driver_lib.DriverLibrary): target = messaging.Target(namespace="control", version='1.0') + def __init__(self, status_socket=driver_lib.DEFAULT_STATUS_SOCKET, + stats_socket=driver_lib.DEFAULT_STATS_SOCKET, **kwargs): + super(NSXOctaviaDriverEndpoint, self).__init__( + status_socket, stats_socket, **kwargs) + self.repositories = repositories.Repositories() + @log_helpers.log_method_call def update_loadbalancer_status(self, ctxt, status): # refresh the driver lib session self.db_session = db_apis.get_session() + for member in status.get('members', []): + if member.get('id'): + pass + elif member.get('member_ip') and member.get('pool_id'): + db_member = self.repositories.member.get( + self.db_session, + pool_id=member['pool_id'], + ip_address=member['member_ip']) + if db_member: + member['id'] = db_member.id try: return super(NSXOctaviaDriverEndpoint, self).update_loadbalancer_status(status) diff --git a/vmware_nsx/services/lbaas/octavia/octavia_listener.py b/vmware_nsx/services/lbaas/octavia/octavia_listener.py index 3d03897e66..be9494e5de 100644 --- a/vmware_nsx/services/lbaas/octavia/octavia_listener.py +++ 
b/vmware_nsx/services/lbaas/octavia/octavia_listener.py @@ -300,6 +300,10 @@ class NSXOctaviaListenerEndpoint(object): kw = {'statistics': statistics} self.client.cast({}, 'update_listener_statistics', **kw) + def update_loadbalancer_status(self, status): + kw = {'status': status} + self.client.cast({}, 'update_loadbalancer_status', **kw) + @log_helpers.log_method_call def loadbalancer_create(self, ctxt, loadbalancer): ctx = neutron_context.Context(None, loadbalancer['project_id']) @@ -625,9 +629,11 @@ class NSXOctaviaListenerEndpoint(object): class NSXOctaviaStatisticsCollector(object): - def __init__(self, core_plugin, listener_stats_getter): + def __init__(self, core_plugin, listener_stats_getter, + loadbalancer_status_getter=None): self.core_plugin = core_plugin self.listener_stats_getter = listener_stats_getter + self.loadbalancer_status_getter = loadbalancer_status_getter if cfg.CONF.octavia_stats_interval: eventlet.spawn_n(self.thread_runner, cfg.CONF.octavia_stats_interval) @@ -646,8 +652,12 @@ class NSXOctaviaStatisticsCollector(object): listeners_stats = self.listener_stats_getter( context, self.core_plugin) - if not listeners_stats: + if listeners_stats: # Avoid sending empty stats - return - stats = {'listeners': listeners_stats} - endpoint.update_listener_statistics(stats) + stats = {'listeners': listeners_stats} + endpoint.update_listener_statistics(stats) + + if self.loadbalancer_status_getter: + loadbalancer_status = self.loadbalancer_status_getter( + context, self.core_plugin) + endpoint.update_loadbalancer_status(loadbalancer_status)