[NSXP,NSXT] Add orphaned LB handling to nsxadmin

Add options to detect and clean up loadbalancer services which are
allocated in NSX but do not exist in Octavia.
Orphaned loadbalancer services prevent routers from being deleted,
and should therefore be cleaned up prior to router deletion.
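
For example, before deleting a router that NSX refuses to release, an
operator might run (illustrative invocation of the operations added
below):

    nsxadmin -r lb-services -o list-orphaned
    nsxadmin -r lb-services -o clean-orphaned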

Change-Id: Ic0ad5175214cff034bd76a16fc11dbea3ccd6b13
Kobi Samoray 2021-10-18 14:03:54 +03:00
parent f3be2be987
commit 5b2151d976
6 changed files with 196 additions and 13 deletions


@@ -344,6 +344,7 @@ V2T migration

- Get compute ports vif ids mapping for the migration::

    nsxadmin -r ports -o list (--property map-file=<filename>)

Config
~~~~~~
@@ -579,6 +580,14 @@ LBaaS

    nsxadmin -r lb-services -o list

- List orphaned NSX LB services::

    nsxadmin -r lb-services -o list-orphaned

- Clean orphaned NSX LB services::

    nsxadmin -r lb-services -o clean-orphaned

- List NSX LB virtual servers::

    nsxadmin -r lb-virtual-servers -o list
@@ -604,7 +613,7 @@ Rate Limit

- Update the NSX rate limit::

    nsxadmin -r rate-limit -o nsx-update --property value=<>

Cluster
~~~~~~~
@@ -696,15 +705,25 @@ NSX Policy Plugin

    nsxadmin -r routers -o recover-tier0 --property tier0=<id> --property az=<name>

- Migrate networks DHCP from MP to Policy (for NSX 3.0 upgrades)::

    nsxadmin -r dhcp-binding -o migrate-to-policy --property dhcp-config=<id>

- Bind the specified dhcp profile to the edge clusters of the specified tier0 GW::

    nsxadmin -r dhcp-binding -o update-dhcp-profile-edge --property dhcp-profile=<id> --property tier0=<id>

- Update tags on a loadbalancer service::

    nsxadmin -r lb-services -o nsx-update-tags

- List orphaned NSX LB services::

    nsxadmin -r lb-services -o list-orphaned

- Clean orphaned NSX LB services::

    nsxadmin -r lb-services -o clean-orphaned

- Delete DB tables related to the MP plugin after migration from MP plugin to policy::

    nsxadmin -r nsx-migrate-t2p -o clean-all
@@ -714,6 +733,7 @@ NSX Policy Plugin

    nsxadmin -r nsx-migrate-v2t -o clean-all

- Disable/Restore Tier0 redistribution of tier1 routes during the V2T migration::

    nsxadmin -r nsx-migrate-v2t -o nsx-redistribute --property action=disable/restore --property tier0s=a,b,c

- Validate external subnets cidrs before V2T migration::


@@ -37,6 +37,18 @@ LOG = logging.getLogger(__name__)

STATUS_CHECKER_COUNT = 10


def get_octavia_rpc_client():
    if cfg.CONF.api_replay_mode:
        topic = constants.DRIVER_TO_OCTAVIA_MIGRATION_TOPIC
    else:
        topic = constants.DRIVER_TO_OCTAVIA_TOPIC
    transport = messaging.get_rpc_transport(cfg.CONF)
    target = messaging.Target(topic=topic, exchange="common",
                              namespace='control', fanout=False,
                              version='1.0')
    return messaging.RPCClient(transport, target)


class NSXOctaviaListener(object):
    @log_helpers.log_method_call
    def __init__(self, loadbalancer=None, listener=None, pool=None,
@@ -46,15 +58,7 @@ class NSXOctaviaListener(object):
                           loadbalancer, member, pool)

    def _init_rpc_messaging(self):
        self.client = get_octavia_rpc_client()

    def _init_rpc_listener(self, healthmonitor, l7policy, l7rule, listener,
                           loadbalancer, member, pool):
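
The extracted helper lets the admin utilities build the same RPC client
the Octavia listener uses. A minimal sketch of the calling pattern it
enables (the empty request context and the method name mirror the
endpoint consumed below, and are assumptions rather than part of this
diff):

    from vmware_nsx.services.lbaas.octavia import octavia_listener

    # Build the oslo.messaging RPC client against the Octavia driver
    # topic, then ask the Octavia driver agent which loadbalancers it
    # still considers active.
    client = octavia_listener.get_octavia_rpc_client()
    active_lb_ids = client.call({}, 'get_active_loadbalancers')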


@@ -17,6 +17,7 @@ from neutron_lib import exceptions as n_exc
from oslo_log import log as logging
from vmware_nsx.services.lbaas.nsx_p.implementation import lb_utils
from vmware_nsx.services.lbaas.octavia import octavia_listener
from vmware_nsx.shell.admin.plugins.common import constants
from vmware_nsx.shell.admin.plugins.common import utils as admin_utils
from vmware_nsx.shell.admin.plugins.nsxp.resources import utils as p_utils
@@ -57,6 +58,86 @@ def update_lb_service_tags(resource, event, trigger, **kwargs):
    LOG.info("Done updating %s Lb services.", n_updated)


def _orphaned_loadbalancer_handler(handler_callback):
    # Retrieve Octavia loadbalancers
    client = octavia_listener.get_octavia_rpc_client()
    o_endpoint = octavia_listener.NSXOctaviaListenerEndpoint(client=client)
    octavia_lb_ids = o_endpoint.get_active_loadbalancers()

    # Retrieve NSX list of LB services
    nsxpolicy = p_utils.get_connected_nsxpolicy()
    service_client = nsxpolicy.load_balancer.lb_service
    services = service_client.list()

    for lb_service in services:
        is_orphan = True
        for tag in lb_service.get('tags', []):
            if (tag['scope'] == 'loadbalancer_id' and
                    tag['tag'] in octavia_lb_ids):
                is_orphan = False
                break
        if is_orphan:
            handler_callback(lb_service)


@admin_utils.output_header
@admin_utils.unpack_payload
def list_orphaned_loadbalancers(resource, event, trigger, **kwargs):
    def _orphan_handler(lb_service):
        LOG.warning('NSX loadbalancer service %s has no valid Octavia '
                    'loadbalancers', lb_service['id'])

    _orphaned_loadbalancer_handler(_orphan_handler)


@admin_utils.output_header
@admin_utils.unpack_payload
def clean_orphaned_loadbalancers(resource, event, trigger, **kwargs):
    def _orphan_handler(lb_service):
        nsxpolicy = p_utils.get_connected_nsxpolicy()
        nsxp_lb = nsxpolicy.load_balancer
        service_client = nsxp_lb.lb_service

        # Cleanup virtual servers
        vs_client = nsxp_lb.virtual_server
        vs_list = vs_client.list()
        for vs in vs_list:
            if (vs.get('lb_service_path') and
                    vs['lb_service_path'] == lb_service.get('path')):
                try:
                    vs_client.delete(vs['id'])
                except Exception as e:
                    LOG.error('Failed to delete virtual server %s from NSX '
                              'loadbalancer service %s with exception (%s)',
                              vs['id'], lb_service['id'], e)

        # Detach LB service from router
        try:
            service_client.update(lb_service['id'], connectivity_path=None)
        except Exception as e:
            LOG.error('Failed to clean up NSX loadbalancer service %s with '
                      'exception (%s)', lb_service['id'], e)

        # Delete LB service
        try:
            service_client.delete(lb_service['id'])
            LOG.info('Cleaned up NSX loadbalancer service %s from router',
                     lb_service['id'])
        except Exception as e:
            LOG.error('Failed to clean up NSX loadbalancer service %s with '
                      'exception (%s)', lb_service['id'], e)

    _orphaned_loadbalancer_handler(_orphan_handler)


registry.subscribe(update_lb_service_tags,
                   constants.LB_SERVICES,
                   shell.Operations.NSX_UPDATE_TAGS.value)
registry.subscribe(list_orphaned_loadbalancers,
                   constants.LB_SERVICES,
                   shell.Operations.LIST_ORPHANED.value)
registry.subscribe(clean_orphaned_loadbalancers,
                   constants.LB_SERVICES,
                   shell.Operations.CLEAN_ORPHANED.value)
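
To make the tag matching concrete, here is a self-contained sketch of
the Policy-side orphan predicate with fabricated sample data (the
'loadbalancer_id' tag scope is the one the handler above keys on):

    octavia_lb_ids = {'lb-uuid-1'}

    lb_services = [
        {'id': 'svc-a', 'tags': [{'scope': 'loadbalancer_id',
                                  'tag': 'lb-uuid-1'}]},  # known to Octavia
        {'id': 'svc-b', 'tags': [{'scope': 'loadbalancer_id',
                                  'tag': 'lb-uuid-2'}]},  # orphaned
        {'id': 'svc-c', 'tags': []},                      # orphaned
    ]

    def is_orphan(lb_service):
        # A service is an orphan unless one of its tags points at a
        # loadbalancer Octavia still knows about.
        return not any(tag['scope'] == 'loadbalancer_id' and
                       tag['tag'] in octavia_lb_ids
                       for tag in lb_service.get('tags', []))

    print([s['id'] for s in lb_services if is_orphan(s)])
    # -> ['svc-b', 'svc-c']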


@@ -19,6 +19,7 @@ from neutron_lib import context as neutron_context
from vmware_nsx.db import db as nsx_db
from vmware_nsx.services.lbaas.nsx_v3.implementation import lb_utils
from vmware_nsx.services.lbaas.octavia import octavia_listener
from vmware_nsx.shell.admin.plugins.common import constants
from vmware_nsx.shell.admin.plugins.common import formatters
from vmware_nsx.shell.admin.plugins.common import utils as admin_utils
@@ -124,6 +125,75 @@ def nsx_update_router_lb_advertisement(resource, event, trigger, **kwargs):
    LOG.info("Done.")


def _orphaned_loadbalancer_handler(handler_callback):
    # Retrieve Octavia loadbalancers
    client = octavia_listener.get_octavia_rpc_client()
    o_endpoint = octavia_listener.NSXOctaviaListenerEndpoint(client=client)
    octavia_lb_ids = o_endpoint.get_active_loadbalancers()

    # Retrieve the NSX list of LB services and their virtual servers
    nsxlib = utils.get_connected_nsxlib()
    nsxlib_lb = nsxlib.load_balancer
    lb_services = nsxlib_lb.service.list()
    vs_client = nsxlib_lb.virtual_server

    for lb_service in lb_services.get('results', []):
        is_orphan = True
        for vs_id in lb_service.get('virtual_server_ids', []):
            vs = vs_client.get(vs_id)
            for tag in vs.get('tags', []):
                if tag['scope'] == 'os-lbaas-lb-id':
                    lb_id = tag['tag']
                    if lb_id in octavia_lb_ids:
                        is_orphan = False
                        break
        if is_orphan:
            handler_callback(lb_service)


@admin_utils.output_header
@admin_utils.unpack_payload
def list_orphaned_loadbalancers(resource, event, trigger, **kwargs):
    def _orphan_handler(lb_service):
        LOG.warning('NSX loadbalancer service %s has no valid Octavia '
                    'loadbalancers', lb_service['id'])

    _orphaned_loadbalancer_handler(_orphan_handler)


@admin_utils.output_header
@admin_utils.unpack_payload
def clean_orphaned_loadbalancers(resource, event, trigger, **kwargs):
    def _orphan_handler(lb_service):
        nsxlib = utils.get_connected_nsxlib()
        nsxlib_lb = nsxlib.load_balancer

        # Detach the LB service from its router before deleting it
        if lb_service.get('attachment'):
            try:
                nsxlib_lb.service.update(lb_service['id'], attachment=None)
            except Exception as e:
                LOG.error('Failed to detach NSX loadbalancer service %s '
                          'with error %s', lb_service['id'], e)

        # Delete the LB service
        try:
            nsxlib_lb.service.delete(lb_service['id'])
            LOG.info('Cleaned up NSX loadbalancer service %s',
                     lb_service['id'])
        except Exception as e:
            LOG.error('Failed to cleanup NSX loadbalancer service %s with '
                      'error %s', lb_service['id'], e)

    _orphaned_loadbalancer_handler(_orphan_handler)


registry.subscribe(nsx_update_router_lb_advertisement,
                   constants.LB_ADVERTISEMENT,
                   shell.Operations.NSX_UPDATE.value)
registry.subscribe(list_orphaned_loadbalancers,
                   constants.LB_SERVICES,
                   shell.Operations.LIST_ORPHANED.value)
registry.subscribe(clean_orphaned_loadbalancers,
                   constants.LB_SERVICES,
                   shell.Operations.CLEAN_ORPHANED.value)
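
On the MP (NSX-T) side the Octavia loadbalancer id is tagged on the
virtual servers rather than on the LB service itself, so the check is
indirect. A self-contained sketch with fabricated data ('os-lbaas-lb-id'
is the tag scope the handler above inspects):

    octavia_lb_ids = {'lb-uuid-1'}

    virtual_servers = {
        'vs-1': {'tags': [{'scope': 'os-lbaas-lb-id', 'tag': 'lb-uuid-1'}]},
        'vs-2': {'tags': [{'scope': 'os-lbaas-lb-id', 'tag': 'lb-uuid-2'}]},
    }

    lb_services = [
        {'id': 'svc-a', 'virtual_server_ids': ['vs-1']},  # known to Octavia
        {'id': 'svc-b', 'virtual_server_ids': ['vs-2']},  # orphaned
        {'id': 'svc-c', 'virtual_server_ids': []},        # orphaned
    ]

    def is_orphan(lb_service):
        # Walk the service's virtual servers; any tag that maps back to
        # an active Octavia loadbalancer clears the service.
        for vs_id in lb_service.get('virtual_server_ids', []):
            for tag in virtual_servers[vs_id].get('tags', []):
                if (tag['scope'] == 'os-lbaas-lb-id' and
                        tag['tag'] in octavia_lb_ids):
                    return False
        return True

    print([s['id'] for s in lb_services if is_orphan(s)])
    # -> ['svc-b', 'svc-c']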


@@ -40,6 +40,8 @@ class Operations(enum.Enum):
    LIST_MISMATCHES = 'list-mismatches'
    FIX_MISMATCH = 'fix-mismatch'
    LIST_UNUSED = 'list-unused'
    LIST_ORPHANED = 'list-orphaned'
    CLEAN_ORPHANED = 'clean-orphaned'
    NEUTRON_LIST = 'neutron-list'
    NEUTRON_CLEAN = 'neutron-clean'
@ -151,7 +153,9 @@ nsxv3_resources = {
[Operations.LIST.value,
Operations.NSX_CLEAN.value]),
constants.LB_SERVICES: Resource(constants.LB_SERVICES,
[Operations.LIST.value]),
[Operations.LIST.value,
Operations.LIST_ORPHANED.value,
Operations.CLEAN_ORPHANED.value]),
constants.LB_VIRTUAL_SERVERS: Resource(constants.LB_VIRTUAL_SERVERS,
[Operations.LIST.value]),
constants.LB_POOLS: Resource(constants.LB_POOLS,
@@ -297,7 +301,9 @@ nsxp_resources = {
                                 Operations.RECOVER_TIER0.value,
                                 Operations.UPDATE_FIREWALL_MATCH.value]),
    constants.LB_SERVICES: Resource(constants.LB_SERVICES,
                                    [Operations.NSX_UPDATE_TAGS.value,
                                     Operations.LIST_ORPHANED.value,
                                     Operations.CLEAN_ORPHANED.value]),
    constants.CERTIFICATE: Resource(constants.CERTIFICATE,
                                    [Operations.GENERATE.value,
                                     Operations.SHOW.value,
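
These Resource tables are what nsxadmin consults when validating a
-r/-o pair, so the new operations become legal CLI input only once they
are listed here. Roughly, the check amounts to the following
(simplified stand-in, not the actual shell code):

    nsxp_resources = {
        'lb-services': ['nsx-update-tags', 'list-orphaned',
                        'clean-orphaned'],
    }

    def validate(resource, op):
        # Reject any operation that the resource does not declare.
        ops = nsxp_resources.get(resource, [])
        if op not in ops:
            raise SystemExit('Operation %s not supported for resource %s'
                             % (op, resource))

    validate('lb-services', 'clean-orphaned')  # accepted after this change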


@@ -33,6 +33,7 @@ from vmware_nsx._i18n import _
from vmware_nsx.common import config # noqa
from vmware_nsx.db import nsxv_db
from vmware_nsx.dvs import dvs_utils
from vmware_nsx.services.lbaas.octavia import octavia_listener
from vmware_nsx.shell.admin.plugins.common import utils as admin_utils
from vmware_nsx.shell.admin.plugins.nsxp.resources import utils as nsxp_utils
from vmware_nsx.shell.admin.plugins.nsxv.resources import migration
@@ -76,6 +77,7 @@ class AbstractTestAdminUtils(base.BaseTestCase, metaclass=abc.ABCMeta):
        mock_query = mock.patch(
            "vmware_nsx.shell.admin.plugins.common.utils.query_yes_no")
        mock_query.start()
        octavia_listener.get_octavia_rpc_client = mock.Mock()

    @abc.abstractmethod
    def _get_plugin_name(self):
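
The mock above replaces the module attribute so that no admin-utility
test opens a real messaging transport. An equivalent, self-cleaning
variant using mock.patch might look like this (illustrative alternative,
not what the commit does):

    from unittest import mock

    patcher = mock.patch('vmware_nsx.services.lbaas.octavia.'
                         'octavia_listener.get_octavia_rpc_client')
    patcher.start()
    # Inside a BaseTestCase setUp(), undo the patch automatically:
    # self.addCleanup(patcher.stop)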