From 8bb72b792cfba988bc95fd8f893cfd0b71ab58cf Mon Sep 17 00:00:00 2001
From: Yan Wang
Date: Tue, 31 Aug 2021 18:07:01 +0800
Subject: [PATCH] Add functions to nsxadmin tool

Add functions to auto-recover the NSX-T tier1 routers' tier0 binding
info, the segments' metadata proxy config and the dhcp profile's edge
cluster binding.

Change-Id: Id4c2d98c9666ba69da06a3375bf8284cd64f5ef4
---
 doc/source/admin_util.rst                     | 12 +++
 .../admin/plugins/nsxp/resources/networks.py  | 94 +++++++++++++++++++
 .../admin/plugins/nsxp/resources/routers.py   | 94 ++++++++++++++++++-
 vmware_nsx/shell/resources.py                 | 29 +++---
 .../tests/unit/shell/test_admin_utils.py      |  6 +-
 5 files changed, 222 insertions(+), 13 deletions(-)

diff --git a/doc/source/admin_util.rst b/doc/source/admin_util.rst
index 56f9d92a88..9df80ac666 100644
--- a/doc/source/admin_util.rst
+++ b/doc/source/admin_util.rst
@@ -671,6 +671,10 @@ NSX Policy Plugin

     nsxadmin -r networks -o nsx-update-state

+- Update the metadata proxy configuration of all segments in a specified availability zone::
+
+    nsxadmin -r networks -o update-metadata --property metadata-proxy= --property az=
+
 - List all the neutron routers together with their NSX Policy objects and realization state::

     nsxadmin -r routers -o list
@@ -687,9 +691,17 @@ NSX Policy Plugin

     nsxadmin -r routers -o update-nat-firewall-match --property firewall-match=external/internal

+- Reconfigure the tier1 routers with a new tier0 GW at the NSX backend and update the neutron network binding DB with the new tier0 GW for an availability zone::
+
+    nsxadmin -r routers -o recover-tier0 --property tier0= --property az=
+
 - Migrate networks DHCP from MP to Policy (for NSX 3.0 upgrades)::

     nsxadmin -r dhcp-binding -o migrate-to-policy --property dhcp-config=

+- Bind the specified dhcp profile to the edge cluster of the specified tier0 GW::
+
+    nsxadmin -r dhcp-binding -o update-dhcp-profile-edge --property dhcp-profile= --property tier0=
+
 - Update tags on a loadbalancer service

     nsxadmin -r lb-services -o nsx-update-tags
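All three new operations take their input as repeated --property key=value options, as shown above. A minimal sketch of how those options are assumed to reach the admin callbacks once admin_utils.parse_multi_keyval_opt (used by the code below) has flattened them into a dict; the values here are made up for illustration::

    # Hypothetical illustration of the --property parsing the new callbacks
    # rely on; 'mdproxy-uuid' and 'az1' are invented example values.
    kwargs = {'property': ['metadata-proxy=mdproxy-uuid', 'az=az1']}
    properties = dict(p.split('=', 1) for p in kwargs['property'])
    assert properties == {'metadata-proxy': 'mdproxy-uuid', 'az': 'az1'}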
" + "Add --property metadata-proxy= --property az=") + if not kwargs.get('property'): + LOG.error("%s", errmsg) + return + properties = admin_utils.parse_multi_keyval_opt(kwargs['property']) + metaproxy = properties.get('metadata-proxy') + az = properties.get('az') + if not metaproxy or not az: + LOG.error("%s", errmsg) + raise SystemExit(errmsg) + nsxpolicy = p_utils.get_connected_nsxpolicy() + try: + nsxpolicy.md_proxy.get(metaproxy) + except Exception as e: + LOG.error("Error while retrieving NSX metadata proxy %s: %s", + metaproxy, e) + raise SystemExit(e) + ctx = context.get_admin_context() + with p_utils.NsxPolicyPluginWrapper() as plugin: + nets = plugin.get_networks(ctx) + for net in nets: + if plugin._network_is_external(ctx, net['id']): + continue + network_az = plugin.get_network_az_by_net_id(ctx, net['id']) + if az == network_az.name: + seg_id = plugin._get_network_nsx_segment_id(ctx, net['id']) + try: + nsxpolicy.segment.update(seg_id, + metadata_proxy_id=metaproxy) + except Exception as e: + LOG.error("Failed to update segment %s metadata proxy" + " configuration: %s", + seg_id, e) + raise SystemExit(e) + else: + LOG.info("Updated segment %s to metadata proxy %s", + seg_id, metaproxy) + LOG.info("Successfully updated all the networks' metadata proxy" + " configuration.") + + +@admin_utils.output_header +def update_dhcp_profile_edge(resource, event, trigger, **kwargs): + """ + Bind the specified dhcp profile to the edge clusters of tier0 GW + """ + errmsg = ("Need to specify dhcp profile ID and tier0 GW ID. Add " + "--property dhcp-profile= --property tier0=") + if not kwargs.get('property'): + LOG.error("%s", errmsg) + return + properties = admin_utils.parse_multi_keyval_opt(kwargs['property']) + dhcp_profile = properties.get('dhcp-profile') + tier0 = properties.get('tier0') + if not dhcp_profile or not tier0: + LOG.error("%s", errmsg) + raise SystemExit(errmsg) + nsxpolicy = p_utils.get_connected_nsxpolicy() + try: + nsxpolicy.tier0.get(tier0) + except Exception as e: + LOG.error("Tier0 logical router %s was not found: %s", tier0, e) + raise SystemExit(e) + edge_path = nsxpolicy.tier0.get_edge_cluster_path(tier0) + if edge_path: + try: + nsxpolicy.dhcp_server_config.update(dhcp_profile, + edge_cluster_path=edge_path) + except Exception as e: + LOG.error("Failed to bind dhcp profile %s to edge cluster %s: %s", + dhcp_profile, edge_path, e) + raise SystemExit(e) + else: + LOG.info("Successfully updated dhcp profile %s to edge cluster %s", + dhcp_profile, edge_path) + else: + LOG.error("Tier0 logical router %s miss the edge clusters binding." 
+ "Skip the dhcp profile update action", tier0) + + registry.subscribe(update_admin_state, constants.NETWORKS, shell.Operations.NSX_UPDATE_STATE.value) @@ -150,3 +236,11 @@ registry.subscribe(update_admin_state, registry.subscribe(migrate_dhcp_to_policy, constants.DHCP_BINDING, shell.Operations.MIGRATE_TO_POLICY.value) + +registry.subscribe(update_metadata, + constants.NETWORKS, + shell.Operations.UPDATE_METADATA.value) + +registry.subscribe(update_dhcp_profile_edge, + constants.DHCP_BINDING, + shell.Operations.UPDATE_DHCP_EDGE.value) diff --git a/vmware_nsx/shell/admin/plugins/nsxp/resources/routers.py b/vmware_nsx/shell/admin/plugins/nsxp/resources/routers.py index cc9bea3f27..4432f19140 100644 --- a/vmware_nsx/shell/admin/plugins/nsxp/resources/routers.py +++ b/vmware_nsx/shell/admin/plugins/nsxp/resources/routers.py @@ -119,6 +119,94 @@ def update_tier0(resource, event, trigger, **kwargs): LOG.info("Done.") +@admin_utils.output_header +def recover_tier0(resource, event, trigger, **kwargs): + """ + Reconfigure the tier1 routers with tier0 GW at NSX backend and update the + neutron external network's physical network binding + """ + errmsg = ("Need to specify tier0 ID and availability-zone. " + "Add --property tier0= --property az=") + if not kwargs.get('property'): + LOG.error("%s", errmsg) + return + properties = admin_utils.parse_multi_keyval_opt(kwargs['property']) + tier0 = properties.get('tier0') + az = properties.get('az') + if not tier0 or not az: + LOG.error("%s", errmsg) + raise SystemExit(errmsg) + # Verify the id of the tier0 + nsxpolicy = p_utils.get_connected_nsxpolicy() + try: + nsxpolicy.tier0.get(tier0) + except Exception as e: + LOG.error("An error occurred while retrieving Tier0 gw router %s: %s", + tier0, e) + raise SystemExit(e) + tier0_edge_cluster = nsxpolicy.tier0.get_edge_cluster_path(tier0) + if not tier0_edge_cluster: + LOG.error("Tier0 gw router %s does not have an edge cluster " + "configured", tier0) + return + ctx = context.get_admin_context() + plugin = RoutersPlugin() + neutron_routers = plugin.get_routers(ctx) + if not neutron_routers: + LOG.info("There are not any neutron routers found") + with p_utils.NsxPolicyPluginWrapper() as core_plugin: + for router in neutron_routers: + router_obj = core_plugin._get_router(ctx, router['id']) + router_az = core_plugin._get_router_az_obj(router_obj) + if router_obj.gw_port_id and az == router_az.name: + old_tier0_path = nsxpolicy.tier1.get(router['id']).\ + get('tier0_path') + if old_tier0_path: + old_tier0_edge_cluster_path = nsxpolicy.tier0.\ + get_edge_cluster_path(old_tier0_path.split('/')[-1]) + # Update tier1 routers GW to point to the tier0 in the backend + try: + nsxpolicy.tier1.update(router['id'], tier0=tier0) + except Exception as e: + LOG.error("Failed to update T0 uplink for router %s: %s", + router['id'], e) + raise SystemExit(e) + else: + LOG.info("Updated router %s uplink port", router['id']) + # Update tier1 routers' edge cluster information to new + # tier0's edge cluster only if the tier1 router's old edge + # cluster bind to the same edge cluster of old tier0 router + old_tier1_edge_cluster_path = nsxpolicy.tier1.\ + get_edge_cluster_path(router['id']) + if old_tier1_edge_cluster_path and \ + (old_tier1_edge_cluster_path == + old_tier0_edge_cluster_path): + try: + nsxpolicy.tier1.\ + set_edge_cluster_path(router['id'], + tier0_edge_cluster) + except Exception as e: + LOG.error("Failed to update router %s edge cluster:" + " %s", router['id'], e) + raise SystemExit(e) + else: + LOG.info("Updated 
diff --git a/vmware_nsx/shell/resources.py b/vmware_nsx/shell/resources.py
index 571f8bdb38..54d92be480 100644
--- a/vmware_nsx/shell/resources.py
+++ b/vmware_nsx/shell/resources.py
@@ -76,6 +76,9 @@ class Operations(enum.Enum):
     VALIDATE = 'validate'
     REUSE = 'reuse'
     UPDATE_TIER0 = 'update-tier0'
+    RECOVER_TIER0 = 'recover-tier0'
+    UPDATE_METADATA = 'update-metadata'
+    UPDATE_DHCP_EDGE = 'update-dhcp-profile-edge'
     UPDATE_FIREWALL_MATCH = 'update-nat-firewall-match'
     SET_STATUS_ERROR = 'set-status-error'
     CHECK_COMPUTE_CLUSTERS = 'check-compute-clusters'
@@ -122,7 +125,8 @@ nsxv3_resources = {
                       Operations.NSX_UPDATE_RULES.value,
                       Operations.NSX_UPDATE_DHCP_RELAY.value,
                       Operations.NSX_ENABLE_STANDBY_RELOCATION.value,
-                      Operations.UPDATE_TIER0.value]),
+                      Operations.UPDATE_TIER0.value,
+                      Operations.RECOVER_TIER0.value]),
     constants.DHCP_BINDING: Resource(constants.DHCP_BINDING,
                                      [Operations.LIST.value,
                                       Operations.NSX_RECREATE.value]),
@@ -282,12 +286,15 @@ nsxp_resources = {
                                 [Operations.LIST.value]),
     constants.NETWORKS: Resource(constants.NETWORKS,
                                  [Operations.LIST.value,
-                                  Operations.NSX_UPDATE_STATE.value]),
+                                  Operations.NSX_UPDATE_STATE.value,
+                                  Operations.UPDATE_METADATA.value]),
     constants.DHCP_BINDING: Resource(constants.DHCP_BINDING,
-                                     [Operations.MIGRATE_TO_POLICY.value]),
+                                     [Operations.MIGRATE_TO_POLICY.value,
+                                      Operations.UPDATE_DHCP_EDGE.value]),
     constants.ROUTERS: Resource(constants.ROUTERS,
                                 [Operations.LIST.value,
                                  Operations.UPDATE_TIER0.value,
+                                 Operations.RECOVER_TIER0.value,
                                  Operations.UPDATE_FIREWALL_MATCH.value]),
     constants.LB_SERVICES: Resource(constants.LB_SERVICES,
                                     [Operations.NSX_UPDATE_TAGS.value]),
@@ -348,11 +355,11 @@ def _get_choices():
 def _get_resources():
     plugin = get_plugin()
     if plugin == 'nsxv3':
-        return 'NSX-V3 resources: %s' % (', '.join(nsxv3_resources_names))
+        return f"NSX-V3 resources: {(', '.join(nsxv3_resources_names))}"
     if plugin == 'nsxv':
-        return 'NSX-V resources: %s' % (', '.join(nsxv_resources_names))
+        return f"NSX-V resources: {(', '.join(nsxv_resources_names))}"
     if plugin == 'nsxtvd':
-        return 'NSX-TVD resources: %s' % (', '.join(nsxtvd_resources_names))
+        return f"NSX-TVD resources: {(', '.join(nsxtvd_resources_names))}"


 cli_opts = [cfg.StrOpt('fmt',
@@ -366,8 +373,8 @@ cli_opts = [cfg.StrOpt('fmt',
                        help=_get_resources()),
             cfg.StrOpt('operation',
                        short='o',
-                       help='Supported list of operations: {}'
-                       .format(', '.join(ops))),
+                       help=f"Supported list of operations:"
+                            f" {(', '.join(ops))}"),
             cfg.StrOpt('plugin',
                        help='nsxv or nsxv3 if the tvd plugin is used'),
             cfg.BoolOpt('force',
@@ -414,11 +421,11 @@ def init_resource_plugin(plugin_name, plugin_dir):
             continue
         # load the resource
         importlib.import_module(
-            "vmware_nsx.shell.admin.plugins."
-            "{}.resources.".format(plugin_name) + resource)
+            f"vmware_nsx.shell.admin.plugins."
+            f"{plugin_name}.resources." + resource)


 def get_plugin_dir(plugin_name):
     plugin_dir = (os.path.dirname(os.path.realpath(__file__)) +
                   "/admin/plugins")
-    return '{}/{}/resources'.format(plugin_dir, plugin_name)
+    return f"{plugin_dir}/{plugin_name}/resources"
diff --git a/vmware_nsx/tests/unit/shell/test_admin_utils.py b/vmware_nsx/tests/unit/shell/test_admin_utils.py
index 7c55a3da3a..ac36193cb6 100644
--- a/vmware_nsx/tests/unit/shell/test_admin_utils.py
+++ b/vmware_nsx/tests/unit/shell/test_admin_utils.py
@@ -415,7 +415,11 @@ class TestNsxpAdminUtils(AbstractTestAdminUtils,
                       "dhcp-config=dumyuuid",
                       "old-tier0=olduuid",
                       "new-tier0=newuuid",
-                      "firewall-match=internal"]
+                      "firewall-match=internal",
+                      "tier0=newuuid",
+                      "metadata-proxy=mdproxy",
+                      "az=default",
+                      "dhcp-profile=openstack_dhcp_profile"]
         # Create some neutron objects for the utilities to run on
         self._create_router()
         with self._create_l3_ext_network() as network:
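For quick reference, the property keys each new operation consumes, matching the test properties added above (this summary is illustrative and not part of the patch itself)::

    # --property keys read by each new nsxadmin operation in this patch
    new_operation_properties = {
        'recover-tier0': ('tier0', 'az'),
        'update-metadata': ('metadata-proxy', 'az'),
        'update-dhcp-profile-edge': ('dhcp-profile', 'tier0'),
    }
    for op, keys in new_operation_properties.items():
        print(op, '->', ', '.join(keys))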