Add functions to nsxadmin tool

Add functions to auto-recover the NSX-T tier1 routers' tier0 binding info,
the segments' metadata proxy configuration, and the dhcp profile's edge
cluster binding.
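
The three recovery operations are exposed through nsxadmin as documented
below:

    nsxadmin -r routers -o recover-tier0 --property tier0=<id> --property az=<name>
    nsxadmin -r networks -o update-metadata --property metadata-proxy=<id> --property az=<name>
    nsxadmin -r dhcp-binding -o update-dhcp-profile-edge --property dhcp-profile=<id> --property tier0=<id>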

Change-Id: Id4c2d98c9666ba69da06a3375bf8284cd64f5ef4
Yan Wang 2021-08-31 18:07:01 +08:00
parent 83d59be2c8
commit 8b3be7b332
5 changed files with 214 additions and 5 deletions


@@ -671,6 +671,10 @@ NSX Policy Plugin
    nsxadmin -r networks -o nsx-update-state

- Update the metadata proxy configuration of all segments for a specified availability zone::

    nsxadmin -r networks -o update-metadata --property metadata-proxy=<id> --property az=<name>

- List all the neutron routers together with their NSX Policy objects and realization state::

    nsxadmin -r routers -o list
@@ -687,9 +691,17 @@ NSX Policy Plugin
    nsxadmin -r routers -o update-nat-firewall-match --property firewall-match=external/internal

- Reconfigure the tier1 routers with tier0 GW at NSX backend and update the network binding neutron DB with new tier0 GW for an availability zone::

    nsxadmin -r routers -o recover-tier0 --property tier0=<id> --property az=<name>

- Migrate networks DHCP from MP to Policy (for NSX 3.0 upgrades)::

    nsxadmin -r dhcp-binding -o migrate-to-policy --property dhcp-config=<id>

- Bind the specified dhcp profile to the edge clusters of the specified tier0 GW::

    nsxadmin -r dhcp-binding -o update-dhcp-profile-edge --property dhcp-profile=<id> --property tier0=<id>

- Update tags on a loadbalancer service::

    nsxadmin -r lb-services -o nsx-update-tags


@@ -143,6 +143,92 @@ def update_admin_state(resource, event, trigger, **kwargs):
                nsxpolicy.segment_port.set_admin_state(seg_id, port['id'], False)


@admin_utils.output_header
def update_metadata(resource, event, trigger, **kwargs):
    """
    Update the metadata proxy configuration of segments
    """
    errmsg = ("Need to specify metadata proxy ID and availability-zone. "
              "Add --property metadata-proxy=<id> --property az=<name>")
    if not kwargs.get('property'):
        LOG.error("%s", errmsg)
        return
    properties = admin_utils.parse_multi_keyval_opt(kwargs['property'])
    metaproxy = properties.get('metadata-proxy')
    az = properties.get('az')
    if not metaproxy or not az:
        LOG.error("%s", errmsg)
        raise SystemExit(errmsg)
    nsxpolicy = p_utils.get_connected_nsxpolicy()
    try:
        nsxpolicy.md_proxy.get(metaproxy)
    except Exception as e:
        LOG.error("Error while retrieving NSX metadata proxy %s: %s",
                  metaproxy, e)
        raise SystemExit(e)
    ctx = context.get_admin_context()
    with p_utils.NsxPolicyPluginWrapper() as plugin:
        nets = plugin.get_networks(ctx)
        for net in nets:
            if plugin._network_is_external(ctx, net['id']):
                continue
            network_az = plugin.get_network_az_by_net_id(ctx, net['id'])
            if az == network_az.name:
                seg_id = plugin._get_network_nsx_segment_id(ctx, net['id'])
                try:
                    nsxpolicy.segment.update(seg_id,
                                             metadata_proxy_id=metaproxy)
                except Exception as e:
                    LOG.error("Failed to update segment %s metadata proxy"
                              " configuration: %s", seg_id, e)
                    raise SystemExit(e)
                else:
                    LOG.info("Updated segment %s to metadata proxy %s",
                             seg_id, metaproxy)
    LOG.info("Successfully updated all the networks' metadata proxy"
             " configuration.")


@admin_utils.output_header
def update_dhcp_profile_edge(resource, event, trigger, **kwargs):
    """
    Bind the specified dhcp profile to the edge clusters of tier0 GW
    """
    errmsg = ("Need to specify dhcp profile ID and tier0 GW ID. Add "
              "--property dhcp-profile=<id> --property tier0=<id>")
    if not kwargs.get('property'):
        LOG.error("%s", errmsg)
        return
    properties = admin_utils.parse_multi_keyval_opt(kwargs['property'])
    dhcp_profile = properties.get('dhcp-profile')
    tier0 = properties.get('tier0')
    if not dhcp_profile or not tier0:
        LOG.error("%s", errmsg)
        raise SystemExit(errmsg)
    nsxpolicy = p_utils.get_connected_nsxpolicy()
    try:
        nsxpolicy.tier0.get(tier0)
    except Exception as e:
        LOG.error("Tier0 logical router %s was not found: %s", tier0, e)
        raise SystemExit(e)
    edge_path = nsxpolicy.tier0.get_edge_cluster_path(tier0)
    if edge_path:
        try:
            nsxpolicy.dhcp_server_config.update(dhcp_profile,
                                                edge_cluster_path=edge_path)
        except Exception as e:
            LOG.error("Failed to bind dhcp profile %s to edge cluster %s: %s",
                      dhcp_profile, edge_path, e)
            raise SystemExit(e)
        else:
            LOG.info("Successfully updated dhcp profile %s to edge cluster "
                     "%s", dhcp_profile, edge_path)
    else:
        LOG.error("Tier0 logical router %s has no edge cluster binding. "
                  "Skipping the dhcp profile update action", tier0)


registry.subscribe(update_admin_state,
                   constants.NETWORKS,
                   shell.Operations.NSX_UPDATE_STATE.value)
@@ -150,3 +236,11 @@ registry.subscribe(update_admin_state,
registry.subscribe(migrate_dhcp_to_policy,
                   constants.DHCP_BINDING,
                   shell.Operations.MIGRATE_TO_POLICY.value)

registry.subscribe(update_metadata,
                   constants.NETWORKS,
                   shell.Operations.UPDATE_METADATA.value)

registry.subscribe(update_dhcp_profile_edge,
                   constants.DHCP_BINDING,
                   shell.Operations.UPDATE_DHCP_EDGE.value)
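
# A minimal sketch of how a handler above consumes --property options,
# assuming (not part of this change) that the nsxadmin shell passes each
# "--property key=value" option as a plain "key=value" string in
# kwargs['property'], which admin_utils.parse_multi_keyval_opt() turns into
# a dict; the IDs below are hypothetical placeholders:
#
#     update_dhcp_profile_edge(
#         constants.DHCP_BINDING, None, None,
#         property=['dhcp-profile=<profile-id>', 'tier0=<tier0-id>'])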


@@ -119,6 +119,94 @@ def update_tier0(resource, event, trigger, **kwargs):
    LOG.info("Done.")


@admin_utils.output_header
def recover_tier0(resource, event, trigger, **kwargs):
    """
    Reconfigure the tier1 routers with tier0 GW at NSX backend and update the
    neutron external network's physical network binding
    """
    errmsg = ("Need to specify tier0 ID and availability-zone. "
              "Add --property tier0=<id> --property az=<name>")
    if not kwargs.get('property'):
        LOG.error("%s", errmsg)
        return
    properties = admin_utils.parse_multi_keyval_opt(kwargs['property'])
    tier0 = properties.get('tier0')
    az = properties.get('az')
    if not tier0 or not az:
        LOG.error("%s", errmsg)
        raise SystemExit(errmsg)
    # Verify the id of the tier0
    nsxpolicy = p_utils.get_connected_nsxpolicy()
    try:
        nsxpolicy.tier0.get(tier0)
    except Exception as e:
        LOG.error("An error occurred while retrieving Tier0 gw router %s: %s",
                  tier0, e)
        raise SystemExit(e)
    tier0_edge_cluster = nsxpolicy.tier0.get_edge_cluster_path(tier0)
    if not tier0_edge_cluster:
        LOG.error("Tier0 gw router %s does not have an edge cluster "
                  "configured", tier0)
        return
    ctx = context.get_admin_context()
    plugin = RoutersPlugin()
    neutron_routers = plugin.get_routers(ctx)
    if not neutron_routers:
        LOG.info("No neutron routers were found")
    with p_utils.NsxPolicyPluginWrapper() as core_plugin:
        for router in neutron_routers:
            router_obj = core_plugin._get_router(ctx, router['id'])
            router_az = core_plugin._get_router_az_obj(router_obj)
            if router_obj.gw_port_id and az == router_az.name:
                # May stay None if the router had no tier0 uplink before
                old_tier0_edge_cluster_path = None
                old_tier0_path = nsxpolicy.tier1.get(router['id']).\
                    get('tier0_path')
                if old_tier0_path:
                    old_tier0_edge_cluster_path = nsxpolicy.tier0.\
                        get_edge_cluster_path(old_tier0_path.split('/')[-1])
                # Update tier1 routers GW to point to the tier0 in the backend
                try:
                    nsxpolicy.tier1.update(router['id'], tier0=tier0)
                except Exception as e:
                    LOG.error("Failed to update T0 uplink for router %s: %s",
                              router['id'], e)
                    raise SystemExit(e)
                else:
                    LOG.info("Updated router %s uplink port", router['id'])
                # Update the tier1 router's edge cluster to the new tier0's
                # edge cluster, but only if its old edge cluster was the same
                # as the old tier0's edge cluster
                old_tier1_edge_cluster_path = nsxpolicy.tier1.\
                    get_edge_cluster_path(router['id'])
                if old_tier1_edge_cluster_path and \
                        (old_tier1_edge_cluster_path ==
                         old_tier0_edge_cluster_path):
                    try:
                        nsxpolicy.tier1.\
                            set_edge_cluster_path(router['id'],
                                                  tier0_edge_cluster)
                    except Exception as e:
                        LOG.error("Failed to update router %s edge cluster:"
                                  " %s", router['id'], e)
                        raise SystemExit(e)
                    else:
                        LOG.info("Updated router %s edge cluster",
                                 router['id'])
        # Update Neutron external network's physical network binding
        nets = core_plugin.get_networks(ctx)
        for net in nets:
            network_az = core_plugin.get_network_az_by_net_id(ctx, net['id'])
            if az == network_az.name and net.get('router:external'):
                with ctx.session.begin(subtransactions=True):
                    bindings = ctx.session.query(
                        nsx_models.TzNetworkBinding).filter_by(
                        network_id=net['id']).first()
                    bindings.phy_uuid = tier0
                LOG.info("Updated neutron external network %s binding "
                         "physical network", net['id'])
    LOG.info("Successfully updated all the tier0 GW binding information.")


@admin_utils.output_header
def update_nat_firewall_match(resource, event, trigger, **kwargs):
    """Update the firewall_match value in neutron nat rules with a new value"""
@@ -130,7 +218,7 @@ def update_nat_firewall_match(resource, event, trigger, **kwargs):
    properties = admin_utils.parse_multi_keyval_opt(kwargs['property'])
    firewall_match_str = properties.get('firewall-match')
    if (not firewall_match_str or
            firewall_match_str.lower() not in ('internal', 'external')):
        LOG.error("%s", errmsg)
        return
@@ -184,6 +272,10 @@ registry.subscribe(update_tier0,
                   constants.ROUTERS,
                   shell.Operations.UPDATE_TIER0.value)

registry.subscribe(recover_tier0,
                   constants.ROUTERS,
                   shell.Operations.RECOVER_TIER0.value)

registry.subscribe(update_nat_firewall_match,
                   constants.ROUTERS,
                   shell.Operations.UPDATE_FIREWALL_MATCH.value)


@@ -76,6 +76,9 @@ class Operations(enum.Enum):
    VALIDATE = 'validate'
    REUSE = 'reuse'
    UPDATE_TIER0 = 'update-tier0'
    RECOVER_TIER0 = 'recover-tier0'
    UPDATE_METADATA = 'update-metadata'
    UPDATE_DHCP_EDGE = 'update-dhcp-profile-edge'
    UPDATE_FIREWALL_MATCH = 'update-nat-firewall-match'
    SET_STATUS_ERROR = 'set-status-error'
    CHECK_COMPUTE_CLUSTERS = 'check-compute-clusters'
@@ -122,7 +125,8 @@ nsxv3_resources = {
                                 Operations.NSX_UPDATE_RULES.value,
                                 Operations.NSX_UPDATE_DHCP_RELAY.value,
                                 Operations.NSX_ENABLE_STANDBY_RELOCATION.value,
                                 Operations.UPDATE_TIER0.value,
                                 Operations.RECOVER_TIER0.value]),
    constants.DHCP_BINDING: Resource(constants.DHCP_BINDING,
                                     [Operations.LIST.value,
                                      Operations.NSX_RECREATE.value]),
@@ -282,12 +286,15 @@ nsxp_resources = {
                                     [Operations.LIST.value]),
    constants.NETWORKS: Resource(constants.NETWORKS,
                                 [Operations.LIST.value,
                                  Operations.NSX_UPDATE_STATE.value,
                                  Operations.UPDATE_METADATA.value]),
    constants.DHCP_BINDING: Resource(constants.DHCP_BINDING,
                                     [Operations.MIGRATE_TO_POLICY.value,
                                      Operations.UPDATE_DHCP_EDGE.value]),
    constants.ROUTERS: Resource(constants.ROUTERS,
                                [Operations.LIST.value,
                                 Operations.UPDATE_TIER0.value,
                                 Operations.RECOVER_TIER0.value,
                                 Operations.UPDATE_FIREWALL_MATCH.value]),
    constants.LB_SERVICES: Resource(constants.LB_SERVICES,
                                    [Operations.NSX_UPDATE_TAGS.value]),


@@ -415,7 +415,11 @@ class TestNsxpAdminUtils(AbstractTestAdminUtils,
                "dhcp-config=dumyuuid",
                "old-tier0=olduuid",
                "new-tier0=newuuid",
                "firewall-match=internal",
                "tier0=newuuid",
                "metadata-proxy=mdproxy",
                "az=default",
                "dhcp-profile=openstack_dchp_profile"]
        # Create some neutron objects for the utilities to run on
        self._create_router()
        with self._create_l3_ext_network() as network: