diff --git a/.coveragerc b/.coveragerc index fbb4cf113c..297011e10c 100644 --- a/.coveragerc +++ b/.coveragerc @@ -1,7 +1,7 @@ [run] branch = True source = vmware_nsx -omit = vmware_nsx/tests/*,vmware_nsx/*dvs*,vmware_nsx/api_replay/*,vmware_nsx/dhcp_meta/*,vmware_nsx/nsxlib/*,vmware_nsx/*lsn*,vmware_nsx/*tv*,vmware_nsx/api_client/*,vmware_nsx/common/profile*,vmware_nsx/shell/nsx_instance_if_migrate*,vmware_nsx/plugins/nsx_v/vshield/vcns.*,vmware_nsx/db/migration/alembic_migrations/* +omit = vmware_nsx/tests/*,vmware_nsx/*dvs*,vmware_nsx/api_replay/*,vmware_nsx/dhcp_meta/*,vmware_nsx/nsxlib/*,vmware_nsx/*lsn*,vmware_nsx/*tv*,vmware_nsx/api_client/*,vmware_nsx/common/profile*,vmware_nsx/shell/nsx_instance_if_migrate*,vmware_nsx/plugins/nsx_v/vshield/vcns.*,vmware_nsx/db/migration/alembic_migrations/*,vmware_nsx/shell/admin/plugins/nsxv3/resources/migration* [report] ignore_errors = True diff --git a/doc/source/admin_util.rst b/doc/source/admin_util.rst index 4c077455a4..367b316058 100644 --- a/doc/source/admin_util.rst +++ b/doc/source/admin_util.rst @@ -592,6 +592,17 @@ Config nsxadmin -r config -o validate +T2P migration +~~~~~~~~~~~~~ + +- Migrate NSX resources and neutron DB from NSX-T (MP) to Policy:: + + nsxadmin -r nsx-migrate-t2p -o import (--verbose) + +- Delete DB tables related to the MP plugin after migration:: + + nsxadmin -r nsx-migrate-t2p -o clean-all + NSXtvd Plugin ------------- @@ -643,6 +654,10 @@ NSX Policy Plugin - Update tags on a loadbalancer service nsxadmin -r lb-services -o nsx-update-tags +- Delete DB tables related to the MP plugin after migration from MP plugin to policy:: + + nsxadmin -r nsx-migrate-t2p -o clean-all + Client Certificate ~~~~~~~~~~~~~~~~~~ diff --git a/lower-constraints.txt b/lower-constraints.txt index d0b9e95b5d..0fe306206e 100644 --- a/lower-constraints.txt +++ b/lower-constraints.txt @@ -28,6 +28,7 @@ oslo.utils==3.33.0 oslo.vmware==2.17.0 oslotest==3.2.0 osc-lib==1.14.0 +paramiko==2.4.0 pbr==4.0.0 pika-pool==0.1.3 pika==0.10.0 diff --git a/requirements.txt b/requirements.txt index 57bcb655ab..ce1c512b38 100644 --- a/requirements.txt +++ b/requirements.txt @@ -26,6 +26,7 @@ oslo.serialization>=2.28.1 # Apache-2.0 oslo.service>=1.31.0 # Apache-2.0 oslo.utils>=3.33.0 # Apache-2.0 oslo.vmware>=2.17.0 # Apache-2.0 +paramiko>=2.4.0 # LGPLv2.1+ PrettyTable<0.8,>=0.7.2 # BSD tooz>=1.58.0 # Apache-2.0 decorator>=4.4.1 # BSD diff --git a/vmware_nsx/common/utils.py b/vmware_nsx/common/utils.py index a04ed3ed59..04dddfeaec 100644 --- a/vmware_nsx/common/utils.py +++ b/vmware_nsx/common/utils.py @@ -104,6 +104,11 @@ def is_nsx_version_3_0_0(nsx_version): version.LooseVersion(v3_const.NSX_VERSION_3_0_0)) +def is_nsx_version_3_1_0(nsx_version): + return (version.LooseVersion(nsx_version) >= + version.LooseVersion(v3_const.NSX_VERSION_3_1_0)) + + def is_nsxv_version_6_2(nsx_version): return (version.LooseVersion(nsx_version) >= version.LooseVersion('6.2')) diff --git a/vmware_nsx/plugins/common_v3/utils.py b/vmware_nsx/plugins/common_v3/utils.py index fa79790a19..2c6645842a 100644 --- a/vmware_nsx/plugins/common_v3/utils.py +++ b/vmware_nsx/plugins/common_v3/utils.py @@ -159,35 +159,44 @@ def get_nsxlib_wrapper(nsx_username=None, nsx_password=None, basic_auth=False, def get_nsxpolicy_wrapper(nsx_username=None, nsx_password=None, - basic_auth=False): + basic_auth=False, conf_path=None): + if not conf_path: + conf_path = cfg.CONF.nsx_p client_cert_provider = None if not basic_auth: # if basic auth requested, dont use cert file even if provided 
client_cert_provider = get_client_cert_provider( - conf_path=cfg.CONF.nsx_p) + conf_path=conf_path) nsxlib_config = config.NsxLibConfig( - username=nsx_username or cfg.CONF.nsx_p.nsx_api_user, - password=nsx_password or cfg.CONF.nsx_p.nsx_api_password, + username=nsx_username or conf_path.nsx_api_user, + password=nsx_password or conf_path.nsx_api_password, client_cert_provider=client_cert_provider, - retries=cfg.CONF.nsx_p.http_retries, - insecure=cfg.CONF.nsx_p.insecure, - ca_file=cfg.CONF.nsx_p.ca_file, - concurrent_connections=cfg.CONF.nsx_p.concurrent_connections, - http_timeout=cfg.CONF.nsx_p.http_timeout, - http_read_timeout=cfg.CONF.nsx_p.http_read_timeout, - conn_idle_timeout=cfg.CONF.nsx_p.conn_idle_timeout, + retries=conf_path.http_retries, + insecure=conf_path.insecure, + ca_file=conf_path.ca_file, + concurrent_connections=conf_path.concurrent_connections, + http_timeout=conf_path.http_timeout, + http_read_timeout=conf_path.http_read_timeout, + conn_idle_timeout=conf_path.conn_idle_timeout, http_provider=None, - max_attempts=cfg.CONF.nsx_p.retries, - nsx_api_managers=cfg.CONF.nsx_p.nsx_api_managers, + max_attempts=conf_path.retries, + nsx_api_managers=conf_path.nsx_api_managers, plugin_scope=OS_NEUTRON_ID_SCOPE, plugin_tag=NSX_NEUTRON_PLUGIN, plugin_ver=n_version.version_info.release_string(), - dns_nameservers=cfg.CONF.nsx_p.nameservers, - dns_domain=cfg.CONF.nsx_p.dns_domain, - allow_passthrough=cfg.CONF.nsx_p.allow_passthrough, - realization_max_attempts=cfg.CONF.nsx_p.realization_max_attempts, - realization_wait_sec=cfg.CONF.nsx_p.realization_wait_sec) + dns_nameservers=conf_path.nameservers, + dns_domain=conf_path.dns_domain, + allow_passthrough=(conf_path.allow_passthrough + if hasattr(conf_path, 'allow_passthrough') + else False), + realization_max_attempts=(conf_path.realization_max_attempts + if hasattr(conf_path, + 'realization_max_attempts') + else 50), + realization_wait_sec=(conf_path.realization_wait_sec + if hasattr(conf_path, 'realization_wait_sec') + else 1)) return policy.NsxPolicyLib(nsxlib_config) diff --git a/vmware_nsx/plugins/nsx_p/plugin.py b/vmware_nsx/plugins/nsx_p/plugin.py index 823f3cb586..ceab0e0eb7 100644 --- a/vmware_nsx/plugins/nsx_p/plugin.py +++ b/vmware_nsx/plugins/nsx_p/plugin.py @@ -820,13 +820,19 @@ class NsxPolicyPlugin(nsx_plugin_common.NsxPluginV3Base): # MP MD proxy when this network is created. # If not - the port will not be found, and it is ok. # Note(asarfaty): In the future this code can be removed. - if not is_external_net and cfg.CONF.nsx_p.allow_passthrough: + # TODO(asarfaty): For migrated networks when the DB was not cleaned up + # This may actually delete a port the policy now control + if (not is_external_net and not is_nsx_net and + cfg.CONF.nsx_p.allow_passthrough): self._delete_nsx_port_by_network(network_id) # Delete the network segment from the backend if not is_external_net and not is_nsx_net: try: self.nsxpolicy.segment.delete(network_id) + # In case of migrated network, a dhcp server config with + # the same id should also be deleted + self.nsxpolicy.dhcp_server_config.delete(network_id) except nsx_lib_exc.ResourceNotFound: # If the resource was not found on the backend do not worry # about it. 
The conditions has already been logged, so there @@ -3919,7 +3925,7 @@ class NsxPolicyPlugin(nsx_plugin_common.NsxPluginV3Base): rule_entry = self._create_security_group_backend_rule( context, sg_id, rule, secgroup_logging, is_provider_sg=is_provider_sg, - create_related_resource=False) + create_related_resource=True) backend_rules.append(rule_entry) # Update the policy with all the rules. diff --git a/vmware_nsx/services/lbaas/nsx_p/implementation/l7policy_mgr.py b/vmware_nsx/services/lbaas/nsx_p/implementation/l7policy_mgr.py index 0be2caa342..f9829223f9 100644 --- a/vmware_nsx/services/lbaas/nsx_p/implementation/l7policy_mgr.py +++ b/vmware_nsx/services/lbaas/nsx_p/implementation/l7policy_mgr.py @@ -52,6 +52,7 @@ class EdgeL7PolicyManagerFromDict(base_mgr.NsxpLoadbalancerBaseManager): vs_client = self.core_plugin.nsxpolicy.load_balancer.virtual_server policy_name = utils.get_name_and_uuid(old_policy['name'] or 'policy', old_policy['id']) + short_name = utils.get_name_short_uuid(old_policy['id']) rule_body = lb_utils.convert_l7policy_to_lb_rule( self.core_plugin.nsxpolicy, new_policy) try: @@ -59,6 +60,7 @@ class EdgeL7PolicyManagerFromDict(base_mgr.NsxpLoadbalancerBaseManager): new_policy['listener_id'], policy_name, position=new_policy.get('position', 0) - 1, + compare_name_suffix=short_name, **rule_body) except Exception as e: @@ -70,11 +72,11 @@ class EdgeL7PolicyManagerFromDict(base_mgr.NsxpLoadbalancerBaseManager): def delete(self, context, policy, completor): vs_client = self.core_plugin.nsxpolicy.load_balancer.virtual_server - policy_name = utils.get_name_and_uuid(policy['name'] or 'policy', - policy['id']) + policy_name = utils.get_name_short_uuid(policy['id']) try: vs_client.remove_lb_rule(policy['listener_id'], - policy_name) + policy_name, + check_name_suffix=True) except nsxlib_exc.ResourceNotFound: pass except nsxlib_exc.ManagerError: diff --git a/vmware_nsx/services/lbaas/nsx_p/implementation/l7rule_mgr.py b/vmware_nsx/services/lbaas/nsx_p/implementation/l7rule_mgr.py index d818ec2d99..4cc1c07649 100644 --- a/vmware_nsx/services/lbaas/nsx_p/implementation/l7rule_mgr.py +++ b/vmware_nsx/services/lbaas/nsx_p/implementation/l7rule_mgr.py @@ -29,6 +29,7 @@ class EdgeL7RuleManagerFromDict(base_mgr.NsxpLoadbalancerBaseManager): policy = rule['policy'] policy_name = utils.get_name_and_uuid(policy['name'] or 'policy', policy['id']) + short_name = utils.get_name_short_uuid(policy['id']) if delete: lb_utils.remove_rule_from_policy(rule) else: @@ -39,6 +40,7 @@ class EdgeL7RuleManagerFromDict(base_mgr.NsxpLoadbalancerBaseManager): vs_client.update_lb_rule(policy['listener_id'], policy_name, position=policy.get('position', 0) - 1, + compare_name_suffix=short_name, **rule_body) except Exception as e: with excutils.save_and_reraise_exception(): diff --git a/vmware_nsx/shell/admin/plugins/common/constants.py b/vmware_nsx/shell/admin/plugins/common/constants.py index a4b3e19040..945f055b14 100644 --- a/vmware_nsx/shell/admin/plugins/common/constants.py +++ b/vmware_nsx/shell/admin/plugins/common/constants.py @@ -51,6 +51,7 @@ LB_ADVERTISEMENT = 'lb-advertisement' RATE_LIMIT = 'rate-limit' CLUSTER = 'cluster' ORPHANED_FIREWALL_SECTIONS = 'orphaned-firewall-sections' +NSX_MIGRATE_T_P = 'nsx-migrate-t2p' # NSXV only Resource Constants EDGES = 'edges' diff --git a/vmware_nsx/shell/admin/plugins/nsxp/migration.py b/vmware_nsx/shell/admin/plugins/nsxp/migration.py new file mode 100644 index 0000000000..c064b8aa3e --- /dev/null +++ b/vmware_nsx/shell/admin/plugins/nsxp/migration.py @@ -0,0 +1,31 
@@ +# Copyright 2020 VMware, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from neutron_lib.callbacks import registry + +from vmware_nsx.shell.admin.plugins.common import constants +from vmware_nsx.shell.admin.plugins.common import utils as admin_utils +from vmware_nsx.shell.admin.plugins.nsxv3.resources import migration +from vmware_nsx.shell import resources as shell + + +@admin_utils.output_header +def cleanup_db_mappings(resource, event, trigger, **kwargs): + """Delete all entries from nsx-t mapping tables in DB""" + return migration.cleanup_db_mappings(resource, event, trigger, **kwargs) + + +registry.subscribe(cleanup_db_mappings, + constants.NSX_MIGRATE_T_P, + shell.Operations.CLEAN_ALL.value) diff --git a/vmware_nsx/shell/admin/plugins/nsxp/resources/utils.py b/vmware_nsx/shell/admin/plugins/nsxp/resources/utils.py index da7982ce43..de968c4fa7 100644 --- a/vmware_nsx/shell/admin/plugins/nsxp/resources/utils.py +++ b/vmware_nsx/shell/admin/plugins/nsxp/resources/utils.py @@ -12,6 +12,7 @@ # License for the specific language governing permissions and limitations # under the License. +import logging from oslo_config import cfg from neutron.db import l3_dvr_db # noqa @@ -19,7 +20,6 @@ from neutron import manager from neutron_lib import context from neutron_lib.plugins import constants as const from neutron_lib.plugins import directory -from oslo_log import log as logging from vmware_nsx.common import config from vmware_nsx.plugins.common_v3 import utils as v3_utils @@ -41,16 +41,28 @@ def get_nsxp_client(nsx_username=None, nsx_password=None, def get_connected_nsxpolicy(nsx_username=None, nsx_password=None, - use_basic_auth=False): + use_basic_auth=False, conf_path=None, + verbose=False): global _NSXPOLICY - # for non-default agruments, initiate new lib + if not verbose: + # Suppress logs for nsxpolicy init + logging.disable(logging.INFO) + + # for non-default arguments, initiate new lib if nsx_username or use_basic_auth: + if not verbose: + # Return logs to normal + logging.disable(logging.NOTSET) return v3_utils.get_nsxpolicy_wrapper(nsx_username, nsx_password, - use_basic_auth) + use_basic_auth, + conf_path=conf_path) if _NSXPOLICY is None: - _NSXPOLICY = v3_utils.get_nsxpolicy_wrapper() + _NSXPOLICY = v3_utils.get_nsxpolicy_wrapper(conf_path=conf_path) + if not verbose: + # Return logs to normal + logging.disable(logging.NOTSET) return _NSXPOLICY @@ -76,13 +88,21 @@ def get_realization_info(resource, *realization_args): class NsxPolicyPluginWrapper(plugin.NsxPolicyPlugin): - def __init__(self): + def __init__(self, verbose=False): + if not verbose: + # Suppress logs for plugin init + logging.disable(logging.INFO) + # initialize the availability zones config.register_nsxp_azs(cfg.CONF, cfg.CONF.nsx_p.availability_zones) super(NsxPolicyPluginWrapper, self).__init__() self.context = context.get_admin_context() admin_utils._init_plugin_mock_quota() + if not verbose: + # Return logs to normal + logging.disable(logging.NOTSET) + def __enter__(self): 
directory.add_plugin(const.CORE, self) return self diff --git a/vmware_nsx/shell/admin/plugins/nsxv3/resources/migration.py b/vmware_nsx/shell/admin/plugins/nsxv3/resources/migration.py new file mode 100644 index 0000000000..6b8dd0d3be --- /dev/null +++ b/vmware_nsx/shell/admin/plugins/nsxv3/resources/migration.py @@ -0,0 +1,1447 @@ +# Copyright 2020 VMware, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import time + +import paramiko +import tenacity + +from neutron_lib.callbacks import registry +from neutron_lib import context +from oslo_config import cfg +from oslo_log import log as logging + +from vmware_nsx.common import utils as nsx_utils +from vmware_nsx.db import db +from vmware_nsx.db import nsx_models +from vmware_nsx.plugins.nsx_p import plugin as p_plugin +from vmware_nsx.plugins.nsx_v3 import cert_utils +from vmware_nsx.plugins.nsx_v3 import plugin as v3_plugin +from vmware_nsx.plugins.nsx_v3 import utils as v3_plugin_utils +from vmware_nsx.services.lbaas import lb_const +from vmware_nsx.services.lbaas.nsx_p.implementation import lb_utils +from vmware_nsx.shell.admin.plugins.common import constants +from vmware_nsx.shell.admin.plugins.common import utils as admin_utils +from vmware_nsx.shell.admin.plugins.nsxp.resources import utils as p_utils +from vmware_nsx.shell.admin.plugins.nsxv3.resources import utils +from vmware_nsx.shell import resources as shell + +from vmware_nsxlib.v3 import core_resources as nsx_resources +from vmware_nsxlib.v3 import exceptions as nsxlib_exc +from vmware_nsxlib.v3 import load_balancer as nsxlib_lb +from vmware_nsxlib.v3.policy import constants as policy_constants +from vmware_nsxlib.v3.policy import core_resources as policy_resources +from vmware_nsxlib.v3.policy import utils as policy_utils + +LOG = logging.getLogger(__name__) +POLICY_API_STATUS_FAILED = 'FAILED' +POLICY_API_STATUS_SUCCESS = 'SUCCESS' +POLICY_API_STATUS_IN_PROGRESS = 'PAUSING' +POLICY_API_STATUS_PAUSED = 'PAUSED' +POLICY_API_STATUS_READY = 'NOT_STARTED' + +STATUS_ALLOW_MIGRATION_REQ = set([ + POLICY_API_STATUS_SUCCESS, + POLICY_API_STATUS_READY +]) + +MIGRATE_LIMIT_NO_LIMIT = 0 +MIGRATE_LIMIT_TIER0 = 1 +MIGRATE_LIMIT_TIER0_PORTS = 1000 +MIGRATE_LIMIT_TIER1 = 500 +MIGRATE_LIMIT_TIER1_PORTS = 5 +MIGRATE_LIMIT_NAT = 1500 +MIGRATE_LIMIT_DHCP_SERVER = 1500 +MIGRATE_LIMIT_MD_PROXY = 1500 +MIGRATE_LIMIT_SWITCH_PROFILE = 1500 +MIGRATE_LIMIT_LOGICAL_SWITCH = 500 +MIGRATE_LIMIT_LOGICAL_PORT = 1500 +MIGRATE_LIMIT_NS_GROUP = 2000 +MIGRATE_LIMIT_SECTION_AND_RULES = 1500 +MIGRATE_LIMIT_LB_SERVICE = 2000 +MIGRATE_LIMIT_LB_VIRTUAL_SERVER = 2000 +MIGRATE_LIMIT_LB_MONITOR = 1500 +MIGRATE_LIMIT_LB_POOL = 1500 +MIGRATE_LIMIT_LB_APP_PROFILE = 2000 +MIGRATE_LIMIT_LB_PER_PROFILE = 2000 + +COMPONENT_STATUS_ALREADY_MIGRATED = 1 +COMPONENT_STATUS_OK = 2 + +ROLLBACK_DATA = [] +EDGE_FW_SEQ = 1 +DFW_SEQ = 1 + + +def start_migration_process(nsxlib): + """Notify the manager that the migration process is starting""" + return nsxlib.client.url_post( + 
"migration/mp-to-policy/workflow?action=INITIATE", None) + + +def end_migration_process(nsxlib): + """Notify the manager that the migration process has ended""" + return nsxlib.client.url_post( + "migration/mp-to-policy/workflow?action=DONE", None) + + +def send_migration_request(nsxlib, body): + return nsxlib.client.url_post("migration/mp-to-policy", body) + + +def send_rollback_request(nsxlib, body): + #TODO(asarfaty): Rollback can take very long, especially for firewall + # sections. In this case backup-restore might be better + return nsxlib.client.url_post("migration/mp-to-policy/rollback", body) + + +def send_migration_plan_action(nsxlib, action): + return nsxlib.client.url_post("migration/plan?action=%s" % action, None) + + +def get_migration_status(nsxlib, silent=False): + return nsxlib.client.get("migration/status-summary", + silent=silent) + + +def change_migration_service_status(start=True, nsxlib=None): + """Enable/Disable the migration service on the NSX manager + using SSH command + """ + # TODO(asarfaty): Is there an api for that? or use sshpass + action = 'start' if start else 'stop' + command = "%s service migration-coordinator" % action + LOG.info("\nGoing to %s the migration service on the NSX manager by " + "SSHing the manager and running '%s'", action, command) + host = cfg.CONF.nsx_v3.nsx_api_managers[0] + user = cfg.CONF.nsx_v3.nsx_api_user[0] + passwd = cfg.CONF.nsx_v3.nsx_api_password[0] + + ssh = paramiko.SSHClient() + ssh.load_system_host_keys() + ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) + ssh.connect(host, username=user, password=passwd) + ssh.exec_command(command) + + if start and nsxlib: + LOG.info("Waiting for the service to be up...") + + @tenacity.retry(reraise=True, + retry=tenacity.retry_if_exception_type(Exception), + wait=tenacity.wait_exponential(multiplier=0.5, max=2), + stop=tenacity.stop_after_attempt( + cfg.CONF.nsx_v3.retries)) + def get_migration_status_with_retry(nsxlib): + get_migration_status(nsxlib, silent=True) + + get_migration_status_with_retry(nsxlib) + LOG.info("The service is up") + + +def ensure_migration_state_ready(nsxlib, with_abort=False, verbose=False): + try: + status = get_migration_status(nsxlib, silent=True) + except nsxlib_exc.CannotConnectToServer as e: + if verbose: + LOG.info("Failed to get migration status: %s", e) + if with_abort: + change_migration_service_status(start=True, nsxlib=nsxlib) + return ensure_migration_state_ready(nsxlib, verbose=verbose) + return False + + if status["overall_migration_status"] not in STATUS_ALLOW_MIGRATION_REQ: + LOG.error("Migration status not ready: %s", status) + if with_abort: + send_migration_plan_action(nsxlib, 'abort') + return ensure_migration_state_ready( + nsxlib, with_abort=with_abort, verbose=verbose) + else: + return False + + return True + + +def verify_component_status(nsxlib, component_number): + status = get_migration_status(nsxlib) + if (status['component_status'][component_number]['status'] == + POLICY_API_STATUS_FAILED): + # If it's a duplicate migration request, pass the verification + if ('is duplicate or already migrated' in + status['component_status'][component_number]['details'] and + component_number == 0): + # Success that indicates resource migration is already done + return COMPONENT_STATUS_ALREADY_MIGRATED + # bad state. abort, mark as fail, and go to next request + raise Exception("The migration server returned with FAILURE status. 
" + "Details: %s", status) + # Success + return COMPONENT_STATUS_OK + + +def wait_on_overall_migration_status_to_pause(nsxlib): + while True: + status = get_migration_status(nsxlib) + migration_status = status.get('overall_migration_status') + if (migration_status == POLICY_API_STATUS_PAUSED or + migration_status == POLICY_API_STATUS_SUCCESS): + break + time.sleep(1) + + +def get_resource_migration_data(nsxlib_resource, neutron_id_tags, + resource_type, resource_condition=None, + printable_name=None, policy_resource_get=None, + policy_id_callback=None, + metadata_callback=None, + skip_policy_path_check=False, + verbose=False): + if not printable_name: + printable_name = resource_type + if verbose: + LOG.info("Getting data for MP %s", printable_name) + + resources = nsxlib_resource.list() + if not isinstance(resources, list): + # the nsxlib resources list return inconsistent type of result + resources = resources.get('results', []) + entries = [] + for resource in resources: + name_or_id = resource.get('display_name', resource['id']) + policy_id = resource['id'] + # Go over tags and find the neutron id + neutron_id = None + found_policy_path = False + for tag in resource.get('tags', []): + if tag['scope'] == 'policyPath': + # This is already a policy resource + found_policy_path = True + if neutron_id_tags and tag['scope'] in neutron_id_tags: + neutron_id = tag['tag'] + if not skip_policy_path_check and found_policy_path: + if verbose: + LOG.info("Skipping %s %s as it is already a policy " + "resource", printable_name, name_or_id) + continue + if neutron_id_tags: + if not neutron_id: + # Not a neutron resource + if verbose: + LOG.info("Skipping %s %s as it is not a neutron resource", + printable_name, name_or_id) + continue + policy_id = neutron_id + if resource_condition: + if not resource_condition(resource): + if verbose: + LOG.info("Skipping %s %s as it does not match the neutron " + "condition", printable_name, name_or_id) + continue + if policy_id_callback: + # Callback to change the policy id + policy_id = policy_id_callback(resource, policy_id) + if policy_id and policy_resource_get: + # filter out resources that already exit on policy! 
+ try: + policy_resource_get(policy_id, silent=True) + except nsxlib_exc.ResourceNotFound: + pass + else: + if verbose: + LOG.info("Skipping %s %s as it already exists on the " + "policy backend", printable_name, name_or_id) + continue + if verbose: + LOG.info("Adding data for %s manager-id %s, policy-id %s", + printable_name, resource['id'], policy_id) + entry = {'manager_id': resource['id']} + if policy_id: + entry['policy_id'] = policy_id + if metadata_callback: + metadata_callback(entry, policy_id, resource) + entries.append(entry) + return entries + + +def migrate_objects(nsxlib, data, use_admin=False, verbose=False): + if not ensure_migration_state_ready(nsxlib, verbose=verbose): + raise Exception("The migration server is not ready") + + if verbose: + LOG.info("Migrating %d %s objects: %s", len(data['resource_ids']), + data['type'], data) + else: + LOG.info("Migrating %d %s objects", len(data['resource_ids']), + data['type']) + + migration_body = {"migration_data": [data]} + + # Update the principal identity for the policy resources + # use 'admin' for predefined objects, and the opestack configured + # user/identity for openstack resources + if use_admin: + user = 'admin' + elif cfg.CONF.nsx_v3.nsx_use_client_auth: + user = cert_utils.NSX_OPENSTACK_IDENTITY + else: + user = cfg.CONF.nsx_v3.nsx_api_user[0] + migration_body['setup_details'] = { + 'principal_identity': user} + + send_migration_request(nsxlib, migration_body) + # send the start action + send_migration_plan_action(nsxlib, 'start') + + # wait until the overall_migration_status is SUCCESS + wait_on_overall_migration_status_to_pause(nsxlib) + + # verify first component status + success_code = verify_component_status(nsxlib, 0) + if success_code == COMPONENT_STATUS_ALREADY_MIGRATED: + return True + + # send the continue action + send_migration_plan_action(nsxlib, 'continue') + + # wait until the overall_migration_status is SUCCESS + wait_on_overall_migration_status_to_pause(nsxlib) + + # verify second component status (Will raise in case of error) + try: + verify_component_status(nsxlib, 1) + except Exception as e: + raise e + else: + global ROLLBACK_DATA + ROLLBACK_DATA.append(data) + + return True + + +def migrate_resource(nsxlib, resource_type, entries, + limit=MIGRATE_LIMIT_NO_LIMIT, + count_internals=False, use_admin=False, + verbose=False): + # Call migrate_resource with the part of resources we need by the limit + if not entries: + LOG.info("No %s to migrate", resource_type) + return + + LOG.info("Going to migrate %d %s objects in groups of max %s", + len(entries), resource_type, limit) + + if limit == MIGRATE_LIMIT_NO_LIMIT: + migrate_objects(nsxlib, {'type': resource_type, + 'resource_ids': entries}, + use_admin=use_admin, + verbose=verbose) + else: + if count_internals: + # Limit the total number of resources, including internal ones + counter = 0 + entries_to_migrate = [] + for index in range(0, len(entries)): + addition_size = 1 + len(entries[index].get('linked_ids', [])) + if addition_size > limit: + # Unsupported size of resource + raise Exception("%s size is over the allowed limit of " + "%s" % (resource_type, limit)) + if counter + addition_size > limit: + # Migrate what was accumulated so far + migrate_objects(nsxlib, + {'type': resource_type, + 'resource_ids': entries_to_migrate}, + use_admin=use_admin, + verbose=verbose) + # Start a new accumulation + counter = addition_size + entries_to_migrate = [entries[index]] + else: + # Keep accumulating + counter = counter + addition_size + 
entries_to_migrate.append(entries[index]) + if entries_to_migrate: + # Migrate the left overs + migrate_objects(nsxlib, + {'type': resource_type, + 'resource_ids': entries_to_migrate}, + use_admin=use_admin, + verbose=verbose) + else: + for index in range(0, len(entries), limit): + migrate_objects(nsxlib, + {'type': resource_type, + 'resource_ids': entries[index:index + limit]}, + use_admin=use_admin, + verbose=verbose) + + +def get_configured_values(plugin, az_attribute): + values = [] + for az in plugin.get_azs_list(): + values.append(getattr(az, az_attribute)) + return values + + +def migrate_tier0s(nsxlib, nsxpolicy, plugin, verbose=False): + # First prepare a list of neutron related tier0s from the config + neutron_t0s = get_configured_values(plugin, '_default_tier0_router') + # Add tier0s used specifically in external networks + ctx = context.get_admin_context() + with ctx.session.begin(subtransactions=True): + bindings = ctx.session.query( + nsx_models.TzNetworkBinding).filter_by( + binding_type='l3_ext').all() + for bind in bindings: + if bind.phy_uuid not in neutron_t0s: + neutron_t0s.append(bind.phy_uuid) + + def cond(resource): + return (resource.get('router_type', '') == 'TIER0' and + resource.get('id') in neutron_t0s) + + entries = get_resource_migration_data( + nsxlib.logical_router, None, + 'TIER0', resource_condition=cond, + policy_resource_get=nsxpolicy.tier0.get, + verbose=verbose) + migrate_resource(nsxlib, 'TIER0', entries, MIGRATE_LIMIT_TIER0, + use_admin=True, verbose=verbose) + migrated_tier0s = [entry['manager_id'] for entry in entries] + + # Create a list of public switches connected to the tier0s to migrate later + public_switches = [] + for tier0 in neutron_t0s: + uplink_port = nsxlib.logical_router_port.get_tier0_uplink_port(tier0) + if uplink_port: + # Get the external LS id from the uplink port + port_id = uplink_port['linked_logical_switch_port_id']['target_id'] + port = nsxlib.logical_port.get(port_id) + public_switches.append(port['logical_switch_id']) + + return public_switches, migrated_tier0s + + +def is_neutron_resource(resource): + # Return True if the resource has the neutron marking tag + for tag in resource.get('tags', []): + if tag.get('scope') == 'os-api-version': + return True + return False + + +def migrate_switch_profiles(nsxlib, nsxpolicy, plugin, verbose=False): + """Return all types of neutron switching profiles""" + + # Build a condition for each type of switching profiles. 
+ # Note(asarfaty): system owned profiles should also be migrated as they are + # missing from policy + + # Include switch profiles that are in the nsx.ini + conf_profiles = get_configured_values(plugin, 'switching_profiles') + # Black list neuron & system profiles that should not be migrated + names_black_list = [v3_plugin_utils.NSX_V3_DHCP_PROFILE_NAME, + 'ServiceInsertion_MacManagement_Profile'] + + def get_cond(resource_type): + def cond(resource): + return (resource.get('resource_type') == resource_type and + resource.get('display_name') not in names_black_list and + (resource.get('id') in conf_profiles or + resource.get('_system_owned', True) or + is_neutron_resource(resource))) + return cond + + def get_policy_id_callback(res, policy_id): + # In case of plugin init profiles: give it the id the policy plugin + # will use + mapping = {v3_plugin.NSX_V3_MAC_LEARNING_PROFILE_NAME: + p_plugin.MAC_DISCOVERY_PROFILE_ID, + v3_plugin_utils.NSX_V3_PSEC_PROFILE_NAME: + p_plugin.SPOOFGUARD_PROFILE_ID} + + if mapping.get(res.get('display_name')): + return mapping[res['display_name']] + + # QoS profiles should get the neutron policy id + for tag in res.get('tags', []): + if tag['scope'] == 'os-neutron-qos-id': + policy_id = tag['tag'] + + return policy_id + + entries = get_resource_migration_data( + nsxlib.switching_profile, None, + 'SPOOFGUARD_PROFILES', + resource_condition=get_cond( + nsx_resources.SwitchingProfileTypes.SPOOF_GUARD), + policy_resource_get=nsxpolicy.spoofguard_profile.get, + policy_id_callback=get_policy_id_callback, + verbose=verbose) + migrate_resource(nsxlib, 'SPOOFGUARD_PROFILES', entries, + MIGRATE_LIMIT_SWITCH_PROFILE, verbose=verbose) + + entries = get_resource_migration_data( + nsxlib.switching_profile, None, + 'MACDISCOVERY_PROFILES', + resource_condition=get_cond( + nsx_resources.SwitchingProfileTypes.MAC_LEARNING), + policy_resource_get=nsxpolicy.mac_discovery_profile.get, + policy_id_callback=get_policy_id_callback, + verbose=verbose) + migrate_resource(nsxlib, 'MACDISCOVERY_PROFILES', entries, + MIGRATE_LIMIT_SWITCH_PROFILE, verbose=verbose) + + entries = get_resource_migration_data( + nsxlib.switching_profile, None, + 'SEGMENT_SECURITY_PROFILES', + resource_condition=get_cond( + nsx_resources.SwitchingProfileTypes.SWITCH_SECURITY), + policy_resource_get=nsxpolicy.segment_security_profile.get, + policy_id_callback=get_policy_id_callback, + verbose=verbose) + migrate_resource(nsxlib, 'SEGMENT_SECURITY_PROFILES', entries, + MIGRATE_LIMIT_SWITCH_PROFILE, verbose=verbose) + + entries = get_resource_migration_data( + nsxlib.switching_profile, None, + 'QOS_PROFILES', + resource_condition=get_cond( + nsx_resources.SwitchingProfileTypes.QOS), + policy_resource_get=nsxpolicy.qos_profile.get, + policy_id_callback=get_policy_id_callback, + verbose=verbose) + migrate_resource(nsxlib, 'QOS_PROFILES', entries, + MIGRATE_LIMIT_SWITCH_PROFILE, verbose=verbose) + + entries = get_resource_migration_data( + nsxlib.switching_profile, None, + 'IPDISCOVERY_PROFILES', + resource_condition=get_cond( + nsx_resources.SwitchingProfileTypes.IP_DISCOVERY), + policy_resource_get=nsxpolicy.ip_discovery_profile.get, + policy_id_callback=get_policy_id_callback, + verbose=verbose) + migrate_resource(nsxlib, 'IPDISCOVERY_PROFILES', entries, + MIGRATE_LIMIT_SWITCH_PROFILE, verbose=verbose) + + +def migrate_md_proxies(nsxlib, nsxpolicy, plugin, verbose=False): + neutron_md = get_configured_values(plugin, '_native_md_proxy_uuid') + + def cond(resource): + return resource.get('id') in neutron_md + + 
entries = get_resource_migration_data( + nsxlib.native_md_proxy, None, + 'METADATA_PROXY', + resource_condition=cond, + policy_resource_get=nsxpolicy.md_proxy.get, + verbose=verbose) + migrate_resource(nsxlib, 'METADATA_PROXY', entries, + MIGRATE_LIMIT_MD_PROXY, verbose=verbose) + + +def migrate_networks(nsxlib, nsxpolicy, public_switches, verbose=False): + + # Get a list of nsx-net provider networks to migrate + # Those networks have no tags, and should keep the same id in policy + nsx_networks = [] + ctx = context.get_admin_context() + with ctx.session.begin(subtransactions=True): + bindings = ctx.session.query( + nsx_models.TzNetworkBinding).filter_by( + binding_type=nsx_utils.NsxV3NetworkTypes.NSX_NETWORK).all() + for bind in bindings: + nsx_networks.append(bind.phy_uuid) + + def cond(resource): + return (resource.get('id', '') in nsx_networks or + resource.get('id', '') in public_switches or + is_neutron_resource(resource)) + + def get_policy_id(resource, policy_id): + if resource['id'] in nsx_networks: + # Keep original ID + return resource['id'] + if resource['id'] in public_switches: + # Keep original ID + return resource['id'] + for tag in resource.get('tags', []): + # Use the neutron ID + if tag['scope'] == 'os-neutron-net-id': + return tag['tag'] + + entries = get_resource_migration_data( + nsxlib.logical_switch, [], + 'LOGICAL_SWITCH', + resource_condition=cond, + policy_resource_get=nsxpolicy.segment.get, + policy_id_callback=get_policy_id, + verbose=verbose) + migrate_resource(nsxlib, 'LOGICAL_SWITCH', entries, + MIGRATE_LIMIT_LOGICAL_SWITCH, verbose=verbose) + + +def migrate_ports(nsxlib, nsxpolicy, plugin, verbose=False): + # For nsx networks support, keep a mapping of neutron id and MP id + nsx_networks = {} + ctx = context.get_admin_context() + with ctx.session.begin(subtransactions=True): + bindings = ctx.session.query( + nsx_models.TzNetworkBinding).filter_by( + binding_type='nsx-net').all() + for bind in bindings: + nsx_networks[bind.network_id] = bind.phy_uuid + + def get_policy_port(port_id, silent=False): + # Get the segment id from neutron + ctx = context.get_admin_context() + neutron_port = plugin.get_port(ctx, port_id) + net_id = neutron_port['network_id'] + if net_id in nsx_networks: + segment_id = nsx_networks[net_id] + else: + segment_id = net_id + return nsxpolicy.segment_port.get(segment_id, port_id, silent=silent) + + def add_metadata(entry, policy_id, resource): + # Add binding maps with 'DEFAULT' key + entry['metadata'] = [{'key': 'security-profile-binding-maps-id', + 'value': policy_resources.DEFAULT_MAP_ID}, + {'key': 'discovery-profile-binding-maps-id', + 'value': policy_resources.DEFAULT_MAP_ID}, + {'key': 'qos-profile-binding-maps-id', + 'value': policy_resources.DEFAULT_MAP_ID}] + + entries = get_resource_migration_data( + nsxlib.logical_port, ['os-neutron-port-id'], + 'LOGICAL_PORT', + policy_resource_get=get_policy_port, + metadata_callback=add_metadata, + verbose=verbose) + migrate_resource(nsxlib, 'LOGICAL_PORT', entries, + MIGRATE_LIMIT_LOGICAL_PORT, verbose=verbose) + + +def migrate_routers(nsxlib, nsxpolicy, verbose=False): + + entries = get_resource_migration_data( + nsxlib.logical_router, + ['os-neutron-router-id'], + 'TIER1', + policy_resource_get=nsxpolicy.tier1.get, + verbose=verbose) + migrate_resource(nsxlib, 'TIER1', entries, MIGRATE_LIMIT_TIER1, + verbose=verbose) + migrated_routers = [entry['manager_id'] for entry in entries] + return migrated_routers + + +def _get_subnet_by_cidr(subnets, cidr): + for subnet in subnets: + if 
subnet['cidr'] == cidr: + return subnet['id'] + + +def migrate_routers_config(nsxlib, nsxpolicy, plugin, migrated_routers, + verbose=False): + """Migrate advanced configuration of neutron Tier-1s + This will use the list of Tier-1s migrated earlier + """ + # Migrate all the centralized router ports and static routes for tier1 + # routers without specifying ids + def get_policy_id(resource, policy_id): + # No policy id needed here + return + + def cond(resource): + # Import ports only for the routers that were currently migrated + # because there is no easy way to verify what was already migrated + return resource['id'] in migrated_routers + + def add_metadata(entry, policy_id, resource): + # Add router interfaces Ids + ctx = context.get_admin_context() + metadata = [] + mp_rtr_id = resource['id'] + router_ports = nsxlib.logical_router_port.get_by_router_id(mp_rtr_id) + for port in router_ports: + if 'linked_logical_switch_port_id' in port: + lsp_id = port['linked_logical_switch_port_id']['target_id'] + lsp = nsxlib.logical_port.get(lsp_id) + ls_id = lsp['logical_switch_id'] + if ls_id: + neutron_net_ids = plugin._get_neutron_net_ids_by_nsx_id( + ctx, ls_id) + if neutron_net_ids: + # Should be only 1 + metadata.append({'key': port['id'], + 'value': neutron_net_ids[0]}) + # Add static routes ids + static_routes = nsxlib.logical_router.list_static_routes( + mp_rtr_id)['results'] + for route in static_routes: + policy_id = "%s-%s" % (route['network'].replace('/', '_'), + route['next_hops'][0]['ip_address']) + metadata.append({'key': route['id'], + 'value': policy_id}) + + # Add locale-service id as -0 + if resource.get('edge_cluster_id'): + policy_id = None + for tag in resource.get('tags', []): + if tag['scope'] == 'os-neutron-router-id': + policy_id = tag['tag'] + if policy_id: + metadata.append({'key': 'localeServiceId', + 'value': "%s-0" % policy_id}) + + entry['metadata'] = metadata + + entries = get_resource_migration_data( + nsxlib.logical_router, + ['os-neutron-router-id'], + 'TIER1_LOGICAL_ROUTER_PORT', + policy_id_callback=get_policy_id, + resource_condition=cond, + metadata_callback=add_metadata, + skip_policy_path_check=True, + verbose=verbose) + migrate_resource(nsxlib, 'TIER1_LOGICAL_ROUTER_PORT', entries, + MIGRATE_LIMIT_TIER1_PORTS, verbose=verbose) + + # Migrate NAT rules per neutron tier1 + entries = [] + tier1s = nsxlib.logical_router.list()['results'] + ctx = context.get_admin_context() + for tier1 in tier1s: + # skip routers that were not migrated in this script call + tier1_mp_id = tier1['id'] + if tier1_mp_id not in migrated_routers: + continue + # skip non-neutron routers + tier1_neutron_id = None + for tag in tier1.get('tags', []): + if tag['scope'] == 'os-neutron-router-id': + tier1_neutron_id = tag['tag'] + break + if not tier1_neutron_id: + continue + # Migrate each existing NAT rule, with the parameters the policy + # plugin would have set + router_subnets = plugin._load_router_subnet_cidrs_from_db( + ctx, tier1_neutron_id) + nat_rules = nsxlib.logical_router.list_nat_rules( + tier1_mp_id)['results'] + for rule in nat_rules: + # NO_DNAT rules for subnets + if rule['action'] == 'NO_DNAT': + seq_num = p_plugin.NAT_RULE_PRIORITY_GW + cidr = rule['match_destination_network'] + subnet_id = _get_subnet_by_cidr(router_subnets, cidr) + if not subnet_id: + LOG.error("Could not find subnet with cidr %s matching " + "NO_DNAT rule %s tier1 %s", + cidr, rule['id'], tier1_neutron_id) + continue + policy_id = 'ND-' + subnet_id + # SNAT rules for subnet or fip + elif 
rule['action'] == 'SNAT': + cidr = rule['match_source_network'] + if '/' in cidr: + seq_num = p_plugin.NAT_RULE_PRIORITY_GW + subnet_id = _get_subnet_by_cidr(router_subnets, cidr) + if not subnet_id: + LOG.error("Could not find subnet with cidr %s " + "matching SNAT rule %s tier1 %s", + cidr, rule['id'], tier1_neutron_id) + continue + policy_id = 'S-' + subnet_id + else: + # FIP rule + seq_num = p_plugin.NAT_RULE_PRIORITY_FIP + fip_ip = rule['translated_network'] + filters = {'floating_ip_address': [fip_ip]} + fips = plugin.get_floatingips(ctx, filters) + if not fips: + LOG.error("Could not find FIP with ip %s matching " + "SNAT rule %s tier1 %s", + fip_ip, rule['id'], tier1_neutron_id) + continue + policy_id = 'S-' + fips[0]['id'] + # DNAT rules for fip + elif rule['action'] == 'DNAT': + # FIP rule + seq_num = p_plugin.NAT_RULE_PRIORITY_FIP + fip_ip = rule['match_destination_network'] + filters = {'floating_ip_address': [fip_ip]} + fips = plugin.get_floatingips(ctx, filters) + if not fips: + LOG.error("Could not find FIP with ip %s matching DNAT " + "rule %s tier1 %s", + fip_ip, rule['id'], tier1_neutron_id) + continue + policy_id = 'D-' + fips[0]['id'] + else: + LOG.error("Unknown NAT action %s for rule %s tier1 %s", + rule['action'], rule['id'], tier1_neutron_id) + continue + + entry = {'manager_id': rule['id'], + 'policy_id': policy_id, + 'metadata': [{'key': 'SEQUENCE_NUMBER', + 'value': seq_num}], + 'linked_ids': [{'key': 'TIER1', + 'value': tier1_mp_id}]} + entries.append(entry) + migrate_resource(nsxlib, 'NAT', entries, + MIGRATE_LIMIT_NAT, verbose=verbose) + + +def migrate_tier0_config(nsxlib, nsxpolicy, tier0s, verbose=None): + """Migrate ports and config for the already migrated Tier0s""" + + entries = [] + for tier0 in tier0s: + uplink_port = nsxlib.logical_router_port.get_tier0_uplink_port(tier0) + if uplink_port: + entries.append({'manager_id': uplink_port['id']}) + + migrate_resource(nsxlib, 'TIER0_LOGICAL_ROUTER_PORT', entries, + MIGRATE_LIMIT_TIER0_PORTS, use_admin=True, + verbose=verbose) + + def get_policy_id(resource, policy_id): + # No policy id needed here + return + + def cond(resource): + # Import config only for the routers that were currently migrated + # because there is no easy way to verify what was already migrated + return resource['id'] in tier0s + + entries = get_resource_migration_data( + nsxlib.logical_router, [], + 'TIER0_LOGICAL_ROUTER_CONFIG', + policy_id_callback=get_policy_id, + resource_condition=cond, + skip_policy_path_check=True, + verbose=verbose) + migrate_resource(nsxlib, 'TIER0_LOGICAL_ROUTER_CONFIG', entries, + MIGRATE_LIMIT_TIER0, use_admin=True, + verbose=verbose) + + +def migrate_groups(nsxlib, nsxpolicy, verbose=False): + """Migrate NS groups of neutron defined security groups and predefined at + plugin init + """ + def get_policy_id_callback(res, policy_id): + # In case of plugin init groups: give it the id the policy plugin + # will use + if res.get('display_name') == \ + v3_plugin.NSX_V3_FW_DEFAULT_NS_GROUP: + return p_plugin.NSX_P_DEFAULT_GROUP + + if res.get('display_name') == \ + v3_plugin.NSX_V3_EXCLUDED_PORT_NSGROUP_NAME: + return p_plugin.NSX_P_EXCLUDE_LIST_GROUP + + return policy_id + + def get_policy_group(group_id, silent=False): + return nsxpolicy.group.get(policy_constants.DEFAULT_DOMAIN, group_id, + silent=silent) + + entries = get_resource_migration_data( + nsxlib.ns_group, + ['os-neutron-secgr-id', 'os-neutron-id'], + 'NS_GROUP', + policy_resource_get=get_policy_group, + policy_id_callback=get_policy_id_callback, + 
verbose=verbose) + migrate_resource(nsxlib, 'NS_GROUP', entries, MIGRATE_LIMIT_NS_GROUP, + verbose=verbose) + + +def migrate_dfw_sections(nsxlib, nsxpolicy, plugin, verbose=False): + def get_policy_id_callback(res, policy_id): + # In case of plugin init section: give it the id the policy plugin + # will use + if res.get('display_name') == \ + v3_plugin.NSX_V3_FW_DEFAULT_SECTION: + return p_plugin.NSX_P_DEFAULT_SECTION + + return policy_id + + def cond(resource): + return (resource.get('enforced_on') == 'VIF' and + resource.get('category') == 'Default' and + resource.get('section_type') == 'LAYER3' and + not resource.get('is_default') and + # Migrate only DFW sections only and no edge FW sections + resource['applied_tos'][0].get('target_type', '') == 'NSGroup') + + def add_metadata(entry, policy_id, resource): + # Add category, sequence, domain, and rule ids + ctx = context.get_admin_context() + category = p_plugin.NSX_P_REGULAR_SECTION_CATEGORY + if policy_id == p_plugin.NSX_P_DEFAULT_SECTION: + category = p_plugin.NSX_P_DEFAULT_SECTION_CATEGORY + else: + sg = plugin.get_security_group(ctx, policy_id) + provider = sg.get('provider') + if provider: + category = p_plugin.NSX_P_PROVIDER_SECTION_CATEGORY + + global DFW_SEQ + metadata = [{'key': "category", 'value': category}, + {'key': "sequence", 'value': str(DFW_SEQ)}] + DFW_SEQ = DFW_SEQ + 1 + + # Add the rules + rules = nsxlib.firewall_section.get_rules(resource['id'])['results'] + linked_ids = [] + seq = 1 + for rule in rules: + linked_ids.append({'key': rule['id'], 'value': str(seq)}) + if policy_id == p_plugin.NSX_P_DEFAULT_SECTION: + # Default section rule ids are their seq numbers + linked_ids.append({'key': "%s-policyid" % rule['id'], + 'value': seq}) + else: + # The display name of the MP rule is the neutron id, and this + # will become the policy id + linked_ids.append({'key': "%s-policyid" % rule['id'], + 'value': rule['display_name']}) + seq = seq + 1 + entry['metadata'] = metadata + entry['linked_ids'] = linked_ids + + def get_policy_section(sec_id, silent=False): + return nsxpolicy.comm_map.get(policy_constants.DEFAULT_DOMAIN, sec_id, + silent=silent) + + entries = get_resource_migration_data( + nsxlib.firewall_section, + ['os-neutron-secgr-id', 'os-neutron-id'], + 'DFW_SECTION', resource_condition=cond, + policy_resource_get=get_policy_section, + policy_id_callback=get_policy_id_callback, + metadata_callback=add_metadata, + verbose=verbose) + migrate_resource(nsxlib, 'DFW_SECTION', entries, + MIGRATE_LIMIT_SECTION_AND_RULES, + count_internals=True, + verbose=verbose) + + +def migrate_dhcp_servers(nsxlib, nsxpolicy, verbose=False): + # Each MP DHCP server will be migrated to a policy DHCP server config + # which will be used by a segment later. 
It will get the neutron network id + entries = get_resource_migration_data( + nsxlib.dhcp_server, + ['os-neutron-net-id'], + 'DHCP_SERVER', + policy_resource_get=nsxpolicy.dhcp_server_config.get, + verbose=verbose) + migrate_resource(nsxlib, 'DHCP_SERVER', entries, + MIGRATE_LIMIT_DHCP_SERVER, + count_internals=True, + verbose=verbose) + + +def migrate_lb_resources(nsxlib, nsxpolicy, verbose=False): + migrate_lb_monitors(nsxlib, nsxpolicy, verbose=verbose) + migrate_lb_pools(nsxlib, nsxpolicy, verbose=verbose) + migrate_lb_profiles(nsxlib, nsxpolicy, verbose=verbose) + migrate_lb_listeners(nsxlib, nsxpolicy, verbose=verbose) + migrate_lb_services(nsxlib, nsxpolicy, verbose=verbose) + + +def _migrate_lb_resource(nsxlib, nsxpolicy, neutron_tag, api_name, + migration_name, limit, + policy_api_name=None, + policy_id_callback=None, verbose=False): + if not policy_api_name: + policy_api_name = api_name + entries = get_resource_migration_data( + getattr(nsxlib.load_balancer, api_name), + [neutron_tag], + migration_name, + policy_resource_get=getattr(nsxpolicy.load_balancer, + policy_api_name).get, + policy_id_callback=policy_id_callback, + verbose=verbose) + migrate_resource(nsxlib, migration_name, entries, + limit, + verbose=verbose) + + +def migrate_lb_listeners(nsxlib, nsxpolicy, verbose=False): + _migrate_lb_resource(nsxlib, nsxpolicy, + lb_const.LB_LISTENER_TYPE, + 'virtual_server', + 'LB_VIRTUAL_SERVER', + MIGRATE_LIMIT_LB_VIRTUAL_SERVER, + verbose=verbose) + + +def migrate_lb_pools(nsxlib, nsxpolicy, verbose=False): + _migrate_lb_resource(nsxlib, nsxpolicy, + lb_const.LB_POOL_TYPE, + 'pool', + 'LB_POOL', + MIGRATE_LIMIT_LB_POOL, + policy_api_name='lb_pool', + verbose=verbose) + + +def migrate_lb_monitors(nsxlib, nsxpolicy, verbose=False): + _migrate_lb_resource(nsxlib, nsxpolicy, + lb_const.LB_HM_TYPE, + 'monitor', + 'LB_MONITOR', + MIGRATE_LIMIT_LB_MONITOR, + policy_api_name='lb_monitor_profile_http', + verbose=verbose) + + +def migrate_lb_profiles(nsxlib, nsxpolicy, verbose=False): + _migrate_lb_resource(nsxlib, nsxpolicy, + lb_const.LB_LISTENER_TYPE, + 'application_profile', + 'LB_APPLICATION_PROFILE', + MIGRATE_LIMIT_LB_APP_PROFILE, + policy_api_name='lb_http_profile', + verbose=verbose) + + def get_policy_id_callback(res, policy_id): + # The input policy id is the pool id + # Need to add a suffix regarding the type of persistence + if (res.get('resource_type') == + nsxlib_lb.PersistenceProfileTypes.SOURCE_IP): + return "%s_%s" % (policy_id, 'sourceip') + else: + return "%s_%s" % (policy_id, 'cookie') + + _migrate_lb_resource(nsxlib, nsxpolicy, + lb_const.LB_POOL_TYPE, + 'persistence_profile', + 'LB_PERSISTENCE_PROFILE', + MIGRATE_LIMIT_LB_PER_PROFILE, + policy_api_name='lb_persistence_profile', + policy_id_callback=get_policy_id_callback, + verbose=verbose) + + +def migrate_lb_services(nsxlib, nsxpolicy, verbose=False): + + def get_policy_id_callback(res, policy_id): + # LB service is shared between few octavia loadbalancers + # so the policy id is not the LB id, and those should be marked + # in the tags of the policy resource. 
+ # Keep the same id as MP so later we can search the MP DB + # and update the tags + return res['id'] + + entries = get_resource_migration_data( + nsxlib.load_balancer.service, + ['os-api-version'], + 'LB_SERVICE', + policy_resource_get=nsxpolicy.load_balancer.lb_service.get, + policy_id_callback=get_policy_id_callback, + verbose=verbose) + migrate_resource(nsxlib, 'LB_SERVICE', entries, + MIGRATE_LIMIT_LB_SERVICE, + verbose=verbose) + + +def migrate_fwaas_resources(nsxlib, nsxpolicy, migrated_routers, + verbose=False): + def get_policy_id_callback(res, policy_id): + # Policy id should be the Policy tier1 id (=neutron id) + ctx = context.get_admin_context() + nsx_id = res['applied_tos'][0]['target_id'] + return db.get_neutron_from_nsx_router_id( + ctx.session, nsx_id) + + def cond(resource): + # Migrate only Edge firewalls related to the migrated tier1s + return (resource.get('display_name') == 'Default LR Layer3 Section' and + resource.get('enforced_on') == 'LOGICALROUTER' and + resource.get('category') == 'Default' and + resource.get('section_type') == 'LAYER3' and + resource['applied_tos'][0].get( + 'target_id', '') in migrated_routers) + + def add_metadata(entry, policy_id, resource): + # Add category, sequence and rule ids + global EDGE_FW_SEQ + metadata = [{'key': 'category', + 'value': policy_constants.CATEGORY_LOCAL_GW}, + {'key': 'sequence', 'value': str(EDGE_FW_SEQ)}] + EDGE_FW_SEQ = EDGE_FW_SEQ + 1 + + # Add the rules + rules = nsxlib.firewall_section.get_rules(resource['id'])['results'] + linked_ids = [] + seq = 1 + for rule in rules: + linked_ids.append({'key': rule['id'], 'value': str(seq)}) + # The id of the policy rule will be random + seq = seq + 1 + entry['metadata'] = metadata + entry['linked_ids'] = linked_ids + + def get_policy_section(sec_id, silent=False): + return nsxpolicy.gateway_policy.get( + policy_constants.DEFAULT_DOMAIN, sec_id, silent=silent) + + entries = get_resource_migration_data( + nsxlib.firewall_section, None, + 'EDGE_FIREWALL_SECTION', resource_condition=cond, + policy_resource_get=get_policy_section, + policy_id_callback=get_policy_id_callback, + metadata_callback=add_metadata, + verbose=verbose) + # Edge firewall migration is not supported yet + migrate_resource(nsxlib, 'EDGE_FIREWALL_SECTION', entries, + MIGRATE_LIMIT_SECTION_AND_RULES, + count_internals=True, + verbose=verbose) + + +def migrate_t_resources_2_p(nsxlib, nsxpolicy, plugin, verbose=False): + """Create policy resources for all MP resources used by neutron""" + + nsx_version = nsxlib.get_version() + if not nsx_utils.is_nsx_version_3_1_0(nsx_version): + LOG.error("Migration not supported for NSX %s", nsx_version) + return False + + # Initialize the migration process + if not ensure_migration_state_ready( + nsxlib, with_abort=True, verbose=verbose): + return False + + try: + LOG.info("\n\nStarting resources migration") + + start_migration_process(nsxlib) + + # Migration order derives from the dependencies between resources + public_switches, tier0s = migrate_tier0s(nsxlib, nsxpolicy, plugin, + verbose=verbose) + migrate_md_proxies(nsxlib, nsxpolicy, plugin, verbose=verbose) + migrate_switch_profiles(nsxlib, nsxpolicy, plugin, verbose=verbose) + migrate_groups(nsxlib, nsxpolicy, verbose=verbose) + migrate_dhcp_servers(nsxlib, nsxpolicy, verbose=verbose) + mp_routers = migrate_routers(nsxlib, nsxpolicy, verbose=verbose) + migrate_networks(nsxlib, nsxpolicy, public_switches, verbose=verbose) + migrate_ports(nsxlib, nsxpolicy, plugin, verbose=verbose) + migrate_routers_config(nsxlib, 
nsxpolicy, plugin, mp_routers, + verbose=verbose) + migrate_tier0_config(nsxlib, nsxpolicy, tier0s, verbose=verbose) + migrate_lb_resources(nsxlib, nsxpolicy, verbose=verbose) + + # Migrate firewall sections last as those take the longest to rollback + # in case of error + migrate_dfw_sections(nsxlib, nsxpolicy, plugin, verbose=verbose) + migrate_fwaas_resources(nsxlib, nsxpolicy, mp_routers, verbose=verbose) + + # Finalize the migration (cause policy realization) + end_migration_process(nsxlib) + + # Stop the migration service + change_migration_service_status(start=False) + + return True + + except Exception as e: + # Migration failed - abort it + LOG.error("Exception occurred while making the request: %s", e) + try: + LOG.info("Aborting the current request") + send_migration_plan_action(nsxlib, 'abort') + global ROLLBACK_DATA + if ROLLBACK_DATA: + LOG.info("Rolling migration back %s", ROLLBACK_DATA) + send_rollback_request(nsxlib, + {'migration_data': ROLLBACK_DATA}) + # Finalize the migration (Also needed after rollback) + end_migration_process(nsxlib) + # Stop the migration service + change_migration_service_status(start=False) + except Exception as e: + LOG.error("Rollback failed: %s", e) + return False + + +def _get_network_nsx_segment_id(ctx, net_id): + bindings = db.get_network_bindings(ctx.session, net_id) + if (bindings and + bindings[0].binding_type == + nsx_utils.NsxV3NetworkTypes.NSX_NETWORK): + # return the ID of the NSX network + return bindings[0].phy_uuid + return net_id + + +def _delete_segment_profiles_bindings(nsxpolicy, segment_id, verbose=False): + found = False + sec_profiles = nsxpolicy.segment_security_profile_maps.list(segment_id) + for profile in sec_profiles: + found = True + nsxpolicy.segment_security_profile_maps.delete( + segment_id, profile['id']) + + qos_profiles = nsxpolicy.segment_qos_profile_maps.list(segment_id) + for profile in qos_profiles: + found = True + nsxpolicy.segment_qos_profile_maps.delete( + segment_id, profile['id']) + + discovery_profiles = nsxpolicy.segment_discovery_profile_maps.list( + segment_id) + for profile in discovery_profiles: + found = True + nsxpolicy.segment_discovery_profile_maps.delete( + segment_id, profile['id']) + + if verbose and found: + LOG.info("Removed profiles mappings from segment %s", segment_id) + + +def post_migration_actions(nsxlib, nsxpolicy, plugin, verbose=False): + # Update created policy resources that does not match the policy plugins' + # expectations + LOG.info("\n\nStarting post-migration actions") + ctx = context.get_admin_context() + + # -- Update Lb tags on loadbalancer service + pol_lb_services = nsxpolicy.load_balancer.lb_service.list() + for lb_srv in pol_lb_services: + # Verify this is a neutron resource + if not is_neutron_resource(lb_srv): + continue + # Check if it already has the LB id tag + migrated = False + for tag in lb_srv.get('tags', []): + if tag['scope'] == lb_utils.SERVICE_LB_TAG_SCOPE: + migrated = True + break + if migrated: + continue + + # Find the loadbalancers using this service from the DB + lb_mapping = db.get_nsx_lbaas_loadbalancer_binding_by_service( + ctx.session, lb_srv['id']) + if lb_mapping: + if 'tags' not in lb_srv: + lb_srv['tags'] = [] + loadbalancers = [lb_map.loadbalancer_id for lb_map in lb_mapping] + for lb_id in loadbalancers: + lb_srv['tags'].append({'scope': lb_utils.SERVICE_LB_TAG_SCOPE, + 'tag': lb_id}) + nsxpolicy.load_balancer.lb_service.update( + lb_srv['id'], tags=lb_srv['tags']) + if verbose: + LOG.info("Added tags to LB service %s", lb_srv['id']) + 
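# --- Editorial sketch, not part of the diff ----------------------------------
# The loop above adds the octavia loadbalancer ids (taken from the MP DB
# mappings) as tags on each migrated, neutron-owned policy LB service. A
# minimal spot-check after running "nsxadmin -r nsx-migrate-t2p -o import"
# could list the services again and report any neutron-owned one still
# missing that tag. Only names already used in this patch are assumed
# (lb_service.list(), is_neutron_resource(), lb_utils.SERVICE_LB_TAG_SCOPE);
# the helper itself is hypothetical and presumes the migration.py module
# context for its imports.
def _lb_services_missing_lb_tag(nsxpolicy):
    missing = []
    for lb_srv in nsxpolicy.load_balancer.lb_service.list():
        if not is_neutron_resource(lb_srv):
            # Skip services that were not created by the neutron plugin
            continue
        tags = lb_srv.get('tags', [])
        if not any(tag.get('scope') == lb_utils.SERVICE_LB_TAG_SCOPE
                   for tag in tags):
            missing.append(lb_srv['id'])
    return missing
# ------------------------------------------------------------------------------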
+    # -- Update Lb L7 rules names
+    mp_lb_rules = nsxlib.load_balancer.rule.list()['results']
+    for mp_rule in mp_lb_rules:
+        l7pol_id = None
+        listener_id = None
+        for tag in mp_rule.get('tags', []):
+            if tag['scope'] == lb_const.LB_L7POLICY_TYPE:
+                l7pol_id = tag['tag']
+            if tag['scope'] == 'policyPath':
+                listener_id = policy_utils.path_to_id(tag['tag'])
+
+        if not l7pol_id or not listener_id:
+            continue
+        pol_vs = nsxpolicy.load_balancer.virtual_server.get(listener_id)
+        pol_rules = pol_vs['rules']
+        for pol_rule in pol_rules:
+            if pol_rule['display_name'] == mp_rule['id']:
+                new_name = nsx_utils.get_name_and_uuid('policy', l7pol_id)
+                pol_rule['display_name'] = new_name
+                nsxpolicy.load_balancer.virtual_server.update_lb_rules(
+                    listener_id, pol_rules)
+                if verbose:
+                    LOG.info("Updated L7 policy %s name on the virtual server",
+                             l7pol_id)
+                break
+
+    # -- Create DHCP server configs to be used in neutron config
+    # (The migration does not migrate MP DHCP profiles)
+    neutron_dhcp = get_configured_values(plugin, '_native_dhcp_profile_uuid')
+    for mp_dhcp in neutron_dhcp:
+        # check if it was already migrated
+        try:
+            nsxpolicy.dhcp_server_config.get(mp_dhcp, silent=True)
+        except Exception:
+            # Create it
+            mp_obj = nsxlib.native_dhcp_profile.get(mp_dhcp)
+            nsxpolicy.dhcp_server_config.create_or_overwrite(
+                mp_obj['display_name'],
+                config_id=mp_dhcp,
+                description=mp_obj.get('description', ''),
+                edge_cluster_path=nsxpolicy.edge_cluster.get_path(
+                    mp_obj['edge_cluster_id']))
+            if verbose:
+                LOG.info("Created DHCP server config %s for plugin config",
+                         mp_dhcp)
+
+    # -- Update Policy segments:
+    # Set subnets GW for networks without linked routers
+    # And remove unused segment profiles mappings
+    networks = plugin.get_networks(ctx)
+    for net in networks:
+        if net.get('router:external'):
+            continue
+        seg_id = _get_network_nsx_segment_id(ctx, net['id'])
+        if seg_id == net['id']:
+            # This is not an nsx-net. Delete the bindings
+            _delete_segment_profiles_bindings(nsxpolicy, seg_id, verbose)
+
+        if plugin._get_network_router_ids(ctx, net['id']):
+            continue
+        # verify that this network has a dhcp subnet
+        subnets = plugin.get_subnets_by_network(ctx, net['id'])
+        for subnet in subnets:
+            if subnet['ip_version'] == 4 and subnet['enable_dhcp']:
+                # Update backend subnet
+                segment = nsxpolicy.segment.get(seg_id)
+                subnets = segment.get('subnets', [])
+                if subnets and len(subnets) == 1:
+                    cidr_prefix = int(subnet['cidr'].split('/')[1])
+                    gw = "%s/%s" % (subnet['gateway_ip'], cidr_prefix)
+                    subnets[0]['gateway_address'] = gw
+                    nsxpolicy.segment.update(seg_id, subnets=subnets)
+                    if verbose:
+                        LOG.info("Updated gateway of network %s", net['id'])
+                break
+
+    # Update tags on DFW sections
+    routers = plugin.get_routers(ctx)
+    for rtr in routers:
+        try:
+            # Check if the edge firewall section exists
+            nsxpolicy.gateway_policy.get(
+                policy_constants.DEFAULT_DOMAIN, map_id=rtr['id'],
+                silent=True)
+        except nsxlib_exc.ResourceNotFound:
+            pass
+        else:
+            # Update section tags
+            tags = nsxpolicy.build_v3_tags_payload(
+                rtr, resource_type='os-neutron-router-id',
+                project_name=ctx.tenant_name)
+            nsxpolicy.gateway_policy.update(
+                policy_constants.DEFAULT_DOMAIN,
+                map_id=rtr['id'],
+                tags=tags)
+            if verbose:
+                LOG.info("Updated tags of gateway policy for router %s",
+                         rtr['id'])
+
+    LOG.info("Post-migration actions done.")
+
+
+@admin_utils.output_header
+def t_2_p_migration(resource, event, trigger, **kwargs):
+    """Migrate NSX resources and neutron DB from NSX-T (MP) to Policy"""
+
+    verbose = kwargs.get('verbose', False)
+
+    # Initialize the nsxlib objects, using just one of the managers because
+    # the migration will be enabled only on one
+    if len(cfg.CONF.nsx_v3.nsx_api_managers) > 1:
+        cfg.CONF.set_override(
+            'nsx_api_managers',
+            [cfg.CONF.nsx_v3.nsx_api_managers[0]],
+            'nsx_v3')
+        if (len(cfg.CONF.nsx_v3.nsx_api_user) and
+                len(cfg.CONF.nsx_v3.nsx_api_password)):
+            cfg.CONF.set_override(
+                'nsx_api_user',
+                [cfg.CONF.nsx_v3.nsx_api_user[0]],
+                'nsx_v3')
+            cfg.CONF.set_override(
+                'nsx_api_password',
+                [cfg.CONF.nsx_v3.nsx_api_password[0]],
+                'nsx_v3')
+        else:
+            LOG.error("Please provide nsx_api_user and nsx_api_password "
+                      "in the configuration")
+            return
+
+    nsxlib = utils.get_connected_nsxlib(verbose=verbose)
+    nsxpolicy = p_utils.get_connected_nsxpolicy(
+        conf_path=cfg.CONF.nsx_v3, verbose=verbose)
+
+    with utils.NsxV3PluginWrapper(verbose=verbose) as plugin:
+        res = migrate_t_resources_2_p(nsxlib, nsxpolicy, plugin,
+                                      verbose=verbose)
+        if not res:
+            # Failed
+            LOG.error("\n\nT2P migration failed. Aborting\n\n")
+            return
+
+        post_migration_actions(nsxlib, nsxpolicy, plugin, verbose=verbose)
+
+    LOG.info("\n\nT2P migration completed successfully\n\n")
+
+
+@admin_utils.output_header
+def cleanup_db_mappings(resource, event, trigger, **kwargs):
+    """Delete all entries from nsx-t mapping tables in DB"""
+    confirm = admin_utils.query_yes_no(
+        "Are you sure you want to delete all MP plugin mapping DB tables?",
+        default="no")
+    if not confirm:
+        LOG.info("Deletion aborted by user")
+        return
+
+    ctx = context.get_admin_context()
+    mp_mapping_tables = [nsx_models.NeutronNsxFirewallSectionMapping,
+                         nsx_models.NeutronNsxSecurityGroupMapping,
+                         nsx_models.NeutronNsxRuleMapping,
+                         nsx_models.NeutronNsxPortMapping,
+                         nsx_models.NeutronNsxRouterMapping,
+                         nsx_models.NeutronNsxServiceBinding,
+                         nsx_models.NeutronNsxDhcpBinding,
+                         nsx_models.QosPolicySwitchProfile,
+                         nsx_models.NsxLbaasLoadbalancer,
+                         nsx_models.NsxLbaasListener,
+                         nsx_models.NsxLbaasPool,
+                         nsx_models.NsxLbaasMonitor,
+                         nsx_models.NsxLbaasL7Rule,
+                         nsx_models.NsxLbaasL7Policy]
+    for table in mp_mapping_tables:
+        ctx.session.query(table).delete()
+
+    LOG.info("Deleted all MP plugin mapping DB tables.")
+
+
+registry.subscribe(t_2_p_migration,
+                   constants.NSX_MIGRATE_T_P,
+                   shell.Operations.IMPORT.value)
+
+registry.subscribe(cleanup_db_mappings,
+                   constants.NSX_MIGRATE_T_P,
+                   shell.Operations.CLEAN_ALL.value)
diff --git a/vmware_nsx/shell/admin/plugins/nsxv3/resources/utils.py b/vmware_nsx/shell/admin/plugins/nsxv3/resources/utils.py
index 168e436b9a..06c2f28666 100644
--- a/vmware_nsx/shell/admin/plugins/nsxv3/resources/utils.py
+++ b/vmware_nsx/shell/admin/plugins/nsxv3/resources/utils.py
@@ -12,6 +12,7 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+import logging
 
 from oslo_config import cfg
 
@@ -46,17 +47,29 @@ def get_nsxv3_client(nsx_username=None, nsx_password=None,
 
 def get_connected_nsxlib(nsx_username=None, nsx_password=None,
                          use_basic_auth=False,
-                         plugin_conf=None):
+                         plugin_conf=None,
+                         verbose=False):
     global _NSXLIB
-    # for non-default agruments, initiate new lib
+    if not verbose:
+        # Suppress logs for nsxlib init
+        logging.disable(logging.INFO)
+
+    # for non-default arguments, initiate new lib
     if nsx_username or use_basic_auth:
+        if not verbose:
+            # Return logs to normal
+            logging.disable(logging.NOTSET)
         return v3_utils.get_nsxlib_wrapper(nsx_username,
                                            nsx_password,
                                            use_basic_auth,
                                            plugin_conf)
     if _NSXLIB is None:
         _NSXLIB = v3_utils.get_nsxlib_wrapper(plugin_conf=plugin_conf)
+
+    if not verbose:
+        # Return logs to normal
+        logging.disable(logging.NOTSET)
     return _NSXLIB
@@ -117,13 +130,21 @@ class NeutronDbClient(db_base_plugin_v2.NeutronDbPluginV2):
 
 
 class NsxV3PluginWrapper(plugin.NsxV3Plugin):
-    def __init__(self):
+    def __init__(self, verbose=False):
+        if not verbose:
+            # Suppress logs for plugin init
+            logging.disable(logging.INFO)
+
         # initialize the availability zones
         config.register_nsxv3_azs(cfg.CONF,
                                   cfg.CONF.nsx_v3.availability_zones)
         super(NsxV3PluginWrapper, self).__init__()
         self.context = context.get_admin_context()
         admin_utils._init_plugin_mock_quota()
+
+        if not verbose:
+            # Return logs to normal
+            logging.disable(logging.NOTSET)
+
     def __enter__(self):
         directory.add_plugin(const.CORE, self)
         return self
@@ -131,6 +152,10 @@ class NsxV3PluginWrapper(plugin.NsxV3Plugin):
     def __exit__(self, exc_type, exc_value, traceback):
         directory.add_plugin(const.CORE, None)
 
+    def _cleanup_duplicates(self, ns_group_id, section_id):
+        # Do not remove DFW sections during dummy plugin initialization
+        pass
+
     def _init_fwaas_plugin(self, provider, callbacks_class, plugin_callbacks):
         fwaas_plugin_class = manager.NeutronManager.load_class_for_provider(
             'neutron.service_plugins', provider)
diff --git a/vmware_nsx/shell/resources.py b/vmware_nsx/shell/resources.py
index 0da98016f3..9ddbe56fae 100644
--- a/vmware_nsx/shell/resources.py
+++ b/vmware_nsx/shell/resources.py
@@ -154,7 +154,10 @@ nsxv3_resources = {
     constants.LB_ADVERTISEMENT: Resource(constants.LB_ADVERTISEMENT,
                                          [Operations.NSX_UPDATE.value]),
     constants.CLUSTER: Resource(constants.CLUSTER,
-                                [Operations.SHOW.value])
+                                [Operations.SHOW.value]),
+    constants.NSX_MIGRATE_T_P: Resource(constants.NSX_MIGRATE_T_P,
+                                        [Operations.IMPORT.value,
+                                         Operations.CLEAN_ALL.value]),
 }
 
 # Add supported NSX-V resources in this dictionary
@@ -276,6 +279,8 @@ nsxp_resources = {
                                 Operations.NSX_LIST.value]),
     constants.SYSTEM: Resource(constants.SYSTEM,
                                [Operations.SET.value]),
+    constants.NSX_MIGRATE_T_P: Resource(constants.NSX_MIGRATE_T_P,
+                                        [Operations.CLEAN_ALL.value]),
 }
 
 nsxv3_resources_names = list(nsxv3_resources.keys())
diff --git a/vmware_nsx/tests/unit/services/lbaas/test_nsxp_driver.py b/vmware_nsx/tests/unit/services/lbaas/test_nsxp_driver.py
index bca5eeaf57..7d2f9766af 100644
--- a/vmware_nsx/tests/unit/services/lbaas/test_nsxp_driver.py
+++ b/vmware_nsx/tests/unit/services/lbaas/test_nsxp_driver.py
@@ -1893,7 +1893,8 @@ class TestEdgeLbaasV2L7Policy(BaseTestEdgeLbaasV2):
         ) as mock_vs_remove_rule:
             self.edge_driver.l7policy.delete(
                 self.context, self.l7policy_dict, self.completor)
-            mock_vs_remove_rule.assert_called_with(LB_VS_ID, mock.ANY)
+            mock_vs_remove_rule.assert_called_with(LB_VS_ID, mock.ANY,
+                                                   check_name_suffix=True)
             self.assertTrue(self.last_completor_called)
             self.assertTrue(self.last_completor_succees)
 
@@ -1903,7 +1904,8 @@ class TestEdgeLbaasV2L7Policy(BaseTestEdgeLbaasV2):
         ) as mock_vs_remove_rule:
             self.edge_driver.l7policy.delete_cascade(
                 self.context, self.l7policy_dict, self.completor)
-            mock_vs_remove_rule.assert_called_with(LB_VS_ID, mock.ANY)
+            mock_vs_remove_rule.assert_called_with(LB_VS_ID, mock.ANY,
+                                                   check_name_suffix=True)
             self.assertTrue(self.last_completor_called)
             self.assertTrue(self.last_completor_succees)