diff --git a/neutron/plugins/ml2/drivers/ovn/mech_driver/mech_driver.py b/neutron/plugins/ml2/drivers/ovn/mech_driver/mech_driver.py new file mode 100644 index 00000000000..7776c7c3ca9 --- /dev/null +++ b/neutron/plugins/ml2/drivers/ovn/mech_driver/mech_driver.py @@ -0,0 +1,1151 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +import atexit +import copy +import functools +import operator +import signal +import threading +import types + +from neutron_lib.api.definitions import portbindings +from neutron_lib.callbacks import events +from neutron_lib.callbacks import registry +from neutron_lib.callbacks import resources +from neutron_lib import constants as const +from neutron_lib import context as n_context +from neutron_lib import exceptions as n_exc +from neutron_lib.plugins import directory +from neutron_lib.plugins.ml2 import api +from neutron_lib.services.qos import constants as qos_consts +from neutron_lib.services.trunk import constants as trunk_consts +from oslo_config import cfg +from oslo_db import exception as os_db_exc +from oslo_log import log +from oslo_utils import timeutils +from ovsdbapp.backend.ovs_idl import idlutils + +from neutron._i18n import _ +from neutron.common.ovn import acl as ovn_acl +from neutron.common.ovn import constants as ovn_const +from neutron.common.ovn import utils as ovn_utils +from neutron.common import utils as n_utils +from neutron.conf.plugins.ml2.drivers.ovn import ovn_conf +from neutron.db import ovn_hash_ring_db +from neutron.db import ovn_revision_numbers_db +from neutron.db import provisioning_blocks +from neutron.plugins.ml2 import db as ml2_db +from neutron.plugins.ml2.drivers.ovn.mech_driver.ovsdb import impl_idl_ovn +from neutron.plugins.ml2.drivers.ovn.mech_driver.ovsdb import maintenance +from neutron.plugins.ml2.drivers.ovn.mech_driver.ovsdb import ovn_client +from neutron.plugins.ml2.drivers.ovn.mech_driver.ovsdb import ovn_db_sync +from neutron.plugins.ml2.drivers.ovn.mech_driver.ovsdb import worker +from neutron.services.qos.drivers.ovn import driver as qos_driver +from neutron.services.segments import db as segment_service_db +from neutron.services.trunk.drivers.ovn import trunk_driver + + +LOG = log.getLogger(__name__) +METADATA_READY_WAIT_TIMEOUT = 15 +AGENTS = {} + + +class MetadataServiceReadyWaitTimeoutException(Exception): + pass + + +class OVNPortUpdateError(n_exc.BadRequest): + pass + + +class OVNMechanismDriver(api.MechanismDriver): + """OVN ML2 mechanism driver + + A mechanism driver is called on the creation, update, and deletion + of networks and ports. For every event, there are two methods that + get called - one within the database transaction (method suffix of + _precommit), one right afterwards (method suffix of _postcommit). + + Exceptions raised by methods called inside the transaction can + rollback, but should not make any blocking calls (for example, + REST requests to an outside controller). 
Methods called after + transaction commits can make blocking external calls, though these + will block the entire process. Exceptions raised in calls after + the transaction commits may cause the associated resource to be + deleted. + + Because rollback outside of the transaction is not done in the + update network/port case, all data validation must be done within + methods that are part of the database transaction. + """ + + supported_qos_rule_types = [qos_consts.RULE_TYPE_BANDWIDTH_LIMIT] + + def initialize(self): + """Perform driver initialization. + + Called after all drivers have been loaded and the database has + been initialized. No abstract methods defined below will be + called prior to this method being called. + """ + LOG.info("Starting OVNMechanismDriver") + self._nb_ovn = None + self._sb_ovn = None + self._plugin_property = None + self._ovn_client_inst = None + self._maintenance_thread = None + self.node_uuid = None + self.hash_ring_group = ovn_const.HASH_RING_ML2_GROUP + self.sg_enabled = ovn_acl.is_sg_enabled() + # NOTE(lucasagomes): _clean_hash_ring() must be called before + # self.subscribe() to avoid processes racing when adding or + # deleting nodes from the Hash Ring during service initialization + self._clean_hash_ring() + self._post_fork_event = threading.Event() + if cfg.CONF.SECURITYGROUP.firewall_driver: + LOG.warning('Firewall driver configuration is ignored') + self._setup_vif_port_bindings() + self.subscribe() + self.qos_driver = qos_driver.OVNQosNotificationDriver.create(self) + self.trunk_driver = trunk_driver.OVNTrunkDriver.create(self) + + @property + def _plugin(self): + if self._plugin_property is None: + self._plugin_property = directory.get_plugin() + return self._plugin_property + + @property + def _ovn_client(self): + if self._ovn_client_inst is None: + if not(self._nb_ovn and self._sb_ovn): + # Wait until the post_fork_initialize method has finished and + # IDLs have been correctly setup. 
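As an aside, the gate used here (every API and RPC worker blocks on a threading.Event until post_fork_initialize() has built the IDL connections, and only then lazily constructs the client) reduces to a small self-contained sketch. Class and method names below are illustrative, not part of this patch:

import threading

class PostForkGate:
    """Toy version of the _post_fork_event handshake used above."""

    def __init__(self):
        self._ready = threading.Event()   # stays cleared until IDLs exist
        self._client = None

    def on_post_fork(self, nb_idl, sb_idl):
        # Runs once in each worker, like post_fork_initialize().
        self._nb, self._sb = nb_idl, sb_idl
        self._ready.set()                 # wake up any blocked callers

    @property
    def client(self):
        if self._client is None:
            self._ready.wait()            # block until on_post_fork() ran
            self._client = (self._nb, self._sb)
        return self._client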
+ self._post_fork_event.wait() + self._ovn_client_inst = ovn_client.OVNClient(self._nb_ovn, + self._sb_ovn) + return self._ovn_client_inst + + def _setup_vif_port_bindings(self): + self.supported_vnic_types = [portbindings.VNIC_NORMAL, + portbindings.VNIC_DIRECT] + self.vif_details = { + portbindings.VIF_TYPE_OVS: { + portbindings.CAP_PORT_FILTER: self.sg_enabled + }, + portbindings.VIF_TYPE_VHOST_USER: { + portbindings.CAP_PORT_FILTER: False, + portbindings.VHOST_USER_MODE: + portbindings.VHOST_USER_MODE_SERVER, + portbindings.VHOST_USER_OVS_PLUG: True + }, + portbindings.VIF_DETAILS_CONNECTIVITY: + portbindings.CONNECTIVITY_L2, + } + + def subscribe(self): + registry.subscribe(self.pre_fork_initialize, + resources.PROCESS, + events.BEFORE_SPAWN) + registry.subscribe(self.post_fork_initialize, + resources.PROCESS, + events.AFTER_INIT) + registry.subscribe(self._add_segment_host_mapping_for_segment, + resources.SEGMENT, + events.AFTER_CREATE) + + # Handle security group/rule notifications + if self.sg_enabled: + registry.subscribe(self._create_security_group_precommit, + resources.SECURITY_GROUP, + events.PRECOMMIT_CREATE) + registry.subscribe(self._update_security_group, + resources.SECURITY_GROUP, + events.AFTER_UPDATE) + registry.subscribe(self._create_security_group, + resources.SECURITY_GROUP, + events.AFTER_CREATE) + registry.subscribe(self._delete_security_group, + resources.SECURITY_GROUP, + events.AFTER_DELETE) + registry.subscribe(self._create_sg_rule_precommit, + resources.SECURITY_GROUP_RULE, + events.PRECOMMIT_CREATE) + registry.subscribe(self._process_sg_rule_notification, + resources.SECURITY_GROUP_RULE, + events.AFTER_CREATE) + registry.subscribe(self._process_sg_rule_notification, + resources.SECURITY_GROUP_RULE, + events.BEFORE_DELETE) + + def _clean_hash_ring(self, *args, **kwargs): + admin_context = n_context.get_admin_context() + ovn_hash_ring_db.remove_nodes_from_host(admin_context, + self.hash_ring_group) + + def pre_fork_initialize(self, resource, event, trigger, payload=None): + """Pre-initialize the ML2/OVN driver.""" + atexit.register(self._clean_hash_ring) + signal.signal(signal.SIGTERM, self._clean_hash_ring) + + def post_fork_initialize(self, resource, event, trigger, payload=None): + # NOTE(rtheis): This will initialize all workers (API, RPC, + # plugin service and OVN) with OVN IDL connections. + self._post_fork_event.clear() + self._ovn_client_inst = None + + is_maintenance = (ovn_utils.get_method_class(trigger) == + worker.MaintenanceWorker) + if not is_maintenance: + admin_context = n_context.get_admin_context() + self.node_uuid = ovn_hash_ring_db.add_node(admin_context, + self.hash_ring_group) + + self._nb_ovn, self._sb_ovn = impl_idl_ovn.get_ovn_idls( + self, trigger, binding_events=not is_maintenance) + + # AGENTS must be populated after fork so if ovn-controller is stopped + # before a worker handles a get_agents request, we still show agents + populate_agents(self) + + # Override agents API methods + self.patch_plugin_merge("get_agents", get_agents) + self.patch_plugin_choose("get_agent", get_agent) + self.patch_plugin_choose("update_agent", update_agent) + self.patch_plugin_choose("delete_agent", delete_agent) + + # Now IDL connections can be safely used. 
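The patch_plugin_merge()/patch_plugin_choose() calls above (the helpers are defined near the end of this module) monkey-patch the core plugin so that agent queries also cover OVN chassis. A reduced sketch of the merge variant, with stand-in names and data:

import functools
import operator

class FakePlugin:
    def get_agents(self, context):
        return [{'id': 'db-agent'}]          # what the plugin DB knows

def ovn_get_agents(context):
    return [{'id': 'ovn-controller-host1'}]  # derived from Chassis rows

def patch_merge(plugin, method_name, extra_fn, op=operator.add):
    old_method = getattr(plugin, method_name)

    @functools.wraps(old_method)
    def fn(*args, **kwargs):
        # Concatenate the original result with the OVN-backed one.
        return op(old_method(*args, **kwargs), extra_fn(*args, **kwargs))

    setattr(plugin, method_name, fn)

plugin = FakePlugin()
patch_merge(plugin, 'get_agents', ovn_get_agents)
print(plugin.get_agents(None))   # both the DB and the OVN agents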
+ self._post_fork_event.set() + + if is_maintenance: + # Call the synchronization task if its maintenance worker + # This sync neutron DB to OVN-NB DB only in inconsistent states + self.nb_synchronizer = ovn_db_sync.OvnNbSynchronizer( + self._plugin, + self._nb_ovn, + self._sb_ovn, + ovn_conf.get_ovn_neutron_sync_mode(), + self + ) + self.nb_synchronizer.sync() + + # This sync neutron DB to OVN-SB DB only in inconsistent states + self.sb_synchronizer = ovn_db_sync.OvnSbSynchronizer( + self._plugin, + self._sb_ovn, + self + ) + self.sb_synchronizer.sync() + + self._maintenance_thread = maintenance.MaintenanceThread() + self._maintenance_thread.add_periodics( + maintenance.DBInconsistenciesPeriodics(self._ovn_client)) + self._maintenance_thread.add_periodics( + maintenance.HashRingHealthCheckPeriodics( + self.hash_ring_group)) + self._maintenance_thread.start() + + def _create_security_group_precommit(self, resource, event, trigger, + security_group, context, **kwargs): + ovn_revision_numbers_db.create_initial_revision( + context, security_group['id'], ovn_const.TYPE_SECURITY_GROUPS) + + def _create_security_group(self, resource, event, trigger, + security_group, **kwargs): + self._ovn_client.create_security_group(security_group) + + def _delete_security_group(self, resource, event, trigger, + security_group_id, **kwargs): + self._ovn_client.delete_security_group(security_group_id) + + def _update_security_group(self, resource, event, trigger, + security_group, **kwargs): + # OVN doesn't care about updates to security groups, only if they + # exist or not. We are bumping the revision number here so it + # doesn't show as inconsistent to the maintenance periodic task + ovn_revision_numbers_db.bump_revision( + kwargs['context'], security_group, ovn_const.TYPE_SECURITY_GROUPS) + + def _create_sg_rule_precommit(self, resource, event, trigger, **kwargs): + sg_rule = kwargs.get('security_group_rule') + context = kwargs.get('context') + ovn_revision_numbers_db.create_initial_revision( + context, sg_rule['id'], ovn_const.TYPE_SECURITY_GROUP_RULES) + + def _process_sg_rule_notification( + self, resource, event, trigger, **kwargs): + if event == events.AFTER_CREATE: + self._ovn_client.create_security_group_rule( + kwargs.get('security_group_rule')) + elif event == events.BEFORE_DELETE: + admin_context = n_context.get_admin_context() + sg_rule = self._plugin.get_security_group_rule( + admin_context, kwargs.get('security_group_rule_id')) + self._ovn_client.delete_security_group_rule(sg_rule) + + def _is_network_type_supported(self, network_type): + return (network_type in [const.TYPE_LOCAL, + const.TYPE_FLAT, + const.TYPE_GENEVE, + const.TYPE_VLAN]) + + def _validate_network_segments(self, network_segments): + for network_segment in network_segments: + network_type = network_segment['network_type'] + segmentation_id = network_segment['segmentation_id'] + physical_network = network_segment['physical_network'] + LOG.debug('Validating network segment with ' + 'type %(network_type)s, ' + 'segmentation ID %(segmentation_id)s, ' + 'physical network %(physical_network)s', + {'network_type': network_type, + 'segmentation_id': segmentation_id, + 'physical_network': physical_network}) + if not self._is_network_type_supported(network_type): + msg = _('Network type %s is not supported') % network_type + raise n_exc.InvalidInput(error_message=msg) + + def create_network_precommit(self, context): + """Allocate resources for a new network. + + :param context: NetworkContext instance describing the new + network. 
+ + Create a new network, allocating resources as necessary in the + database. Called inside transaction context on session. Call + cannot block. Raising an exception will result in a rollback + of the current transaction. + """ + self._validate_network_segments(context.network_segments) + ovn_revision_numbers_db.create_initial_revision( + context._plugin_context, context.current['id'], + ovn_const.TYPE_NETWORKS) + + def create_network_postcommit(self, context): + """Create a network. + + :param context: NetworkContext instance describing the new + network. + + Called after the transaction commits. Call can block, though + will block the entire process so care should be taken to not + drastically affect performance. Raising an exception will + cause the deletion of the resource. + """ + network = context.current + self._ovn_client.create_network(network) + + def update_network_precommit(self, context): + """Update resources of a network. + + :param context: NetworkContext instance describing the new + state of the network, as well as the original state prior + to the update_network call. + + Update values of a network, updating the associated resources + in the database. Called inside transaction context on session. + Raising an exception will result in rollback of the + transaction. + + update_network_precommit is called for all changes to the + network state. It is up to the mechanism driver to ignore + state or state changes that it does not know or care about. + """ + self._validate_network_segments(context.network_segments) + + def update_network_postcommit(self, context): + """Update a network. + + :param context: NetworkContext instance describing the new + state of the network, as well as the original state prior + to the update_network call. + + Called after the transaction commits. Call can block, though + will block the entire process so care should be taken to not + drastically affect performance. Raising an exception will + cause the deletion of the resource. + + update_network_postcommit is called for all changes to the + network state. It is up to the mechanism driver to ignore + state or state changes that it does not know or care about. + """ + # FIXME(lucasagomes): We can delete this conditional after + # https://bugs.launchpad.net/neutron/+bug/1739798 is fixed. + if context._plugin_context.session.is_active: + return + self._ovn_client.update_network(context.current) + + def delete_network_postcommit(self, context): + """Delete a network. + + :param context: NetworkContext instance describing the current + state of the network, prior to the call to delete it. + + Called after the transaction commits. Call can block, though + will block the entire process so care should be taken to not + drastically affect performance. Runtime errors are not + expected, and will not prevent the resource from being + deleted. 
+ """ + self._ovn_client.delete_network(context.current['id']) + + def create_subnet_precommit(self, context): + ovn_revision_numbers_db.create_initial_revision( + context._plugin_context, context.current['id'], + ovn_const.TYPE_SUBNETS) + + def create_subnet_postcommit(self, context): + self._ovn_client.create_subnet(context.current, + context.network.current) + + def update_subnet_postcommit(self, context): + self._ovn_client.update_subnet( + context.current, context.network.current) + + def delete_subnet_postcommit(self, context): + self._ovn_client.delete_subnet(context.current['id']) + + def create_port_precommit(self, context): + """Allocate resources for a new port. + + :param context: PortContext instance describing the port. + + Create a new port, allocating resources as necessary in the + database. Called inside transaction context on session. Call + cannot block. Raising an exception will result in a rollback + of the current transaction. + """ + port = context.current + if ovn_utils.is_lsp_ignored(port): + return + ovn_utils.validate_and_get_data_from_binding_profile(port) + if self._is_port_provisioning_required(port, context.host): + self._insert_port_provisioning_block(context._plugin_context, + port['id']) + + ovn_revision_numbers_db.create_initial_revision( + context._plugin_context, port['id'], ovn_const.TYPE_PORTS) + + # in the case of router ports we also need to + # track the creation and update of the LRP OVN objects + if ovn_utils.is_lsp_router_port(port): + ovn_revision_numbers_db.create_initial_revision( + context._plugin_context, port['id'], + ovn_const.TYPE_ROUTER_PORTS) + + def _is_port_provisioning_required(self, port, host, original_host=None): + vnic_type = port.get(portbindings.VNIC_TYPE, portbindings.VNIC_NORMAL) + if vnic_type not in self.supported_vnic_types: + LOG.debug('No provisioning block for port %(port_id)s due to ' + 'unsupported vnic_type: %(vnic_type)s', + {'port_id': port['id'], 'vnic_type': vnic_type}) + return False + + if port['status'] == const.PORT_STATUS_ACTIVE: + LOG.debug('No provisioning block for port %s since it is active', + port['id']) + return False + + if not host: + LOG.debug('No provisioning block for port %s since it does not ' + 'have a host', port['id']) + return False + + if host == original_host: + LOG.debug('No provisioning block for port %s since host unchanged', + port['id']) + return False + + if not self._sb_ovn.chassis_exists(host): + LOG.debug('No provisioning block for port %(port_id)s since no ' + 'OVN chassis for host: %(host)s', + {'port_id': port['id'], 'host': host}) + return False + + return True + + def _insert_port_provisioning_block(self, context, port_id): + # Insert a provisioning block to prevent the port from + # transitioning to active until OVN reports back that + # the port is up. 
+ provisioning_blocks.add_provisioning_component( + context, port_id, resources.PORT, + provisioning_blocks.L2_AGENT_ENTITY + ) + + def _notify_dhcp_updated(self, port_id): + """Notifies Neutron that the DHCP has been update for port.""" + if provisioning_blocks.is_object_blocked( + n_context.get_admin_context(), port_id, resources.PORT): + provisioning_blocks.provisioning_complete( + n_context.get_admin_context(), port_id, resources.PORT, + provisioning_blocks.DHCP_ENTITY) + + def _validate_ignored_port(self, port, original_port): + if ovn_utils.is_lsp_ignored(port): + if not ovn_utils.is_lsp_ignored(original_port): + # From not ignored port to ignored port + msg = (_('Updating device_owner to %(device_owner)s for port ' + '%(port_id)s is not supported') % + {'device_owner': port['device_owner'], + 'port_id': port['id']}) + raise OVNPortUpdateError(resource='port', msg=msg) + elif ovn_utils.is_lsp_ignored(original_port): + # From ignored port to not ignored port + msg = (_('Updating device_owner for port %(port_id)s owned by ' + '%(device_owner)s is not supported') % + {'port_id': port['id'], + 'device_owner': original_port['device_owner']}) + raise OVNPortUpdateError(resource='port', msg=msg) + + def create_port_postcommit(self, context): + """Create a port. + + :param context: PortContext instance describing the port. + + Called after the transaction completes. Call can block, though + will block the entire process so care should be taken to not + drastically affect performance. Raising an exception will + result in the deletion of the resource. + """ + port = copy.deepcopy(context.current) + port['network'] = context.network.current + self._ovn_client.create_port(port) + self._notify_dhcp_updated(port['id']) + + def update_port_precommit(self, context): + """Update resources of a port. + + :param context: PortContext instance describing the new + state of the port, as well as the original state prior + to the update_port call. + + Called inside transaction context on session to complete a + port update as defined by this mechanism driver. Raising an + exception will result in rollback of the transaction. + + update_port_precommit is called for all changes to the port + state. It is up to the mechanism driver to ignore state or + state changes that it does not know or care about. + """ + port = context.current + original_port = context.original + self._validate_ignored_port(port, original_port) + ovn_utils.validate_and_get_data_from_binding_profile(port) + if self._is_port_provisioning_required(port, context.host, + context.original_host): + self._insert_port_provisioning_block(context._plugin_context, + port['id']) + + if ovn_utils.is_lsp_router_port(port): + # handle the case when an existing port is added to a + # logical router so we need to track the creation of the lrp + if not ovn_utils.is_lsp_router_port(original_port): + ovn_revision_numbers_db.create_initial_revision( + context._plugin_context, port['id'], + ovn_const.TYPE_ROUTER_PORTS, may_exist=True) + + def update_port_postcommit(self, context): + """Update a port. + + :param context: PortContext instance describing the new + state of the port, as well as the original state prior + to the update_port call. + + Called after the transaction completes. Call can block, though + will block the entire process so care should be taken to not + drastically affect performance. Raising an exception will + result in the deletion of the resource. + + update_port_postcommit is called for all changes to the port + state. 
It is up to the mechanism driver to ignore state or + state changes that it does not know or care about. + """ + port = copy.deepcopy(context.current) + port['network'] = context.network.current + original_port = copy.deepcopy(context.original) + original_port['network'] = context.network.current + + # NOTE(mjozefcz): Check if port is in migration state. If so update + # the port status from DOWN to UP in order to generate 'fake' + # vif-interface-plugged event. This workaround is needed to + # perform live-migration with live_migration_wait_for_vif_plug=True. + if ((port['status'] == const.PORT_STATUS_DOWN and + ovn_const.MIGRATING_ATTR in port[portbindings.PROFILE].keys() and + port[portbindings.VIF_TYPE] == portbindings.VIF_TYPE_OVS)): + admin_context = n_context.get_admin_context() + LOG.info("Setting port %s status from DOWN to UP in order " + "to emit vif-interface-plugged event.", + port['id']) + self._plugin.update_port_status(admin_context, port['id'], + const.PORT_STATUS_ACTIVE) + # The revision has been changed. In the meantime + # port-update event already updated the OVN configuration, + # So there is no need to update it again here. Anyway it + # will fail that OVN has port with bigger revision. + return + + self._ovn_client.update_port(port, port_object=original_port) + self._notify_dhcp_updated(port['id']) + + def delete_port_postcommit(self, context): + """Delete a port. + + :param context: PortContext instance describing the current + state of the port, prior to the call to delete it. + + Called after the transaction completes. Call can block, though + will block the entire process so care should be taken to not + drastically affect performance. Runtime errors are not + expected, and will not prevent the resource from being + deleted. + """ + port = copy.deepcopy(context.current) + port['network'] = context.network.current + # FIXME(lucasagomes): PortContext does not have a session, therefore + # we need to use the _plugin_context attribute. + self._ovn_client.delete_port(context._plugin_context, port['id'], + port_object=port) + + def bind_port(self, context): + """Attempt to bind a port. + + :param context: PortContext instance describing the port + + This method is called outside any transaction to attempt to + establish a port binding using this mechanism driver. Bindings + may be created at each of multiple levels of a hierarchical + network, and are established from the top level downward. At + each level, the mechanism driver determines whether it can + bind to any of the network segments in the + context.segments_to_bind property, based on the value of the + context.host property, any relevant port or network + attributes, and its own knowledge of the network topology. At + the top level, context.segments_to_bind contains the static + segments of the port's network. At each lower level of + binding, it contains static or dynamic segments supplied by + the driver that bound at the level above. If the driver is + able to complete the binding of the port to any segment in + context.segments_to_bind, it must call context.set_binding + with the binding details. If it can partially bind the port, + it must call context.continue_binding with the network + segments to be used to bind at the next lower level. + + If the binding results are committed after bind_port returns, + they will be seen by all mechanism drivers as + update_port_precommit and update_port_postcommit calls. 
But if + some other thread or process concurrently binds or updates the + port, these binding results will not be committed, and + update_port_precommit and update_port_postcommit will not be + called on the mechanism drivers with these results. Because + binding results can be discarded rather than committed, + drivers should avoid making persistent state changes in + bind_port, or else must ensure that such state changes are + eventually cleaned up. + + Implementing this method explicitly declares the mechanism + driver as having the intention to bind ports. This is inspected + by the QoS service to identify the available QoS rules you + can use with ports. + """ + port = context.current + vnic_type = port.get(portbindings.VNIC_TYPE, portbindings.VNIC_NORMAL) + if vnic_type not in self.supported_vnic_types: + LOG.debug('Refusing to bind port %(port_id)s due to unsupported ' + 'vnic_type: %(vnic_type)s', + {'port_id': port['id'], 'vnic_type': vnic_type}) + return + + profile = port.get(portbindings.PROFILE) + capabilities = [] + if profile: + capabilities = profile.get('capabilities', []) + if (vnic_type == portbindings.VNIC_DIRECT and + 'switchdev' not in capabilities): + LOG.debug("Refusing to bind port due to unsupported vnic_type: %s " + "with no switchdev capability", portbindings.VNIC_DIRECT) + return + + # OVN chassis information is needed to ensure a valid port bind. + # Collect port binding data and refuse binding if the OVN chassis + # cannot be found. + chassis_physnets = [] + try: + datapath_type, iface_types, chassis_physnets = ( + self._sb_ovn.get_chassis_data_for_ml2_bind_port(context.host)) + iface_types = iface_types.split(',') if iface_types else [] + except RuntimeError: + LOG.debug('Refusing to bind port %(port_id)s due to ' + 'no OVN chassis for host: %(host)s', + {'port_id': port['id'], 'host': context.host}) + return + + for segment_to_bind in context.segments_to_bind: + network_type = segment_to_bind['network_type'] + segmentation_id = segment_to_bind['segmentation_id'] + physical_network = segment_to_bind['physical_network'] + LOG.debug('Attempting to bind port %(port_id)s on host %(host)s ' + 'for network segment with type %(network_type)s, ' + 'segmentation ID %(segmentation_id)s, ' + 'physical network %(physical_network)s', + {'port_id': port['id'], + 'host': context.host, + 'network_type': network_type, + 'segmentation_id': segmentation_id, + 'physical_network': physical_network}) + # TODO(rtheis): This scenario is only valid on an upgrade from + # neutron ML2 OVS since invalid network types are prevented during + # network creation and update. The upgrade should convert invalid + # network types. Once bug/1621879 is fixed, refuse to bind + # ports with unsupported network types. 
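The vif_type selection performed further down in this loop condenses to the following sketch; the string values are assumptions based on the ovn_const and portbindings constant names used here:

def pick_vif_type(datapath_type, iface_types):
    # Assumed values of CHASSIS_DATAPATH_NETDEV ('netdev') and
    # CHASSIS_IFACE_DPDKVHOSTUSER ('dpdkvhostuser').
    if datapath_type == 'netdev' and 'dpdkvhostuser' in iface_types:
        return 'vhostuser'   # DPDK chassis: plug via a vhost-user socket
    return 'ovs'             # default: regular OVS port

print(pick_vif_type('netdev', ['dpdkvhostuser', 'system']))  # vhostuser
print(pick_vif_type('system', ['system']))                   # ovs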
+ if not self._is_network_type_supported(network_type): + LOG.info('Upgrade allowing bind port %(port_id)s with ' + 'unsupported network type: %(network_type)s', + {'port_id': port['id'], + 'network_type': network_type}) + + if ((network_type in ['flat', 'vlan']) and + (physical_network not in chassis_physnets)): + LOG.info('Refusing to bind port %(port_id)s on ' + 'host %(host)s due to the OVN chassis ' + 'bridge mapping physical networks ' + '%(chassis_physnets)s not supporting ' + 'physical network: %(physical_network)s', + {'port_id': port['id'], + 'host': context.host, + 'chassis_physnets': chassis_physnets, + 'physical_network': physical_network}) + else: + if (datapath_type == ovn_const.CHASSIS_DATAPATH_NETDEV and + ovn_const.CHASSIS_IFACE_DPDKVHOSTUSER in iface_types): + vhost_user_socket = ovn_utils.ovn_vhu_sockpath( + ovn_conf.get_ovn_vhost_sock_dir(), port['id']) + vif_type = portbindings.VIF_TYPE_VHOST_USER + port[portbindings.VIF_DETAILS].update({ + portbindings.VHOST_USER_SOCKET: vhost_user_socket}) + vif_details = dict(self.vif_details[vif_type]) + vif_details[portbindings.VHOST_USER_SOCKET] = ( + vhost_user_socket) + else: + vif_type = portbindings.VIF_TYPE_OVS + vif_details = self.vif_details[vif_type] + + context.set_binding(segment_to_bind[api.ID], vif_type, + vif_details) + break + + def get_workers(self): + """Get any worker instances that should have their own process + + Any driver that needs to run processes separate from the API or RPC + workers, can return a sequence of worker instances. + """ + # See doc/source/design/ovn_worker.rst for more details. + return [worker.MaintenanceWorker()] + + def _update_subport_host_if_needed(self, port_id): + parent_port = self._ovn_client.get_parent_port(port_id) + if parent_port: + admin_context = n_context.get_admin_context() + host_id = None + try: + port = self._plugin.get_port(admin_context, parent_port) + host_id = port.get(portbindings.HOST_ID, '') + subport = { + 'port': {'binding:host_id': host_id, + 'device_owner': trunk_consts.TRUNK_SUBPORT_OWNER}} + self._plugin.update_port(admin_context, port_id, subport) + except (os_db_exc.DBReferenceError, n_exc.PortNotFound): + LOG.debug("Error trying to set host_id %s for subport %s", + host_id, port_id) + + def _update_dnat_entry_if_needed(self, port_id, up=True): + """Update DNAT entry if using distributed floating ips.""" + if not ovn_conf.is_ovn_distributed_floating_ip(): + return + + if not self._nb_ovn: + self._nb_ovn = self._ovn_client._nb_idl + + nat = self._nb_ovn.db_find('NAT', + ('logical_port', '=', port_id), + ('type', '=', 'dnat_and_snat')).execute() + if not nat: + return + # We take first entry as one port can only have one FIP + nat = nat[0] + # If the external_id doesn't exist, let's create at this point. + # TODO(dalvarez): Remove this code in T cycle when we're sure that + # all DNAT entries have the external_id. 
+        if not nat['external_ids'].get(ovn_const.OVN_FIP_EXT_MAC_KEY):
+            self._nb_ovn.db_set('NAT', nat['_uuid'],
+                                ('external_ids',
+                                 {ovn_const.OVN_FIP_EXT_MAC_KEY:
+                                  nat['external_mac']})).execute()
+
+        if up:
+            mac = nat['external_ids'][ovn_const.OVN_FIP_EXT_MAC_KEY]
+            LOG.debug("Setting external_mac of port %s to %s",
+                      port_id, mac)
+            self._nb_ovn.db_set(
+                'NAT', nat['_uuid'],
+                ('external_mac', mac)).execute(check_error=True)
+        else:
+            LOG.debug("Clearing external_mac of port %s", port_id)
+            self._nb_ovn.db_clear(
+                'NAT', nat['_uuid'], 'external_mac').execute(check_error=True)
+
+    def _should_notify_nova(self, db_port):
+        # NOTE(twilson) It is possible for a test to override a config option
+        # after the plugin has been initialized so the nova_notifier attribute
+        # is not set on the plugin
+        return (cfg.CONF.notify_nova_on_port_status_changes and
+                hasattr(self._plugin, 'nova_notifier') and
+                db_port.device_owner.startswith(
+                    const.DEVICE_OWNER_COMPUTE_PREFIX))
+
+    def set_port_status_up(self, port_id):
+        # Port provisioning is complete now that OVN has reported that the
+        # port is up. Any provisioning block (possibly added during port
+        # creation or when OVN reports that the port is down) must be removed.
+        LOG.info("OVN reports status up for port: %s", port_id)
+
+        self._update_dnat_entry_if_needed(port_id)
+        self._wait_for_metadata_provisioned_if_needed(port_id)
+
+        # If this port is a subport, we need to update the host_id and set it
+        # to its parent's. Otherwise, Neutron won't even try to bind it and
+        # it will not transition from DOWN to ACTIVE.
+        self._update_subport_host_if_needed(port_id)
+
+        provisioning_blocks.provisioning_complete(
+            n_context.get_admin_context(),
+            port_id,
+            resources.PORT,
+            provisioning_blocks.L2_AGENT_ENTITY)
+
+        admin_context = n_context.get_admin_context()
+        try:
+            # NOTE(lucasagomes): Router ports in OVN are never bound to a
+            # host given their decentralized nature. The call to
+            # provisioning_complete() above won't update their status for
+            # us because router ports are unbound, so for OVN we force the
+            # status here. Maybe it's something that we can change in core
+            # Neutron in the future.
+            db_port = ml2_db.get_port(admin_context, port_id)
+            if not db_port:
+                return
+
+            if db_port.device_owner in (const.DEVICE_OWNER_ROUTER_INTF,
+                                        const.DEVICE_OWNER_DVR_INTERFACE,
+                                        const.DEVICE_OWNER_ROUTER_HA_INTF):
+                self._plugin.update_port_status(admin_context, port_id,
+                                                const.PORT_STATUS_ACTIVE)
+            elif self._should_notify_nova(db_port):
+                self._plugin.nova_notifier.notify_port_active_direct(db_port)
+        except (os_db_exc.DBReferenceError, n_exc.PortNotFound):
+            LOG.debug('Port not found during OVN status up report: %s',
+                      port_id)
+
+    def set_port_status_down(self, port_id):
+        # Port provisioning is required now that OVN has reported that the
+        # port is down. Insert a provisioning block and mark the port down
+        # in neutron. The block is inserted before the port status update
+        # to prevent another entity from bypassing the block with its own
+        # port status update.
+ LOG.info("OVN reports status down for port: %s", port_id) + self._update_dnat_entry_if_needed(port_id, False) + admin_context = n_context.get_admin_context() + try: + db_port = ml2_db.get_port(admin_context, port_id) + if not db_port: + return + + self._insert_port_provisioning_block(admin_context, port_id) + self._plugin.update_port_status(admin_context, port_id, + const.PORT_STATUS_DOWN) + + if self._should_notify_nova(db_port): + self._plugin.nova_notifier.record_port_status_changed( + db_port, const.PORT_STATUS_ACTIVE, const.PORT_STATUS_DOWN, + None) + self._plugin.nova_notifier.send_port_status( + None, None, db_port) + except (os_db_exc.DBReferenceError, n_exc.PortNotFound): + LOG.debug("Port not found during OVN status down report: %s", + port_id) + + def delete_mac_binding_entries(self, external_ip): + """Delete all MAC_Binding entries associated to this IP address""" + mac_binds = self._sb_ovn.db_find_rows( + 'MAC_Binding', ('ip', '=', external_ip)).execute() or [] + for entry in mac_binds: + self._sb_ovn.db_destroy('MAC_Binding', entry.uuid).execute() + + def update_segment_host_mapping(self, host, phy_nets): + """Update SegmentHostMapping in DB""" + if not host: + return + + ctx = n_context.get_admin_context() + segments = segment_service_db.get_segments_with_phys_nets( + ctx, phy_nets) + + available_seg_ids = { + segment['id'] for segment in segments + if segment['network_type'] in ('flat', 'vlan')} + + segment_service_db.update_segment_host_mapping( + ctx, host, available_seg_ids) + + def _add_segment_host_mapping_for_segment(self, resource, event, trigger, + context, segment): + phynet = segment.physical_network + if not phynet: + return + + host_phynets_map = self._sb_ovn.get_chassis_hostname_and_physnets() + hosts = {host for host, phynets in host_phynets_map.items() + if phynet in phynets} + segment_service_db.map_segment_to_hosts(context, segment.id, hosts) + + def _wait_for_metadata_provisioned_if_needed(self, port_id): + """Wait for metadata service to be provisioned. + + Wait until metadata service has been setup for this port in the chassis + it resides. If metadata is disabled or DHCP is not enabled for its + subnets, this function will return right away. + """ + if ovn_conf.is_ovn_metadata_enabled() and self._sb_ovn: + # Wait until metadata service has been setup for this port in the + # chassis it resides. + result = ( + self._sb_ovn.get_logical_port_chassis_and_datapath(port_id)) + if not result: + LOG.warning("Logical port %s doesn't exist in OVN", port_id) + return + chassis, datapath = result + if not chassis: + LOG.warning("Logical port %s is not bound to a " + "chassis", port_id) + return + + # Check if the port belongs to some IPv4 subnet with DHCP enabled. 
+ context = n_context.get_admin_context() + port = self._plugin.get_port(context, port_id) + port_subnet_ids = set( + ip['subnet_id'] for ip in port['fixed_ips'] if + n_utils.get_ip_version(ip['ip_address']) == const.IP_VERSION_4) + if not port_subnet_ids: + # The port doesn't belong to any IPv4 subnet + return + + subnets = self._plugin.get_subnets(context, filters=dict( + network_id=[port['network_id']], ip_version=[4], + enable_dhcp=True)) + + subnet_ids = set( + s['id'] for s in subnets if s['id'] in port_subnet_ids) + if not subnet_ids: + return + + try: + n_utils.wait_until_true( + lambda: datapath in + self._sb_ovn.get_chassis_metadata_networks(chassis), + timeout=METADATA_READY_WAIT_TIMEOUT, + exception=MetadataServiceReadyWaitTimeoutException) + except MetadataServiceReadyWaitTimeoutException: + # If we reach this point it means that metadata agent didn't + # provision the datapath for this port on its chassis. Either + # the agent is not running or it crashed. We'll complete the + # provisioning block though. + LOG.warning("Metadata service is not ready for port %s, check" + " networking-ovn-metadata-agent status/logs.", + port_id) + + def agent_alive(self, chassis, type_): + nb_cfg = chassis.nb_cfg + key = ovn_const.OVN_LIVENESS_CHECK_EXT_ID_KEY + if type_ == ovn_const.OVN_METADATA_AGENT: + nb_cfg = int(chassis.external_ids.get( + ovn_const.OVN_AGENT_METADATA_SB_CFG_KEY, 0)) + key = ovn_const.METADATA_LIVENESS_CHECK_EXT_ID_KEY + + try: + updated_at = timeutils.parse_isotime(chassis.external_ids[key]) + except KeyError: + updated_at = timeutils.utcnow(with_timezone=True) + + if self._nb_ovn.nb_global.nb_cfg == nb_cfg: + # update the time of our successful check + value = timeutils.utcnow(with_timezone=True).isoformat() + self._sb_ovn.db_set('Chassis', chassis.uuid, + ('external_ids', {key: value})).execute( + check_error=True) + return True + now = timeutils.utcnow(with_timezone=True) + + if (now - updated_at).total_seconds() < cfg.CONF.agent_down_time: + # down, but not yet timed out + return True + return False + + def _format_agent_info(self, chassis, binary, agent_id, type_, + description, alive): + return { + 'binary': binary, + 'host': chassis.hostname, + 'heartbeat_timestamp': timeutils.utcnow(), + 'availability_zone': 'n/a', + 'topic': 'n/a', + 'description': description, + 'configurations': { + 'chassis_name': chassis.name, + 'bridge-mappings': + chassis.external_ids.get('ovn-bridge-mappings', '')}, + 'start_flag': True, + 'agent_type': type_, + 'id': agent_id, + 'alive': alive, + 'admin_state_up': True} + + def agents_from_chassis(self, chassis): + agent_dict = {} + + # Check for ovn-controller / ovn-controller gateway + agent_type = ovn_const.OVN_CONTROLLER_AGENT + # Only the chassis name stays consistent after ovn-controller restart + agent_id = chassis.name + if ('enable-chassis-as-gw' in + chassis.external_ids.get('ovn-cms-options', [])): + agent_type = ovn_const.OVN_CONTROLLER_GW_AGENT + + alive = self.agent_alive(chassis, agent_type) + description = chassis.external_ids.get( + ovn_const.OVN_AGENT_DESC_KEY, '') + agent_dict[agent_id] = self._format_agent_info( + chassis, 'ovn-controller', agent_id, agent_type, description, + alive) + + # Check for the metadata agent + metadata_agent_id = chassis.external_ids.get( + ovn_const.OVN_AGENT_METADATA_ID_KEY) + if metadata_agent_id: + agent_type = ovn_const.OVN_METADATA_AGENT + alive = self.agent_alive(chassis, agent_type) + description = chassis.external_ids.get( + ovn_const.OVN_AGENT_METADATA_DESC_KEY, '') + 
agent_dict[metadata_agent_id] = self._format_agent_info(
+                chassis, 'networking-ovn-metadata-agent',
+                metadata_agent_id, agent_type, description, alive)
+
+        return agent_dict
+
+    def patch_plugin_merge(self, method_name, new_fn, op=operator.add):
+        old_method = getattr(self._plugin, method_name)
+
+        @functools.wraps(old_method)
+        def fn(slf, *args, **kwargs):
+            new_method = types.MethodType(new_fn, self._plugin)
+            results = old_method(*args, **kwargs)
+            return op(results, new_method(*args, _driver=self, **kwargs))
+
+        setattr(self._plugin, method_name, types.MethodType(fn, self._plugin))
+
+    def patch_plugin_choose(self, method_name, new_fn):
+        old_method = getattr(self._plugin, method_name)
+
+        @functools.wraps(old_method)
+        def fn(slf, *args, **kwargs):
+            new_method = types.MethodType(new_fn, self._plugin)
+            try:
+                return new_method(*args, _driver=self, **kwargs)
+            except KeyError:
+                return old_method(*args, **kwargs)
+
+        setattr(self._plugin, method_name, types.MethodType(fn, self._plugin))
+
+    def ping_chassis(self):
+        """Update NB_Global.nb_cfg so that Chassis.nb_cfg will increment"""
+
+        with self._nb_ovn.create_transaction(check_error=True,
+                                             bump_nb_cfg=True) as txn:
+            txn.add(self._nb_ovn.check_liveness())
+
+
+def populate_agents(driver):
+    for ch in driver._sb_ovn.tables['Chassis'].rows.values():
+        # update the cache; rows are hashed on uuid but it is the name
+        # that stays consistent across ovn-controller restarts
+        AGENTS.update({ch.name: ch})
+
+
+def get_agents(self, context, filters=None, fields=None, _driver=None):
+    _driver.ping_chassis()
+    filters = filters or {}
+    agent_list = []
+    populate_agents(_driver)
+    for ch in AGENTS.values():
+        for agent in _driver.agents_from_chassis(ch).values():
+            if all(agent[k] in v for k, v in filters.items()):
+                agent_list.append(agent)
+    return agent_list
+
+
+def get_agent(self, context, id, fields=None, _driver=None):
+    chassis = None
+    try:
+        # look up Chassis by *name*, which is what the agent id is
+        chassis = _driver._sb_ovn.lookup('Chassis', id)
+    except idlutils.RowNotFound:
+        # If no chassis matches by name, check for the metadata agent ID
+        for ch in _driver._sb_ovn.tables['Chassis'].rows.values():
+            metadata_agent_id = ch.external_ids.get(
+                ovn_const.OVN_AGENT_METADATA_ID_KEY)
+            if id == metadata_agent_id:
+                chassis = ch
+                break
+        else:
+            raise n_exc.agent.AgentNotFound(id=id)
+    return _driver.agents_from_chassis(chassis)[id]
+
+
+def update_agent(self, context, id, agent, _driver=None):
+    ovn_agent = get_agent(self, None, id, _driver=_driver)
+    chassis_name = ovn_agent['configurations']['chassis_name']
+    agent_type = ovn_agent['agent_type']
+    agent = agent['agent']
+    # neutron-client always passes admin_state_up while openstackclient
+    # does not. We only fall through to raising when admin_state_up is
+    # being set to False; otherwise the end state is already correct.
+    if not agent.get('admin_state_up', True):
+        pass
+    elif 'description' in agent:
+        _driver._sb_ovn.set_chassis_neutron_description(
+            chassis_name, agent['description'],
+            agent_type).execute(check_error=True)
+        return agent
+    else:
+        # admin_state_up=True w/o description
+        return agent
+    raise n_exc.BadRequest(resource='agent',
+                           msg='OVN agent status cannot be updated')
+
+
+def delete_agent(self, context, id, _driver=None):
+    get_agent(self, None, id, _driver=_driver)
+    raise n_exc.BadRequest(resource='agent',
+                           msg='OVN agents cannot be deleted')
diff --git a/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/maintenance.py
b/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/maintenance.py new file mode 100644 index 00000000000..93f3de8b2c5 --- /dev/null +++ b/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/maintenance.py @@ -0,0 +1,469 @@ +# Copyright 2019 Red Hat, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import inspect +import threading + +from futurist import periodics +from neutron_lib.api.definitions import external_net +from neutron_lib import constants as n_const +from neutron_lib import context as n_context +from neutron_lib import exceptions as n_exc +from oslo_config import cfg +from oslo_log import log +from oslo_utils import timeutils + +from neutron.common.ovn import constants as ovn_const +from neutron.conf.plugins.ml2.drivers.ovn import ovn_conf +from neutron.db import ovn_hash_ring_db as hash_ring_db +from neutron.db import ovn_revision_numbers_db as revision_numbers_db +from neutron.plugins.ml2.drivers.ovn.mech_driver.ovsdb import ovn_db_sync + + +CONF = cfg.CONF +LOG = log.getLogger(__name__) + +DB_CONSISTENCY_CHECK_INTERVAL = 300 # 5 minutes +INCONSISTENCY_TYPE_CREATE_UPDATE = 'create/update' +INCONSISTENCY_TYPE_DELETE = 'delete' + + +class MaintenanceThread(object): + + def __init__(self): + self._callables = [] + self._thread = None + self._worker = None + + def add_periodics(self, obj): + for name, member in inspect.getmembers(obj): + if periodics.is_periodic(member): + LOG.debug('Periodic task found: %(owner)s.%(member)s', + {'owner': obj.__class__.__name__, 'member': name}) + self._callables.append((member, (), {})) + + def start(self): + if self._thread is None: + self._worker = periodics.PeriodicWorker(self._callables) + self._thread = threading.Thread(target=self._worker.start) + self._thread.daemon = True + self._thread.start() + + def stop(self): + self._worker.stop() + self._worker.wait() + self._thread.join() + self._worker = self._thread = None + + +class DBInconsistenciesPeriodics(object): + + def __init__(self, ovn_client): + self._ovn_client = ovn_client + # FIXME(lucasagomes): We should not be accessing private + # attributes like that, perhaps we should extend the OVNClient + # class and create an interface for the locks ? 
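For reference, the discovery that MaintenanceThread.add_periodics() performs above boils down to this runnable sketch (class and task names are illustrative):

import inspect
import threading

from futurist import periodics

class Tasks:
    @periodics.periodic(spacing=300, run_immediately=True)
    def check(self):
        print('periodic check ran')

# Collect every @periodics.periodic-decorated bound method and hand the
# batch to a PeriodicWorker running in a daemon thread.
obj = Tasks()
callables = [(member, (), {})
             for _, member in inspect.getmembers(obj)
             if periodics.is_periodic(member)]
worker = periodics.PeriodicWorker(callables)
threading.Thread(target=worker.start, daemon=True).start()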
+ self._nb_idl = self._ovn_client._nb_idl + self._idl = self._nb_idl.idl + self._idl.set_lock('ovn_db_inconsistencies_periodics') + self._sync_timer = timeutils.StopWatch() + + self._resources_func_map = { + ovn_const.TYPE_NETWORKS: { + 'neutron_get': self._ovn_client._plugin.get_network, + 'ovn_get': self._nb_idl.get_lswitch, + 'ovn_create': self._ovn_client.create_network, + 'ovn_update': self._ovn_client.update_network, + 'ovn_delete': self._ovn_client.delete_network, + }, + ovn_const.TYPE_PORTS: { + 'neutron_get': self._ovn_client._plugin.get_port, + 'ovn_get': self._nb_idl.get_lswitch_port, + 'ovn_create': self._ovn_client.create_port, + 'ovn_update': self._ovn_client.update_port, + 'ovn_delete': self._ovn_client.delete_port, + }, + ovn_const.TYPE_FLOATINGIPS: { + 'neutron_get': self._ovn_client._l3_plugin.get_floatingip, + 'ovn_get': self._nb_idl.get_floatingip, + 'ovn_create': self._ovn_client.create_floatingip, + 'ovn_update': self._ovn_client.update_floatingip, + 'ovn_delete': self._ovn_client.delete_floatingip, + }, + ovn_const.TYPE_ROUTERS: { + 'neutron_get': self._ovn_client._l3_plugin.get_router, + 'ovn_get': self._nb_idl.get_lrouter, + 'ovn_create': self._ovn_client.create_router, + 'ovn_update': self._ovn_client.update_router, + 'ovn_delete': self._ovn_client.delete_router, + }, + ovn_const.TYPE_SECURITY_GROUPS: { + 'neutron_get': self._ovn_client._plugin.get_security_group, + 'ovn_get': self._get_security_group, + 'ovn_create': self._ovn_client.create_security_group, + 'ovn_delete': self._ovn_client.delete_security_group, + }, + ovn_const.TYPE_SECURITY_GROUP_RULES: { + 'neutron_get': + self._ovn_client._plugin.get_security_group_rule, + 'ovn_get': self._nb_idl.get_acl_by_id, + 'ovn_create': self._ovn_client.create_security_group_rule, + 'ovn_delete': self._ovn_client.delete_security_group_rule, + }, + ovn_const.TYPE_ROUTER_PORTS: { + 'neutron_get': + self._ovn_client._plugin.get_port, + 'ovn_get': self._nb_idl.get_lrouter_port, + 'ovn_create': self._create_lrouter_port, + 'ovn_update': self._ovn_client.update_router_port, + 'ovn_delete': self._ovn_client.delete_router_port, + }, + } + + def _get_security_group(self, uuid): + return (self._nb_idl.get_address_set(uuid) or + self._nb_idl.get_port_group(uuid)) + + @property + def has_lock(self): + return not self._idl.is_lock_contended + + def _fix_create_update(self, context, row): + res_map = self._resources_func_map[row.resource_type] + try: + # Get the latest version of the resource in Neutron DB + n_obj = res_map['neutron_get'](context, row.resource_uuid) + except n_exc.NotFound: + LOG.warning('Skip fixing resource %(res_uuid)s (type: ' + '%(res_type)s). Resource does not exist in Neutron ' + 'database anymore', {'res_uuid': row.resource_uuid, + 'res_type': row.resource_type}) + return + + ovn_obj = res_map['ovn_get'](row.resource_uuid) + + if not ovn_obj: + res_map['ovn_create'](n_obj) + else: + if row.resource_type == ovn_const.TYPE_SECURITY_GROUP_RULES: + LOG.error("SG rule %s found with a revision number while " + "this resource doesn't support updates", + row.resource_uuid) + elif row.resource_type == ovn_const.TYPE_SECURITY_GROUPS: + # In OVN, we don't care about updates to security groups, + # so just bump the revision number to whatever it's + # supposed to be. 
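Overall, the decision tree that _fix_create_update implements for most resource types condenses to the sketch below. The external_ids key value is an assumption based on the OVN_REV_NUM_EXT_ID_KEY constant used in this module, and FakeOvnRow is a stand-in for an OVSDB row:

def reconcile(neutron_obj, ovn_obj, rev_key='neutron:revision_number'):
    """Toy version of the create/update/bump decision."""
    if ovn_obj is None:
        return 'create'      # missing in OVN -> recreate it there
    ovn_rev = int(getattr(ovn_obj, 'external_ids', {}).get(rev_key, -1))
    if ovn_rev != neutron_obj['revision_number']:
        return 'update'      # OVN copy is stale -> re-apply from Neutron
    return 'bump'            # in sync -> just bump the cache table row

class FakeOvnRow:
    external_ids = {'neutron:revision_number': '5'}

print(reconcile({'revision_number': 7}, None))          # create
print(reconcile({'revision_number': 7}, FakeOvnRow()))  # update
print(reconcile({'revision_number': 5}, FakeOvnRow()))  # bump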
revision_numbers_db.bump_revision(context, n_obj,
+                                                  row.resource_type)
+            else:
+                ext_ids = getattr(ovn_obj, 'external_ids', {})
+                ovn_revision = int(ext_ids.get(
+                    ovn_const.OVN_REV_NUM_EXT_ID_KEY, -1))
+                # If the resource exists in the OVN DB but its revision
+                # number differs from the one in the Neutron DB, update it.
+                if ovn_revision != n_obj['revision_number']:
+                    res_map['ovn_update'](n_obj)
+                else:
+                    # If the resource exists and the revision numbers
+                    # match in both databases, just bump the revision in
+                    # the cache table.
+                    revision_numbers_db.bump_revision(context, n_obj,
+                                                      row.resource_type)
+
+    def _fix_delete(self, context, row):
+        res_map = self._resources_func_map[row.resource_type]
+        ovn_obj = res_map['ovn_get'](row.resource_uuid)
+        if not ovn_obj:
+            revision_numbers_db.delete_revision(
+                context, row.resource_uuid, row.resource_type)
+        else:
+            res_map['ovn_delete'](row.resource_uuid)
+
+    def _fix_create_update_subnet(self, context, row):
+        # Get the latest version of the subnet in the Neutron DB
+        sn_db_obj = self._ovn_client._plugin.get_subnet(
+            context, row.resource_uuid)
+        n_db_obj = self._ovn_client._plugin.get_network(
+            context, sn_db_obj['network_id'])
+
+        if row.revision_number == ovn_const.INITIAL_REV_NUM:
+            self._ovn_client.create_subnet(sn_db_obj, n_db_obj)
+        else:
+            self._ovn_client.update_subnet(sn_db_obj, n_db_obj)
+
+    # The migration will run just once per neutron-server instance. If the
+    # lock is held by some other neutron-server instance in the cloud, we'll
+    # attempt to perform the migration every 10 seconds until completed.
+    @periodics.periodic(spacing=10, run_immediately=True)
+    def migrate_to_port_groups(self):
+        """Perform the migration from Address Sets to Port Groups."""
+        # TODO(dalvarez): Remove this in U cycle when we're sure that all
+        # versions are running using Port Groups (and OVS >= 2.10).
+
+        # If Port Groups are not supported or we've already migrated, we
+        # don't need to attempt to migrate again.
+        if (not self._nb_idl.is_port_groups_supported() or
+                not self._nb_idl.get_address_sets()):
+            raise periodics.NeverAgain()
+
+        # Only the worker holding a valid lock within OVSDB will perform
+        # the migration.
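The lock mentioned in the comment above is an OVSDB named lock: every worker requests it, the server grants it to exactly one IDL session, and the rest see it as contended. A sketch of the idiom, assuming 'idl' is a connected ovs.db.idl.Idl instance as set up in __init__ above:

LOCK_NAME = 'ovn_db_inconsistencies_periodics'

def request_lock(idl):
    # Non-blocking: the OVSDB server decides who holds the lock.
    idl.set_lock(LOCK_NAME)

def holder_has_lock(idl):
    # Mirrors the has_lock property: the holder is not contended.
    return not idl.is_lock_contended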
+        if not self.has_lock:
+            return
+
+        admin_context = n_context.get_admin_context()
+        nb_sync = ovn_db_sync.OvnNbSynchronizer(
+            self._ovn_client._plugin, self._nb_idl, self._ovn_client._sb_idl,
+            None, None)
+        nb_sync.migrate_to_port_groups(admin_context)
+        raise periodics.NeverAgain()
+
+    def _log_maintenance_inconsistencies(self, create_update_inconsistencies,
+                                         delete_inconsistencies):
+        if not CONF.debug:
+            return
+
+        def _log(inconsistencies, type_):
+            if not inconsistencies:
+                return
+
+            c = {}
+            for f in inconsistencies:
+                if f.resource_type not in c:
+                    c[f.resource_type] = 1
+                else:
+                    c[f.resource_type] += 1
+
+            fail_str = ', '.join('{}={}'.format(k, v) for k, v in c.items())
+            LOG.debug('Maintenance task: Number of inconsistencies '
+                      'found at %(type_)s: %(fail_str)s',
+                      {'type_': type_, 'fail_str': fail_str})
+
+        _log(create_update_inconsistencies, INCONSISTENCY_TYPE_CREATE_UPDATE)
+        _log(delete_inconsistencies, INCONSISTENCY_TYPE_DELETE)
+
+    @periodics.periodic(spacing=DB_CONSISTENCY_CHECK_INTERVAL,
+                        run_immediately=True)
+    def check_for_inconsistencies(self):
+        # Only the worker holding a valid lock within OVSDB will run
+        # this periodic task
+        if not self.has_lock:
+            return
+
+        admin_context = n_context.get_admin_context()
+        create_update_inconsistencies = (
+            revision_numbers_db.get_inconsistent_resources(admin_context))
+        delete_inconsistencies = (
+            revision_numbers_db.get_deleted_resources(admin_context))
+        if not any([create_update_inconsistencies, delete_inconsistencies]):
+            LOG.debug('Maintenance task: No inconsistencies found. Skipping')
+            return
+
+        LOG.debug('Maintenance task: Synchronizing Neutron '
+                  'and OVN databases')
+        self._log_maintenance_inconsistencies(create_update_inconsistencies,
+                                              delete_inconsistencies)
+        self._sync_timer.restart()
+
+        dbg_log_msg = ('Maintenance task: Fixing resource %(res_uuid)s '
+                       '(type: %(res_type)s) at %(type_)s')
+        # Fix the create/update resources inconsistencies
+        for row in create_update_inconsistencies:
+            LOG.debug(dbg_log_msg, {'res_uuid': row.resource_uuid,
+                                    'res_type': row.resource_type,
+                                    'type_': INCONSISTENCY_TYPE_CREATE_UPDATE})
+            try:
+                # NOTE(lucasagomes): The way to fix subnets is a bit
+                # different from other resources. A subnet in OVN language
+                # is just a DHCP rule, and this rule only exists if the
+                # subnet in Neutron has the "enable_dhcp" attribute set
+                # to True. So, it's possible to have a consistent subnet
+                # resource even when it does not exist in the OVN database.
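In other words, the OVN counterpart of a subnet is a DHCP_Options row that may legitimately be absent; a toy consistency model with hypothetical data:

# 'Missing in OVN' is only an inconsistency when DHCP is enabled.
neutron_subnets = {
    'subnet-a': {'enable_dhcp': True},
    'subnet-b': {'enable_dhcp': False},
}
ovn_dhcp_options = {'subnet-a'}   # one DHCP_Options row per DHCP subnet

for sid, subnet in neutron_subnets.items():
    consistent = subnet['enable_dhcp'] == (sid in ovn_dhcp_options)
    print(sid, 'consistent' if consistent else 'inconsistent')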
+ if row.resource_type == ovn_const.TYPE_SUBNETS: + self._fix_create_update_subnet(admin_context, row) + else: + self._fix_create_update(admin_context, row) + except Exception: + LOG.exception('Maintenance task: Failed to fix resource ' + '%(res_uuid)s (type: %(res_type)s)', + {'res_uuid': row.resource_uuid, + 'res_type': row.resource_type}) + + # Fix the deleted resources inconsistencies + for row in delete_inconsistencies: + LOG.debug(dbg_log_msg, {'res_uuid': row.resource_uuid, + 'res_type': row.resource_type, + 'type_': INCONSISTENCY_TYPE_DELETE}) + try: + if row.resource_type == ovn_const.TYPE_SUBNETS: + self._ovn_client.delete_subnet(row.resource_uuid) + else: + self._fix_delete(admin_context, row) + except Exception: + LOG.exception('Maintenance task: Failed to fix deleted ' + 'resource %(res_uuid)s (type: %(res_type)s)', + {'res_uuid': row.resource_uuid, + 'res_type': row.resource_type}) + + self._sync_timer.stop() + LOG.info('Maintenance task: Synchronization finished ' + '(took %.2f seconds)', self._sync_timer.elapsed()) + + def _create_lrouter_port(self, port): + admin_context = n_context.get_admin_context() + router_id = port['device_id'] + self._ovn_client._l3_plugin.add_router_interface( + admin_context, router_id, {'port_id': port['id']}, may_exist=True) + + def _check_subnet_global_dhcp_opts(self): + inconsistent_subnets = [] + admin_context = n_context.get_admin_context() + subnet_filter = {'enable_dhcp': [True]} + neutron_subnets = self._ovn_client._plugin.get_subnets( + admin_context, subnet_filter) + global_v4_opts = ovn_conf.get_global_dhcpv4_opts() + global_v6_opts = ovn_conf.get_global_dhcpv6_opts() + LOG.debug('Checking %s subnets for global DHCP option consistency', + len(neutron_subnets)) + for subnet in neutron_subnets: + ovn_dhcp_opts = self._nb_idl.get_subnet_dhcp_options( + subnet['id'])['subnet'] + inconsistent_opts = [] + if ovn_dhcp_opts: + if subnet['ip_version'] == n_const.IP_VERSION_4: + for opt, value in global_v4_opts.items(): + if value != ovn_dhcp_opts['options'].get(opt, None): + inconsistent_opts.append(opt) + if subnet['ip_version'] == n_const.IP_VERSION_6: + for opt, value in global_v6_opts.items(): + if value != ovn_dhcp_opts['options'].get(opt, None): + inconsistent_opts.append(opt) + if inconsistent_opts: + LOG.debug('Subnet %s has inconsistent DHCP opts: %s', + subnet['id'], inconsistent_opts) + inconsistent_subnets.append(subnet) + return inconsistent_subnets + + # A static spacing value is used here, but this method will only run + # once per lock due to the use of periodics.NeverAgain(). + @periodics.periodic(spacing=600, + run_immediately=True) + def check_global_dhcp_opts(self): + # This periodic task is included in DBInconsistenciesPeriodics since + # it uses the lock to ensure only one worker is executing + if not self.has_lock: + return + if (not ovn_conf.get_global_dhcpv4_opts() and + not ovn_conf.get_global_dhcpv6_opts()): + # No need to scan the subnets if the settings are unset. 
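The run-once idiom used by this and the following tasks (retry every tick until the work can be done, then unschedule permanently) looks like this in isolation:

from futurist import periodics

def holds_ovsdb_lock():
    return True   # stand-in for self.has_lock

@periodics.periodic(spacing=600, run_immediately=True)
def one_shot_task():
    if not holds_ovsdb_lock():
        return                      # not the leader yet: retry next tick
    print('task body runs once on the leader')
    raise periodics.NeverAgain()    # permanently unschedule this task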
+ raise periodics.NeverAgain() + LOG.debug('Maintenance task: Checking DHCP options on subnets') + self._sync_timer.restart() + fix_subnets = self._check_subnet_global_dhcp_opts() + if fix_subnets: + admin_context = n_context.get_admin_context() + LOG.debug('Triggering update for %s subnets', len(fix_subnets)) + for subnet in fix_subnets: + neutron_net = self._ovn_client._plugin.get_network( + admin_context, subnet['network_id']) + try: + self._ovn_client.update_subnet(subnet, neutron_net) + except Exception: + LOG.exception('Failed to update subnet %s', + subnet['id']) + + self._sync_timer.stop() + LOG.info('Maintenance task: DHCP options check finished ' + '(took %.2f seconds)', self._sync_timer.elapsed()) + + raise periodics.NeverAgain() + + # A static spacing value is used here, but this method will only run + # once per lock due to the use of periodics.NeverAgain(). + @periodics.periodic(spacing=1800, run_immediately=True) + def check_metadata_ports(self): + # If OVN metadata is disabled do not run this task again + if not ovn_conf.is_ovn_metadata_enabled(): + raise periodics.NeverAgain() + + # Make sure that only one worker is executing this + if not self.has_lock: + return + + admin_context = n_context.get_admin_context() + for n in self._ovn_client._plugin.get_networks(admin_context): + self._ovn_client.create_metadata_port(admin_context, n) + + raise periodics.NeverAgain() + + # TODO(lucasagomes): Remove this in the U cycle + # A static spacing value is used here, but this method will only run + # once per lock due to the use of periodics.NeverAgain(). + @periodics.periodic(spacing=600, run_immediately=True) + def check_for_port_security_unknown_address(self): + + if not self.has_lock: + return + + for port in self._nb_idl.lsp_list().execute(check_error=True): + + if port.type == ovn_const.LSP_TYPE_LOCALNET: + continue + + addresses = port.addresses + type_ = port.type.strip() + if not port.port_security: + if not type_ and ovn_const.UNKNOWN_ADDR not in addresses: + addresses.append(ovn_const.UNKNOWN_ADDR) + elif type_ and ovn_const.UNKNOWN_ADDR in addresses: + addresses.remove(ovn_const.UNKNOWN_ADDR) + else: + if type_ and ovn_const.UNKNOWN_ADDR in addresses: + addresses.remove(ovn_const.UNKNOWN_ADDR) + elif not type_ and ovn_const.UNKNOWN_ADDR in addresses: + addresses.remove(ovn_const.UNKNOWN_ADDR) + + self._nb_idl.lsp_set_addresses( + port.name, addresses=addresses).execute(check_error=True) + + raise periodics.NeverAgain() + + # A static spacing value is used here, but this method will only run + # once per lock due to the use of periodics.NeverAgain(). + @periodics.periodic(spacing=600, run_immediately=True) + def check_for_fragmentation_support(self): + if not self.has_lock: + return + + context = n_context.get_admin_context() + for net in self._ovn_client._plugin.get_networks( + context, {external_net.EXTERNAL: [True]}): + self._ovn_client.set_gateway_mtu(context, net) + + raise periodics.NeverAgain() + + +class HashRingHealthCheckPeriodics(object): + + def __init__(self, group): + self._group = group + self.ctx = n_context.get_admin_context() + + @periodics.periodic(spacing=ovn_const.HASH_RING_TOUCH_INTERVAL) + def touch_hash_ring_nodes(self): + # NOTE(lucasagomes): Note that we do not rely on the OVSDB lock + # here because we want the maintenance tasks from each instance to + # execute this task. 
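+        # Touching the nodes refreshes their timestamps so that this
+        # host's Hash Ring entries keep being seen as alive.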
+ hash_ring_db.touch_nodes_from_host(self.ctx, self._group) diff --git a/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/ovn_db_sync.py b/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/ovn_db_sync.py new file mode 100644 index 00000000000..1fc4c90ccae --- /dev/null +++ b/neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/ovn_db_sync.py @@ -0,0 +1,1244 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import abc +from datetime import datetime +import itertools + +from eventlet import greenthread +from neutron_lib.api.definitions import l3 +from neutron_lib.api.definitions import provider_net as pnet +from neutron_lib import constants +from neutron_lib import context +from neutron_lib import exceptions as n_exc +from neutron_lib.plugins import constants as plugin_constants +from neutron_lib.plugins import directory +from neutron_lib.utils import helpers +from oslo_log import log +import six + +from neutron.common.ovn import acl as acl_utils +from neutron.common.ovn import constants as ovn_const +from neutron.common.ovn import utils +from neutron.conf.plugins.ml2.drivers.ovn import ovn_conf +from neutron.plugins.ml2.drivers.ovn.mech_driver.ovsdb import ovn_client +from neutron.services.segments import db as segments_db + + +LOG = log.getLogger(__name__) + +SYNC_MODE_OFF = 'off' +SYNC_MODE_LOG = 'log' +SYNC_MODE_REPAIR = 'repair' + + +@six.add_metaclass(abc.ABCMeta) +class OvnDbSynchronizer(object): + + def __init__(self, core_plugin, ovn_api, ovn_driver): + self.ovn_driver = ovn_driver + self.ovn_api = ovn_api + self.core_plugin = core_plugin + + def sync(self, delay_seconds=10): + self._gt = greenthread.spawn_after_local(delay_seconds, self.do_sync) + + @abc.abstractmethod + def do_sync(self): + """Method to sync the OVN DB.""" + + def stop(self): + try: + self._gt.kill() + except AttributeError: + # Haven't started syncing + pass + + +class OvnNbSynchronizer(OvnDbSynchronizer): + """Synchronizer class for NB.""" + + def __init__(self, core_plugin, ovn_api, sb_ovn, mode, ovn_driver): + super(OvnNbSynchronizer, self).__init__( + core_plugin, ovn_api, ovn_driver) + self.mode = mode + self.l3_plugin = directory.get_plugin(plugin_constants.L3) + self._ovn_client = ovn_client.OVNClient(ovn_api, sb_ovn) + + def stop(self): + if utils.is_ovn_l3(self.l3_plugin): + self.l3_plugin._ovn.ovsdb_connection.stop() + self.l3_plugin._sb_ovn.ovsdb_connection.stop() + super(OvnNbSynchronizer, self).stop() + + def do_sync(self): + if self.mode == SYNC_MODE_OFF: + LOG.debug("Neutron sync mode is off") + return + LOG.debug("Starting OVN-Northbound DB sync process") + + ctx = context.get_admin_context() + + self.sync_address_sets(ctx) + self.sync_port_groups(ctx) + self.sync_networks_ports_and_dhcp_opts(ctx) + self.sync_port_dns_records(ctx) + self.sync_acls(ctx) + self.sync_routers_and_rports(ctx) + + def _create_port_in_ovn(self, ctx, port): + # Remove any old ACLs for the port to avoid creating duplicate ACLs. 
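+        # (create_port() below will re-create the ACLs that the port
+        # should have according to its current security groups.)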
+        self.ovn_api.delete_acl(
+            utils.ovn_name(port['network_id']),
+            port['id']).execute(check_error=True)
+
+        # Create the port in OVN. This will include ACL and Address Set
+        # updates as needed.
+        self._ovn_client.create_port(port)
+
+    def remove_common_acls(self, neutron_acls, nb_acls):
+        """Remove the ACLs common to the two ACL dictionaries.
+
+        @param neutron_acls: neutron dictionary of port vs acls
+        @type neutron_acls: {}
+        @param nb_acls: nb dictionary of port vs acls
+        @type nb_acls: {}
+        @return: Nothing, the original dictionaries are modified in place
+        """
+        for port in neutron_acls.keys():
+            for acl in list(neutron_acls[port]):
+                if port in nb_acls and acl in nb_acls[port]:
+                    neutron_acls[port].remove(acl)
+                    nb_acls[port].remove(acl)
+
+    def compute_address_set_difference(self, neutron_sgs, nb_sgs):
+        neutron_sgs_name_set = set(neutron_sgs.keys())
+        nb_sgs_name_set = set(nb_sgs.keys())
+        sgnames_to_add = list(neutron_sgs_name_set - nb_sgs_name_set)
+        sgnames_to_delete = list(nb_sgs_name_set - neutron_sgs_name_set)
+        sgs_common = list(neutron_sgs_name_set & nb_sgs_name_set)
+        sgs_to_update = {}
+        for sg_name in sgs_common:
+            neutron_addr_set = set(neutron_sgs[sg_name]['addresses'])
+            nb_addr_set = set(nb_sgs[sg_name]['addresses'])
+            addrs_to_add = list(neutron_addr_set - nb_addr_set)
+            addrs_to_delete = list(nb_addr_set - neutron_addr_set)
+            if addrs_to_add or addrs_to_delete:
+                sgs_to_update[sg_name] = {'name': sg_name,
+                                          'addrs_add': addrs_to_add,
+                                          'addrs_remove': addrs_to_delete}
+        return sgnames_to_add, sgnames_to_delete, sgs_to_update
+
+    def get_acls(self, context):
+        """Create the list of ACLs in OVN.
+
+        @param context: neutron_lib.context
+        @type context: object of type neutron_lib.context.Context
+        @var lswitch_names: List of lswitch names
+        @var acl_list: List of NB acls
+        @var acl_list_dict: Dictionary of acl-lists based on lport as key
+        @return: acl_list_dict
+        """
+        lswitch_names = set()
+        for network in self.core_plugin.get_networks(context):
+            lswitch_names.add(network['id'])
+        acl_dict, ignore1, ignore2 = (
+            self.ovn_api.get_acls_for_lswitches(lswitch_names))
+        acl_list = list(itertools.chain(*acl_dict.values()))
+        acl_list_dict = {}
+        for acl in acl_list:
+            acl = acl_utils.filter_acl_dict(
+                acl, extra_fields=['lport', 'lswitch'])
+            key = acl['lport']
+            if key in acl_list_dict:
+                acl_list_dict[key].append(acl)
+            else:
+                acl_list_dict[key] = [acl]
+        return acl_list_dict
+
+    def get_address_sets(self):
+        return self.ovn_api.get_address_sets()
+
+    def sync_port_groups(self, ctx):
+        """Sync Port Groups between neutron and NB.
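+
+        In repair mode, Port Groups present in Neutron but missing from
+        NB (including the default drop Port Group) are created, and NB
+        Port Groups without a Neutron counterpart are removed.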
+
+        @param ctx: neutron_lib.context
+        @type ctx: object of type neutron_lib.context.Context
+        """
+        if not self.ovn_api.is_port_groups_supported():
+            return
+
+        neutron_sgs = {}
+        neutron_pgs = set()
+        with ctx.session.begin(subtransactions=True):
+            for sg in self.core_plugin.get_security_groups(ctx):
+                pg_name = utils.ovn_port_group_name(sg['id'])
+                neutron_pgs.add(pg_name)
+                neutron_sgs[pg_name] = sg['id']
+            neutron_pgs.add(ovn_const.OVN_DROP_PORT_GROUP_NAME)
+
+        ovn_pgs = set()
+        port_groups = self.ovn_api.db_list_rows('Port_Group').execute() or []
+        for pg in port_groups:
+            ovn_pgs.add(pg.name)
+
+        add_pgs = neutron_pgs.difference(ovn_pgs)
+        remove_pgs = ovn_pgs.difference(neutron_pgs)
+
+        LOG.debug('Port Groups added %d, removed %d',
+                  len(add_pgs), len(remove_pgs))
+
+        if self.mode == SYNC_MODE_REPAIR:
+            LOG.debug('Port-Group-SYNC: transaction started @ %s',
+                      str(datetime.now()))
+            if add_pgs:
+                db_ports = self.core_plugin.get_ports(ctx)
+                ovn_ports = set(p.name for p in
+                                self.ovn_api.lsp_list().execute())
+            with self.ovn_api.transaction(check_error=True) as txn:
+                pg = ovn_const.OVN_DROP_PORT_GROUP_NAME
+                # Process default drop port group first
+                if pg in add_pgs:
+                    txn.add(self.ovn_api.pg_add(name=pg, acls=[]))
+                    add_pgs.remove(pg)
+                    # Add ports to the drop port group. Only add those that
+                    # already exist in OVN. The rest will be added during the
+                    # ports sync operation later.
+                    for n_port in db_ports:
+                        if ((n_port['security_groups'] or
+                                n_port['port_security_enabled']) and
+                                n_port['id'] in ovn_ports):
+                            txn.add(self.ovn_api.pg_add_ports(
+                                pg, n_port['id']))
+
+                for pg in add_pgs:
+                    # If it's a security group PG, add the ext id
+                    ext_ids = {ovn_const.OVN_SG_EXT_ID_KEY: neutron_sgs[pg]}
+                    txn.add(self.ovn_api.pg_add(name=pg, acls=[],
+                                                external_ids=ext_ids))
+                    # Add the ports belonging to the SG to this port group
+                    for n_port in db_ports:
+                        if (neutron_sgs[pg] in n_port['security_groups'] and
+                                n_port['id'] in ovn_ports):
+                            txn.add(self.ovn_api.pg_add_ports(
+                                pg, n_port['id']))
+                for pg in remove_pgs:
+                    txn.add(self.ovn_api.pg_del(pg))
+            LOG.debug('Port-Group-SYNC: transaction finished @ %s',
+                      str(datetime.now()))
+
+    def sync_address_sets(self, ctx):
+        """Sync Address Sets between neutron and NB.
+
+        @param ctx: neutron_lib.context
+        @type ctx: object of type neutron_lib.context.Context
+        @var db_ports: List of ports from neutron DB
+        """
+        LOG.debug('Address-Set-SYNC: started @ %s', str(datetime.now()))
+
+        sgnames_to_add = sgnames_to_delete = []
+        sgs_to_update = {}
+        nb_sgs = self.get_address_sets()
+
+        if self.ovn_api.is_port_groups_supported():
+            # If Port Groups are supported, we just need to delete all
+            # Address Sets from the NB database.
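+            # OVN derives equivalent address sets from the Port_Group
+            # rows automatically, so the Neutron-maintained Address Sets
+            # become redundant.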
+ sgnames_to_delete = nb_sgs.keys() + else: + neutron_sgs = {} + with ctx.session.begin(subtransactions=True): + db_sgs = self.core_plugin.get_security_groups(ctx) + db_ports = self.core_plugin.get_ports(ctx) + + for sg in db_sgs: + for ip_version in ['ip4', 'ip6']: + name = utils.ovn_addrset_name(sg['id'], ip_version) + neutron_sgs[name] = { + 'name': name, 'addresses': [], + 'external_ids': { + ovn_const.OVN_SG_EXT_ID_KEY: sg['id']}} + + for port in db_ports: + sg_ids = utils.get_lsp_security_groups(port) + if port.get('fixed_ips') and sg_ids: + addresses = acl_utils.acl_port_ips(port) + for sg_id in sg_ids: + for ip_version in addresses: + name = utils.ovn_addrset_name(sg_id, ip_version) + neutron_sgs[name]['addresses'].extend( + addresses[ip_version]) + + sgnames_to_add, sgnames_to_delete, sgs_to_update = ( + self.compute_address_set_difference(neutron_sgs, nb_sgs)) + + LOG.debug('Address_Sets added %d, removed %d, updated %d', + len(sgnames_to_add), len(sgnames_to_delete), + len(sgs_to_update)) + + if self.mode == SYNC_MODE_REPAIR: + LOG.debug('Address-Set-SYNC: transaction started @ %s', + str(datetime.now())) + with self.ovn_api.transaction(check_error=True) as txn: + for sgname in sgnames_to_add: + sg = neutron_sgs[sgname] + txn.add(self.ovn_api.create_address_set(**sg)) + for sgname, sg in sgs_to_update.items(): + txn.add(self.ovn_api.update_address_set(**sg)) + for sgname in sgnames_to_delete: + txn.add(self.ovn_api.delete_address_set(name=sgname)) + LOG.debug('Address-Set-SYNC: transaction finished @ %s', + str(datetime.now())) + + def _get_acls_from_port_groups(self): + ovn_acls = [] + port_groups = self.ovn_api.db_list_rows('Port_Group').execute() + for pg in port_groups: + acls = getattr(pg, 'acls', []) + for acl in acls: + acl_string = {} + acl_string['port_group'] = pg.name + for acl_key in getattr(acl, "_data", {}): + acl_string[acl_key] = getattr(acl, acl_key) + acl_string.pop('meter') + acl_string.pop('external_ids') + ovn_acls.append(acl_string) + return ovn_acls + + def _sync_acls_port_groups(self, ctx): + # If Port Groups are supported, the ACLs in the system will equal + # the number of SG rules plus the default drop rules as OVN would + # allow all traffic by default if those are not added. 
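+        # In other words, the expected end state is roughly:
+        #     len(NB ACLs) == len(SG rules) + len(drop ACLs on the
+        #     default drop Port Group), typically one drop ACL per
+        #     direction.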
+        neutron_acls = []
+        for sgr in self.core_plugin.get_security_group_rules(ctx):
+            pg_name = utils.ovn_port_group_name(sgr['security_group_id'])
+            neutron_acls.append(acl_utils._add_sg_rule_acl_for_port_group(
+                pg_name, sgr, self.ovn_api))
+        neutron_acls += acl_utils.add_acls_for_drop_port_group(
+            ovn_const.OVN_DROP_PORT_GROUP_NAME)
+
+        ovn_acls = self._get_acls_from_port_groups()
+
+        # We also need to remove all the ACLs applied directly to the
+        # Logical Switches
+        def get_num_acls(ovn_acls):
+            return len([item for sublist in ovn_acls for item in sublist[1]])
+
+        ovn_acls_from_ls = [(row.name, row.acls) for row in (
+            self.ovn_api._tables['Logical_Switch'].rows.values())]
+        num_acls_to_remove_from_ls = get_num_acls(ovn_acls_from_ls)
+
+        # Remove the common ones
+        for na in list(neutron_acls):
+            for ovn_a in ovn_acls:
+                if all(item in na.items() for item in ovn_a.items()):
+                    neutron_acls.remove(na)
+                    ovn_acls.remove(ovn_a)
+                    break
+
+        num_acls_to_add = len(neutron_acls)
+        num_acls_to_remove = len(ovn_acls) + num_acls_to_remove_from_ls
+        if 0 != num_acls_to_add or 0 != num_acls_to_remove:
+            LOG.warning('ACLs-to-be-added %(add)d '
+                        'ACLs-to-be-removed %(remove)d',
+                        {'add': num_acls_to_add,
+                         'remove': num_acls_to_remove})
+
+        if self.mode == SYNC_MODE_REPAIR:
+            with self.ovn_api.transaction(check_error=True) as txn:
+                for acla in neutron_acls:
+                    LOG.warning('ACL found in Neutron but not in '
+                                'OVN DB for port group %s',
+                                acla['port_group'])
+                    txn.add(self.ovn_api.pg_acl_add(**acla))
+
+            with self.ovn_api.transaction(check_error=True) as txn:
+                for aclr in ovn_acls:
+                    LOG.warning('ACLs found in OVN DB but not in '
+                                'Neutron for port group %s',
+                                aclr['port_group'])
+                    txn.add(self.ovn_api.pg_acl_del(aclr['port_group'],
+                                                    aclr['direction'],
+                                                    aclr['priority'],
+                                                    aclr['match']))
+                for aclr in ovn_acls_from_ls:
+                    # Remove all the ACLs from any Logical Switch if they have
+                    # any. Elements are (lswitch_name, list_of_acls).
+                    if len(aclr[1]) > 0:
+                        LOG.warning('Removing ACLs from OVN from Logical '
+                                    'Switch %s', aclr[0])
+                        txn.add(self.ovn_api.acl_del(aclr[0]))
+
+    def _sync_acls(self, ctx):
+        """Sync ACLs between neutron and NB when not using Port Groups.
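+
+        Compares the ACLs derived from Neutron security groups with the
+        ACLs in the NB database and, in repair mode, adds the missing
+        ones and removes the stale ones.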
+ + @param ctx: neutron_lib.context + @type ctx: object of type neutron_lib.context.Context + @var db_ports: List of ports from neutron DB + @var neutron_acls: neutron dictionary of port + vs list-of-acls + @var nb_acls: NB dictionary of port + vs list-of-acls + @var subnet_cache: cache for subnets + @return: Nothing + """ + db_ports = {} + for port in self.core_plugin.get_ports(ctx): + db_ports[port['id']] = port + + sg_cache = {} + subnet_cache = {} + neutron_acls = {} + for port_id, port in db_ports.items(): + if utils.get_lsp_security_groups(port): + acl_list = acl_utils.add_acls(self.core_plugin, + ctx, + port, + sg_cache, + subnet_cache, + self.ovn_api) + if port_id in neutron_acls: + neutron_acls[port_id].extend(acl_list) + else: + neutron_acls[port_id] = acl_list + + nb_acls = self.get_acls(ctx) + + self.remove_common_acls(neutron_acls, nb_acls) + + num_acls_to_add = len(list(itertools.chain(*neutron_acls.values()))) + num_acls_to_remove = len(list(itertools.chain(*nb_acls.values()))) + if 0 != num_acls_to_add or 0 != num_acls_to_remove: + LOG.warning('ACLs-to-be-added %(add)d ' + 'ACLs-to-be-removed %(remove)d', + {'add': num_acls_to_add, + 'remove': num_acls_to_remove}) + + if self.mode == SYNC_MODE_REPAIR: + with self.ovn_api.transaction(check_error=True) as txn: + for acla in list(itertools.chain(*neutron_acls.values())): + LOG.warning('ACL found in Neutron but not in ' + 'OVN DB for port %s', acla['lport']) + txn.add(self.ovn_api.add_acl(**acla)) + + with self.ovn_api.transaction(check_error=True) as txn: + for aclr in list(itertools.chain(*nb_acls.values())): + # Both lswitch and lport aren't needed within the ACL. + lswitchr = aclr.pop('lswitch').replace('neutron-', '') + lportr = aclr.pop('lport') + aclr_dict = {lportr: aclr} + LOG.warning('ACLs found in OVN DB but not in ' + 'Neutron for port %s', lportr) + txn.add(self.ovn_api.update_acls( + [lswitchr], + [lportr], + aclr_dict, + need_compare=False, + is_add_acl=False + )) + + def sync_acls(self, ctx): + """Sync ACLs between neutron and NB. + + @param ctx: neutron_lib.context + @type ctx: object of type neutron_lib.context.Context + @return: Nothing + """ + LOG.debug('ACL-SYNC: started @ %s', str(datetime.now())) + + if self.ovn_api.is_port_groups_supported(): + self._sync_acls_port_groups(ctx) + else: + self._sync_acls(ctx) + + LOG.debug('ACL-SYNC: finished @ %s', str(datetime.now())) + + def _calculate_fips_differences(self, ovn_fips, db_fips): + to_add = [] + to_remove = [] + for db_fip in db_fips: + for ovn_fip in ovn_fips: + if (ovn_fip['logical_ip'] == db_fip['fixed_ip_address'] and + ovn_fip['external_ip'] == db_fip['floating_ip_address']): + break + else: + to_add.append(db_fip) + + for ovn_fip in ovn_fips: + for db_fip in db_fips: + if (ovn_fip['logical_ip'] == db_fip['fixed_ip_address'] and + ovn_fip['external_ip'] == db_fip['floating_ip_address']): + break + else: + to_remove.append(ovn_fip) + + return to_add, to_remove + + def sync_routers_and_rports(self, ctx): + """Sync Routers between neutron and NB. + + @param ctx: neutron_lib.context + @type ctx: object of type neutron_lib.context.Context + @var db_routers: List of Routers from neutron DB + @var db_router_ports: List of Router ports from neutron DB + @var lrouters: NB dictionary of logical routers and + the corresponding logical router ports. 
+        @var del_lrouters_list: List of Routers that need to be
+                                deleted from NB
+        @var del_lrouter_ports_list: List of Router ports that need to be
+                                     deleted from NB
+        @return: Nothing
+        """
+        if not utils.is_ovn_l3(self.l3_plugin):
+            LOG.debug("OVN L3 mode is disabled, skipping "
+                      "sync routers and router ports")
+            return
+
+        LOG.debug('OVN-NB Sync Routers and Router ports started @ %s',
+                  str(datetime.now()))
+
+        db_routers = {}
+        db_extends = {}
+        db_router_ports = {}
+        for router in self.l3_plugin.get_routers(ctx):
+            db_routers[router['id']] = router
+            db_extends[router['id']] = {}
+            db_extends[router['id']]['routes'] = []
+            db_extends[router['id']]['snats'] = []
+            db_extends[router['id']]['fips'] = []
+            if not router.get(l3.EXTERNAL_GW_INFO):
+                continue
+            gateways = self._ovn_client._get_gw_info(ctx, router)
+            for gw_info in gateways:
+                prefix = (constants.IPv4_ANY if
+                          gw_info.ip_version == constants.IP_VERSION_4 else
+                          constants.IPv6_ANY)
+                if gw_info.gateway_ip:
+                    db_extends[router['id']]['routes'].append(
+                        {'destination': prefix,
+                         'nexthop': gw_info.gateway_ip})
+                if gw_info.ip_version == constants.IP_VERSION_6:
+                    continue
+                if gw_info.router_ip and utils.is_snat_enabled(router):
+                    networks = (
+                        self._ovn_client._get_v4_network_of_all_router_ports(
+                            ctx, router['id']))
+                    for network in networks:
+                        db_extends[router['id']]['snats'].append({
+                            'logical_ip': network,
+                            'external_ip': gw_info.router_ip,
+                            'type': 'snat'})
+
+        fips = self.l3_plugin.get_floatingips(
+            ctx, {'router_id': list(db_routers.keys())})
+        for fip in fips:
+            db_extends[fip['router_id']]['fips'].append(fip)
+        interfaces = self.l3_plugin._get_sync_interfaces(
+            ctx, list(db_routers.keys()),
+            [constants.DEVICE_OWNER_ROUTER_INTF,
+             constants.DEVICE_OWNER_ROUTER_GW,
+             constants.DEVICE_OWNER_DVR_INTERFACE,
+             constants.DEVICE_OWNER_ROUTER_HA_INTF,
+             constants.DEVICE_OWNER_HA_REPLICATED_INT])
+        for interface in interfaces:
+            db_router_ports[interface['id']] = interface
+
+        lrouters = self.ovn_api.get_all_logical_routers_with_rports()
+
+        del_lrouters_list = []
+        del_lrouter_ports_list = []
+        update_sroutes_list = []
+        update_lrport_list = []
+        update_snats_list = []
+        update_fips_list = []
+        for lrouter in lrouters:
+            if lrouter['name'] in db_routers:
+                for lrport, lrport_nets in lrouter['ports'].items():
+                    if lrport in db_router_ports:
+                        # We don't have to check the networks and
+                        # ipv6_ra_configs values. Let's add it to the
+                        # update_lrport_list. If they are in sync, then
+                        # update_router_port will be a no-op.
+ update_lrport_list.append(db_router_ports[lrport]) + del db_router_ports[lrport] + else: + del_lrouter_ports_list.append( + {'port': lrport, 'lrouter': lrouter['name']}) + if 'routes' in db_routers[lrouter['name']]: + db_routes = db_routers[lrouter['name']]['routes'] + else: + db_routes = [] + if 'routes' in db_extends[lrouter['name']]: + db_routes.extend(db_extends[lrouter['name']]['routes']) + + ovn_routes = lrouter['static_routes'] + add_routes, del_routes = helpers.diff_list_of_dict( + ovn_routes, db_routes) + update_sroutes_list.append({'id': lrouter['name'], + 'add': add_routes, + 'del': del_routes}) + ovn_fips = lrouter['dnat_and_snats'] + db_fips = db_extends[lrouter['name']]['fips'] + add_fips, del_fips = self._calculate_fips_differences( + ovn_fips, db_fips) + update_fips_list.append({'id': lrouter['name'], + 'add': add_fips, + 'del': del_fips}) + ovn_nats = lrouter['snats'] + db_snats = db_extends[lrouter['name']]['snats'] + add_snats, del_snats = helpers.diff_list_of_dict( + ovn_nats, db_snats) + update_snats_list.append({'id': lrouter['name'], + 'add': add_snats, + 'del': del_snats}) + del db_routers[lrouter['name']] + else: + del_lrouters_list.append(lrouter) + + for r_id, router in db_routers.items(): + LOG.warning("Router found in Neutron but not in " + "OVN DB, router id=%s", router['id']) + if self.mode == SYNC_MODE_REPAIR: + try: + LOG.warning("Creating the router %s in OVN NB DB", + router['id']) + self._ovn_client.create_router( + router, add_external_gateway=False) + if 'routes' in router: + update_sroutes_list.append( + {'id': router['id'], 'add': router['routes'], + 'del': []}) + if 'routes' in db_extends[router['id']]: + update_sroutes_list.append( + {'id': router['id'], + 'add': db_extends[router['id']]['routes'], + 'del': []}) + if 'snats' in db_extends[router['id']]: + update_snats_list.append( + {'id': router['id'], + 'add': db_extends[router['id']]['snats'], + 'del': []}) + if 'fips' in db_extends[router['id']]: + update_fips_list.append( + {'id': router['id'], + 'add': db_extends[router['id']]['fips'], + 'del': []}) + except RuntimeError: + LOG.warning("Create router in OVN NB failed for router %s", + router['id']) + + for rp_id, rrport in db_router_ports.items(): + LOG.warning("Router Port found in Neutron but not in OVN " + "DB, router port_id=%s", rrport['id']) + if self.mode == SYNC_MODE_REPAIR: + try: + LOG.warning("Creating the router port %s in OVN NB DB", + rrport['id']) + self._ovn_client._create_lrouter_port( + rrport['device_id'], rrport) + except RuntimeError: + LOG.warning("Create router port in OVN " + "NB failed for router port %s", rrport['id']) + + for rport in update_lrport_list: + LOG.warning("Router Port port_id=%s needs to be updated " + "for networks changed", + rport['id']) + if self.mode == SYNC_MODE_REPAIR: + try: + LOG.warning( + "Updating networks on router port %s in OVN NB DB", + rport['id']) + self._ovn_client.update_router_port(rport) + except RuntimeError: + LOG.warning("Update router port networks in OVN " + "NB failed for router port %s", rport['id']) + + with self.ovn_api.transaction(check_error=True) as txn: + for lrouter in del_lrouters_list: + LOG.warning("Router found in OVN but not in " + "Neutron, router id=%s", lrouter['name']) + if self.mode == SYNC_MODE_REPAIR: + LOG.warning("Deleting the router %s from OVN NB DB", + lrouter['name']) + txn.add(self.ovn_api.delete_lrouter( + utils.ovn_name(lrouter['name']))) + + for lrport_info in del_lrouter_ports_list: + LOG.warning("Router Port found in OVN but not in " + 
"Neutron, port_id=%s", lrport_info['port']) + if self.mode == SYNC_MODE_REPAIR: + LOG.warning("Deleting the port %s from OVN NB DB", + lrport_info['port']) + txn.add(self.ovn_api.delete_lrouter_port( + utils.ovn_lrouter_port_name(lrport_info['port']), + utils.ovn_name(lrport_info['lrouter']), + if_exists=False)) + for sroute in update_sroutes_list: + if sroute['add']: + LOG.warning("Router %(id)s static routes %(route)s " + "found in Neutron but not in OVN", + {'id': sroute['id'], 'route': sroute['add']}) + if self.mode == SYNC_MODE_REPAIR: + LOG.warning("Add static routes %s to OVN NB DB", + sroute['add']) + for route in sroute['add']: + txn.add(self.ovn_api.add_static_route( + utils.ovn_name(sroute['id']), + ip_prefix=route['destination'], + nexthop=route['nexthop'])) + if sroute['del']: + LOG.warning("Router %(id)s static routes %(route)s " + "found in OVN but not in Neutron", + {'id': sroute['id'], 'route': sroute['del']}) + if self.mode == SYNC_MODE_REPAIR: + LOG.warning("Delete static routes %s from OVN NB DB", + sroute['del']) + for route in sroute['del']: + txn.add(self.ovn_api.delete_static_route( + utils.ovn_name(sroute['id']), + ip_prefix=route['destination'], + nexthop=route['nexthop'])) + for fip in update_fips_list: + if fip['del']: + LOG.warning("Router %(id)s floating ips %(fip)s " + "found in OVN but not in Neutron", + {'id': fip['id'], 'fip': fip['del']}) + if self.mode == SYNC_MODE_REPAIR: + LOG.warning( + "Delete floating ips %s from OVN NB DB", + fip['del']) + for nat in fip['del']: + self._ovn_client._delete_floatingip( + nat, utils.ovn_name(fip['id']), txn=txn) + if fip['add']: + LOG.warning("Router %(id)s floating ips %(fip)s " + "found in Neutron but not in OVN", + {'id': fip['id'], 'fip': fip['add']}) + if self.mode == SYNC_MODE_REPAIR: + LOG.warning("Add floating ips %s to OVN NB DB", + fip['add']) + for nat in fip['add']: + self._ovn_client._create_or_update_floatingip( + nat, txn=txn) + for snat in update_snats_list: + if snat['del']: + LOG.warning("Router %(id)s snat %(snat)s " + "found in OVN but not in Neutron", + {'id': snat['id'], 'snat': snat['del']}) + if self.mode == SYNC_MODE_REPAIR: + LOG.warning("Delete snats %s from OVN NB DB", + snat['del']) + for nat in snat['del']: + txn.add(self.ovn_api.delete_nat_rule_in_lrouter( + utils.ovn_name(snat['id']), + logical_ip=nat['logical_ip'], + external_ip=nat['external_ip'], + type='snat')) + if snat['add']: + LOG.warning("Router %(id)s snat %(snat)s " + "found in Neutron but not in OVN", + {'id': snat['id'], 'snat': snat['add']}) + if self.mode == SYNC_MODE_REPAIR: + LOG.warning("Add snats %s to OVN NB DB", + snat['add']) + for nat in snat['add']: + txn.add(self.ovn_api.add_nat_rule_in_lrouter( + utils.ovn_name(snat['id']), + logical_ip=nat['logical_ip'], + external_ip=nat['external_ip'], + type='snat')) + LOG.debug('OVN-NB Sync routers and router ports finished %s', + str(datetime.now())) + + def _sync_subnet_dhcp_options(self, ctx, db_networks, + ovn_subnet_dhcp_options): + LOG.debug('OVN-NB Sync DHCP options for Neutron subnets started') + + db_subnets = {} + filters = {'enable_dhcp': [1]} + for subnet in self.core_plugin.get_subnets(ctx, filters=filters): + if (subnet['ip_version'] == constants.IP_VERSION_6 and + subnet.get('ipv6_address_mode') == constants.IPV6_SLAAC): + continue + db_subnets[subnet['id']] = subnet + + del_subnet_dhcp_opts_list = [] + for subnet_id, ovn_dhcp_opts in ovn_subnet_dhcp_options.items(): + if subnet_id in db_subnets: + network = db_networks[utils.ovn_name( + 
db_subnets[subnet_id]['network_id'])]
+                if constants.IP_VERSION_6 == db_subnets[subnet_id][
+                        'ip_version']:
+                    server_mac = ovn_dhcp_opts['options'].get('server_id')
+                else:
+                    server_mac = ovn_dhcp_opts['options'].get('server_mac')
+                dhcp_options = self._ovn_client._get_ovn_dhcp_options(
+                    db_subnets[subnet_id], network, server_mac=server_mac)
+                # Verify that the cidr and options are also in sync.
+                if dhcp_options['cidr'] == ovn_dhcp_opts['cidr'] and (
+                        dhcp_options['options'] == ovn_dhcp_opts['options']):
+                    del db_subnets[subnet_id]
+                else:
+                    db_subnets[subnet_id]['ovn_dhcp_options'] = dhcp_options
+            else:
+                del_subnet_dhcp_opts_list.append(ovn_dhcp_opts)
+
+        for subnet_id, subnet in db_subnets.items():
+            LOG.warning('DHCP options for subnet %s are present in '
+                        'Neutron but out of sync with OVN', subnet_id)
+            if self.mode == SYNC_MODE_REPAIR:
+                try:
+                    LOG.debug('Adding/Updating DHCP options for subnet %s '
+                              'in OVN NB DB', subnet_id)
+                    network = db_networks[utils.ovn_name(subnet['network_id'])]
+                    # _ovn_client._add_subnet_dhcp_options doesn't create
+                    # a new row in DHCP_Options if the row already exists.
+                    # See commands.AddDHCPOptionsCommand.
+                    self._ovn_client._add_subnet_dhcp_options(
+                        subnet, network, subnet.get('ovn_dhcp_options'))
+                except RuntimeError:
+                    LOG.warning('Adding/Updating DHCP options for subnet '
+                                '%s failed in OVN NB DB', subnet_id)
+
+        txn_commands = []
+        for dhcp_opt in del_subnet_dhcp_opts_list:
+            LOG.warning('Out of sync subnet DHCP options for subnet %s '
+                        'found in OVN NB DB which need to be deleted',
+                        dhcp_opt['external_ids']['subnet_id'])
+            if self.mode == SYNC_MODE_REPAIR:
+                LOG.debug('Deleting subnet DHCP options for subnet %s ',
+                          dhcp_opt['external_ids']['subnet_id'])
+                txn_commands.append(self.ovn_api.delete_dhcp_options(
+                    dhcp_opt['uuid']))
+
+        if txn_commands:
+            with self.ovn_api.transaction(check_error=True) as txn:
+                for cmd in txn_commands:
+                    txn.add(cmd)
+        LOG.debug('OVN-NB Sync DHCP options for Neutron subnets finished')
+
+    def _sync_port_dhcp_options(self, ctx, ports_need_sync_dhcp_opts,
+                                ovn_port_dhcpv4_opts, ovn_port_dhcpv6_opts):
+        LOG.debug('OVN-NB Sync DHCP options for Neutron ports with extra '
+                  'dhcp options assigned started')
+
+        txn_commands = []
+        lsp_dhcp_key = {constants.IP_VERSION_4: 'dhcpv4_options',
+                        constants.IP_VERSION_6: 'dhcpv6_options'}
+        ovn_port_dhcp_opts = {constants.IP_VERSION_4: ovn_port_dhcpv4_opts,
+                              constants.IP_VERSION_6: ovn_port_dhcpv6_opts}
+        for port in ports_need_sync_dhcp_opts:
+            if self.mode == SYNC_MODE_REPAIR:
+                LOG.debug('Updating DHCP options for port %s in OVN NB DB',
+                          port['id'])
+            set_lsp = {}
+            for ip_v in [constants.IP_VERSION_4, constants.IP_VERSION_6]:
+                dhcp_opts = (
+                    self._ovn_client._get_port_dhcp_options(
+                        port, ip_v))
+                if not dhcp_opts or 'uuid' in dhcp_opts:
+                    # If the Logical_Switch_Port.dhcpv4_options or
+                    # dhcpv6_options no longer refers to port DHCP options
+                    # created in DHCP_Options earlier, those port DHCP
+                    # options will be deleted in the following
+                    # ovn_port_dhcp_options handling.
+                    set_lsp[lsp_dhcp_key[ip_v]] = [
+                        dhcp_opts['uuid']] if dhcp_opts else []
+                else:
+                    # If the port has extra port DHCP options, a command
+                    # will be returned by
+                    # self._ovn_client._get_port_dhcp_options
+                    # to add or update the port DHCP options.
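+                    # dhcp_opts['cmd'] is an OVSDB command object; it is
+                    # queued in txn_commands and also referenced from
+                    # set_lsp so the resulting DHCP_Options row gets
+                    # linked to the Logical_Switch_Port once the
+                    # transaction commits.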
+                    ovn_port_dhcp_opts[ip_v].pop(port['id'], None)
+                    dhcp_options = dhcp_opts['cmd']
+                    txn_commands.append(dhcp_options)
+                    set_lsp[lsp_dhcp_key[ip_v]] = dhcp_options
+            if set_lsp:
+                txn_commands.append(self.ovn_api.set_lswitch_port(
+                    lport_name=port['id'], **set_lsp))
+
+        for ip_v in [constants.IP_VERSION_4, constants.IP_VERSION_6]:
+            for port_id, dhcp_opt in ovn_port_dhcp_opts[ip_v].items():
+                LOG.warning(
+                    'Out of sync port DHCPv%(ip_version)d options for '
+                    '(subnet %(subnet_id)s port %(port_id)s) found in OVN '
+                    'NB DB which need to be deleted',
+                    {'ip_version': ip_v,
+                     'subnet_id': dhcp_opt['external_ids']['subnet_id'],
+                     'port_id': port_id})
+
+                if self.mode == SYNC_MODE_REPAIR:
+                    LOG.debug('Deleting port DHCPv%d options for (subnet %s, '
+                              'port %s)', ip_v,
+                              dhcp_opt['external_ids']['subnet_id'], port_id)
+                    txn_commands.append(self.ovn_api.delete_dhcp_options(
+                        dhcp_opt['uuid']))
+
+        if txn_commands:
+            with self.ovn_api.transaction(check_error=True) as txn:
+                for cmd in txn_commands:
+                    txn.add(cmd)
+        LOG.debug('OVN-NB Sync DHCP options for Neutron ports with extra '
+                  'dhcp options assigned finished')
+
+    def _sync_metadata_ports(self, ctx, db_ports):
+        """Ensure metadata ports in all Neutron networks.
+
+        This method will ensure that all networks have one and only one
+        metadata port.
+        """
+        if not ovn_conf.is_ovn_metadata_enabled():
+            return
+        LOG.debug('OVN sync metadata ports started')
+        for net in self.core_plugin.get_networks(ctx):
+            dhcp_ports = self.core_plugin.get_ports(ctx, filters=dict(
+                network_id=[net['id']],
+                device_owner=[constants.DEVICE_OWNER_DHCP]))
+
+            # Iterate over a copy since we mutate dhcp_ports inside the
+            # loop; removing items from the list being iterated would
+            # skip elements.
+            for port in list(dhcp_ports):
+                # Do not touch the Neutron DHCP agents ports
+                if utils.is_neutron_dhcp_agent_port(port):
+                    dhcp_ports.remove(port)
+
+            if not dhcp_ports:
+                LOG.warning('Missing metadata port found in Neutron for '
+                            'network %s', net['id'])
+                if self.mode == SYNC_MODE_REPAIR:
+                    try:
+                        # Create the missing port in both Neutron and OVN.
+                        LOG.warning('Creating missing metadata port in '
+                                    'Neutron and OVN for network %s',
+                                    net['id'])
+                        self._ovn_client.create_metadata_port(ctx, net)
+                    except n_exc.IpAddressGenerationFailure:
+                        LOG.error('Could not allocate IP addresses for '
+                                  'metadata port in network %s', net['id'])
+                        continue
+            else:
+                # Delete all but one DHCP port. Only one is needed for
+                # metadata.
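+                # dhcp_ports[0] is kept as the metadata port; the rest
+                # are removed from Neutron and from the local db_ports
+                # map.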
+ for port in dhcp_ports[1:]: + LOG.warning('Unnecessary DHCP port %s for network %s ' + 'found in Neutron', port['id'], net['id']) + if self.mode == SYNC_MODE_REPAIR: + LOG.warning('Deleting unnecessary DHCP port %s for ' + 'network %s', port['id'], net['id']) + self.core_plugin.delete_port(ctx, port['id']) + db_ports.pop(port['id'], None) + port = dhcp_ports[0] + if port['id'] in db_ports.keys(): + LOG.warning('Metadata port %s for network %s found in ' + 'Neutron but not in OVN', + port['id'], net['id']) + if self.mode == SYNC_MODE_REPAIR: + LOG.warning('Creating metadata port %s for network ' + '%s in OVN', + port['id'], net['id']) + self._create_port_in_ovn(ctx, port) + db_ports.pop(port['id']) + + if self.mode == SYNC_MODE_REPAIR: + # Make sure that this port has an IP address in all the subnets + self._ovn_client.update_metadata_port(ctx, net['id']) + LOG.debug('OVN sync metadata ports finished') + + def sync_networks_ports_and_dhcp_opts(self, ctx): + LOG.debug('OVN-NB Sync networks, ports and DHCP options started') + db_networks = {} + for net in self.core_plugin.get_networks(ctx): + db_networks[utils.ovn_name(net['id'])] = net + + # Ignore the floating ip ports with device_owner set to + # constants.DEVICE_OWNER_FLOATINGIP + db_ports = {port['id']: port for port in + self.core_plugin.get_ports(ctx) if not + utils.is_lsp_ignored(port)} + + ovn_all_dhcp_options = self.ovn_api.get_all_dhcp_options() + db_network_cache = dict(db_networks) + + ports_need_sync_dhcp_opts = [] + lswitches = self.ovn_api.get_all_logical_switches_with_ports() + del_lswitchs_list = [] + del_lports_list = [] + add_provnet_ports_list = [] + for lswitch in lswitches: + if lswitch['name'] in db_networks: + for lport in lswitch['ports']: + if lport in db_ports: + port = db_ports.pop(lport) + if not utils.is_network_device_port(port): + ports_need_sync_dhcp_opts.append(port) + else: + del_lports_list.append({'port': lport, + 'lswitch': lswitch['name']}) + db_network = db_networks[lswitch['name']] + physnet = db_network.get(pnet.PHYSICAL_NETWORK) + # Updating provider attributes is forbidden by neutron, thus + # we only need to consider missing provnet-ports in OVN DB. 
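+                # The provnet port is the localnet Logical_Switch_Port
+                # that attaches the logical switch to the physical
+                # network, so a physnet-backed network must have one.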
+ if physnet and not lswitch['provnet_port']: + add_provnet_ports_list.append( + {'network': db_network, + 'lswitch': lswitch['name']}) + + del db_networks[lswitch['name']] + else: + del_lswitchs_list.append(lswitch) + + for net_id, network in db_networks.items(): + LOG.warning("Network found in Neutron but not in " + "OVN DB, network_id=%s", network['id']) + if self.mode == SYNC_MODE_REPAIR: + try: + LOG.debug('Creating the network %s in OVN NB DB', + network['id']) + self._ovn_client.create_network(network) + except RuntimeError: + LOG.warning("Create network in OVN NB failed for " + "network %s", network['id']) + + self._sync_metadata_ports(ctx, db_ports) + + self._sync_subnet_dhcp_options( + ctx, db_network_cache, ovn_all_dhcp_options['subnets']) + + for port_id, port in db_ports.items(): + LOG.warning("Port found in Neutron but not in OVN " + "DB, port_id=%s", port['id']) + if self.mode == SYNC_MODE_REPAIR: + try: + LOG.debug('Creating the port %s in OVN NB DB', + port['id']) + self._create_port_in_ovn(ctx, port) + if port_id in ovn_all_dhcp_options['ports_v4']: + dhcp_disable, lsp_opts = utils.get_lsp_dhcp_opts( + port, constants.IP_VERSION_4) + if lsp_opts: + ovn_all_dhcp_options['ports_v4'].pop(port_id) + if port_id in ovn_all_dhcp_options['ports_v6']: + dhcp_disable, lsp_opts = utils.get_lsp_dhcp_opts( + port, constants.IP_VERSION_6) + if lsp_opts: + ovn_all_dhcp_options['ports_v6'].pop(port_id) + except RuntimeError: + LOG.warning("Create port in OVN NB failed for" + " port %s", port['id']) + + with self.ovn_api.transaction(check_error=True) as txn: + for lswitch in del_lswitchs_list: + LOG.warning("Network found in OVN but not in " + "Neutron, network_id=%s", lswitch['name']) + if self.mode == SYNC_MODE_REPAIR: + LOG.debug('Deleting the network %s from OVN NB DB', + lswitch['name']) + txn.add(self.ovn_api.ls_del(lswitch['name'])) + + for provnet_port_info in add_provnet_ports_list: + network = provnet_port_info['network'] + LOG.warning("Provider network found in Neutron but " + "provider network port not found in OVN DB, " + "network_id=%s", provnet_port_info['lswitch']) + if self.mode == SYNC_MODE_REPAIR: + LOG.debug('Creating the provnet port %s in OVN NB DB', + utils.ovn_provnet_port_name(network['id'])) + self._ovn_client._create_provnet_port( + txn, network, network.get(pnet.PHYSICAL_NETWORK), + network.get(pnet.SEGMENTATION_ID)) + + for lport_info in del_lports_list: + LOG.warning("Port found in OVN but not in " + "Neutron, port_id=%s", lport_info['port']) + if self.mode == SYNC_MODE_REPAIR: + LOG.debug('Deleting the port %s from OVN NB DB', + lport_info['port']) + txn.add(self.ovn_api.delete_lswitch_port( + lport_name=lport_info['port'], + lswitch_name=lport_info['lswitch'])) + if lport_info['port'] in ovn_all_dhcp_options['ports_v4']: + LOG.debug('Deleting port DHCPv4 options for (port %s)', + lport_info['port']) + txn.add(self.ovn_api.delete_dhcp_options( + ovn_all_dhcp_options['ports_v4'].pop( + lport_info['port'])['uuid'])) + if lport_info['port'] in ovn_all_dhcp_options['ports_v6']: + LOG.debug('Deleting port DHCPv6 options for (port %s)', + lport_info['port']) + txn.add(self.ovn_api.delete_dhcp_options( + ovn_all_dhcp_options['ports_v6'].pop( + lport_info['port'])['uuid'])) + + self._sync_port_dhcp_options(ctx, ports_need_sync_dhcp_opts, + ovn_all_dhcp_options['ports_v4'], + ovn_all_dhcp_options['ports_v6']) + LOG.debug('OVN-NB Sync networks, ports and DHCP options finished') + + def sync_port_dns_records(self, ctx): + if self.mode != SYNC_MODE_REPAIR: + return + 
LOG.debug('OVN-NB Sync port dns records') + # Ignore the floating ip ports with device_owner set to + # constants.DEVICE_OWNER_FLOATINGIP + db_ports = [port for port in + self.core_plugin.get_ports(ctx) if not + port.get('device_owner', '').startswith( + constants.DEVICE_OWNER_FLOATINGIP)] + dns_records = {} + for port in db_ports: + if self._ovn_client.is_dns_required_for_port(port): + port_dns_records = self._ovn_client.get_port_dns_records(port) + if port['network_id'] not in dns_records: + dns_records[port['network_id']] = {} + dns_records[port['network_id']].update(port_dns_records) + + for network_id, port_dns_records in dns_records.items(): + self._set_dns_records(network_id, port_dns_records) + + def _set_dns_records(self, network_id, dns_records): + lswitch_name = utils.ovn_name(network_id) + ls, ls_dns_record = self.ovn_api.get_ls_and_dns_record(lswitch_name) + + with self.ovn_api.transaction(check_error=True) as txn: + if not ls_dns_record: + dns_add_txn = txn.add(self.ovn_api.dns_add( + external_ids={'ls_name': ls.name}, records=dns_records)) + txn.add(self.ovn_api.ls_set_dns_records(ls.uuid, dns_add_txn)) + else: + txn.add(self.ovn_api.dns_set_records(ls_dns_record.uuid, + **dns_records)) + + def _delete_address_sets(self, ctx): + with self.ovn_api.transaction(check_error=True) as txn: + for sg in self.core_plugin.get_security_groups(ctx): + for ip_version in ['ip4', 'ip6']: + txn.add(self.ovn_api.delete_address_set( + utils.ovn_addrset_name(sg['id'], ip_version))) + + def _delete_acls_from_lswitches(self, ctx): + with self.ovn_api.transaction(check_error=True) as txn: + for net in self.core_plugin.get_networks(ctx): + # Calling acl_del from ovsdbapp with no ACL will delete + # all the ACLs belonging to that Logical Switch. + txn.add(self.ovn_api.acl_del(utils.ovn_name(net['id']))) + + def _create_default_drop_port_group(self, db_ports): + with self.ovn_api.transaction(check_error=True) as txn: + pg_name = ovn_const.OVN_DROP_PORT_GROUP_NAME + if not self.ovn_api.get_port_group(pg_name): + # If drop Port Group doesn't exist yet, create it. + txn.add(self.ovn_api.pg_add(pg_name, acls=[])) + # Add ACLs to this Port Group so that all traffic is dropped. + acls = acl_utils.add_acls_for_drop_port_group(pg_name) + for acl in acls: + txn.add(self.ovn_api.pg_acl_add(**acl)) + ports_ids = [port['id'] for port in db_ports] + # Add the ports to the default Port Group + txn.add(self.ovn_api.pg_add_ports(pg_name, ports_ids)) + + def _create_sg_port_groups_and_acls(self, ctx, db_ports): + # Create a Port Group per Neutron Security Group + with self.ovn_api.transaction(check_error=True) as txn: + for sg in self.core_plugin.get_security_groups(ctx): + pg_name = utils.ovn_port_group_name(sg['id']) + if self.ovn_api.get_port_group(pg_name): + continue + ext_ids = {ovn_const.OVN_SG_EXT_ID_KEY: sg['id']} + txn.add(self.ovn_api.pg_add( + name=pg_name, acls=[], external_ids=ext_ids)) + acl_utils.add_acls_for_sg_port_group(self.ovn_api, sg, txn) + for port in db_ports: + for sg in port['security_groups']: + txn.add(self.ovn_api.pg_add_ports( + utils.ovn_port_group_name(sg), port['id'])) + + def migrate_to_port_groups(self, ctx): + # This routine is responsible for migrating the current Security + # Groups and SG Rules to the new Port Groups implementation. + # 1. Create the default drop Port Group and add all ports with port + # security enabled to it. + # 2. Create a Port Group for every existing Neutron Security Group and + # add all its Security Group Rules as ACLs to that Port Group. + # 3. 
Delete all existing Address Sets in NorthBound database which + # correspond to a Neutron Security Group. + # 4. Delete all the ACLs in every Logical Switch (Neutron network). + + # If Port Groups are not supported or we've already migrated, return + if (not self.ovn_api.is_port_groups_supported() or + not self.ovn_api.get_address_sets()): + return + + LOG.debug('Port Groups Migration task started') + + # Ignore the floating ip ports with device_owner set to + # constants.DEVICE_OWNER_FLOATINGIP + db_ports = [port for port in + self.core_plugin.get_ports(ctx) if not + utils.is_lsp_ignored(port) and not + utils.is_lsp_trusted(port) and + utils.is_port_security_enabled(port)] + + self._create_default_drop_port_group(db_ports) + self._create_sg_port_groups_and_acls(ctx, db_ports) + self._delete_address_sets(ctx) + self._delete_acls_from_lswitches(ctx) + + LOG.debug('Port Groups Migration task finished') + + +class OvnSbSynchronizer(OvnDbSynchronizer): + """Synchronizer class for SB.""" + + def __init__(self, core_plugin, ovn_api, ovn_driver): + super(OvnSbSynchronizer, self).__init__( + core_plugin, ovn_api, ovn_driver) + self.l3_plugin = directory.get_plugin(plugin_constants.L3) + + def do_sync(self): + """Method to sync the OVN_Southbound DB with neutron DB. + + OvnSbSynchronizer will sync data from OVN_Southbound to neutron. And + the synchronization will always be performed, no matter what mode it + is. + """ + LOG.debug("Starting OVN-Southbound DB sync process") + + ctx = context.get_admin_context() + self.sync_hostname_and_physical_networks(ctx) + if utils.is_ovn_l3(self.l3_plugin): + self.l3_plugin.schedule_unhosted_gateways() + + def sync_hostname_and_physical_networks(self, ctx): + LOG.debug('OVN-SB Sync hostname and physical networks started') + host_phynets_map = self.ovn_api.get_chassis_hostname_and_physnets() + current_hosts = set(host_phynets_map) + previous_hosts = segments_db.get_hosts_mapped_with_segments(ctx) + + stale_hosts = previous_hosts - current_hosts + for host in stale_hosts: + LOG.debug('Stale host %s found in Neutron, but not in OVN SB DB. ' + 'Clear its SegmentHostMapping in Neutron', host) + self.ovn_driver.update_segment_host_mapping(host, []) + + new_hosts = current_hosts - previous_hosts + for host in new_hosts: + LOG.debug('New host %s found in OVN SB DB, but not in Neutron. ' + 'Add its SegmentHostMapping in Neutron', host) + self.ovn_driver.update_segment_host_mapping( + host, host_phynets_map[host]) + + for host in current_hosts & previous_hosts: + LOG.debug('Host %s found both in OVN SB DB and Neutron. 
' + 'Trigger updating its SegmentHostMapping in Neutron, ' + 'to keep OVN SB DB and Neutron have consistent data', + host) + self.ovn_driver.update_segment_host_mapping( + host, host_phynets_map[host]) + + LOG.debug('OVN-SB Sync hostname and physical networks finished') diff --git a/neutron/tests/unit/fake_resources.py b/neutron/tests/unit/fake_resources.py index c39f47771cf..03a4261e994 100644 --- a/neutron/tests/unit/fake_resources.py +++ b/neutron/tests/unit/fake_resources.py @@ -164,6 +164,7 @@ class FakeOvsdbSbOvnIdl(object): self.get_gateway_chassis_from_cms_options = mock.Mock() self.is_col_present = mock.Mock() self.is_col_present.return_value = False + self.db_set = mock.Mock() class FakeOvsdbTransaction(object): diff --git a/neutron/tests/unit/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_maintenance.py b/neutron/tests/unit/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_maintenance.py new file mode 100644 index 00000000000..a99b6b3e6fe --- /dev/null +++ b/neutron/tests/unit/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_maintenance.py @@ -0,0 +1,271 @@ +# Copyright 2019 Red Hat, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import mock + +from futurist import periodics +from neutron_lib import context + +from neutron.common.ovn import constants +from neutron.common.ovn import utils +from neutron.conf.plugins.ml2.drivers.ovn import ovn_conf +from neutron.db import ovn_revision_numbers_db +from neutron.plugins.ml2.drivers.ovn.mech_driver.ovsdb import maintenance +from neutron.plugins.ml2.drivers.ovn.mech_driver.ovsdb import ovn_db_sync +from neutron.tests.unit.plugins.ml2 import test_security_group as test_sg +from neutron.tests.unit import testlib_api + + +@mock.patch.object(maintenance.DBInconsistenciesPeriodics, + 'has_lock', mock.PropertyMock(return_value=True)) +class TestDBInconsistenciesPeriodics(testlib_api.SqlTestCaseLight, + test_sg.Ml2SecurityGroupsTestCase): + + def setUp(self): + super(TestDBInconsistenciesPeriodics, self).setUp() + self.net = self._make_network( + self.fmt, name='net1', admin_state_up=True)['network'] + self.port = self._make_port( + self.fmt, self.net['id'], name='port1')['port'] + self.fake_ovn_client = mock.Mock() + self.periodic = maintenance.DBInconsistenciesPeriodics( + self.fake_ovn_client) + self.ctx = context.get_admin_context() + + @mock.patch.object(maintenance.DBInconsistenciesPeriodics, + '_fix_create_update') + @mock.patch.object(ovn_revision_numbers_db, 'get_inconsistent_resources') + def test_check_for_inconsistencies(self, mock_get_incon_res, mock_fix_net): + fake_row = mock.Mock(resource_type=constants.TYPE_NETWORKS) + mock_get_incon_res.return_value = [fake_row, ] + self.periodic.check_for_inconsistencies() + mock_fix_net.assert_called_once_with(mock.ANY, fake_row) + + def _test_migrate_to_port_groups_helper(self, pg_supported, a_sets, + migration_expected, never_again): + self.fake_ovn_client._nb_idl.is_port_groups_supported.return_value = ( + pg_supported) + 
self.fake_ovn_client._nb_idl.get_address_sets.return_value = a_sets
+        with mock.patch.object(ovn_db_sync.OvnNbSynchronizer,
+                               'migrate_to_port_groups') as mtpg:
+            if never_again:
+                self.assertRaises(periodics.NeverAgain,
+                                  self.periodic.migrate_to_port_groups)
+            else:
+                self.periodic.migrate_to_port_groups()
+
+            if migration_expected:
+                mtpg.assert_called_once_with(mock.ANY)
+            else:
+                mtpg.assert_not_called()
+
+    def test_migrate_to_port_groups_port_groups_not_supported(self):
+        self._test_migrate_to_port_groups_helper(pg_supported=False,
+                                                 a_sets=None,
+                                                 migration_expected=False,
+                                                 never_again=True)
+
+    def test_migrate_to_port_groups_not_needed(self):
+        self._test_migrate_to_port_groups_helper(pg_supported=True,
+                                                 a_sets=None,
+                                                 migration_expected=False,
+                                                 never_again=True)
+
+    def test_migrate_to_port_groups(self):
+        # Check the normal migration path: if port groups are supported
+        # by the schema and the migration has to be done, it will take
+        # place and won't be attempted again in the future.
+        self._test_migrate_to_port_groups_helper(pg_supported=True,
+                                                 a_sets=['as1', 'as2'],
+                                                 migration_expected=True,
+                                                 never_again=True)
+
+    def test_migrate_to_port_groups_no_lock(self):
+        with mock.patch.object(maintenance.DBInconsistenciesPeriodics,
+                               'has_lock', mock.PropertyMock(
+                                   return_value=False)):
+            # Check that if this worker doesn't have the lock, it won't
+            # perform the migration and it will try again later.
+            self._test_migrate_to_port_groups_helper(pg_supported=True,
+                                                     a_sets=['as1', 'as2'],
+                                                     migration_expected=False,
+                                                     never_again=False)
+
+    def _test_fix_create_update_network(self, ovn_rev, neutron_rev):
+        self.net['revision_number'] = neutron_rev
+
+        # Create an entry in the revision_numbers table and assert the
+        # initial revision_number for our test object is the expected one
+        ovn_revision_numbers_db.create_initial_revision(
+            self.ctx, self.net['id'], constants.TYPE_NETWORKS,
+            revision_number=ovn_rev)
+        row = ovn_revision_numbers_db.get_revision_row(self.ctx,
+                                                       self.net['id'])
+        self.assertEqual(ovn_rev, row.revision_number)
+
+        if ovn_rev < 0:
+            self.fake_ovn_client._nb_idl.get_lswitch.return_value = None
+        else:
+            fake_ls = mock.Mock(external_ids={
+                constants.OVN_REV_NUM_EXT_ID_KEY: ovn_rev})
+            self.fake_ovn_client._nb_idl.get_lswitch.return_value = fake_ls
+
+        self.fake_ovn_client._plugin.get_network.return_value = self.net
+        self.periodic._fix_create_update(self.ctx, row)
+
+        # Since the revision number was < 0, make sure create_network()
+        # is invoked with the latest version of the object in the neutron
+        # database
+        if ovn_rev < 0:
+            self.fake_ovn_client.create_network.assert_called_once_with(
+                self.net)
+        # If the revision number is > 0, it means that the object already
+        # exists and we just need to update it to match the latest in the
+        # neutron database, so update_network() should be called.
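+        # (For ovn_rev >= 0 the fake lswitch carries that revision in
+        # its external_ids, which is where the maintenance code reads
+        # the OVN-side revision from.)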
+ else: + self.fake_ovn_client.update_network.assert_called_once_with( + self.net) + + def test_fix_network_create(self): + self._test_fix_create_update_network(ovn_rev=-1, neutron_rev=2) + + def test_fix_network_update(self): + self._test_fix_create_update_network(ovn_rev=5, neutron_rev=7) + + def _test_fix_create_update_port(self, ovn_rev, neutron_rev): + self.port['revision_number'] = neutron_rev + + # Create an entry to the revision_numbers table and assert the + # initial revision_number for our test object is the expected + ovn_revision_numbers_db.create_initial_revision( + self.ctx, self.port['id'], constants.TYPE_PORTS, + revision_number=ovn_rev) + row = ovn_revision_numbers_db.get_revision_row(self.ctx, + self.port['id']) + self.assertEqual(ovn_rev, row.revision_number) + + if ovn_rev < 0: + self.fake_ovn_client._nb_idl.get_lswitch_port.return_value = None + else: + fake_lsp = mock.Mock(external_ids={ + constants.OVN_REV_NUM_EXT_ID_KEY: ovn_rev}) + self.fake_ovn_client._nb_idl.get_lswitch_port.return_value = ( + fake_lsp) + + self.fake_ovn_client._plugin.get_port.return_value = self.port + self.periodic._fix_create_update(self.ctx, row) + + # Since the revision number was < 0, make sure create_port() + # is invoked with the latest version of the object in the neutron + # database + if ovn_rev < 0: + self.fake_ovn_client.create_port.assert_called_once_with( + self.port) + # If the revision number is > 0 it means that the object already + # exist and we just need to update to match the latest in the + # neutron database so, update_port() should be called. + else: + self.fake_ovn_client.update_port.assert_called_once_with( + self.port) + + def test_fix_port_create(self): + self._test_fix_create_update_port(ovn_rev=-1, neutron_rev=2) + + def test_fix_port_update(self): + self._test_fix_create_update_port(ovn_rev=5, neutron_rev=7) + + @mock.patch.object(ovn_revision_numbers_db, 'bump_revision') + def _test_fix_security_group_create(self, mock_bump, revision_number): + sg_name = utils.ovn_addrset_name('fake_id', 'ip4') + sg = self._make_security_group(self.fmt, sg_name, '')['security_group'] + + ovn_revision_numbers_db.create_initial_revision( + self.ctx, sg['id'], constants.TYPE_SECURITY_GROUPS, + revision_number=revision_number) + row = ovn_revision_numbers_db.get_revision_row(self.ctx, sg['id']) + self.assertEqual(revision_number, row.revision_number) + + if revision_number < 0: + self.fake_ovn_client._nb_idl.get_address_set.return_value = None + self.fake_ovn_client._nb_idl.get_port_group.return_value = None + else: + self.fake_ovn_client._nb_idl.get_address_set.return_value = ( + mock.sentinel.AddressSet) + + self.fake_ovn_client._plugin.get_security_group.return_value = sg + self.periodic._fix_create_update(self.ctx, row) + + if revision_number < 0: + self.fake_ovn_client.create_security_group.assert_called_once_with( + sg) + else: + # If the object already exist let's make sure we just bump + # the revision number in the ovn_revision_numbers table + self.assertFalse(self.fake_ovn_client.create_security_group.called) + mock_bump.assert_called_once_with( + self.ctx, sg, constants.TYPE_SECURITY_GROUPS) + + def test_fix_security_group_create_doesnt_exist(self): + self._test_fix_security_group_create(revision_number=-1) + + def test_fix_security_group_create_version_mismatch(self): + self._test_fix_security_group_create(revision_number=2) + + def test__create_lrouter_port(self): + port = {'id': 'port-id', + 'device_id': 'router-id'} + self.periodic._create_lrouter_port(port) + l3_mock 
= self.periodic._ovn_client._l3_plugin + l3_mock.add_router_interface.assert_called_once_with( + mock.ANY, port['device_id'], {'port_id': port['id']}, + may_exist=True) + + @mock.patch.object(maintenance.LOG, 'debug') + def test__log_maintenance_inconsistencies(self, mock_log): + ovn_conf.cfg.CONF.set_override('debug', True) + + # Create fake inconsistencies: 2 networks, 4 subnets and 8 ports + incst = [] + incst += [mock.Mock(resource_type=constants.TYPE_NETWORKS)] * 2 + incst += [mock.Mock(resource_type=constants.TYPE_SUBNETS)] * 4 + incst += [mock.Mock(resource_type=constants.TYPE_PORTS)] * 8 + + # Create fake inconsistencies for delete: 3 routers and 6 router ports + incst_del = [] + incst_del += [mock.Mock(resource_type=constants.TYPE_ROUTERS)] * 3 + incst_del += [mock.Mock(resource_type=constants.TYPE_ROUTER_PORTS)] * 6 + + self.periodic._log_maintenance_inconsistencies(incst, incst_del) + + # Assert LOG.debug was called twice + self.assertEqual(2, len(mock_log.call_args_list)) + + # Assert the log matches the number of inconsistencies + fail_str_create_update = mock_log.call_args_list[0][0][1]['fail_str'] + self.assertIn('networks=2', fail_str_create_update) + self.assertIn('subnets=4', fail_str_create_update) + self.assertIn('ports=8', fail_str_create_update) + + fail_str_delete = mock_log.call_args_list[1][0][1]['fail_str'] + self.assertIn('routers=3', fail_str_delete) + self.assertIn('router_ports=6', fail_str_delete) + + @mock.patch.object(maintenance.LOG, 'debug') + def test__log_maintenance_inconsistencies_debug_disabled(self, mock_log): + ovn_conf.cfg.CONF.set_override('debug', False) + + incst = [mock.Mock(resource_type=constants.TYPE_NETWORKS)] * 2 + self.periodic._log_maintenance_inconsistencies(incst, []) + self.assertFalse(mock_log.called) diff --git a/neutron/tests/unit/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_ovn_db_sync.py b/neutron/tests/unit/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_ovn_db_sync.py new file mode 100644 index 00000000000..16116a34d09 --- /dev/null +++ b/neutron/tests/unit/plugins/ml2/drivers/ovn/mech_driver/ovsdb/test_ovn_db_sync.py @@ -0,0 +1,984 @@ +# Copyright 2019 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
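+#
+# NOTE: The NB sync tests below exercise "repair" mode, which reconciles
+# the Neutron DB against the OVN Northbound DB. Conceptually (a rough
+# sketch only; the real logic lives in ovn_db_sync.OvnNbSynchronizer and
+# covers many more resource types and corner cases), repair reduces to a
+# set difference per resource, e.g. for networks:
+#
+#     ctx = context.get_admin_context()
+#     neutron_ids = {net['id'] for net in core_plugin.get_networks(ctx)}
+#     ovn_ids = {ls['name'].replace('neutron-', '')
+#                for ls in nb_api.get_all_logical_switches_with_ports()}
+#     to_create = neutron_ids - ovn_ids   # missing in OVN: create
+#     to_delete = ovn_ids - neutron_ids   # stale in OVN: delete
+#
+# The expected create/delete lists asserted throughout these tests are
+# hand-computed instances of that diff over the fixture data in setUp().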
+ +import collections + +import mock +from neutron_lib import constants as const + +from neutron.common.ovn import acl +from neutron.common.ovn import constants as ovn_const +from neutron.plugins.ml2.drivers.ovn.mech_driver.ovsdb import impl_idl_ovn +from neutron.plugins.ml2.drivers.ovn.mech_driver.ovsdb import ovn_client +from neutron.plugins.ml2.drivers.ovn.mech_driver.ovsdb import ovn_db_sync +from neutron.services.ovn_l3 import plugin as ovn_plugin +from neutron.tests.unit.plugins.ml2.drivers.ovn.mech_driver import \ + test_mech_driver + + +OvnPortInfo = collections.namedtuple('OvnPortInfo', ['name']) + + +@mock.patch.object(ovn_plugin.OVNL3RouterPlugin, '_sb_ovn', mock.Mock()) +class TestOvnNbSyncML2(test_mech_driver.OVNMechanismDriverTestCase): + + l3_plugin = 'ovn-router' + + def setUp(self): + super(TestOvnNbSyncML2, self).setUp() + + self.subnet = {'cidr': '10.0.0.0/24', + 'id': 'subnet1', + 'subnetpool_id': None, + 'name': 'private-subnet', + 'enable_dhcp': True, + 'network_id': 'n1', + 'tenant_id': 'tenant1', + 'gateway_ip': '10.0.0.1', + 'ip_version': 4, + 'shared': False} + self.matches = ["", "", "", ""] + + self.networks = [{'id': 'n1', + 'mtu': 1450, + 'provider:physical_network': 'physnet1', + 'provider:segmentation_id': 1000}, + {'id': 'n2', + 'mtu': 1450}, + {'id': 'n4', + 'mtu': 1450, + 'provider:physical_network': 'physnet2'}] + + self.subnets = [{'id': 'n1-s1', + 'network_id': 'n1', + 'enable_dhcp': True, + 'cidr': '10.0.0.0/24', + 'tenant_id': 'tenant1', + 'gateway_ip': '10.0.0.1', + 'dns_nameservers': [], + 'host_routes': [], + 'ip_version': 4}, + {'id': 'n1-s2', + 'network_id': 'n1', + 'enable_dhcp': True, + 'cidr': 'fd79:e1c:a55::/64', + 'tenant_id': 'tenant1', + 'gateway_ip': 'fd79:e1c:a55::1', + 'dns_nameservers': [], + 'host_routes': [], + 'ip_version': 6}, + {'id': 'n2', + 'network_id': 'n2', + 'enable_dhcp': True, + 'cidr': '20.0.0.0/24', + 'tenant_id': 'tenant1', + 'gateway_ip': '20.0.0.1', + 'dns_nameservers': [], + 'host_routes': [], + 'ip_version': 4}] + + self.security_groups = [ + {'id': 'sg1', 'tenant_id': 'tenant1', + 'security_group_rules': [{'remote_group_id': None, + 'direction': 'ingress', + 'remote_ip_prefix': '0.0.0.0/0', + 'protocol': 'tcp', + 'ethertype': 'IPv4', + 'tenant_id': 'tenant1', + 'port_range_max': 65535, + 'port_range_min': 1, + 'id': 'ruleid1', + 'security_group_id': 'sg1'}], + 'name': 'all-tcp'}, + {'id': 'sg2', 'tenant_id': 'tenant1', + 'security_group_rules': [{'remote_group_id': 'sg2', + 'direction': 'egress', + 'remote_ip_prefix': '0.0.0.0/0', + 'protocol': 'tcp', + 'ethertype': 'IPv4', + 'tenant_id': 'tenant1', + 'port_range_max': 65535, + 'port_range_min': 1, + 'id': 'ruleid1', + 'security_group_id': 'sg2'}], + 'name': 'all-tcpe'}] + + self.port_groups_ovn = [mock.Mock(), mock.Mock(), mock.Mock()] + self.port_groups_ovn[0].configure_mock( + name='pg_sg1', + external_ids={ovn_const.OVN_SG_EXT_ID_KEY: 'sg1'}, + ports=[], + acls=[]) + self.port_groups_ovn[1].configure_mock( + name='pg_unknown_del', + external_ids={ovn_const.OVN_SG_EXT_ID_KEY: 'sg2'}, + ports=[], + acls=[]) + self.port_groups_ovn[2].configure_mock( + name='neutron_pg_drop', + external_ids=[], + ports=[], + acls=[]) + + self.ports = [ + {'id': 'p1n1', + 'device_owner': 'compute:None', + 'fixed_ips': + [{'subnet_id': 'b142f5e3-d434-4740-8e88-75e8e5322a40', + 'ip_address': '10.0.0.4'}, + {'subnet_id': 'subnet1', + 'ip_address': 'fd79:e1c:a55::816:eff:eff:ff2'}], + 'security_groups': ['sg1'], + 'network_id': 'n1'}, + {'id': 'p2n1', + 'device_owner': 'compute:None', 
+ 'fixed_ips': + [{'subnet_id': 'b142f5e3-d434-4740-8e88-75e8e5322a40', + 'ip_address': '10.0.0.4'}, + {'subnet_id': 'subnet1', + 'ip_address': 'fd79:e1c:a55::816:eff:eff:ff2'}], + 'security_groups': ['sg2'], + 'network_id': 'n1', + 'extra_dhcp_opts': [{'ip_version': 6, + 'opt_name': 'domain-search', + 'opt_value': 'foo-domain'}]}, + {'id': 'p1n2', + 'device_owner': 'compute:None', + 'fixed_ips': + [{'subnet_id': 'b142f5e3-d434-4740-8e88-75e8e5322a40', + 'ip_address': '10.0.0.4'}, + {'subnet_id': 'subnet1', + 'ip_address': 'fd79:e1c:a55::816:eff:eff:ff2'}], + 'security_groups': ['sg1'], + 'network_id': 'n2', + 'extra_dhcp_opts': [{'ip_version': 4, + 'opt_name': 'tftp-server', + 'opt_value': '20.0.0.20'}, + {'ip_version': 4, + 'opt_name': 'dns-server', + 'opt_value': '8.8.8.8'}, + {'ip_version': 6, + 'opt_name': 'domain-search', + 'opt_value': 'foo-domain'}]}, + {'id': 'p2n2', + 'device_owner': 'compute:None', + 'fixed_ips': + [{'subnet_id': 'b142f5e3-d434-4740-8e88-75e8e5322a40', + 'ip_address': '10.0.0.4'}, + {'subnet_id': 'subnet1', + 'ip_address': 'fd79:e1c:a55::816:eff:eff:ff2'}], + 'security_groups': ['sg2'], + 'network_id': 'n2'}, + {'id': 'fp1', + 'device_owner': 'network:floatingip', + 'fixed_ips': + [{'subnet_id': 'ext-subnet', + 'ip_address': '90.0.0.10'}], + 'network_id': 'ext-net'}] + + self.ports_ovn = [OvnPortInfo('p1n1'), OvnPortInfo('p1n2'), + OvnPortInfo('p2n1'), OvnPortInfo('p2n2'), + OvnPortInfo('p3n1'), OvnPortInfo('p3n3')] + + self.acls_ovn = { + 'lport1': + # ACLs need to be removed by the sync tool + [{'id': 'acl1', 'priority': 00, 'policy': 'allow', + 'lswitch': 'lswitch1', 'lport': 'lport1'}], + 'lport2': + [{'id': 'acl2', 'priority': 00, 'policy': 'drop', + 'lswitch': 'lswitch2', 'lport': 'lport2'}], + # ACLs need to be kept as-is by the sync tool + 'p2n2': + [{'lport': 'p2n2', 'direction': 'to-lport', + 'log': False, 'lswitch': 'neutron-n2', + 'priority': 1001, 'action': 'drop', + 'external_ids': {'neutron:lport': 'p2n2'}, + 'match': 'outport == "p2n2" && ip'}, + {'lport': 'p2n2', 'direction': 'to-lport', + 'log': False, 'lswitch': 'neutron-n2', + 'priority': 1002, 'action': 'allow', + 'external_ids': {'neutron:lport': 'p2n2'}, + 'match': 'outport == "p2n2" && ip4 && ' + 'ip4.src == 10.0.0.0/24 && udp && ' + 'udp.src == 67 && udp.dst == 68'}]} + self.address_sets_ovn = { + 'as_ip4_sg1': {'external_ids': {ovn_const.OVN_SG_EXT_ID_KEY: + 'all-tcp'}, + 'name': 'as_ip4_sg1', + 'addresses': ['10.0.0.4']}, + 'as_ip4_sg2': {'external_ids': {ovn_const.OVN_SG_EXT_ID_KEY: + 'all-tcpe'}, + 'name': 'as_ip4_sg2', + 'addresses': []}, + 'as_ip6_sg2': {'external_ids': {ovn_const.OVN_SG_EXT_ID_KEY: + 'all-tcpe'}, + 'name': 'as_ip6_sg2', + 'addresses': ['fd79:e1c:a55::816:eff:eff:ff2', + 'fd79:e1c:a55::816:eff:eff:ff3']}, + 'as_ip4_del': {'external_ids': {ovn_const.OVN_SG_EXT_ID_KEY: + 'all-delete'}, + 'name': 'as_ip4_delete', + 'addresses': ['10.0.0.4']}, + } + + self.routers = [{'id': 'r1', 'routes': [{'nexthop': '20.0.0.100', + 'destination': '11.0.0.0/24'}, { + 'nexthop': '20.0.0.101', + 'destination': '12.0.0.0/24'}], + 'gw_port_id': 'gpr1', + 'external_gateway_info': { + 'network_id': "ext-net", 'enable_snat': True, + 'external_fixed_ips': [ + {'subnet_id': 'ext-subnet', + 'ip_address': '90.0.0.2'}]}}, + {'id': 'r2', 'routes': [{'nexthop': '40.0.0.100', + 'destination': '30.0.0.0/24'}], + 'gw_port_id': 'gpr2', + 'external_gateway_info': { + 'network_id': "ext-net", 'enable_snat': True, + 'external_fixed_ips': [ + {'subnet_id': 'ext-subnet', + 'ip_address': '100.0.0.2'}]}}, + 
{'id': 'r4', 'routes': []}] + + self.get_sync_router_ports = [ + {'fixed_ips': [{'subnet_id': 'subnet1', + 'ip_address': '192.168.1.1'}], + 'id': 'p1r1', + 'device_id': 'r1', + 'mac_address': 'fa:16:3e:d7:fd:5f'}, + {'fixed_ips': [{'subnet_id': 'subnet2', + 'ip_address': '192.168.2.1'}], + 'id': 'p1r2', + 'device_id': 'r2', + 'mac_address': 'fa:16:3e:d6:8b:ce'}, + {'fixed_ips': [{'subnet_id': 'subnet4', + 'ip_address': '192.168.4.1'}], + 'id': 'p1r4', + 'device_id': 'r4', + 'mac_address': 'fa:16:3e:12:34:56'}] + + self.floating_ips = [{'id': 'fip1', 'router_id': 'r1', + 'floating_ip_address': '90.0.0.10', + 'fixed_ip_address': '172.16.0.10'}, + {'id': 'fip2', 'router_id': 'r1', + 'floating_ip_address': '90.0.0.12', + 'fixed_ip_address': '172.16.2.12'}, + {'id': 'fip3', 'router_id': 'r2', + 'floating_ip_address': '100.0.0.10', + 'fixed_ip_address': '192.168.2.10'}, + {'id': 'fip4', 'router_id': 'r2', + 'floating_ip_address': '100.0.0.11', + 'fixed_ip_address': '192.168.2.11'}] + + self.lrouters_with_rports = [{'name': 'r3', + 'ports': {'p1r3': ['fake']}, + 'static_routes': [], + 'snats': [], + 'dnat_and_snats': []}, + {'name': 'r4', + 'ports': {'p1r4': + ['fdad:123:456::1/64', + 'fdad:789:abc::1/64']}, + 'static_routes': [], + 'snats': [], + 'dnat_and_snats': []}, + {'name': 'r1', + 'ports': {'p3r1': ['fake']}, + 'static_routes': + [{'nexthop': '20.0.0.100', + 'destination': '11.0.0.0/24'}, + {'nexthop': '20.0.0.100', + 'destination': '10.0.0.0/24'}], + 'snats': + [{'logical_ip': '172.16.0.0/24', + 'external_ip': '90.0.0.2', + 'type': 'snat'}, + {'logical_ip': '172.16.1.0/24', + 'external_ip': '90.0.0.2', + 'type': 'snat'}], + 'dnat_and_snats': + [{'logical_ip': '172.16.0.10', + 'external_ip': '90.0.0.10', + 'type': 'dnat_and_snat'}, + {'logical_ip': '172.16.1.11', + 'external_ip': '90.0.0.11', + 'type': 'dnat_and_snat'}, + {'logical_ip': '192.168.2.11', + 'external_ip': '100.0.0.11', + 'type': 'dnat_and_snat', + 'external_mac': '01:02:03:04:05:06', + 'logical_port': 'vm1'}]}] + + self.lswitches_with_ports = [{'name': 'neutron-n1', + 'ports': ['p1n1', 'p3n1'], + 'provnet_port': None}, + {'name': 'neutron-n3', + 'ports': ['p1n3', 'p2n3'], + 'provnet_port': None}, + {'name': 'neutron-n4', + 'ports': [], + 'provnet_port': 'provnet-n4'}] + + self.lrport_networks = ['fdad:123:456::1/64', 'fdad:cafe:a1b2::1/64'] + + def _fake_get_ovn_dhcp_options(self, subnet, network, server_mac=None): + if subnet['id'] == 'n1-s1': + return {'cidr': '10.0.0.0/24', + 'options': {'server_id': '10.0.0.1', + 'server_mac': '01:02:03:04:05:06', + 'lease_time': str(12 * 60 * 60), + 'mtu': '1450', + 'router': '10.0.0.1'}, + 'external_ids': {'subnet_id': 'n1-s1'}} + return {'cidr': '', 'options': '', 'external_ids': {}} + + def _fake_get_gw_info(self, ctx, router): + return { + 'r1': [ovn_client.GW_INFO(router_ip='90.0.0.2', + gateway_ip='90.0.0.1', + network_id='', subnet_id='', + ip_version=4, + ip_prefix=const.IPv4_ANY)], + 'r2': [ovn_client.GW_INFO(router_ip='100.0.0.2', + gateway_ip='100.0.0.1', + network_id='', subnet_id='', + ip_version=4, + ip_prefix=const.IPv4_ANY)] + }.get(router['id'], []) + + def _fake_get_v4_network_of_all_router_ports(self, ctx, router_id): + return {'r1': ['172.16.0.0/24', '172.16.2.0/24'], + 'r2': ['192.168.2.0/24']}.get(router_id, []) + + def _test_mocks_helper(self, ovn_nb_synchronizer): + core_plugin = ovn_nb_synchronizer.core_plugin + ovn_api = ovn_nb_synchronizer.ovn_api + ovn_driver = ovn_nb_synchronizer.ovn_driver + l3_plugin = ovn_nb_synchronizer.l3_plugin + + 
core_plugin.get_networks = mock.Mock()
+        core_plugin.get_networks.return_value = self.networks
+        core_plugin.get_subnets = mock.Mock()
+        core_plugin.get_subnets.return_value = self.subnets
+        # The following block is used for the ACL sync unit tests.
+
+        # With the given set of values, 19 neutron ACLs should exist;
+        # 4 ACLs are returned as the current OVN ACLs, two of which
+        # match the neutron ones. So, in this example, 17 will be
+        # added and 2 removed.
+        core_plugin.get_ports = mock.Mock()
+        core_plugin.get_ports.return_value = self.ports
+        mock.patch.object(acl, '_get_subnet_from_cache',
+                          return_value=self.subnet).start()
+        mock.patch.object(acl, 'acl_remote_group_id',
+                          side_effect=self.matches).start()
+        core_plugin.get_security_group = mock.MagicMock(
+            side_effect=self.security_groups)
+        ovn_nb_synchronizer.get_acls = mock.Mock()
+        ovn_nb_synchronizer.get_acls.return_value = self.acls_ovn
+        core_plugin.get_security_groups = mock.MagicMock(
+            return_value=self.security_groups)
+        ovn_nb_synchronizer.get_address_sets = mock.Mock()
+        ovn_nb_synchronizer.get_address_sets.return_value =\
+            self.address_sets_ovn
+        get_port_groups = mock.MagicMock()
+        get_port_groups.execute.return_value = self.port_groups_ovn
+        ovn_api.db_list_rows.return_value = get_port_groups
+        ovn_api.lsp_list.execute.return_value = self.ports_ovn
+        # end of acl-sync block
+
+        # The following block is used for the router and router port sync
+        # tests.
+        # With the given set of values in the unit test,
+        # the Neutron db has Routers r1 and r2 present.
+        # The OVN db has Routers r1 and r3 present.
+        # During the sync r2 will need to be created and r3 will need
+        # to be deleted from the OVN db. When Router r3 is deleted, all
+        # LRouter ports associated with r3 are deleted too.
+        #
+        # Neutron db has Router ports p1r1 in Router r1 and p1r2 in Router r2.
+        # OVN db has p1r3 in Router r3 and p3r1 in Router r1.
+        # During the sync p1r1 and p1r2 will be added and p1r3 and p3r1
+        # will be deleted from the OVN db.
+        l3_plugin.get_routers = mock.Mock()
+        l3_plugin.get_routers.return_value = self.routers
+        l3_plugin._get_sync_interfaces = mock.Mock()
+        l3_plugin._get_sync_interfaces.return_value = (
+            self.get_sync_router_ports)
+        ovn_nb_synchronizer._ovn_client = mock.Mock()
+        ovn_nb_synchronizer._ovn_client.\
+            _get_nets_and_ipv6_ra_confs_for_router_port.return_value = (
+                self.lrport_networks, {})
+        ovn_nb_synchronizer._ovn_client._get_v4_network_of_all_router_ports. 
\ + side_effect = self._fake_get_v4_network_of_all_router_ports + ovn_nb_synchronizer._ovn_client._get_gw_info = mock.Mock() + ovn_nb_synchronizer._ovn_client._get_gw_info.side_effect = ( + self._fake_get_gw_info) + # end of router-sync block + l3_plugin.get_floatingips = mock.Mock() + l3_plugin.get_floatingips.return_value = self.floating_ips + ovn_api.get_all_logical_switches_with_ports = mock.Mock() + ovn_api.get_all_logical_switches_with_ports.return_value = ( + self.lswitches_with_ports) + + ovn_api.get_all_logical_routers_with_rports = mock.Mock() + ovn_api.get_all_logical_routers_with_rports.return_value = ( + self.lrouters_with_rports) + + ovn_api.transaction = mock.MagicMock() + + ovn_nb_synchronizer._ovn_client.create_network = mock.Mock() + ovn_nb_synchronizer._ovn_client.create_port = mock.Mock() + ovn_driver.validate_and_get_data_from_binding_profile = mock.Mock() + ovn_nb_synchronizer._ovn_client.create_port = mock.Mock() + ovn_nb_synchronizer._ovn_client.create_port.return_value = mock.ANY + ovn_nb_synchronizer._ovn_client._create_provnet_port = mock.Mock() + ovn_api.ls_del = mock.Mock() + ovn_api.delete_lswitch_port = mock.Mock() + + ovn_api.delete_lrouter = mock.Mock() + ovn_api.delete_lrouter_port = mock.Mock() + ovn_api.add_static_route = mock.Mock() + ovn_api.delete_static_route = mock.Mock() + ovn_api.get_all_dhcp_options.return_value = { + 'subnets': {'n1-s1': {'cidr': '10.0.0.0/24', + 'options': + {'server_id': '10.0.0.1', + 'server_mac': '01:02:03:04:05:06', + 'lease_time': str(12 * 60 * 60), + 'mtu': '1450', + 'router': '10.0.0.1'}, + 'external_ids': {'subnet_id': 'n1-s1'}, + 'uuid': 'UUID1'}, + 'n1-s3': {'cidr': '30.0.0.0/24', + 'options': + {'server_id': '30.0.0.1', + 'server_mac': '01:02:03:04:05:06', + 'lease_time': str(12 * 60 * 60), + 'mtu': '1450', + 'router': '30.0.0.1'}, + 'external_ids': {'subnet_id': 'n1-s3'}, + 'uuid': 'UUID2'}}, + 'ports_v4': {'p1n2': {'cidr': '10.0.0.0/24', + 'options': {'server_id': '10.0.0.1', + 'server_mac': + '01:02:03:04:05:06', + 'lease_time': '1000', + 'mtu': '1400', + 'router': '10.0.0.1'}, + 'external_ids': {'subnet_id': 'n1-s1', + 'port_id': 'p1n2'}, + 'uuid': 'UUID3'}, + 'p5n2': {'cidr': '10.0.0.0/24', + 'options': {'server_id': '10.0.0.1', + 'server_mac': + '01:02:03:04:05:06', + 'lease_time': '1000', + 'mtu': '1400', + 'router': '10.0.0.1'}, + 'external_ids': {'subnet_id': 'n1-s1', + 'port_id': 'p5n2'}, + 'uuid': 'UUID4'}}, + 'ports_v6': {'p1n1': {'cidr': 'fd79:e1c:a55::/64', + 'options': {'server_id': '01:02:03:04:05:06', + 'mtu': '1450'}, + 'external_ids': {'subnet_id': 'fake', + 'port_id': 'p1n1'}, + 'uuid': 'UUID5'}, + 'p1n2': {'cidr': 'fd79:e1c:a55::/64', + 'options': {'server_id': '01:02:03:04:05:06', + 'mtu': '1450'}, + 'external_ids': {'subnet_id': 'fake', + 'port_id': 'p1n2'}, + 'uuid': 'UUID6'}}} + + ovn_api.create_address_set = mock.Mock() + ovn_api.delete_address_set = mock.Mock() + ovn_api.update_address_set = mock.Mock() + ovn_nb_synchronizer._ovn_client._add_subnet_dhcp_options = mock.Mock() + ovn_nb_synchronizer._ovn_client._get_ovn_dhcp_options = mock.Mock() + ovn_nb_synchronizer._ovn_client._get_ovn_dhcp_options.side_effect = ( + self._fake_get_ovn_dhcp_options) + ovn_api.delete_dhcp_options = mock.Mock() + ovn_nb_synchronizer._ovn_client.get_port_dns_records = mock.Mock() + ovn_nb_synchronizer._ovn_client.get_port_dns_records.return_value = {} + + def _test_ovn_nb_sync_helper(self, ovn_nb_synchronizer, + networks, ports, + routers, router_ports, + create_router_list, create_router_port_list, + 
update_router_port_list, + del_router_list, del_router_port_list, + create_network_list, create_port_list, + create_provnet_port_list, + del_network_list, del_port_list, + add_static_route_list, del_static_route_list, + add_snat_list, del_snat_list, + add_floating_ip_list, del_floating_ip_list, + add_address_set_list, del_address_set_list, + update_address_set_list, + add_subnet_dhcp_options_list, + delete_dhcp_options_list, + add_port_groups_list, + del_port_groups_list, + port_groups_supported=False): + self._test_mocks_helper(ovn_nb_synchronizer) + + core_plugin = ovn_nb_synchronizer.core_plugin + ovn_api = ovn_nb_synchronizer.ovn_api + ovn_api.is_port_groups_supported.return_value = port_groups_supported + mock.patch.object(impl_idl_ovn, 'get_connection').start() + + ovn_nb_synchronizer.do_sync() + + if not ovn_api.is_port_groups_supported(): + get_security_group_calls = [mock.call(mock.ANY, sg['id']) + for sg in self.security_groups] + self.assertEqual(len(self.security_groups), + core_plugin.get_security_group.call_count) + core_plugin.get_security_group.assert_has_calls( + get_security_group_calls, any_order=True) + + create_address_set_calls = [mock.call(**a) + for a in add_address_set_list] + self.assertEqual( + len(add_address_set_list), + ovn_api.create_address_set.call_count) + ovn_api.create_address_set.assert_has_calls( + create_address_set_calls, any_order=True) + + del_address_set_calls = [mock.call(**d) + for d in del_address_set_list] + self.assertEqual( + len(del_address_set_list), + ovn_api.delete_address_set.call_count) + ovn_api.delete_address_set.assert_has_calls( + del_address_set_calls, any_order=True) + + update_address_set_calls = [mock.call(**u) + for u in update_address_set_list] + self.assertEqual( + len(update_address_set_list), + ovn_api.update_address_set.call_count) + ovn_api.update_address_set.assert_has_calls( + update_address_set_calls, any_order=True) + + create_port_groups_calls = [mock.call(**a) + for a in add_port_groups_list] + self.assertEqual( + len(add_port_groups_list), + ovn_api.pg_add.call_count) + ovn_api.pg_add.assert_has_calls( + create_port_groups_calls, any_order=True) + + del_port_groups_calls = [mock.call(d) + for d in del_port_groups_list] + self.assertEqual( + len(del_port_groups_list), + ovn_api.pg_del.call_count) + ovn_api.pg_del.assert_has_calls( + del_port_groups_calls, any_order=True) + + self.assertEqual( + len(create_network_list), + ovn_nb_synchronizer._ovn_client.create_network.call_count) + create_network_calls = [mock.call(net['net']) + for net in create_network_list] + ovn_nb_synchronizer._ovn_client.create_network.assert_has_calls( + create_network_calls, any_order=True) + + self.assertEqual( + len(create_port_list), + ovn_nb_synchronizer._ovn_client.create_port.call_count) + create_port_calls = [mock.call(port) for port in create_port_list] + ovn_nb_synchronizer._ovn_client.create_port.assert_has_calls( + create_port_calls, any_order=True) + + create_provnet_port_calls = [ + mock.call(mock.ANY, mock.ANY, + network['provider:physical_network'], + network['provider:segmentation_id']) + for network in create_provnet_port_list] + self.assertEqual( + len(create_provnet_port_list), + ovn_nb_synchronizer._ovn_client._create_provnet_port.call_count) + ovn_nb_synchronizer._ovn_client._create_provnet_port.assert_has_calls( + create_provnet_port_calls, any_order=True) + + self.assertEqual(len(del_network_list), + ovn_api.ls_del.call_count) + ls_del_calls = [mock.call(net_name) + for net_name in del_network_list] + 
ovn_api.ls_del.assert_has_calls( + ls_del_calls, any_order=True) + + self.assertEqual(len(del_port_list), + ovn_api.delete_lswitch_port.call_count) + delete_lswitch_port_calls = [mock.call(lport_name=port['id'], + lswitch_name=port['lswitch']) + for port in del_port_list] + ovn_api.delete_lswitch_port.assert_has_calls( + delete_lswitch_port_calls, any_order=True) + + add_route_calls = [mock.call(mock.ANY, ip_prefix=route['destination'], + nexthop=route['nexthop']) + for route in add_static_route_list] + ovn_api.add_static_route.assert_has_calls(add_route_calls, + any_order=True) + self.assertEqual(len(add_static_route_list), + ovn_api.add_static_route.call_count) + del_route_calls = [mock.call(mock.ANY, ip_prefix=route['destination'], + nexthop=route['nexthop']) + for route in del_static_route_list] + ovn_api.delete_static_route.assert_has_calls(del_route_calls, + any_order=True) + self.assertEqual(len(del_static_route_list), + ovn_api.delete_static_route.call_count) + + add_nat_calls = [mock.call(mock.ANY, **nat) for nat in add_snat_list] + ovn_api.add_nat_rule_in_lrouter.assert_has_calls(add_nat_calls, + any_order=True) + self.assertEqual(len(add_snat_list), + ovn_api.add_nat_rule_in_lrouter.call_count) + + add_fip_calls = [mock.call(nat, txn=mock.ANY) + for nat in add_floating_ip_list] + (ovn_nb_synchronizer._ovn_client._create_or_update_floatingip. + assert_has_calls(add_fip_calls)) + self.assertEqual( + len(add_floating_ip_list), + ovn_nb_synchronizer._ovn_client._create_or_update_floatingip. + call_count) + + del_nat_calls = [mock.call(mock.ANY, **nat) for nat in del_snat_list] + ovn_api.delete_nat_rule_in_lrouter.assert_has_calls(del_nat_calls, + any_order=True) + self.assertEqual(len(del_snat_list), + ovn_api.delete_nat_rule_in_lrouter.call_count) + + del_fip_calls = [mock.call(nat, mock.ANY, txn=mock.ANY) for nat in + del_floating_ip_list] + ovn_nb_synchronizer._ovn_client._delete_floatingip.assert_has_calls( + del_fip_calls, any_order=True) + self.assertEqual( + len(del_floating_ip_list), + ovn_nb_synchronizer._ovn_client._delete_floatingip.call_count) + + create_router_calls = [mock.call(r, add_external_gateway=False) + for r in create_router_list] + self.assertEqual( + len(create_router_list), + ovn_nb_synchronizer._ovn_client.create_router.call_count) + ovn_nb_synchronizer._ovn_client.create_router.assert_has_calls( + create_router_calls, any_order=True) + + create_router_port_calls = [mock.call(p['device_id'], + mock.ANY) + for p in create_router_port_list] + self.assertEqual( + len(create_router_port_list), + ovn_nb_synchronizer._ovn_client._create_lrouter_port.call_count) + ovn_nb_synchronizer._ovn_client._create_lrouter_port.assert_has_calls( + create_router_port_calls, + any_order=True) + + self.assertEqual(len(del_router_list), + ovn_api.delete_lrouter.call_count) + update_router_port_calls = [mock.call(p) + for p in update_router_port_list] + self.assertEqual( + len(update_router_port_list), + ovn_nb_synchronizer._ovn_client.update_router_port.call_count) + ovn_nb_synchronizer._ovn_client.update_router_port.assert_has_calls( + update_router_port_calls, + any_order=True) + + delete_lrouter_calls = [mock.call(r['router']) + for r in del_router_list] + ovn_api.delete_lrouter.assert_has_calls( + delete_lrouter_calls, any_order=True) + + self.assertEqual( + len(del_router_port_list), + ovn_api.delete_lrouter_port.call_count) + delete_lrouter_port_calls = [mock.call(port['id'], + port['router'], if_exists=False) + for port in del_router_port_list] + 
ovn_api.delete_lrouter_port.assert_has_calls(
+            delete_lrouter_port_calls, any_order=True)
+
+        self.assertEqual(
+            len(add_subnet_dhcp_options_list),
+            ovn_nb_synchronizer._ovn_client._add_subnet_dhcp_options.
+            call_count)
+        add_subnet_dhcp_options_calls = [
+            mock.call(subnet, net, mock.ANY)
+            for (subnet, net) in add_subnet_dhcp_options_list]
+        ovn_nb_synchronizer._ovn_client._add_subnet_dhcp_options. \
+            assert_has_calls(add_subnet_dhcp_options_calls, any_order=True)
+
+        self.assertEqual(ovn_api.delete_dhcp_options.call_count,
+                         len(delete_dhcp_options_list))
+        delete_dhcp_options_calls = [
+            mock.call(dhcp_opt_uuid)
+            for dhcp_opt_uuid in delete_dhcp_options_list]
+        ovn_api.delete_dhcp_options.assert_has_calls(
+            delete_dhcp_options_calls, any_order=True)
+
+    def _test_ovn_nb_sync_mode_repair_helper(self, port_groups_supported=True):
+
+        create_network_list = [{'net': {'id': 'n2', 'mtu': 1450},
+                                'ext_ids': {}}]
+        del_network_list = ['neutron-n3']
+        del_port_list = [{'id': 'p3n1', 'lswitch': 'neutron-n1'},
+                         {'id': 'p1n1', 'lswitch': 'neutron-n1'}]
+        create_port_list = self.ports
+        # Iterate over a copy so that removing entries is safe: p1n1 and
+        # fp1 will be skipped by the sync logic, because p1n1 is already
+        # in the lswitch-port list and fp1 is a floating IP port.
+        for port in create_port_list[:]:
+            if port['id'] in ['p1n1', 'fp1']:
+                create_port_list.remove(port)
+        create_provnet_port_list = [{'id': 'n1', 'mtu': 1450,
+                                     'provider:physical_network': 'physnet1',
+                                     'provider:segmentation_id': 1000}]
+        create_router_list = [{
+            'id': 'r2', 'routes': [
+                {'nexthop': '40.0.0.100', 'destination': '30.0.0.0/24'}],
+            'gw_port_id': 'gpr2',
+            'external_gateway_info': {
+                'network_id': "ext-net", 'enable_snat': True,
+                'external_fixed_ips': [{
+                    'subnet_id': 'ext-subnet',
+                    'ip_address': '100.0.0.2'}]}}]
+
+        # Test the add and delete behaviors for routes, snats and fips on
+        # router r1, which exists in both the neutron DB and the OVN DB.
+        # Test the add behaviors for router r2, which only exists in the
+        # neutron DB.
+        # Static routes with destination 0.0.0.0/0 are default gateway routes.
+        add_static_route_list = [{'nexthop': '20.0.0.101',
+                                  'destination': '12.0.0.0/24'},
+                                 {'nexthop': '90.0.0.1',
+                                  'destination': '0.0.0.0/0'},
+                                 {'nexthop': '40.0.0.100',
+                                  'destination': '30.0.0.0/24'},
+                                 {'nexthop': '100.0.0.1',
+                                  'destination': '0.0.0.0/0'}]
+        del_static_route_list = [{'nexthop': '20.0.0.100',
+                                  'destination': '10.0.0.0/24'}]
+        add_snat_list = [{'logical_ip': '172.16.2.0/24',
+                          'external_ip': '90.0.0.2',
+                          'type': 'snat'},
+                         {'logical_ip': '192.168.2.0/24',
+                          'external_ip': '100.0.0.2',
+                          'type': 'snat'}]
+        del_snat_list = [{'logical_ip': '172.16.1.0/24',
+                          'external_ip': '90.0.0.2',
+                          'type': 'snat'}]
+        # fip 100.0.0.11 exists in OVN with distributed type and in Neutron
+        # with centralized type. 
This fip is used to test + # enable_distributed_floating_ip switch and migration + add_floating_ip_list = [{'id': 'fip2', 'router_id': 'r1', + 'floating_ip_address': '90.0.0.12', + 'fixed_ip_address': '172.16.2.12'}, + {'id': 'fip3', 'router_id': 'r2', + 'floating_ip_address': '100.0.0.10', + 'fixed_ip_address': '192.168.2.10'}, + {'id': 'fip4', 'router_id': 'r2', + 'floating_ip_address': '100.0.0.11', + 'fixed_ip_address': '192.168.2.11'}] + del_floating_ip_list = [{'logical_ip': '172.16.1.11', + 'external_ip': '90.0.0.11', + 'type': 'dnat_and_snat'}, + {'logical_ip': '192.168.2.11', + 'external_ip': '100.0.0.11', + 'type': 'dnat_and_snat', + 'external_mac': '01:02:03:04:05:06', + 'logical_port': 'vm1'}] + + del_router_list = [{'router': 'neutron-r3'}] + del_router_port_list = [{'id': 'lrp-p3r1', 'router': 'neutron-r1'}] + create_router_port_list = self.get_sync_router_ports[:2] + update_router_port_list = [self.get_sync_router_ports[2]] + update_router_port_list[0].update( + {'networks': self.lrport_networks}) + + if not port_groups_supported: + add_address_set_list = [ + {'external_ids': {ovn_const.OVN_SG_EXT_ID_KEY: 'sg1'}, + 'name': 'as_ip6_sg1', + 'addresses': ['fd79:e1c:a55::816:eff:eff:ff2']}] + del_address_set_list = [{'name': 'as_ip4_del'}] + update_address_set_list = [ + {'addrs_remove': [], + 'addrs_add': ['10.0.0.4'], + 'name': 'as_ip4_sg2'}, + {'addrs_remove': ['fd79:e1c:a55::816:eff:eff:ff3'], + 'addrs_add': [], + 'name': 'as_ip6_sg2'}] + # If Port Groups are not supported, we don't expect any of those + # to be created/deleted. + add_port_groups_list = [] + del_port_groups_list = [] + else: + add_port_groups_list = [ + {'external_ids': {ovn_const.OVN_SG_EXT_ID_KEY: 'sg2'}, + 'name': 'pg_sg2', + 'acls': []}] + del_port_groups_list = ['pg_unknown_del'] + # If using Port Groups, no Address Set shall be created/updated + # and all the existing ones have to be removed. 
+ add_address_set_list = [] + update_address_set_list = [] + del_address_set_list = [{'name': 'as_ip4_sg1'}, + {'name': 'as_ip4_sg2'}, + {'name': 'as_ip6_sg2'}, + {'name': 'as_ip4_del'}] + + add_subnet_dhcp_options_list = [(self.subnets[2], self.networks[1]), + (self.subnets[1], self.networks[0])] + delete_dhcp_options_list = ['UUID2', 'UUID4', 'UUID5'] + + ovn_nb_synchronizer = ovn_db_sync.OvnNbSynchronizer( + self.plugin, self.mech_driver._nb_ovn, self.mech_driver._sb_ovn, + 'repair', self.mech_driver) + self._test_ovn_nb_sync_helper(ovn_nb_synchronizer, + self.networks, + self.ports, + self.routers, + self.get_sync_router_ports, + create_router_list, + create_router_port_list, + update_router_port_list, + del_router_list, del_router_port_list, + create_network_list, create_port_list, + create_provnet_port_list, + del_network_list, del_port_list, + add_static_route_list, + del_static_route_list, + add_snat_list, + del_snat_list, + add_floating_ip_list, + del_floating_ip_list, + add_address_set_list, + del_address_set_list, + update_address_set_list, + add_subnet_dhcp_options_list, + delete_dhcp_options_list, + add_port_groups_list, + del_port_groups_list, + port_groups_supported) + + def test_ovn_nb_sync_mode_repair_no_pgs(self): + self._test_ovn_nb_sync_mode_repair_helper(port_groups_supported=False) + + def test_ovn_nb_sync_mode_repair_pgs(self): + self._test_ovn_nb_sync_mode_repair_helper(port_groups_supported=True) + + def _test_ovn_nb_sync_mode_log_helper(self, port_groups_supported=True): + create_network_list = [] + create_port_list = [] + create_provnet_port_list = [] + del_network_list = [] + del_port_list = [] + create_router_list = [] + create_router_port_list = [] + update_router_port_list = [] + del_router_list = [] + del_router_port_list = [] + add_static_route_list = [] + del_static_route_list = [] + add_snat_list = [] + del_snat_list = [] + add_floating_ip_list = [] + del_floating_ip_list = [] + add_address_set_list = [] + del_address_set_list = [] + update_address_set_list = [] + add_subnet_dhcp_options_list = [] + delete_dhcp_options_list = [] + add_port_groups_list = [] + del_port_groups_list = [] + + ovn_nb_synchronizer = ovn_db_sync.OvnNbSynchronizer( + self.plugin, self.mech_driver._nb_ovn, self.mech_driver._sb_ovn, + 'log', self.mech_driver) + self._test_ovn_nb_sync_helper(ovn_nb_synchronizer, + self.networks, + self.ports, + self.routers, + self.get_sync_router_ports, + create_router_list, + create_router_port_list, + update_router_port_list, + del_router_list, del_router_port_list, + create_network_list, create_port_list, + create_provnet_port_list, + del_network_list, del_port_list, + add_static_route_list, + del_static_route_list, + add_snat_list, + del_snat_list, + add_floating_ip_list, + del_floating_ip_list, + add_address_set_list, + del_address_set_list, + update_address_set_list, + add_subnet_dhcp_options_list, + delete_dhcp_options_list, + add_port_groups_list, + del_port_groups_list, + port_groups_supported) + + def test_ovn_nb_sync_mode_log_pgs(self): + self._test_ovn_nb_sync_mode_log_helper(port_groups_supported=True) + + def test_ovn_nb_sync_mode_log_no_pgs(self): + self._test_ovn_nb_sync_mode_log_helper(port_groups_supported=False) + + +class TestOvnSbSyncML2(test_mech_driver.OVNMechanismDriverTestCase): + + def test_ovn_sb_sync(self): + ovn_sb_synchronizer = ovn_db_sync.OvnSbSynchronizer( + self.plugin, + self.mech_driver._sb_ovn, + self.mech_driver) + ovn_api = ovn_sb_synchronizer.ovn_api + hostname_with_physnets = {'hostname1': ['physnet1', 
'physnet2'], + 'hostname2': ['physnet1']} + ovn_api.get_chassis_hostname_and_physnets.return_value = ( + hostname_with_physnets) + ovn_driver = ovn_sb_synchronizer.ovn_driver + ovn_driver.update_segment_host_mapping = mock.Mock() + hosts_in_neutron = {'hostname2', 'hostname3'} + + with mock.patch.object(ovn_db_sync.segments_db, + 'get_hosts_mapped_with_segments', + return_value=hosts_in_neutron): + ovn_sb_synchronizer.sync_hostname_and_physical_networks(mock.ANY) + all_hosts = set(hostname_with_physnets.keys()) | hosts_in_neutron + self.assertEqual( + len(all_hosts), + ovn_driver.update_segment_host_mapping.call_count) + update_segment_host_mapping_calls = [mock.call( + host, hostname_with_physnets[host]) + for host in hostname_with_physnets] + update_segment_host_mapping_calls += [ + mock.call(host, []) for host in + hosts_in_neutron - set(hostname_with_physnets.keys())] + ovn_driver.update_segment_host_mapping.assert_has_calls( + update_segment_host_mapping_calls, any_order=True) diff --git a/neutron/tests/unit/plugins/ml2/drivers/ovn/mech_driver/test_mech_driver.py b/neutron/tests/unit/plugins/ml2/drivers/ovn/mech_driver/test_mech_driver.py new file mode 100644 index 00000000000..9053f9ec6f4 --- /dev/null +++ b/neutron/tests/unit/plugins/ml2/drivers/ovn/mech_driver/test_mech_driver.py @@ -0,0 +1,2983 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
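+#
+# NOTE: Several tests in this module assert OVN Address_Set names of the
+# form 'as_ip4_<sg_id>'. A rough sketch of the naming convention assumed
+# here (the real helper is ovn_utils.ovn_addrset_name(); this is an
+# illustration, not necessarily its verbatim implementation):
+#
+#     def addrset_name(sg_id, ip_version):
+#         # e.g. ('sg1', 'ip4') -> 'as_ip4_sg1'; '-' is not allowed in
+#         # the name, so dashes in the UUID are replaced with '_'
+#         return ('as-%s-%s' % (ip_version, sg_id)).replace('-', '_')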
+# + +import copy +import datetime +import uuid + +import mock +from neutron_lib.api.definitions import external_net +from neutron_lib.api.definitions import portbindings +from neutron_lib.api.definitions import provider_net as pnet +from neutron_lib.callbacks import events +from neutron_lib.callbacks import registry +from neutron_lib.callbacks import resources +from neutron_lib import constants as const +from neutron_lib import context +from neutron_lib import exceptions as n_exc +from neutron_lib.plugins import directory +from neutron_lib.services.trunk import constants as trunk_consts +from neutron_lib.tests import tools +from neutron_lib.utils import net as n_net +from oslo_config import cfg +from oslo_db import exception as os_db_exc +from oslo_serialization import jsonutils +from oslo_utils import timeutils +from oslo_utils import uuidutils +from webob import exc + +from neutron.common.ovn import acl as ovn_acl +from neutron.common.ovn import constants as ovn_const +from neutron.common.ovn import hash_ring_manager +from neutron.common.ovn import utils as ovn_utils +from neutron.common import utils as n_utils +from neutron.conf.plugins.ml2.drivers.ovn import ovn_conf +from neutron.db import db_base_plugin_v2 +from neutron.db import ovn_revision_numbers_db +from neutron.db import provisioning_blocks +from neutron.db import securitygroups_db +from neutron.plugins.ml2.drivers.ovn.mech_driver import mech_driver +from neutron.plugins.ml2.drivers.ovn.mech_driver.ovsdb import ovn_client +from neutron.plugins.ml2.drivers import type_geneve # noqa +from neutron.services.revisions import revision_plugin +from neutron.tests.unit.extensions import test_segment +from neutron.tests.unit import fake_resources as fakes +from neutron.tests.unit.plugins.ml2 import test_ext_portsecurity +from neutron.tests.unit.plugins.ml2 import test_plugin +from neutron.tests.unit.plugins.ml2 import test_security_group + + +OVN_PROFILE = ovn_const.OVN_PORT_BINDING_PROFILE + + +class TestOVNMechanismDriver(test_plugin.Ml2PluginV2TestCase): + + _mechanism_drivers = ['logger', 'ovn'] + _extension_drivers = ['port_security', 'dns'] + + def setUp(self): + cfg.CONF.set_override('extension_drivers', + self._extension_drivers, + group='ml2') + cfg.CONF.set_override('tenant_network_types', + ['geneve'], + group='ml2') + cfg.CONF.set_override('vni_ranges', + ['1:65536'], + group='ml2_type_geneve') + ovn_conf.cfg.CONF.set_override('ovn_metadata_enabled', False, + group='ovn') + ovn_conf.cfg.CONF.set_override('dns_servers', ['8.8.8.8'], + group='ovn') + super(TestOVNMechanismDriver, self).setUp() + mm = directory.get_plugin().mechanism_manager + self.mech_driver = mm.mech_drivers['ovn'].obj + self.mech_driver._nb_ovn = fakes.FakeOvsdbNbOvnIdl() + self.mech_driver._sb_ovn = fakes.FakeOvsdbSbOvnIdl() + self.nb_ovn = self.mech_driver._nb_ovn + self.sb_ovn = self.mech_driver._sb_ovn + + self.fake_subnet = fakes.FakeSubnet.create_one_subnet().info() + + self.fake_sg_rule = \ + fakes.FakeSecurityGroupRule.create_one_security_group_rule().info() + self.fake_sg = fakes.FakeSecurityGroup.create_one_security_group( + attrs={'security_group_rules': [self.fake_sg_rule]} + ).info() + + self.sg_cache = {self.fake_sg['id']: self.fake_sg} + self.subnet_cache = {self.fake_subnet['id']: self.fake_subnet} + mock.patch.object(ovn_acl, '_acl_columns_name_severity_supported', + return_value=True).start() + revision_plugin.RevisionPlugin() + p = mock.patch.object(ovn_utils, 'get_revision_number', return_value=1) + p.start() + self.addCleanup(p.stop) + 
p = mock.patch.object(ovn_revision_numbers_db, 'bump_revision') + p.start() + self.addCleanup(p.stop) + + @mock.patch.object(ovn_revision_numbers_db, 'bump_revision') + def test__create_security_group(self, mock_bump): + self.mech_driver._create_security_group( + resources.SECURITY_GROUP, events.AFTER_CREATE, {}, + security_group=self.fake_sg) + external_ids = {ovn_const.OVN_SG_EXT_ID_KEY: self.fake_sg['id']} + ip4_name = ovn_utils.ovn_addrset_name(self.fake_sg['id'], 'ip4') + ip6_name = ovn_utils.ovn_addrset_name(self.fake_sg['id'], 'ip6') + create_address_set_calls = [mock.call(name=name, + external_ids=external_ids) + for name in [ip4_name, ip6_name]] + + self.nb_ovn.create_address_set.assert_has_calls( + create_address_set_calls, any_order=True) + mock_bump.assert_called_once_with( + mock.ANY, self.fake_sg, ovn_const.TYPE_SECURITY_GROUPS) + + def test__delete_security_group(self): + self.mech_driver._delete_security_group( + resources.SECURITY_GROUP, events.AFTER_CREATE, {}, + security_group_id=self.fake_sg['id']) + ip4_name = ovn_utils.ovn_addrset_name(self.fake_sg['id'], 'ip4') + ip6_name = ovn_utils.ovn_addrset_name(self.fake_sg['id'], 'ip6') + delete_address_set_calls = [mock.call(name=name) + for name in [ip4_name, ip6_name]] + + self.nb_ovn.delete_address_set.assert_has_calls( + delete_address_set_calls, any_order=True) + + @mock.patch.object(ovn_revision_numbers_db, 'bump_revision') + def test__process_sg_rule_notifications_sgr_create(self, mock_bump): + with mock.patch.object(ovn_acl, 'update_acls_for_security_group') \ + as ovn_acl_up: + rule = {'security_group_id': 'sg_id'} + self.mech_driver._process_sg_rule_notification( + resources.SECURITY_GROUP_RULE, events.AFTER_CREATE, {}, + security_group_rule=rule) + ovn_acl_up.assert_called_once_with( + mock.ANY, mock.ANY, mock.ANY, + 'sg_id', rule, is_add_acl=True) + mock_bump.assert_called_once_with( + mock.ANY, rule, ovn_const.TYPE_SECURITY_GROUP_RULES) + + @mock.patch.object(ovn_revision_numbers_db, 'delete_revision') + def test_process_sg_rule_notifications_sgr_delete(self, mock_delrev): + rule = {'id': 'sgr_id', 'security_group_id': 'sg_id'} + with mock.patch.object(ovn_acl, 'update_acls_for_security_group') \ + as ovn_acl_up, \ + mock.patch.object(securitygroups_db.SecurityGroupDbMixin, + 'get_security_group_rule', + return_value=rule): + self.mech_driver._process_sg_rule_notification( + resources.SECURITY_GROUP_RULE, events.BEFORE_DELETE, {}, + security_group_rule=rule) + ovn_acl_up.assert_called_once_with( + mock.ANY, mock.ANY, mock.ANY, + 'sg_id', rule, is_add_acl=False) + mock_delrev.assert_called_once_with( + mock.ANY, rule['id'], ovn_const.TYPE_SECURITY_GROUP_RULES) + + def test_add_acls_no_sec_group(self): + fake_port_no_sg = fakes.FakePort.create_one_port().info() + expected_acls = ovn_acl.drop_all_ip_traffic_for_port(fake_port_no_sg) + acls = ovn_acl.add_acls(self.mech_driver._plugin, + mock.Mock(), + fake_port_no_sg, + {}, {}, self.mech_driver._nb_ovn) + self.assertEqual(expected_acls, acls) + + def test_add_acls_no_sec_group_no_port_security(self): + fake_port_no_sg_no_ps = fakes.FakePort.create_one_port( + attrs={'port_security_enabled': False}).info() + acls = ovn_acl.add_acls(self.mech_driver._plugin, + mock.Mock(), + fake_port_no_sg_no_ps, + {}, {}, self.mech_driver._nb_ovn) + self.assertEqual([], acls) + + def _test_add_acls_with_sec_group_helper(self, native_dhcp=True): + fake_port_sg = fakes.FakePort.create_one_port( + attrs={'security_groups': [self.fake_sg['id']], + 'fixed_ips': [{'subnet_id': 
self.fake_subnet['id'], + 'ip_address': '10.10.10.20'}]} + ).info() + + expected_acls = [] + expected_acls += ovn_acl.drop_all_ip_traffic_for_port( + fake_port_sg) + expected_acls += ovn_acl.add_acl_dhcp( + fake_port_sg, self.fake_subnet, native_dhcp) + sg_rule_acl = ovn_acl.add_sg_rule_acl_for_port( + fake_port_sg, self.fake_sg_rule, + 'outport == "' + fake_port_sg['id'] + '" ' + + '&& ip4 && ip4.src == 0.0.0.0/0 ' + + '&& tcp && tcp.dst == 22') + expected_acls.append(sg_rule_acl) + + # Test with caches + acls = ovn_acl.add_acls(self.mech_driver._plugin, + mock.Mock(), + fake_port_sg, + self.sg_cache, + self.subnet_cache, + self.mech_driver._nb_ovn) + self.assertEqual(expected_acls, acls) + + # Test without caches + with mock.patch.object(db_base_plugin_v2.NeutronDbPluginV2, + 'get_subnet', return_value=self.fake_subnet), \ + mock.patch.object(securitygroups_db.SecurityGroupDbMixin, + 'get_security_group', + return_value=self.fake_sg): + acls = ovn_acl.add_acls(self.mech_driver._plugin, + mock.Mock(), + fake_port_sg, + {}, {}, self.mech_driver._nb_ovn) + self.assertEqual(expected_acls, acls) + + # Test with security groups disabled + with mock.patch.object(ovn_acl, 'is_sg_enabled', return_value=False): + acls = ovn_acl.add_acls(self.mech_driver._plugin, + mock.Mock(), + fake_port_sg, + self.sg_cache, + self.subnet_cache, + self.mech_driver._nb_ovn) + self.assertEqual([], acls) + + # Test with multiple fixed IPs on the same subnet. + fake_port_sg['fixed_ips'].append({'subnet_id': self.fake_subnet['id'], + 'ip_address': '10.10.10.21'}) + acls = ovn_acl.add_acls(self.mech_driver._plugin, + mock.Mock(), + fake_port_sg, + self.sg_cache, + self.subnet_cache, + self.mech_driver._nb_ovn) + self.assertEqual(expected_acls, acls) + + def test_add_acls_with_sec_group_native_dhcp_enabled(self): + self._test_add_acls_with_sec_group_helper() + + def test_port_invalid_binding_profile(self): + invalid_binding_profiles = [ + {'tag': 0, + 'parent_name': 'fakename'}, + {'tag': 1024}, + {'tag': 1024, 'parent_name': 1024}, + {'parent_name': 'test'}, + {'tag': 'test'}, + {'vtep-physical-switch': 'psw1'}, + {'vtep-logical-switch': 'lsw1'}, + {'vtep-physical-switch': 'psw1', 'vtep-logical-switch': 1234}, + {'vtep-physical-switch': 1234, 'vtep-logical-switch': 'lsw1'}, + {'vtep-physical-switch': 'psw1', 'vtep-logical-switch': 'lsw1', + 'tag': 1024}, + {'vtep-physical-switch': 'psw1', 'vtep-logical-switch': 'lsw1', + 'parent_name': 'fakename'}, + {'vtep-physical-switch': 'psw1', 'vtep-logical-switch': 'lsw1', + 'tag': 1024, 'parent_name': 'fakename'}, + ] + with self.network(set_context=True, tenant_id='test') as net1: + with self.subnet(network=net1) as subnet1: + # succeed without binding:profile + with self.port(subnet=subnet1, + set_context=True, tenant_id='test'): + pass + # fail with invalid binding profiles + for invalid_profile in invalid_binding_profiles: + try: + kwargs = {ovn_const.OVN_PORT_BINDING_PROFILE: + invalid_profile} + with self.port( + subnet=subnet1, + expected_res_status=403, + arg_list=( + ovn_const.OVN_PORT_BINDING_PROFILE,), + set_context=True, tenant_id='test', + **kwargs): + pass + except exc.HTTPClientError: + pass + + def test__validate_ignored_port_update_from_fip_port(self): + p = {'id': 'id', 'device_owner': 'test'} + ori_p = {'id': 'id', 'device_owner': const.DEVICE_OWNER_FLOATINGIP} + self.assertRaises(mech_driver.OVNPortUpdateError, + self.mech_driver._validate_ignored_port, + p, ori_p) + + def test__validate_ignored_port_update_to_fip_port(self): + p = {'id': 'id', 
'device_owner': const.DEVICE_OWNER_FLOATINGIP} + ori_p = {'id': 'port-id', 'device_owner': 'test'} + self.assertRaises(mech_driver.OVNPortUpdateError, + self.mech_driver._validate_ignored_port, + p, ori_p) + + def test_create_and_update_ignored_fip_port(self): + with self.network(set_context=True, tenant_id='test') as net1: + with self.subnet(network=net1) as subnet1: + with self.port(subnet=subnet1, + device_owner=const.DEVICE_OWNER_FLOATINGIP, + set_context=True, tenant_id='test') as port: + self.nb_ovn.create_lswitch_port.assert_not_called() + data = {'port': {'name': 'new'}} + req = self.new_update_request('ports', data, + port['port']['id']) + res = req.get_response(self.api) + self.assertEqual(exc.HTTPOk.code, res.status_int) + self.nb_ovn.set_lswitch_port.assert_not_called() + + def test_update_ignored_port_from_fip_device_owner(self): + with self.network(set_context=True, tenant_id='test') as net1: + with self.subnet(network=net1) as subnet1: + with self.port(subnet=subnet1, + device_owner=const.DEVICE_OWNER_FLOATINGIP, + set_context=True, tenant_id='test') as port: + self.nb_ovn.create_lswitch_port.assert_not_called() + data = {'port': {'device_owner': 'test'}} + req = self.new_update_request('ports', data, + port['port']['id']) + res = req.get_response(self.api) + self.assertEqual(exc.HTTPBadRequest.code, res.status_int) + msg = jsonutils.loads(res.body)['NeutronError']['message'] + expect_msg = ('Bad port request: Updating device_owner for' + ' port %s owned by network:floatingip is' + ' not supported.' % port['port']['id']) + self.assertEqual(msg, expect_msg) + self.nb_ovn.set_lswitch_port.assert_not_called() + + def test_update_ignored_port_to_fip_device_owner(self): + with self.network(set_context=True, tenant_id='test') as net1: + with self.subnet(network=net1) as subnet1: + with self.port(subnet=subnet1, + device_owner='test', + set_context=True, tenant_id='test') as port: + self.assertEqual( + 1, self.nb_ovn.create_lswitch_port.call_count) + data = {'port': {'device_owner': + const.DEVICE_OWNER_FLOATINGIP}} + req = self.new_update_request('ports', data, + port['port']['id']) + res = req.get_response(self.api) + self.assertEqual(exc.HTTPBadRequest.code, res.status_int) + msg = jsonutils.loads(res.body)['NeutronError']['message'] + expect_msg = ('Bad port request: Updating device_owner to' + ' network:floatingip for port %s is' + ' not supported.' 
% port['port']['id'])
+                    self.assertEqual(msg, expect_msg)
+                    self.nb_ovn.set_lswitch_port.assert_not_called()
+
+    def test_create_port_security(self):
+        kwargs = {'mac_address': '00:00:00:00:00:01',
+                  'fixed_ips': [{'ip_address': '10.0.0.2'},
+                                {'ip_address': '10.0.0.4'}]}
+        with self.network(set_context=True, tenant_id='test') as net1:
+            with self.subnet(network=net1) as subnet1:
+                with self.port(subnet=subnet1,
+                               arg_list=('mac_address', 'fixed_ips'),
+                               set_context=True, tenant_id='test',
+                               **kwargs) as port:
+                    self.assertTrue(self.nb_ovn.create_lswitch_port.called)
+                    called_args_dict = (
+                        (self.nb_ovn.create_lswitch_port
+                         ).call_args_list[0][1])
+                    self.assertEqual(['00:00:00:00:00:01 10.0.0.2 10.0.0.4'],
+                                     called_args_dict.get('port_security'))
+
+                    data = {'port': {'mac_address': '00:00:00:00:00:02'}}
+                    req = self.new_update_request(
+                        'ports',
+                        data, port['port']['id'])
+                    req.get_response(self.api)
+                    self.assertTrue(self.nb_ovn.set_lswitch_port.called)
+                    called_args_dict = (
+                        (self.nb_ovn.set_lswitch_port
+                         ).call_args_list[0][1])
+                    self.assertEqual(['00:00:00:00:00:02 10.0.0.2 10.0.0.4'],
+                                     called_args_dict.get('port_security'))
+
+    def test_create_port_with_disabled_security(self):
+        # NOTE(mjozefcz): Let's pretend this is a nova port so it is
+        # not treated as a VIP.
+        kwargs = {'port_security_enabled': False,
+                  'device_owner': 'compute:nova'}
+        with self.network(set_context=True, tenant_id='test') as net1:
+            with self.subnet(network=net1) as subnet1:
+                with self.port(subnet=subnet1,
+                               arg_list=('port_security_enabled',),
+                               set_context=True, tenant_id='test',
+                               **kwargs) as port:
+                    self.assertTrue(self.nb_ovn.create_lswitch_port.called)
+                    called_args_dict = (
+                        (self.nb_ovn.create_lswitch_port
+                         ).call_args_list[0][1])
+                    self.assertEqual([],
+                                     called_args_dict.get('port_security'))
+
+                    self.assertEqual('unknown',
+                                     called_args_dict.get('addresses')[1])
+                    data = {'port': {'mac_address': '00:00:00:00:00:01'}}
+                    req = self.new_update_request(
+                        'ports',
+                        data, port['port']['id'])
+                    req.get_response(self.api)
+                    self.assertTrue(self.nb_ovn.set_lswitch_port.called)
+                    called_args_dict = (
+                        (self.nb_ovn.set_lswitch_port
+                         ).call_args_list[0][1])
+                    self.assertEqual([],
+                                     called_args_dict.get('port_security'))
+                    self.assertEqual(2, len(called_args_dict.get('addresses')))
+                    self.assertEqual('unknown',
+                                     called_args_dict.get('addresses')[1])
+
+                    # Enable port security
+                    data = {'port': {'port_security_enabled': 'True'}}
+                    req = self.new_update_request(
+                        'ports',
+                        data, port['port']['id'])
+                    req.get_response(self.api)
+                    called_args_dict = (
+                        (self.nb_ovn.set_lswitch_port
+                         ).call_args_list[1][1])
+                    self.assertEqual(2,
+                                     self.nb_ovn.set_lswitch_port.call_count)
+                    self.assertEqual(1, len(called_args_dict.get('addresses')))
+                    self.assertNotIn('unknown',
+                                     called_args_dict.get('addresses'))
+
+    def test_create_port_security_allowed_address_pairs(self):
+        # NOTE(mjozefcz): Let's pretend this is a nova port so it is
+        # not treated as a VIP. 
+        kwargs = {'allowed_address_pairs':
+                  [{"ip_address": "1.1.1.1"},
+                   {"ip_address": "2.2.2.2",
+                    "mac_address": "22:22:22:22:22:22"}],
+                  'device_owner': 'compute:nova'}
+        with self.network(set_context=True, tenant_id='test') as net1:
+            with self.subnet(network=net1) as subnet1:
+                with self.port(subnet=subnet1,
+                               arg_list=('allowed_address_pairs',),
+                               set_context=True, tenant_id='test',
+                               **kwargs) as port:
+                    port_ip = port['port'].get('fixed_ips')[0]['ip_address']
+                    self.assertTrue(self.nb_ovn.create_lswitch_port.called)
+                    called_args_dict = (
+                        (self.nb_ovn.create_lswitch_port
+                         ).call_args_list[0][1])
+                    self.assertEqual(
+                        tools.UnorderedList(
+                            ["22:22:22:22:22:22 2.2.2.2",
+                             port['port']['mac_address'] + ' ' + port_ip +
+                             ' ' + '1.1.1.1']),
+                        called_args_dict.get('port_security'))
+                    self.assertEqual(
+                        tools.UnorderedList(
+                            ["22:22:22:22:22:22",
+                             port['port']['mac_address'] + ' ' + port_ip]),
+                        called_args_dict.get('addresses'))
+
+                    old_mac = port['port']['mac_address']
+
+                    # We are updating only the port mac address, so the
+                    # allowed address pair ip 1.1.1.1 will keep the old
+                    # mac address.
+                    data = {'port': {'mac_address': '00:00:00:00:00:01'}}
+                    req = self.new_update_request(
+                        'ports',
+                        data, port['port']['id'])
+                    req.get_response(self.api)
+                    self.assertTrue(self.nb_ovn.set_lswitch_port.called)
+                    called_args_dict = (
+                        (self.nb_ovn.set_lswitch_port
+                         ).call_args_list[0][1])
+                    self.assertEqual(tools.UnorderedList(
+                        ["22:22:22:22:22:22 2.2.2.2",
+                         "00:00:00:00:00:01 " + port_ip,
+                         old_mac + " 1.1.1.1"]),
+                        called_args_dict.get('port_security'))
+                    self.assertEqual(
+                        tools.UnorderedList(
+                            ["22:22:22:22:22:22",
+                             "00:00:00:00:00:01 " + port_ip,
+                             old_mac]),
+                        called_args_dict.get('addresses'))
+
+    def test_create_port_possible_vip(self):
+        """Test that a just-created LSP has no addresses set.
+
+        This could be a potential VIP port. If not, the next
+        port update will set the addresses correctly during the
+        binding process. 
+ """ + with (self.network(set_context=True, tenant_id='test')) as net1, ( + self.subnet(network=net1)) as subnet1, ( + self.port(subnet=subnet1, set_context=True, tenant_id='test')): + + self.assertTrue(self.nb_ovn.create_lswitch_port.called) + called_args_dict = ( + self.nb_ovn.create_lswitch_port.call_args_list[0][1]) + self.assertEqual([], + called_args_dict.get('addresses')) + + def _create_fake_network_context(self, + network_type, + physical_network=None, + segmentation_id=None): + network_attrs = {'provider:network_type': network_type, + 'provider:physical_network': physical_network, + 'provider:segmentation_id': segmentation_id} + segment_attrs = {'network_type': network_type, + 'physical_network': physical_network, + 'segmentation_id': segmentation_id} + fake_network = \ + fakes.FakeNetwork.create_one_network(attrs=network_attrs).info() + fake_segments = \ + [fakes.FakeSegment.create_one_segment(attrs=segment_attrs).info()] + return fakes.FakeNetworkContext(fake_network, fake_segments) + + def _create_fake_mp_network_context(self): + network_type = 'flat' + network_attrs = {'segments': []} + fake_segments = [] + for physical_network in ['physnet1', 'physnet2']: + network_attrs['segments'].append( + {'provider:network_type': network_type, + 'provider:physical_network': physical_network}) + segment_attrs = {'network_type': network_type, + 'physical_network': physical_network} + fake_segments.append( + fakes.FakeSegment.create_one_segment( + attrs=segment_attrs).info()) + fake_network = \ + fakes.FakeNetwork.create_one_network(attrs=network_attrs).info() + fake_network.pop('provider:network_type') + fake_network.pop('provider:physical_network') + fake_network.pop('provider:segmentation_id') + return fakes.FakeNetworkContext(fake_network, fake_segments) + + def test_network_precommit(self): + # Test supported network types. + fake_network_context = self._create_fake_network_context('local') + self.mech_driver.create_network_precommit(fake_network_context) + fake_network_context = self._create_fake_network_context( + 'flat', physical_network='physnet') + self.mech_driver.update_network_precommit(fake_network_context) + fake_network_context = self._create_fake_network_context( + 'geneve', segmentation_id=10) + self.mech_driver.create_network_precommit(fake_network_context) + fake_network_context = self._create_fake_network_context( + 'vlan', physical_network='physnet', segmentation_id=11) + self.mech_driver.update_network_precommit(fake_network_context) + fake_mp_network_context = self._create_fake_mp_network_context() + self.mech_driver.create_network_precommit(fake_mp_network_context) + + # Test unsupported network types. 
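+        # (The OVN driver supports local, flat, geneve and vlan segments;
+        # vxlan and gre segments are expected to be rejected with
+        # InvalidInput.)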
+ fake_network_context = self._create_fake_network_context( + 'vxlan', segmentation_id=12) + self.assertRaises(n_exc.InvalidInput, + self.mech_driver.create_network_precommit, + fake_network_context) + fake_network_context = self._create_fake_network_context( + 'gre', segmentation_id=13) + self.assertRaises(n_exc.InvalidInput, + self.mech_driver.update_network_precommit, + fake_network_context) + + def test_create_port_without_security_groups(self): + kwargs = {'security_groups': []} + with self.network(set_context=True, tenant_id='test') as net1: + with self.subnet(network=net1) as subnet1: + with self.port(subnet=subnet1, + arg_list=('security_groups',), + set_context=True, tenant_id='test', + **kwargs): + self.assertEqual( + 1, self.nb_ovn.create_lswitch_port.call_count) + self.assertEqual(2, self.nb_ovn.add_acl.call_count) + self.nb_ovn.update_address_set.assert_not_called() + + def test_create_port_without_security_groups_no_ps(self): + kwargs = {'security_groups': [], 'port_security_enabled': False} + with self.network(set_context=True, tenant_id='test') as net1: + with self.subnet(network=net1) as subnet1: + with self.port(subnet=subnet1, + arg_list=('security_groups', + 'port_security_enabled'), + set_context=True, tenant_id='test', + **kwargs): + self.assertEqual( + 1, self.nb_ovn.create_lswitch_port.call_count) + self.nb_ovn.add_acl.assert_not_called() + self.nb_ovn.update_address_set.assert_not_called() + + def _test_create_port_with_security_groups_helper(self, + add_acl_call_count): + with self.network(set_context=True, tenant_id='test') as net1: + with self.subnet(network=net1) as subnet1: + with self.port(subnet=subnet1, + set_context=True, tenant_id='test'): + self.assertEqual( + 1, self.nb_ovn.create_lswitch_port.call_count) + self.assertEqual( + add_acl_call_count, self.nb_ovn.add_acl.call_count) + self.assertEqual( + 1, self.nb_ovn.update_address_set.call_count) + + def test_create_port_with_security_groups_native_dhcp_enabled(self): + self._test_create_port_with_security_groups_helper(7) + + def test_update_port_changed_security_groups(self): + with self.network(set_context=True, tenant_id='test') as net1: + with self.subnet(network=net1) as subnet1: + with self.port(subnet=subnet1, + set_context=True, tenant_id='test') as port1: + sg_id = port1['port']['security_groups'][0] + fake_lsp = ( + fakes.FakeOVNPort.from_neutron_port( + port1['port'])) + self.nb_ovn.lookup.return_value = fake_lsp + + # Remove the default security group. + self.nb_ovn.set_lswitch_port.reset_mock() + self.nb_ovn.update_acls.reset_mock() + self.nb_ovn.update_address_set.reset_mock() + data = {'port': {'security_groups': []}} + self._update('ports', port1['port']['id'], data) + self.assertEqual( + 1, self.nb_ovn.set_lswitch_port.call_count) + self.assertEqual( + 1, self.nb_ovn.update_acls.call_count) + self.assertEqual( + 1, self.nb_ovn.update_address_set.call_count) + + # Add the default security group. 
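+ # Popping OVN_SG_IDS_EXT_ID_KEY below mimics an LSP whose
+ # external_ids carry no security group information yet, so
+ # re-adding the default group should refresh the LSP, the ACLs
+ # and the address set exactly once each.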
+ self.nb_ovn.set_lswitch_port.reset_mock()
+ self.nb_ovn.update_acls.reset_mock()
+ self.nb_ovn.update_address_set.reset_mock()
+ fake_lsp.external_ids.pop(ovn_const.OVN_SG_IDS_EXT_ID_KEY)
+ data = {'port': {'security_groups': [sg_id]}}
+ self._update('ports', port1['port']['id'], data)
+ self.assertEqual(
+ 1, self.nb_ovn.set_lswitch_port.call_count)
+ self.assertEqual(
+ 1, self.nb_ovn.update_acls.call_count)
+ self.assertEqual(
+ 1, self.nb_ovn.update_address_set.call_count)
+
+ def test_update_port_unchanged_security_groups(self):
+ with self.network(set_context=True, tenant_id='test') as net1:
+ with self.subnet(network=net1) as subnet1:
+ with self.port(subnet=subnet1,
+ set_context=True, tenant_id='test') as port1:
+ fake_lsp = (
+ fakes.FakeOVNPort.from_neutron_port(
+ port1['port']))
+ self.nb_ovn.lookup.return_value = fake_lsp
+
+ # Update the port name.
+ self.nb_ovn.set_lswitch_port.reset_mock()
+ self.nb_ovn.update_acls.reset_mock()
+ self.nb_ovn.update_address_set.reset_mock()
+ data = {'port': {'name': 'rtheis'}}
+ self._update('ports', port1['port']['id'], data)
+ self.assertEqual(
+ 1, self.nb_ovn.set_lswitch_port.call_count)
+ self.nb_ovn.update_acls.assert_not_called()
+ self.nb_ovn.update_address_set.assert_not_called()
+
+ # Update the port fixed IPs.
+ self.nb_ovn.set_lswitch_port.reset_mock()
+ self.nb_ovn.update_acls.reset_mock()
+ self.nb_ovn.update_address_set.reset_mock()
+ data = {'port': {'fixed_ips': []}}
+ self._update('ports', port1['port']['id'], data)
+ self.assertEqual(
+ 1, self.nb_ovn.set_lswitch_port.call_count)
+ self.assertEqual(
+ 1, self.nb_ovn.update_acls.call_count)
+ self.assertEqual(
+ 1, self.nb_ovn.update_address_set.call_count)
+
+ def _test_update_port_vip(self, is_vip=True):
+ kwargs = {}
+ if not is_vip:
+ # NOTE(mjozefcz): Let's pretend this is a nova port so it
+ # is not treated as a VIP.
+ kwargs['device_owner'] = 'compute:nova'
+ with (
+ self.network(set_context=True, tenant_id='test')) as net1, (
+ self.subnet(network=net1)) as subnet1, (
+ self.port(subnet=subnet1, set_context=True,
+ tenant_id='test', **kwargs)) as port1:
+
+ fake_lsp = (
+ fakes.FakeOVNPort.from_neutron_port(
+ port1['port']))
+ self.nb_ovn.lookup.return_value = fake_lsp
+
+ # Update the port name.
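+ # A name-only update should still rewrite the LSP once; for a
+ # potential VIP port (no device_owner) the addresses column is
+ # expected to stay empty, while a nova port keeps its addresses.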
+ self.nb_ovn.set_lswitch_port.reset_mock()
+ data = {'port': {'name': 'rtheis'}}
+ self._update('ports', port1['port']['id'], data)
+ self.assertEqual(
+ 1, self.nb_ovn.set_lswitch_port.call_count)
+ called_args_dict = (
+ self.nb_ovn.set_lswitch_port.call_args_list[0][1])
+ self.assertEqual(
+ 'rtheis',
+ called_args_dict['external_ids']['neutron:port_name'])
+ if is_vip:
+ self.assertEqual([],
+ called_args_dict.get('addresses'))
+ else:
+ self.assertNotEqual([],
+ called_args_dict.get('addresses'))
+
+ def test_update_port_not_vip_port(self):
+ self._test_update_port_vip(is_vip=False)
+
+ def test_update_port_vip_port(self):
+ self._test_update_port_vip()
+
+ def test_delete_port_without_security_groups(self):
+ kwargs = {'security_groups': []}
+ with self.network(set_context=True, tenant_id='test') as net1:
+ with self.subnet(network=net1) as subnet1:
+ with self.port(subnet=subnet1,
+ arg_list=('security_groups',),
+ set_context=True, tenant_id='test',
+ **kwargs) as port1:
+ fake_lsp = (
+ fakes.FakeOVNPort.from_neutron_port(
+ port1['port']))
+ self.nb_ovn.lookup.return_value = fake_lsp
+ self.nb_ovn.delete_lswitch_port.reset_mock()
+ self.nb_ovn.delete_acl.reset_mock()
+ self.nb_ovn.update_address_set.reset_mock()
+ self._delete('ports', port1['port']['id'])
+ self.assertEqual(
+ 1, self.nb_ovn.delete_lswitch_port.call_count)
+ self.assertEqual(
+ 1, self.nb_ovn.delete_acl.call_count)
+ self.nb_ovn.update_address_set.assert_not_called()
+
+ def test_delete_port_with_security_groups(self):
+ with self.network(set_context=True, tenant_id='test') as net1:
+ with self.subnet(network=net1) as subnet1:
+ with self.port(subnet=subnet1,
+ set_context=True, tenant_id='test') as port1:
+ fake_lsp = (
+ fakes.FakeOVNPort.from_neutron_port(
+ port1['port']))
+ self.nb_ovn.lookup.return_value = fake_lsp
+ self.nb_ovn.delete_lswitch_port.reset_mock()
+ self.nb_ovn.delete_acl.reset_mock()
+ self.nb_ovn.update_address_set.reset_mock()
+ self._delete('ports', port1['port']['id'])
+ self.assertEqual(
+ 1, self.nb_ovn.delete_lswitch_port.call_count)
+ self.assertEqual(
+ 1, self.nb_ovn.delete_acl.call_count)
+ self.assertEqual(
+ 1, self.nb_ovn.update_address_set.call_count)
+
+ def _test_set_port_status_up(self, is_compute_port=False):
+ port_device_owner = 'compute:nova' if is_compute_port else ''
+ self.mech_driver._plugin.nova_notifier = mock.Mock()
+ with self.network(set_context=True, tenant_id='test') as net1, \
+ self.subnet(network=net1) as subnet1, \
+ self.port(subnet=subnet1, set_context=True,
+ tenant_id='test',
+ device_owner=port_device_owner) as port1, \
+ mock.patch.object(provisioning_blocks,
+ 'provisioning_complete') as pc, \
+ mock.patch.object(
+ self.mech_driver,
+ '_update_subport_host_if_needed') as upd_subport, \
+ mock.patch.object(self.mech_driver,
+ '_update_dnat_entry_if_needed') as ude, \
+ mock.patch.object(
+ self.mech_driver,
+ '_wait_for_metadata_provisioned_if_needed') as wmp, \
+ mock.patch.object(self.mech_driver, '_should_notify_nova',
+ return_value=is_compute_port):
+ self.mech_driver.set_port_status_up(port1['port']['id'])
+ pc.assert_called_once_with(
+ mock.ANY,
+ port1['port']['id'],
+ resources.PORT,
+ provisioning_blocks.L2_AGENT_ENTITY
+ )
+ upd_subport.assert_called_once_with(port1['port']['id'])
+ ude.assert_called_once_with(port1['port']['id'])
+ wmp.assert_called_once_with(port1['port']['id'])
+
+ # If the port does NOT belong to compute, do not notify Nova
+ # about its status changes.
+ if not is_compute_port:
+
self.mech_driver._plugin.nova_notifier.\
+ notify_port_active_direct.assert_not_called()
+ else:
+ self.mech_driver._plugin.nova_notifier.\
+ notify_port_active_direct.assert_called_once_with(
+ mock.ANY)
+
+ def test_set_port_status_up(self):
+ self._test_set_port_status_up(is_compute_port=False)
+
+ def test_set_compute_port_status_up(self):
+ self._test_set_port_status_up(is_compute_port=True)
+
+ def _test_set_port_status_down(self, is_compute_port=False):
+ port_device_owner = 'compute:nova' if is_compute_port else ''
+ self.mech_driver._plugin.nova_notifier = mock.Mock()
+ with self.network(set_context=True, tenant_id='test') as net1, \
+ self.subnet(network=net1) as subnet1, \
+ self.port(subnet=subnet1, set_context=True,
+ tenant_id='test',
+ device_owner=port_device_owner) as port1, \
+ mock.patch.object(provisioning_blocks,
+ 'add_provisioning_component') as apc, \
+ mock.patch.object(self.mech_driver,
+ '_update_dnat_entry_if_needed') as ude, \
+ mock.patch.object(self.mech_driver, '_should_notify_nova',
+ return_value=is_compute_port):
+ self.mech_driver.set_port_status_down(port1['port']['id'])
+ apc.assert_called_once_with(
+ mock.ANY,
+ port1['port']['id'],
+ resources.PORT,
+ provisioning_blocks.L2_AGENT_ENTITY
+ )
+ ude.assert_called_once_with(port1['port']['id'], False)
+
+ # If the port does NOT belong to compute, do not notify Nova
+ # about its status changes.
+ if not is_compute_port:
+ self.mech_driver._plugin.nova_notifier.\
+ record_port_status_changed.assert_not_called()
+ self.mech_driver._plugin.nova_notifier.\
+ send_port_status.assert_not_called()
+ else:
+ self.mech_driver._plugin.nova_notifier.\
+ record_port_status_changed.assert_called_once_with(
+ mock.ANY, const.PORT_STATUS_ACTIVE,
+ const.PORT_STATUS_DOWN, None)
+ self.mech_driver._plugin.nova_notifier.\
+ send_port_status.assert_called_once_with(
+ None, None, mock.ANY)
+
+ def test_set_port_status_down(self):
+ self._test_set_port_status_down(is_compute_port=False)
+
+ def test_set_compute_port_status_down(self):
+ self._test_set_port_status_down(is_compute_port=True)
+
+ def test_set_port_status_down_not_found(self):
+ with mock.patch.object(provisioning_blocks,
+ 'add_provisioning_component') as apc, \
+ mock.patch.object(self.mech_driver,
+ '_update_dnat_entry_if_needed'):
+ self.mech_driver.set_port_status_down('foo')
+ apc.assert_not_called()
+
+ def test_set_port_status_concurrent_delete(self):
+ exc = os_db_exc.DBReferenceError('', '', '', '')
+ with self.network(set_context=True, tenant_id='test') as net1, \
+ self.subnet(network=net1) as subnet1, \
+ self.port(subnet=subnet1, set_context=True,
+ tenant_id='test') as port1, \
+ mock.patch.object(provisioning_blocks,
+ 'add_provisioning_component',
+ side_effect=exc) as apc, \
+ mock.patch.object(self.mech_driver,
+ '_update_dnat_entry_if_needed') as ude:
+ self.mech_driver.set_port_status_down(port1['port']['id'])
+ apc.assert_called_once_with(
+ mock.ANY,
+ port1['port']['id'],
+ resources.PORT,
+ provisioning_blocks.L2_AGENT_ENTITY
+ )
+ ude.assert_called_once_with(port1['port']['id'], False)
+
+ def test__update_subport_host_if_needed(self):
+ """Check that a subport is updated with parent's host_id."""
+ binding_host_id = {'binding:host_id': 'hostname',
+ 'device_owner': trunk_consts.TRUNK_SUBPORT_OWNER}
+ with mock.patch.object(self.mech_driver._ovn_client, 'get_parent_port',
+ return_value='parent'), \
+ mock.patch.object(self.mech_driver._plugin, 'get_port',
+ return_value=binding_host_id) as get_port, \
+
mock.patch.object(self.mech_driver._plugin, + 'update_port') as upd: + self.mech_driver._update_subport_host_if_needed('subport') + + get_port.assert_called_once_with(mock.ANY, 'parent') + upd.assert_called_once_with(mock.ANY, 'subport', + {'port': binding_host_id}) + + def _test__wait_for_metadata_provisioned_if_needed(self, enable_dhcp, + wait_expected): + with self.network(tenant_id='test') as net1, \ + self.subnet(network=net1, + enable_dhcp=enable_dhcp) as subnet1, \ + self.port(subnet=subnet1, set_context=True, + tenant_id='test') as port1, \ + mock.patch.object(n_utils, 'wait_until_true') as wut, \ + mock.patch.object(ovn_conf, 'is_ovn_metadata_enabled', + return_value=True): + self.mech_driver._wait_for_metadata_provisioned_if_needed( + port1['port']['id']) + if wait_expected: + self.assertEqual(1, wut.call_count) + else: + wut.assert_not_called() + + def test__wait_for_metadata_provisioned_if_needed(self): + self._test__wait_for_metadata_provisioned_if_needed( + enable_dhcp=True, wait_expected=True) + + def test__wait_for_metadata_provisioned_if_needed_not_needed(self): + self._test__wait_for_metadata_provisioned_if_needed( + enable_dhcp=False, wait_expected=False) + + def test_bind_port_unsupported_vnic_type(self): + fake_port = fakes.FakePort.create_one_port( + attrs={'binding:vnic_type': 'unknown'}).info() + fake_port_context = fakes.FakePortContext(fake_port, 'host', []) + self.mech_driver.bind_port(fake_port_context) + self.sb_ovn.get_chassis_data_for_ml2_bind_port.assert_not_called() + fake_port_context.set_binding.assert_not_called() + + def _test_bind_port_failed(self, fake_segments): + fake_port = fakes.FakePort.create_one_port().info() + fake_host = 'host' + fake_port_context = fakes.FakePortContext( + fake_port, fake_host, fake_segments) + self.mech_driver.bind_port(fake_port_context) + self.sb_ovn.get_chassis_data_for_ml2_bind_port.assert_called_once_with( + fake_host) + fake_port_context.set_binding.assert_not_called() + + def test_bind_port_host_not_found(self): + self.sb_ovn.get_chassis_data_for_ml2_bind_port.side_effect = \ + RuntimeError + self._test_bind_port_failed([]) + + def test_bind_port_no_segments_to_bind(self): + self._test_bind_port_failed([]) + + def test_bind_port_physnet_not_found(self): + segment_attrs = {'network_type': 'vlan', + 'physical_network': 'unknown-physnet', + 'segmentation_id': 23} + fake_segments = \ + [fakes.FakeSegment.create_one_segment(attrs=segment_attrs).info()] + self._test_bind_port_failed(fake_segments) + + def _test_bind_port(self, fake_segments): + fake_port = fakes.FakePort.create_one_port().info() + fake_host = 'host' + fake_port_context = fakes.FakePortContext( + fake_port, fake_host, fake_segments) + self.mech_driver.bind_port(fake_port_context) + self.sb_ovn.get_chassis_data_for_ml2_bind_port.assert_called_once_with( + fake_host) + fake_port_context.set_binding.assert_called_once_with( + fake_segments[0]['id'], + portbindings.VIF_TYPE_OVS, + self.mech_driver.vif_details[portbindings.VIF_TYPE_OVS]) + + def _test_bind_port_sriov(self, fake_segments): + fake_port = fakes.FakePort.create_one_port( + attrs={'binding:vnic_type': 'direct', + 'binding:profile': {'capabilities': ['switchdev']}}).info() + fake_host = 'host' + fake_port_context = fakes.FakePortContext( + fake_port, fake_host, fake_segments) + self.mech_driver.bind_port(fake_port_context) + self.sb_ovn.get_chassis_data_for_ml2_bind_port.assert_called_once_with( + fake_host) + fake_port_context.set_binding.assert_called_once_with( + fake_segments[0]['id'], + 
portbindings.VIF_TYPE_OVS, + self.mech_driver.vif_details[portbindings.VIF_TYPE_OVS]) + + def test_bind_port_geneve(self): + segment_attrs = {'network_type': 'geneve', + 'physical_network': None, + 'segmentation_id': 1023} + fake_segments = \ + [fakes.FakeSegment.create_one_segment(attrs=segment_attrs).info()] + self._test_bind_port(fake_segments) + + def test_bind_sriov_port_geneve(self): + """Test binding a SR-IOV port to a geneve segment.""" + segment_attrs = {'network_type': 'geneve', + 'physical_network': None, + 'segmentation_id': 1023} + fake_segments = \ + [fakes.FakeSegment.create_one_segment(attrs=segment_attrs).info()] + self._test_bind_port_sriov(fake_segments) + + def test_bind_port_vlan(self): + segment_attrs = {'network_type': 'vlan', + 'physical_network': 'fake-physnet', + 'segmentation_id': 23} + fake_segments = \ + [fakes.FakeSegment.create_one_segment(attrs=segment_attrs).info()] + self._test_bind_port(fake_segments) + + def test_bind_port_flat(self): + segment_attrs = {'network_type': 'flat', + 'physical_network': 'fake-physnet', + 'segmentation_id': None} + fake_segments = \ + [fakes.FakeSegment.create_one_segment(attrs=segment_attrs).info()] + self._test_bind_port(fake_segments) + + def test_bind_port_vxlan(self): + segment_attrs = {'network_type': 'vxlan', + 'physical_network': None, + 'segmentation_id': 1024} + fake_segments = \ + [fakes.FakeSegment.create_one_segment(attrs=segment_attrs).info()] + self._test_bind_port(fake_segments) + + def test__is_port_provisioning_required(self): + fake_port = fakes.FakePort.create_one_port( + attrs={'binding:vnic_type': 'normal', + 'status': const.PORT_STATUS_DOWN}).info() + fake_host = 'fake-physnet' + + # Test host not changed + self.assertFalse(self.mech_driver._is_port_provisioning_required( + fake_port, fake_host, fake_host)) + + # Test invalid vnic type. + fake_port['binding:vnic_type'] = 'unknown' + self.assertFalse(self.mech_driver._is_port_provisioning_required( + fake_port, fake_host, None)) + fake_port['binding:vnic_type'] = 'normal' + + # Test invalid status. + fake_port['status'] = const.PORT_STATUS_ACTIVE + self.assertFalse(self.mech_driver._is_port_provisioning_required( + fake_port, fake_host, None)) + fake_port['status'] = const.PORT_STATUS_DOWN + + # Test no host. + self.assertFalse(self.mech_driver._is_port_provisioning_required( + fake_port, None, None)) + + # Test invalid host. + self.sb_ovn.chassis_exists.return_value = False + self.assertFalse(self.mech_driver._is_port_provisioning_required( + fake_port, fake_host, None)) + self.sb_ovn.chassis_exists.return_value = True + + # Test port provisioning required. 
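+ # With a DOWN status, a normal vnic_type, a changed host and an
+ # existing chassis, provisioning should be required.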
+ self.assertTrue(self.mech_driver._is_port_provisioning_required( + fake_port, fake_host, None)) + + def _test_add_subnet_dhcp_options_in_ovn(self, subnet, ovn_dhcp_opts=None, + call_get_dhcp_opts=True, + call_add_dhcp_opts=True): + subnet['id'] = 'fake_id' + with mock.patch.object(self.mech_driver._ovn_client, + '_get_ovn_dhcp_options') as get_opts: + self.mech_driver._ovn_client._add_subnet_dhcp_options( + subnet, mock.ANY, ovn_dhcp_opts) + self.assertEqual(call_get_dhcp_opts, get_opts.called) + self.assertEqual( + call_add_dhcp_opts, + self.mech_driver._nb_ovn.add_dhcp_options.called) + + def test_add_subnet_dhcp_options_in_ovn(self): + subnet = {'ip_version': const.IP_VERSION_4} + self._test_add_subnet_dhcp_options_in_ovn(subnet) + + def test_add_subnet_dhcp_options_in_ovn_with_given_ovn_dhcp_opts(self): + subnet = {'ip_version': const.IP_VERSION_4} + self._test_add_subnet_dhcp_options_in_ovn( + subnet, ovn_dhcp_opts={'foo': 'bar', 'external_ids': {}}, + call_get_dhcp_opts=False) + + def test_add_subnet_dhcp_options_in_ovn_with_slaac_v6_subnet(self): + subnet = {'ip_version': const.IP_VERSION_6, + 'ipv6_address_mode': const.IPV6_SLAAC} + self._test_add_subnet_dhcp_options_in_ovn( + subnet, call_get_dhcp_opts=False, call_add_dhcp_opts=False) + + @mock.patch.object(db_base_plugin_v2.NeutronDbPluginV2, 'get_ports') + @mock.patch.object(n_net, 'get_random_mac') + def test_enable_subnet_dhcp_options_in_ovn_ipv4(self, grm, gps): + grm.return_value = '01:02:03:04:05:06' + gps.return_value = [ + {'id': 'port-id-1', 'device_owner': 'nova:compute'}, + {'id': 'port-id-2', 'device_owner': 'nova:compute', + 'extra_dhcp_opts': [ + {'opt_value': '10.0.0.33', 'ip_version': 4, + 'opt_name': 'router'}]}, + {'id': 'port-id-3', 'device_owner': 'nova:compute', + 'extra_dhcp_opts': [ + {'opt_value': '1200', 'ip_version': 4, + 'opt_name': 'mtu'}]}, + {'id': 'port-id-10', 'device_owner': 'network:foo'}] + subnet = {'id': 'subnet-id', 'ip_version': 4, 'cidr': '10.0.0.0/24', + 'network_id': 'network-id', + 'gateway_ip': '10.0.0.1', 'enable_dhcp': True, + 'dns_nameservers': [], 'host_routes': []} + network = {'id': 'network-id', 'mtu': 1000} + txn = self.mech_driver._nb_ovn.transaction().__enter__.return_value + dhcp_option_command = mock.Mock() + txn.add.return_value = dhcp_option_command + + self.mech_driver._ovn_client._enable_subnet_dhcp_options( + subnet, network, txn) + # Check adding DHCP_Options rows + subnet_dhcp_options = { + 'external_ids': {'subnet_id': subnet['id'], + ovn_const.OVN_REV_NUM_EXT_ID_KEY: '1'}, + 'cidr': subnet['cidr'], 'options': { + 'router': subnet['gateway_ip'], + 'server_id': subnet['gateway_ip'], + 'server_mac': '01:02:03:04:05:06', + 'dns_server': '{8.8.8.8}', + 'lease_time': str(12 * 60 * 60), + 'mtu': str(1000)}} + ports_dhcp_options = [{ + 'external_ids': {'subnet_id': subnet['id'], + ovn_const.OVN_REV_NUM_EXT_ID_KEY: '1', + 'port_id': 'port-id-2'}, + 'cidr': subnet['cidr'], 'options': { + 'router': '10.0.0.33', + 'server_id': subnet['gateway_ip'], + 'dns_server': '{8.8.8.8}', + 'server_mac': '01:02:03:04:05:06', + 'lease_time': str(12 * 60 * 60), + 'mtu': str(1000)}}, { + 'external_ids': {'subnet_id': subnet['id'], + ovn_const.OVN_REV_NUM_EXT_ID_KEY: '1', + 'port_id': 'port-id-3'}, + 'cidr': subnet['cidr'], 'options': { + 'router': subnet['gateway_ip'], + 'server_id': subnet['gateway_ip'], + 'dns_server': '{8.8.8.8}', + 'server_mac': '01:02:03:04:05:06', + 'lease_time': str(12 * 60 * 60), + 'mtu': str(1200)}}] + add_dhcp_calls = [mock.call('subnet-id', 
**subnet_dhcp_options)] + add_dhcp_calls.extend([mock.call( + 'subnet-id', port_id=port_dhcp_options['external_ids']['port_id'], + **port_dhcp_options) for port_dhcp_options in ports_dhcp_options]) + self.assertEqual(len(add_dhcp_calls), + self.mech_driver._nb_ovn.add_dhcp_options.call_count) + self.mech_driver._nb_ovn.add_dhcp_options.assert_has_calls( + add_dhcp_calls, any_order=True) + + # Check setting lport rows + set_lsp_calls = [mock.call(lport_name='port-id-1', + dhcpv4_options=dhcp_option_command), + mock.call(lport_name='port-id-2', + dhcpv4_options=dhcp_option_command), + mock.call(lport_name='port-id-3', + dhcpv4_options=dhcp_option_command)] + self.assertEqual(len(set_lsp_calls), + self.mech_driver._nb_ovn.set_lswitch_port.call_count) + self.mech_driver._nb_ovn.set_lswitch_port.assert_has_calls( + set_lsp_calls, any_order=True) + + @mock.patch.object(db_base_plugin_v2.NeutronDbPluginV2, 'get_ports') + @mock.patch.object(n_net, 'get_random_mac') + def test_enable_subnet_dhcp_options_in_ovn_ipv6(self, grm, gps): + grm.return_value = '01:02:03:04:05:06' + gps.return_value = [ + {'id': 'port-id-1', 'device_owner': 'nova:compute'}, + {'id': 'port-id-2', 'device_owner': 'nova:compute', + 'extra_dhcp_opts': [ + {'opt_value': '11:22:33:44:55:66', 'ip_version': 6, + 'opt_name': 'server-id'}]}, + {'id': 'port-id-3', 'device_owner': 'nova:compute', + 'extra_dhcp_opts': [ + {'opt_value': '10::34', 'ip_version': 6, + 'opt_name': 'dns-server'}]}, + {'id': 'port-id-10', 'device_owner': 'network:foo'}] + subnet = {'id': 'subnet-id', 'ip_version': 6, 'cidr': '10::0/64', + 'gateway_ip': '10::1', 'enable_dhcp': True, + 'ipv6_address_mode': 'dhcpv6-stateless', + 'dns_nameservers': [], 'host_routes': []} + network = {'id': 'network-id', 'mtu': 1000} + txn = self.mech_driver._nb_ovn.transaction().__enter__.return_value + dhcp_option_command = mock.Mock() + txn.add.return_value = dhcp_option_command + + self.mech_driver._ovn_client._enable_subnet_dhcp_options( + subnet, network, txn) + # Check adding DHCP_Options rows + subnet_dhcp_options = { + 'external_ids': {'subnet_id': subnet['id'], + ovn_const.OVN_REV_NUM_EXT_ID_KEY: '1'}, + 'cidr': subnet['cidr'], 'options': { + 'dhcpv6_stateless': 'true', + 'server_id': '01:02:03:04:05:06'}} + ports_dhcp_options = [{ + 'external_ids': {'subnet_id': subnet['id'], + ovn_const.OVN_REV_NUM_EXT_ID_KEY: '1', + 'port_id': 'port-id-2'}, + 'cidr': subnet['cidr'], 'options': { + 'dhcpv6_stateless': 'true', + 'server_id': '11:22:33:44:55:66'}}, { + 'external_ids': {'subnet_id': subnet['id'], + ovn_const.OVN_REV_NUM_EXT_ID_KEY: '1', + 'port_id': 'port-id-3'}, + 'cidr': subnet['cidr'], 'options': { + 'dhcpv6_stateless': 'true', + 'server_id': '01:02:03:04:05:06', + 'dns_server': '10::34'}}] + add_dhcp_calls = [mock.call('subnet-id', **subnet_dhcp_options)] + add_dhcp_calls.extend([mock.call( + 'subnet-id', port_id=port_dhcp_options['external_ids']['port_id'], + **port_dhcp_options) for port_dhcp_options in ports_dhcp_options]) + self.assertEqual(len(add_dhcp_calls), + self.mech_driver._nb_ovn.add_dhcp_options.call_count) + self.mech_driver._nb_ovn.add_dhcp_options.assert_has_calls( + add_dhcp_calls, any_order=True) + + # Check setting lport rows + set_lsp_calls = [mock.call(lport_name='port-id-1', + dhcpv6_options=dhcp_option_command), + mock.call(lport_name='port-id-2', + dhcpv6_options=dhcp_option_command), + mock.call(lport_name='port-id-3', + dhcpv6_options=dhcp_option_command)] + self.assertEqual(len(set_lsp_calls), + 
self.mech_driver._nb_ovn.set_lswitch_port.call_count)
+ self.mech_driver._nb_ovn.set_lswitch_port.assert_has_calls(
+ set_lsp_calls, any_order=True)
+
+ def test_enable_subnet_dhcp_options_in_ovn_ipv6_slaac(self):
+ subnet = {'id': 'subnet-id', 'ip_version': 6, 'enable_dhcp': True,
+ 'ipv6_address_mode': 'slaac'}
+ network = {'id': 'network-id'}
+
+ self.mech_driver._ovn_client._enable_subnet_dhcp_options(
+ subnet, network, mock.Mock())
+ self.mech_driver._nb_ovn.add_dhcp_options.assert_not_called()
+ self.mech_driver._nb_ovn.set_lswitch_port.assert_not_called()
+
+ def _test_remove_subnet_dhcp_options_in_ovn(self, ip_version):
+ opts = {'subnet': {'uuid': 'subnet-uuid'},
+ 'ports': [{'uuid': 'port1-uuid'}]}
+ self.mech_driver._nb_ovn.get_subnet_dhcp_options.return_value = opts
+ self.mech_driver._ovn_client._remove_subnet_dhcp_options(
+ 'subnet-id', mock.Mock())
+
+ # Check deleting DHCP_Options rows
+ delete_dhcp_calls = [mock.call('subnet-uuid'), mock.call('port1-uuid')]
+ self.assertEqual(
+ len(delete_dhcp_calls),
+ self.mech_driver._nb_ovn.delete_dhcp_options.call_count)
+ self.mech_driver._nb_ovn.delete_dhcp_options.assert_has_calls(
+ delete_dhcp_calls, any_order=True)
+
+ def test_remove_subnet_dhcp_options_in_ovn_ipv4(self):
+ self._test_remove_subnet_dhcp_options_in_ovn(4)
+
+ def test_remove_subnet_dhcp_options_in_ovn_ipv6(self):
+ self._test_remove_subnet_dhcp_options_in_ovn(6)
+
+ def test_update_subnet_dhcp_options_in_ovn_ipv4(self):
+ subnet = {'id': 'subnet-id', 'ip_version': 4, 'cidr': '10.0.0.0/24',
+ 'network_id': 'network-id',
+ 'gateway_ip': '10.0.0.1', 'enable_dhcp': True,
+ 'dns_nameservers': [], 'host_routes': []}
+ network = {'id': 'network-id', 'mtu': 1000}
+ original_options = {'subnet': {
+ 'external_ids': {'subnet_id': subnet['id']},
+ 'cidr': subnet['cidr'], 'options': {
+ 'router': '10.0.0.2',
+ 'server_id': '10.0.0.2',
+ 'server_mac': '01:02:03:04:05:06',
+ 'dns_server': '{8.8.8.8}',
+ 'lease_time': str(12 * 60 * 60),
+ 'mtu': str(1000)}}, 'ports': []}
+ self.mech_driver._nb_ovn.get_subnet_dhcp_options.return_value =\
+ original_options
+
+ self.mech_driver._ovn_client._update_subnet_dhcp_options(
+ subnet, network, mock.Mock())
+ new_options = {
+ 'external_ids': {'subnet_id': subnet['id'],
+ ovn_const.OVN_REV_NUM_EXT_ID_KEY: '1'},
+ 'cidr': subnet['cidr'], 'options': {
+ 'router': subnet['gateway_ip'],
+ 'server_id': subnet['gateway_ip'],
+ 'dns_server': '{8.8.8.8}',
+ 'server_mac': '01:02:03:04:05:06',
+ 'lease_time': str(12 * 60 * 60),
+ 'mtu': str(1000)}}
+ self.mech_driver._nb_ovn.add_dhcp_options.assert_called_once_with(
+ subnet['id'], **new_options)
+
+ def test_update_subnet_dhcp_options_in_ovn_ipv4_not_change(self):
+ subnet = {'id': 'subnet-id', 'ip_version': 4, 'cidr': '10.0.0.0/24',
+ 'network_id': 'network-id',
+ 'gateway_ip': '10.0.0.1', 'enable_dhcp': True,
+ 'dns_nameservers': [], 'host_routes': []}
+ network = {'id': 'network-id', 'mtu': 1000}
+ original_options = {'subnet': {
+ 'external_ids': {'subnet_id': subnet['id']},
+ 'cidr': subnet['cidr'], 'options': {
+ 'router': subnet['gateway_ip'],
+ 'server_id': subnet['gateway_ip'],
+ 'server_mac': '01:02:03:04:05:06',
+ 'dns_server': '{8.8.8.8}',
+ 'lease_time': str(12 * 60 * 60),
+ 'mtu': str(1000)}}, 'ports': []}
+ self.mech_driver._nb_ovn.get_subnet_dhcp_options.return_value =\
+ original_options
+
+ self.mech_driver._ovn_client._update_subnet_dhcp_options(
+ subnet, network, mock.Mock())
+ self.mech_driver._nb_ovn.add_dhcp_options.assert_not_called()
+
+ def 
test_update_subnet_dhcp_options_in_ovn_ipv6(self):
+ subnet = {'id': 'subnet-id', 'ip_version': 6, 'cidr': '10::0/64',
+ 'network_id': 'network-id',
+ 'gateway_ip': '10::1', 'enable_dhcp': True,
+ 'ipv6_address_mode': 'dhcpv6-stateless',
+ 'dns_nameservers': ['10::3'], 'host_routes': []}
+ network = {'id': 'network-id', 'mtu': 1000}
+ original_options = {'subnet': {
+ 'external_ids': {'subnet_id': subnet['id']},
+ 'cidr': subnet['cidr'], 'options': {
+ 'dhcpv6_stateless': 'true',
+ 'server_id': '01:02:03:04:05:06'}}, 'ports': []}
+ self.mech_driver._nb_ovn.get_subnet_dhcp_options.return_value =\
+ original_options
+ self.mech_driver._ovn_client._update_subnet_dhcp_options(
+ subnet, network, mock.Mock())
+
+ new_options = {
+ 'external_ids': {'subnet_id': subnet['id'],
+ ovn_const.OVN_REV_NUM_EXT_ID_KEY: '1'},
+ 'cidr': subnet['cidr'], 'options': {
+ 'dhcpv6_stateless': 'true',
+ 'dns_server': '{10::3}',
+ 'server_id': '01:02:03:04:05:06'}}
+ self.mech_driver._nb_ovn.add_dhcp_options.assert_called_once_with(
+ subnet['id'], **new_options)
+
+ def test_update_subnet_dhcp_options_in_ovn_ipv6_not_change(self):
+ subnet = {'id': 'subnet-id', 'ip_version': 6, 'cidr': '10::0/64',
+ 'gateway_ip': '10::1', 'enable_dhcp': True,
+ 'ipv6_address_mode': 'dhcpv6-stateless',
+ 'dns_nameservers': [], 'host_routes': []}
+ network = {'id': 'network-id', 'mtu': 1000}
+ original_options = {'subnet': {
+ 'external_ids': {'subnet_id': subnet['id']},
+ 'cidr': subnet['cidr'], 'options': {
+ 'dhcpv6_stateless': 'true',
+ 'server_id': '01:02:03:04:05:06'}}, 'ports': []}
+ self.mech_driver._nb_ovn.get_subnet_dhcp_options.return_value =\
+ original_options
+
+ self.mech_driver._ovn_client._update_subnet_dhcp_options(
+ subnet, network, mock.Mock())
+ self.mech_driver._nb_ovn.add_dhcp_options.assert_not_called()
+
+ def test_update_subnet_dhcp_options_in_ovn_ipv6_slaac(self):
+ subnet = {'id': 'subnet-id', 'ip_version': 6, 'enable_dhcp': True,
+ 'ipv6_address_mode': 'slaac'}
+ network = {'id': 'network-id'}
+ self.mech_driver._ovn_client._update_subnet_dhcp_options(
+ subnet, network, mock.Mock())
+ self.mech_driver._nb_ovn.get_subnet_dhcp_options.assert_not_called()
+ self.mech_driver._nb_ovn.add_dhcp_options.assert_not_called()
+
+ def test_update_subnet_postcommit_ovn_do_nothing(self):
+ context = fakes.FakeSubnetContext(
+ subnet={'enable_dhcp': False, 'ip_version': 4, 'network_id': 'id',
+ 'id': 'subnet_id'},
+ network={'id': 'id'})
+ with mock.patch.object(
+ self.mech_driver._ovn_client,
+ '_enable_subnet_dhcp_options') as esd,\
+ mock.patch.object(
+ self.mech_driver._ovn_client,
+ '_remove_subnet_dhcp_options') as dsd,\
+ mock.patch.object(
+ self.mech_driver._ovn_client,
+ '_update_subnet_dhcp_options') as usd,\
+ mock.patch.object(
+ self.mech_driver._ovn_client,
+ '_find_metadata_port') as fmd,\
+ mock.patch.object(
+ self.mech_driver._ovn_client,
+ 'update_metadata_port') as umd:
+ self.mech_driver.update_subnet_postcommit(context)
+ esd.assert_not_called()
+ dsd.assert_not_called()
+ usd.assert_not_called()
+ fmd.assert_not_called()
+ umd.assert_not_called()
+
+ def test_update_subnet_postcommit_enable_dhcp(self):
+ context = fakes.FakeSubnetContext(
+ subnet={'enable_dhcp': True, 'ip_version': 4, 'network_id': 'id',
+ 'id': 'subnet_id'},
+ network={'id': 'id'})
+ with mock.patch.object(
+ self.mech_driver._ovn_client,
+ '_enable_subnet_dhcp_options') as esd,\
+ mock.patch.object(
+ self.mech_driver._ovn_client,
+ 'update_metadata_port') as umd:
+ self.mech_driver.update_subnet_postcommit(context)
+
esd.assert_called_once_with( + context.current, context.network.current, mock.ANY) + umd.assert_called_once_with(mock.ANY, 'id') + + def test_update_subnet_postcommit_disable_dhcp(self): + self.mech_driver._nb_ovn.get_subnet_dhcp_options.return_value = { + 'subnet': mock.sentinel.subnet, 'ports': []} + context = fakes.FakeSubnetContext( + subnet={'enable_dhcp': False, 'id': 'fake_id', 'ip_version': 4, + 'network_id': 'id'}, + network={'id': 'id'}) + with mock.patch.object( + self.mech_driver._ovn_client, + '_remove_subnet_dhcp_options') as dsd,\ + mock.patch.object( + self.mech_driver._ovn_client, + 'update_metadata_port') as umd: + self.mech_driver.update_subnet_postcommit(context) + dsd.assert_called_once_with(context.current['id'], mock.ANY) + umd.assert_called_once_with(mock.ANY, 'id') + + def test_update_subnet_postcommit_update_dhcp(self): + self.mech_driver._nb_ovn.get_subnet_dhcp_options.return_value = { + 'subnet': mock.sentinel.subnet, 'ports': []} + context = fakes.FakeSubnetContext( + subnet={'enable_dhcp': True, 'ip_version': 4, 'network_id': 'id', + 'id': 'subnet_id'}, + network={'id': 'id'}) + with mock.patch.object( + self.mech_driver._ovn_client, + '_update_subnet_dhcp_options') as usd,\ + mock.patch.object( + self.mech_driver._ovn_client, + 'update_metadata_port') as umd: + self.mech_driver.update_subnet_postcommit(context) + usd.assert_called_once_with( + context.current, context.network.current, mock.ANY) + umd.assert_called_once_with(mock.ANY, 'id') + + @mock.patch.object(provisioning_blocks, 'is_object_blocked') + @mock.patch.object(provisioning_blocks, 'provisioning_complete') + def test_notify_dhcp_updated(self, mock_prov_complete, mock_is_obj_block): + port_id = 'fake-port-id' + mock_is_obj_block.return_value = True + self.mech_driver._notify_dhcp_updated(port_id) + mock_prov_complete.assert_called_once_with( + mock.ANY, port_id, resources.PORT, + provisioning_blocks.DHCP_ENTITY) + + mock_is_obj_block.return_value = False + mock_prov_complete.reset_mock() + self.mech_driver._notify_dhcp_updated(port_id) + mock_prov_complete.assert_not_called() + + @mock.patch.object(mech_driver.OVNMechanismDriver, + '_is_port_provisioning_required', lambda *_: True) + @mock.patch.object(mech_driver.OVNMechanismDriver, '_notify_dhcp_updated') + @mock.patch.object(ovn_client.OVNClient, 'create_port') + def test_create_port_postcommit(self, mock_create_port, mock_notify_dhcp): + fake_port = fakes.FakePort.create_one_port( + attrs={'status': const.PORT_STATUS_DOWN}).info() + fake_ctx = mock.Mock(current=fake_port) + self.mech_driver.create_port_postcommit(fake_ctx) + passed_fake_port = copy.deepcopy(fake_port) + passed_fake_port['network'] = fake_ctx.network.current + mock_create_port.assert_called_once_with(passed_fake_port) + mock_notify_dhcp.assert_called_once_with(fake_port['id']) + + @mock.patch.object(mech_driver.OVNMechanismDriver, + '_is_port_provisioning_required', lambda *_: True) + @mock.patch.object(mech_driver.OVNMechanismDriver, '_notify_dhcp_updated') + @mock.patch.object(ovn_client.OVNClient, 'update_port') + def test_update_port_postcommit(self, mock_update_port, + mock_notify_dhcp): + fake_port = fakes.FakePort.create_one_port( + attrs={'status': const.PORT_STATUS_ACTIVE}).info() + fake_ctx = mock.Mock(current=fake_port, original=fake_port) + self.mech_driver.update_port_postcommit(fake_ctx) + + passed_fake_port = copy.deepcopy(fake_port) + passed_fake_port['network'] = fake_ctx.network.current + passed_fake_port_orig = copy.deepcopy(fake_ctx.original) + 
passed_fake_port_orig['network'] = fake_ctx.network.current
+
+ mock_update_port.assert_called_once_with(
+ passed_fake_port, port_object=passed_fake_port_orig)
+ mock_notify_dhcp.assert_called_once_with(fake_port['id'])
+
+ @mock.patch.object(mech_driver.OVNMechanismDriver,
+ '_is_port_provisioning_required', lambda *_: True)
+ @mock.patch.object(mech_driver.OVNMechanismDriver, '_notify_dhcp_updated')
+ @mock.patch.object(ovn_client.OVNClient, 'update_port')
+ @mock.patch.object(context, 'get_admin_context')
+ def test_update_port_postcommit_live_migration(
+ self, mock_admin_context, mock_update_port, mock_notify_dhcp):
+ self.plugin.update_port_status = mock.Mock()
+ foo_admin_context = mock.Mock()
+ mock_admin_context.return_value = foo_admin_context
+ fake_port = fakes.FakePort.create_one_port(
+ attrs={
+ 'status': const.PORT_STATUS_DOWN,
+ portbindings.PROFILE: {ovn_const.MIGRATING_ATTR: 'foo'},
+ portbindings.VIF_TYPE: portbindings.VIF_TYPE_OVS}).info()
+ fake_ctx = mock.Mock(current=fake_port, original=fake_port)
+
+ self.mech_driver.update_port_postcommit(fake_ctx)
+
+ mock_update_port.assert_not_called()
+ mock_notify_dhcp.assert_not_called()
+ self.plugin.update_port_status.assert_called_once_with(
+ foo_admin_context, fake_port['id'], const.PORT_STATUS_ACTIVE)
+
+ def _add_chassis_agent(self, nb_cfg, agent_type, updated_at=None):
+ chassis = mock.Mock()
+ chassis.nb_cfg = nb_cfg
+ chassis.uuid = uuid.uuid4()
+ chassis.external_ids = {ovn_const.OVN_LIVENESS_CHECK_EXT_ID_KEY:
+ timeutils.isotime(updated_at)}
+ if agent_type == ovn_const.OVN_METADATA_AGENT:
+ chassis.external_ids.update({
+ ovn_const.OVN_AGENT_METADATA_SB_CFG_KEY: nb_cfg,
+ ovn_const.METADATA_LIVENESS_CHECK_EXT_ID_KEY:
+ timeutils.isotime(updated_at)})
+
+ return chassis
+
+ def test_agent_alive_true(self):
+ for agent_type in (ovn_const.OVN_CONTROLLER_AGENT,
+ ovn_const.OVN_METADATA_AGENT):
+ self.mech_driver._nb_ovn.nb_global.nb_cfg = 5
+ chassis = self._add_chassis_agent(5, agent_type)
+ self.assertTrue(self.mech_driver.agent_alive(chassis, agent_type))
+
+ def test_agent_alive_not_timed_out(self):
+ for agent_type in (ovn_const.OVN_CONTROLLER_AGENT,
+ ovn_const.OVN_METADATA_AGENT):
+ self.mech_driver._nb_ovn.nb_global.nb_cfg = 5
+ chassis = self._add_chassis_agent(4, agent_type)
+ self.assertTrue(self.mech_driver.agent_alive(chassis, agent_type),
+ "Agent type %s is not alive" % agent_type)
+
+ def test_agent_alive_timed_out(self):
+ for agent_type in (ovn_const.OVN_CONTROLLER_AGENT,
+ ovn_const.OVN_METADATA_AGENT):
+ self.mech_driver._nb_ovn.nb_global.nb_cfg = 5
+ now = timeutils.utcnow()
+ updated_at = now - datetime.timedelta(
+ seconds=cfg.CONF.agent_down_time + 1)
+ chassis = self._add_chassis_agent(4, agent_type, updated_at)
+ self.assertFalse(self.mech_driver.agent_alive(chassis, agent_type))
+
+ def _test__update_dnat_entry_if_needed(self, up=True):
+ ovn_conf.cfg.CONF.set_override(
+ 'enable_distributed_floating_ip', True, group='ovn')
+ port_id = 'fake-port-id'
+ fake_ext_mac_key = 'fake-ext-mac-key'
+ fake_nat_uuid = uuidutils.generate_uuid()
+ nat_row = fakes.FakeOvsdbRow.create_one_ovsdb_row(
+ attrs={'_uuid': fake_nat_uuid, 'external_ids': {
+ ovn_const.OVN_FIP_EXT_MAC_KEY: fake_ext_mac_key}})
+
+ fake_db_find = mock.Mock()
+ fake_db_find.execute.return_value = [nat_row]
+ self.nb_ovn.db_find.return_value = fake_db_find
+
+ self.mech_driver._update_dnat_entry_if_needed(port_id, up=up)
+
+ if up:
+ # Assert that we are setting the external_mac in the NAT table
+ self.nb_ovn.db_set.assert_called_once_with(
+
'NAT', fake_nat_uuid, ('external_mac', fake_ext_mac_key))
+ else:
+ # Assert that we are cleaning the external_mac from the NAT table
+ self.nb_ovn.db_clear.assert_called_once_with(
+ 'NAT', fake_nat_uuid, 'external_mac')
+
+ def test__update_dnat_entry_if_needed_up(self):
+ self._test__update_dnat_entry_if_needed()
+
+ def test__update_dnat_entry_if_needed_down(self):
+ self._test__update_dnat_entry_if_needed(up=False)
+
+ def _test_update_network_fragmentation(self, new_mtu, expected_opts):
+ network_attrs = {external_net.EXTERNAL: True}
+ network = self._make_network(
+ self.fmt, 'net1', True, arg_list=(external_net.EXTERNAL,),
+ **network_attrs)
+
+ with self.subnet(network=network) as subnet:
+ with self.port(subnet=subnet,
+ device_owner=const.DEVICE_OWNER_ROUTER_GW) as port:
+ # Let's update the MTU to something different
+ network['network']['mtu'] = new_mtu
+ fake_ctx = mock.Mock(current=network['network'])
+ fake_ctx._plugin_context.session.is_active = False
+
+ self.mech_driver.update_network_postcommit(fake_ctx)
+
+ lrp_name = ovn_utils.ovn_lrouter_port_name(port['port']['id'])
+ self.nb_ovn.update_lrouter_port.assert_called_once_with(
+ if_exists=True, name=lrp_name, options=expected_opts)
+
+ def test_update_network_need_to_frag_enabled(self):
+ ovn_conf.cfg.CONF.set_override('ovn_emit_need_to_frag', True,
+ group='ovn')
+ new_mtu = 1234
+ expected_opts = {ovn_const.OVN_ROUTER_PORT_GW_MTU_OPTION:
+ str(new_mtu)}
+ self._test_update_network_fragmentation(new_mtu, expected_opts)
+
+ def test_update_network_need_to_frag_disabled(self):
+ ovn_conf.cfg.CONF.set_override('ovn_emit_need_to_frag', False,
+ group='ovn')
+ new_mtu = 1234
+ # Assert that the options column is empty (cleaning up an
+ # existing value if set before)
+ expected_opts = {}
+ self._test_update_network_fragmentation(new_mtu, expected_opts)
+
+
+class OVNMechanismDriverTestCase(test_plugin.Ml2PluginV2TestCase):
+ _mechanism_drivers = ['logger', 'ovn']
+
+ def setUp(self):
+ cfg.CONF.set_override('global_physnet_mtu', 1550)
+ cfg.CONF.set_override('tenant_network_types',
+ ['geneve'],
+ group='ml2')
+ cfg.CONF.set_override('vni_ranges',
+ ['1:65536'],
+ group='ml2_type_geneve')
+ ovn_conf.cfg.CONF.set_override('dns_servers', ['8.8.8.8'], group='ovn')
+ super(OVNMechanismDriverTestCase, self).setUp()
+ # Make sure the node and target_node for the hash ring in the
+ # mechanism driver match
+ node_uuid = uuidutils.generate_uuid()
+ p = mock.patch.object(hash_ring_manager.HashRingManager, 'get_node',
+ return_value=node_uuid)
+ p.start()
+ self.addCleanup(p.stop)
+ self.driver.node_uuid = node_uuid
+ self.driver.hash_ring_group = 'fake_hash_ring_group'
+
+ mm = directory.get_plugin().mechanism_manager
+ self.mech_driver = mm.mech_drivers['ovn'].obj
+ nb_ovn = fakes.FakeOvsdbNbOvnIdl()
+ sb_ovn = fakes.FakeOvsdbSbOvnIdl()
+ self.mech_driver._nb_ovn = nb_ovn
+ self.mech_driver._sb_ovn = sb_ovn
+ self.mech_driver._insert_port_provisioning_block = mock.Mock()
+ p = mock.patch.object(ovn_utils, 'get_revision_number', return_value=1)
+ p.start()
+ self.addCleanup(p.stop)
+
+
+class TestOVNMechanismDriverBasicGet(test_plugin.TestMl2BasicGet,
+ OVNMechanismDriverTestCase):
+ pass
+
+
+class TestOVNMechanismDriverV2HTTPResponse(test_plugin.TestMl2V2HTTPResponse,
+ OVNMechanismDriverTestCase):
+ pass
+
+
+class TestOVNMechanismDriverNetworksV2(test_plugin.TestMl2NetworksV2,
+ OVNMechanismDriverTestCase):
+
+ def test__update_segmentation_id_ports_wrong_vif_type(self):
+ """Skip the Update Segmentation ID tests
+
+
Currently the Segmentation ID cannot be updated until
+ https://review.openstack.org/#/c/632984/ is merged, which allows
+ OVS agents (and thus the OVN mechanism driver) to update
+ Segmentation IDs. Until then this test needs to be skipped.
+ """
+ pass
+
+ def test__update_segmentation_id_ports(self):
+ """Skip the Update Segmentation ID tests
+
+ Currently the Segmentation ID cannot be updated until
+ https://review.openstack.org/#/c/632984/ is merged, which allows
+ OVS agents (and thus the OVN mechanism driver) to update
+ Segmentation IDs. Until then this test needs to be skipped.
+ """
+ pass
+
+
+class TestOVNMechanismDriverSubnetsV2(test_plugin.TestMl2SubnetsV2,
+ OVNMechanismDriverTestCase):
+
+ def setUp(self):
+ # Disable metadata so that we don't interfere with existing tests
+ # in the Neutron tree. Doing this because some of the tests assume
+ # that the first IP address in a subnet will be available, which is
+ # not true with metadata enabled since it reserves an IP address on
+ # each subnet.
+ ovn_conf.cfg.CONF.set_override('ovn_metadata_enabled', False,
+ group='ovn')
+ super(TestOVNMechanismDriverSubnetsV2, self).setUp()
+
+ # NOTE(rtheis): Mock the OVN port update since it is getting subnet
+ # information for ACL processing. This interferes with the update_port
+ # mock already done by the test.
+ def test_subnet_update_ipv4_and_ipv6_pd_v6stateless_subnets(self):
+ with mock.patch.object(self.mech_driver._ovn_client, 'update_port'),\
+ mock.patch.object(self.mech_driver._ovn_client,
+ '_get_subnet_dhcp_options_for_port',
+ return_value={}):
+ super(TestOVNMechanismDriverSubnetsV2, self).\
+ test_subnet_update_ipv4_and_ipv6_pd_v6stateless_subnets()
+
+ # NOTE(rtheis): Mock the OVN port update since it is getting subnet
+ # information for ACL processing. This interferes with the update_port
+ # mock already done by the test.
+ def test_subnet_update_ipv4_and_ipv6_pd_slaac_subnets(self):
+ with mock.patch.object(self.mech_driver._ovn_client, 'update_port'),\
+ mock.patch.object(self.mech_driver._ovn_client,
+ '_get_subnet_dhcp_options_for_port',
+ return_value={}):
+ super(TestOVNMechanismDriverSubnetsV2, self).\
+ test_subnet_update_ipv4_and_ipv6_pd_slaac_subnets()
+
+ # NOTE(numans) Overriding the base test case here because the base test
+ # case creates a network with vxlan type and the OVN mech driver doesn't
+ # support it.
+ def test_create_subnet_check_mtu_in_mech_context(self):
+ plugin = directory.get_plugin()
+ plugin.mechanism_manager.create_subnet_precommit = mock.Mock()
+ net_arg = {pnet.NETWORK_TYPE: 'geneve',
+ pnet.SEGMENTATION_ID: '1'}
+ network = self._make_network(self.fmt, 'net1', True,
+ arg_list=(pnet.NETWORK_TYPE,
+ pnet.SEGMENTATION_ID,),
+ **net_arg)
+ with self.subnet(network=network):
+ mock_subnet_pre = plugin.mechanism_manager.create_subnet_precommit
+ observed_mech_context = mock_subnet_pre.call_args_list[0][0][0]
+ self.assertEqual(network['network']['mtu'],
+ observed_mech_context.network.current['mtu'])
+
+
+class TestOVNMechanismDriverPortsV2(test_plugin.TestMl2PortsV2,
+ OVNMechanismDriverTestCase):
+
+ def setUp(self):
+ # Disable metadata so that we don't interfere with existing tests
+ # in the Neutron tree. Doing this because some of the tests assume
+ # that the first IP address in a subnet will be available, which is
+ # not true with metadata enabled since it reserves an IP address on
+ # each subnet.
+ ovn_conf.cfg.CONF.set_override('ovn_metadata_enabled', False,
+ group='ovn')
+ super(TestOVNMechanismDriverPortsV2, self).setUp()
+
+ # NOTE(rtheis): Override this test to verify that updating
+ # a port MAC fails when the port is bound.
+ def test_update_port_mac(self):
+ self.check_update_port_mac(
+ host_arg={portbindings.HOST_ID: 'fake-host'},
+ arg_list=(portbindings.HOST_ID,),
+ expected_status=exc.HTTPConflict.code,
+ expected_error='PortBound')
+
+
+class TestOVNMechanismDriverAllowedAddressPairs(
+ test_plugin.TestMl2AllowedAddressPairs,
+ OVNMechanismDriverTestCase):
+ pass
+
+
+class TestOVNMechanismDriverPortSecurity(
+ test_ext_portsecurity.PSExtDriverTestCase,
+ OVNMechanismDriverTestCase):
+ pass
+
+
+class TestOVNMechanismDriverSegment(test_segment.HostSegmentMappingTestCase):
+ _mechanism_drivers = ['logger', 'ovn']
+
+ def setUp(self):
+ super(TestOVNMechanismDriverSegment, self).setUp()
+ mm = directory.get_plugin().mechanism_manager
+ self.mech_driver = mm.mech_drivers['ovn'].obj
+ nb_ovn = fakes.FakeOvsdbNbOvnIdl()
+ sb_ovn = fakes.FakeOvsdbSbOvnIdl()
+ self.mech_driver._nb_ovn = nb_ovn
+ self.mech_driver._sb_ovn = sb_ovn
+ p = mock.patch.object(ovn_utils, 'get_revision_number', return_value=1)
+ p.start()
+ self.addCleanup(p.stop)
+
+ def _test_segment_host_mapping(self):
+ # Disable the callback to update SegmentHostMapping by default, so
+ # that update_segment_host_mapping is the only path to add the
+ # mapping.
+ registry.unsubscribe(
+ self.mech_driver._add_segment_host_mapping_for_segment,
+ resources.SEGMENT, events.AFTER_CREATE)
+ host = 'hostname'
+ with self.network() as network:
+ network = network['network']
+ segment1 = self._test_create_segment(
+ network_id=network['id'], physical_network='phys_net1',
+ segmentation_id=200, network_type='vlan')['segment']
+
+ # As a geneve network's MTU shouldn't exceed 1450, update it
+ data = {'network': {'mtu': 1450}}
+ req = self.new_update_request('networks', data, network['id'])
+ res = self.deserialize(self.fmt, req.get_response(self.api))
+ self.assertEqual(1450, res['network']['mtu'])
+
+ self._test_create_segment(
+ network_id=network['id'],
+ segmentation_id=200,
+ network_type='geneve')
+ self.mech_driver.update_segment_host_mapping(host, ['phys_net1'])
+ segments_host_db = self._get_segments_for_host(host)
+ self.assertEqual({segment1['id']}, set(segments_host_db))
+ return network['id'], host
+
+ def test_update_segment_host_mapping(self):
+ network_id, host = self._test_segment_host_mapping()
+
+ # Update the mapping
+ segment2 = self._test_create_segment(
+ network_id=network_id, physical_network='phys_net2',
+ segmentation_id=201, network_type='vlan')['segment']
+ self.mech_driver.update_segment_host_mapping(host, ['phys_net2'])
+ segments_host_db = self._get_segments_for_host(host)
+ self.assertEqual({segment2['id']}, set(segments_host_db))
+
+ def test_clear_segment_host_mapping(self):
+ _, host = self._test_segment_host_mapping()
+
+ # Clear the mapping
+ self.mech_driver.update_segment_host_mapping(host, [])
+ segments_host_db = self._get_segments_for_host(host)
+ self.assertEqual({}, segments_host_db)
+
+ def test_update_segment_host_mapping_with_new_segment(self):
+ hostname_with_physnets = {'hostname1': ['phys_net1', 'phys_net2'],
+ 'hostname2': ['phys_net1']}
+ ovn_sb_api = self.mech_driver._sb_ovn
+ ovn_sb_api.get_chassis_hostname_and_physnets.return_value = (
+ hostname_with_physnets)
+ self.mech_driver.subscribe()
+ with self.network() as network:
+ network_id = network['network']['id']
+
segment = self._test_create_segment(
+            network_id=network_id, physical_network='phys_net2',
+            segmentation_id=201, network_type='vlan')['segment']
+        segments_host_db1 = self._get_segments_for_host('hostname1')
+        # A new SegmentHostMapping should be created for hostname1
+        self.assertEqual({segment['id']}, set(segments_host_db1))
+
+        segments_host_db2 = self._get_segments_for_host('hostname2')
+        self.assertFalse(set(segments_host_db2))
+
+
+@mock.patch.object(n_net, 'get_random_mac', lambda *_: '01:02:03:04:05:06')
+class TestOVNMechanismDriverDHCPOptions(OVNMechanismDriverTestCase):
+
+    def _test_get_ovn_dhcp_options_helper(self, subnet, network,
+                                          expected_dhcp_options,
+                                          service_mac=None):
+        dhcp_options = self.mech_driver._ovn_client._get_ovn_dhcp_options(
+            subnet, network, service_mac)
+        self.assertEqual(expected_dhcp_options, dhcp_options)
+
+    def test_get_ovn_dhcp_options(self):
+        subnet = {'id': 'foo-subnet', 'network_id': 'network-id',
+                  'cidr': '10.0.0.0/24',
+                  'ip_version': 4,
+                  'enable_dhcp': True,
+                  'gateway_ip': '10.0.0.1',
+                  'dns_nameservers': ['7.7.7.7', '8.8.8.8'],
+                  'host_routes': [{'destination': '20.0.0.4',
+                                   'nexthop': '10.0.0.100'}]}
+        network = {'id': 'network-id', 'mtu': 1400}
+
+        expected_dhcp_options = {'cidr': '10.0.0.0/24',
+                                 'external_ids': {
+                                     'subnet_id': 'foo-subnet',
+                                     ovn_const.OVN_REV_NUM_EXT_ID_KEY: '1'}}
+        expected_dhcp_options['options'] = {
+            'server_id': subnet['gateway_ip'],
+            'server_mac': '01:02:03:04:05:06',
+            'lease_time': str(12 * 60 * 60),
+            'mtu': '1400',
+            'router': subnet['gateway_ip'],
+            'dns_server': '{7.7.7.7, 8.8.8.8}',
+            'classless_static_route':
+                '{20.0.0.4,10.0.0.100, 0.0.0.0/0,10.0.0.1}'
+        }
+
+        self._test_get_ovn_dhcp_options_helper(subnet, network,
+                                               expected_dhcp_options)
+        expected_dhcp_options['options']['server_mac'] = '11:22:33:44:55:66'
+        self._test_get_ovn_dhcp_options_helper(subnet, network,
+                                               expected_dhcp_options,
+                                               service_mac='11:22:33:44:55:66')
+
+    def test_get_ovn_dhcp_options_dhcp_disabled(self):
+        subnet = {'id': 'foo-subnet', 'network_id': 'network-id',
+                  'cidr': '10.0.0.0/24',
+                  'ip_version': 4,
+                  'enable_dhcp': False,
+                  'gateway_ip': '10.0.0.1',
+                  'dns_nameservers': ['7.7.7.7', '8.8.8.8'],
+                  'host_routes': [{'destination': '20.0.0.4',
+                                   'nexthop': '10.0.0.100'}]}
+        network = {'id': 'network-id', 'mtu': 1400}
+
+        expected_dhcp_options = {'cidr': '10.0.0.0/24',
+                                 'external_ids': {
+                                     'subnet_id': 'foo-subnet',
+                                     ovn_const.OVN_REV_NUM_EXT_ID_KEY: '1'},
+                                 'options': {}}
+
+        self._test_get_ovn_dhcp_options_helper(subnet, network,
+                                               expected_dhcp_options)
+
+    def test_get_ovn_dhcp_options_no_gw_ip(self):
+        subnet = {'id': 'foo-subnet', 'network_id': 'network-id',
+                  'cidr': '10.0.0.0/24',
+                  'ip_version': 4,
+                  'enable_dhcp': True,
+                  'gateway_ip': None,
+                  'dns_nameservers': ['7.7.7.7', '8.8.8.8'],
+                  'host_routes': [{'destination': '20.0.0.4',
+                                   'nexthop': '10.0.0.100'}]}
+        network = {'id': 'network-id', 'mtu': 1400}
+
+        expected_dhcp_options = {'cidr': '10.0.0.0/24',
+                                 'external_ids': {
+                                     'subnet_id': 'foo-subnet',
+                                     ovn_const.OVN_REV_NUM_EXT_ID_KEY: '1'},
+                                 'options': {}}
+
+        self._test_get_ovn_dhcp_options_helper(subnet, network,
+                                               expected_dhcp_options)
+
+    def test_get_ovn_dhcp_options_no_gw_ip_but_metadata_ip(self):
+        subnet = {'id': 'foo-subnet', 'network_id': 'network-id',
+                  'cidr': '10.0.0.0/24',
+                  'ip_version': 4,
+                  'enable_dhcp': True,
+                  'dns_nameservers': [],
+                  'host_routes': [],
+                  'gateway_ip': None}
+        network = {'id': 'network-id', 'mtu': 1400}
+
+        expected_dhcp_options = {
+            'cidr': '10.0.0.0/24',
+            'external_ids': {'subnet_id': 'foo-subnet',
+                             ovn_const.OVN_REV_NUM_EXT_ID_KEY: '1'},
+            'options': {'server_id': '10.0.0.2',
+                        'server_mac': '01:02:03:04:05:06',
+                        'dns_server': '{8.8.8.8}',
+                        'lease_time': str(12 * 60 * 60),
+                        'mtu': '1400',
+                        'classless_static_route':
+                            '{169.254.169.254/32,10.0.0.2}'}}
+
+        with mock.patch.object(self.mech_driver._ovn_client,
+                               '_find_metadata_port_ip',
+                               return_value='10.0.0.2'):
+            self._test_get_ovn_dhcp_options_helper(subnet, network,
+                                                   expected_dhcp_options)
+
+    def test_get_ovn_dhcp_options_with_global_options(self):
+        ovn_conf.cfg.CONF.set_override('ovn_dhcp4_global_options',
+                                       'ntp_server:8.8.8.8,'
+                                       'mtu:9000,'
+                                       'wpad:',
+                                       group='ovn')
+
+        subnet = {'id': 'foo-subnet', 'network_id': 'network-id',
+                  'cidr': '10.0.0.0/24',
+                  'ip_version': 4,
+                  'enable_dhcp': True,
+                  'gateway_ip': '10.0.0.1',
+                  'dns_nameservers': ['7.7.7.7', '8.8.8.8'],
+                  'host_routes': [{'destination': '20.0.0.4',
+                                   'nexthop': '10.0.0.100'}]}
+        network = {'id': 'network-id', 'mtu': 1400}
+
+        expected_dhcp_options = {'cidr': '10.0.0.0/24',
+                                 'external_ids': {
+                                     'subnet_id': 'foo-subnet',
+                                     ovn_const.OVN_REV_NUM_EXT_ID_KEY: '1'}}
+        expected_dhcp_options['options'] = {
+            'server_id': subnet['gateway_ip'],
+            'server_mac': '01:02:03:04:05:06',
+            'lease_time': str(12 * 60 * 60),
+            'mtu': '1400',
+            'router': subnet['gateway_ip'],
+            'ntp_server': '8.8.8.8',
+            'dns_server': '{7.7.7.7, 8.8.8.8}',
+            'classless_static_route':
+                '{20.0.0.4,10.0.0.100, 0.0.0.0/0,10.0.0.1}'
+        }
+
+        self._test_get_ovn_dhcp_options_helper(subnet, network,
+                                               expected_dhcp_options)
+        expected_dhcp_options['options']['server_mac'] = '11:22:33:44:55:66'
+        self._test_get_ovn_dhcp_options_helper(subnet, network,
+                                               expected_dhcp_options,
+                                               service_mac='11:22:33:44:55:66')
+
+    def test_get_ovn_dhcp_options_with_global_options_ipv6(self):
+        ovn_conf.cfg.CONF.set_override('ovn_dhcp6_global_options',
+                                       'ntp_server:8.8.8.8,'
+                                       'server_id:01:02:03:04:05:04,'
+                                       'wpad:',
+                                       group='ovn')
+
+        subnet = {'id': 'foo-subnet', 'network_id': 'network-id',
+                  'cidr': 'ae70::/24',
+                  'ip_version': 6,
+                  'enable_dhcp': True,
+                  'dns_nameservers': ['7.7.7.7', '8.8.8.8']}
+        network = {'id': 'network-id', 'mtu': 1400}
+
+        ext_ids = {'subnet_id': 'foo-subnet',
+                   ovn_const.OVN_REV_NUM_EXT_ID_KEY: '1'}
+        expected_dhcp_options = {
+            'cidr': 'ae70::/24', 'external_ids': ext_ids,
+            'options': {'server_id': '01:02:03:04:05:06',
+                        'ntp_server': '8.8.8.8',
+                        'dns_server': '{7.7.7.7, 8.8.8.8}'}}
+
+        self._test_get_ovn_dhcp_options_helper(subnet, network,
+                                               expected_dhcp_options)
+        expected_dhcp_options['options']['server_id'] = '11:22:33:44:55:66'
+        self._test_get_ovn_dhcp_options_helper(subnet, network,
+                                               expected_dhcp_options,
+                                               service_mac='11:22:33:44:55:66')
+
+    def test_get_ovn_dhcp_options_ipv6_subnet(self):
+        subnet = {'id': 'foo-subnet', 'network_id': 'network-id',
+                  'cidr': 'ae70::/24',
+                  'ip_version': 6,
+                  'enable_dhcp': True,
+                  'dns_nameservers': ['7.7.7.7', '8.8.8.8']}
+        network = {'id': 'network-id', 'mtu': 1400}
+
+        ext_ids = {'subnet_id': 'foo-subnet',
+                   ovn_const.OVN_REV_NUM_EXT_ID_KEY: '1'}
+        expected_dhcp_options = {
+            'cidr': 'ae70::/24', 'external_ids': ext_ids,
+            'options': {'server_id': '01:02:03:04:05:06',
+                        'dns_server': '{7.7.7.7, 8.8.8.8}'}}
+
+        self._test_get_ovn_dhcp_options_helper(subnet, network,
+                                               expected_dhcp_options)
+        expected_dhcp_options['options']['server_id'] = '11:22:33:44:55:66'
+        self._test_get_ovn_dhcp_options_helper(subnet, network,
+                                               expected_dhcp_options,
+                                               service_mac='11:22:33:44:55:66')
+
+    def test_get_ovn_dhcp_options_dhcpv6_stateless_subnet(self):
+        subnet = {'id': 'foo-subnet', 'network_id': 'network-id',
+                  'cidr': 'ae70::/24',
+                  'ip_version': 6,
+                  'enable_dhcp': True,
+                  'dns_nameservers': ['7.7.7.7', '8.8.8.8'],
+                  'ipv6_address_mode': const.DHCPV6_STATELESS}
+        network = {'id': 'network-id', 'mtu': 1400}
+
+        ext_ids = {'subnet_id': 'foo-subnet',
+                   ovn_const.OVN_REV_NUM_EXT_ID_KEY: '1'}
+        expected_dhcp_options = {
+            'cidr': 'ae70::/24', 'external_ids': ext_ids,
+            'options': {'server_id': '01:02:03:04:05:06',
+                        'dns_server': '{7.7.7.7, 8.8.8.8}',
+                        'dhcpv6_stateless': 'true'}}
+
+        self._test_get_ovn_dhcp_options_helper(subnet, network,
+                                               expected_dhcp_options)
+        expected_dhcp_options['options']['server_id'] = '11:22:33:44:55:66'
+        self._test_get_ovn_dhcp_options_helper(subnet, network,
+                                               expected_dhcp_options,
+                                               service_mac='11:22:33:44:55:66')
+
+    def test_get_ovn_dhcp_options_metadata_route(self):
+        subnet = {'id': 'foo-subnet', 'network_id': 'network-id',
+                  'cidr': '10.0.0.0/24',
+                  'ip_version': 4,
+                  'enable_dhcp': True,
+                  'gateway_ip': '10.0.0.1',
+                  'dns_nameservers': ['7.7.7.7', '8.8.8.8'],
+                  'host_routes': []}
+        network = {'id': 'network-id', 'mtu': 1400}
+
+        expected_dhcp_options = {'cidr': '10.0.0.0/24',
+                                 'external_ids': {
+                                     'subnet_id': 'foo-subnet',
+                                     ovn_const.OVN_REV_NUM_EXT_ID_KEY: '1'}}
+        expected_dhcp_options['options'] = {
+            'server_id': subnet['gateway_ip'],
+            'server_mac': '01:02:03:04:05:06',
+            'lease_time': str(12 * 60 * 60),
+            'mtu': '1400',
+            'router': subnet['gateway_ip'],
+            'dns_server': '{7.7.7.7, 8.8.8.8}',
+            'classless_static_route':
+                '{169.254.169.254/32,10.0.0.2, 0.0.0.0/0,10.0.0.1}'
+        }
+
+        with mock.patch.object(self.mech_driver._ovn_client,
+                               '_find_metadata_port_ip',
+                               return_value='10.0.0.2'):
+            self._test_get_ovn_dhcp_options_helper(subnet, network,
+                                                   expected_dhcp_options)
+
+    def test_get_ovn_dhcp_options_domain_name(self):
+        cfg.CONF.set_override('dns_domain', 'foo.com')
+        subnet = {'id': 'foo-subnet', 'network_id': 'network-id',
+                  'cidr': '10.0.0.0/24',
+                  'ip_version': 4,
+                  'enable_dhcp': True,
+                  'gateway_ip': '10.0.0.1',
+                  'dns_nameservers': ['7.7.7.7', '8.8.8.8'],
+                  'host_routes': [{'destination': '20.0.0.4',
+                                   'nexthop': '10.0.0.100'}]}
+        network = {'id': 'network-id', 'mtu': 1400}
+
+        expected_dhcp_options = {'cidr': '10.0.0.0/24',
+                                 'external_ids': {
+                                     'subnet_id': 'foo-subnet',
+                                     ovn_const.OVN_REV_NUM_EXT_ID_KEY: '1'}}
+        expected_dhcp_options['options'] = {
+            'server_id': subnet['gateway_ip'],
+            'server_mac': '01:02:03:04:05:06',
+            'lease_time': str(12 * 60 * 60),
+            'mtu': '1400',
+            'router': subnet['gateway_ip'],
+            'domain_name': '"foo.com"',
+            'dns_server': '{7.7.7.7, 8.8.8.8}',
+            'classless_static_route':
+                '{20.0.0.4,10.0.0.100, 0.0.0.0/0,10.0.0.1}'
+        }
+
+        self._test_get_ovn_dhcp_options_helper(subnet, network,
+                                               expected_dhcp_options)
+        expected_dhcp_options['options']['server_mac'] = '11:22:33:44:55:66'
+        self._test_get_ovn_dhcp_options_helper(subnet, network,
+                                               expected_dhcp_options,
+                                               service_mac='11:22:33:44:55:66')
+
+    def _test__get_port_dhcp_options_port_dhcp_opts_set(self, ip_version=4):
+        if ip_version == 4:
+            ip_address = '10.0.0.11'
+        else:
+            ip_address = 'aef0::4'
+
+        port = {
+            'id': 'foo-port',
+            'device_owner': 'compute:None',
+            'fixed_ips': [{'subnet_id': 'foo-subnet',
+                           'ip_address': ip_address}]}
+        if ip_version == 4:
+            port['extra_dhcp_opts'] = [
+                {'ip_version': 4, 'opt_name': 'mtu', 'opt_value': '1200'},
+                {'ip_version': 4, 'opt_name': 'ntp-server',
+                 'opt_value': '8.8.8.8'}]
+        else:
+            port['extra_dhcp_opts'] = [
+                {'ip_version': 6, 'opt_name': 'domain-search',
+                 'opt_value': 'foo-domain'},
+                {'ip_version': 4, 'opt_name': 'dns-server',
+                 'opt_value': '7.7.7.7'}]
+
+        self.mech_driver._ovn_client._get_subnet_dhcp_options_for_port = (
+            mock.Mock(
+                return_value=({
+                    'cidr': '10.0.0.0/24' if ip_version == 4 else 'aef0::/64',
+                    'external_ids': {'subnet_id': 'foo-subnet'},
+                    'options': {'router': '10.0.0.1', 'mtu': '1400'}
+                    if ip_version == 4
+                    else {'server_id': '01:02:03:04:05:06'},
+                    'uuid': 'foo-uuid'})))
+
+        if ip_version == 4:
+            expected_dhcp_options = {
+                'cidr': '10.0.0.0/24',
+                'external_ids': {'subnet_id': 'foo-subnet',
+                                 'port_id': 'foo-port'},
+                'options': {'router': '10.0.0.1', 'mtu': '1200',
+                            'ntp_server': '8.8.8.8'}}
+        else:
+            expected_dhcp_options = {
+                'cidr': 'aef0::/64',
+                'external_ids': {'subnet_id': 'foo-subnet',
+                                 'port_id': 'foo-port'},
+                'options': {'server_id': '01:02:03:04:05:06',
+                            'domain_search': 'foo-domain'}}
+
+        self.mech_driver._nb_ovn.add_dhcp_options.return_value = 'foo-val'
+        dhcp_options = self.mech_driver._ovn_client._get_port_dhcp_options(
+            port, ip_version)
+        self.assertEqual({'cmd': 'foo-val'}, dhcp_options)
+        self.mech_driver._nb_ovn.add_dhcp_options.assert_called_once_with(
+            'foo-subnet', port_id='foo-port', **expected_dhcp_options)
+
+    def test__get_port_dhcp_options_port_dhcp_opts_set_v4(self):
+        self._test__get_port_dhcp_options_port_dhcp_opts_set(ip_version=4)
+
+    def test__get_port_dhcp_options_port_dhcp_opts_set_v6(self):
+        self._test__get_port_dhcp_options_port_dhcp_opts_set(ip_version=6)
+
+    def _test__get_port_dhcp_options_port_dhcp_opts_not_set(self,
+                                                            ip_version=4):
+        if ip_version == 4:
+            port = {'id': 'foo-port',
+                    'device_owner': 'compute:None',
+                    'fixed_ips': [{'subnet_id': 'foo-subnet',
+                                   'ip_address': '10.0.0.11'}]}
+        else:
+            port = {'id': 'foo-port',
+                    'device_owner': 'compute:None',
+                    'fixed_ips': [{'subnet_id': 'foo-subnet',
+                                   'ip_address': 'aef0::4'}]}
+
+        if ip_version == 4:
+            expected_dhcp_opts = {
+                'cidr': '10.0.0.0/24',
+                'external_ids': {'subnet_id': 'foo-subnet'},
+                'options': {'router': '10.0.0.1', 'mtu': '1400'}}
+        else:
+            expected_dhcp_opts = {
+                'cidr': 'aef0::/64',
+                'external_ids': {'subnet_id': 'foo-subnet'},
+                'options': {'server_id': '01:02:03:04:05:06'}}
+
+        self.mech_driver._ovn_client._get_subnet_dhcp_options_for_port = (
+            mock.Mock(return_value=expected_dhcp_opts))
+
+        self.assertEqual(
+            expected_dhcp_opts,
+            self.mech_driver._ovn_client._get_port_dhcp_options(
+                port, ip_version=ip_version))
+
+        # Since the port has no extra DHCPv4/v6 options defined, no new
+        # DHCP_Options row should be created and the logical switch port
+        # DHCPv4/v6 options should point to the subnet DHCPv4/v6 options.
+        self.mech_driver._nb_ovn.add_dhcp_options.assert_not_called()
+
+    def test__get_port_dhcp_options_port_dhcp_opts_not_set_v4(self):
+        self._test__get_port_dhcp_options_port_dhcp_opts_not_set(ip_version=4)
+
+    def test__get_port_dhcp_options_port_dhcp_opts_not_set_v6(self):
+        self._test__get_port_dhcp_options_port_dhcp_opts_not_set(ip_version=6)
+
+    def _test__get_port_dhcp_options_port_dhcp_disabled(self, ip_version=4):
+        port = {
+            'id': 'foo-port',
+            'device_owner': 'compute:None',
+            'fixed_ips': [{'subnet_id': 'foo-subnet',
+                           'ip_address': '10.0.0.11'},
+                          {'subnet_id': 'foo-subnet-v6',
+                           'ip_address': 'aef0::11'}],
+            'extra_dhcp_opts': [{'ip_version': 4, 'opt_name': 'dhcp_disabled',
+                                 'opt_value': 'False'},
+                                {'ip_version': 6, 'opt_name': 'dhcp_disabled',
+                                 'opt_value': 'False'}]
+        }
+
+        subnet_dhcp_opts = mock.Mock()
+        self.mech_driver._ovn_client._get_subnet_dhcp_options_for_port = (
+            mock.Mock(return_value=subnet_dhcp_opts))
+
+        # No dhcp_disabled option is set to True, so the subnet DHCP options
+        # are fetched for this port. Since the port has no extra DHCP options
+        # other than dhcp_disabled, no port-specific DHCP options are
+        # created.
+        self.assertEqual(
+            subnet_dhcp_opts,
+            self.mech_driver._ovn_client._get_port_dhcp_options(
+                port, ip_version))
+        self.assertEqual(
+            1,
+            self.mech_driver._ovn_client._get_subnet_dhcp_options_for_port.
+            call_count)
+        self.mech_driver._nb_ovn.add_dhcp_options.assert_not_called()
+
+        # Set dhcp_disabled to True for the IP version under test: no DHCP
+        # options are fetched at all, because DHCP is now disabled for that
+        # IP version.
+        opt_index = 0 if ip_version == 4 else 1
+        port['extra_dhcp_opts'][opt_index]['opt_value'] = 'True'
+        self.mech_driver._ovn_client._get_subnet_dhcp_options_for_port.\
+            reset_mock()
+        self.assertIsNone(
+            self.mech_driver._ovn_client._get_port_dhcp_options(
+                port, ip_version))
+        self.assertEqual(
+            0,
+            self.mech_driver._ovn_client._get_subnet_dhcp_options_for_port.
+            call_count)
+        self.mech_driver._nb_ovn.add_dhcp_options.assert_not_called()
+
+        # Set dhcp_disabled back to False for the IP version under test, and
+        # set it to True for the other IP version. The subnet DHCP options
+        # are fetched again, since dhcp_disabled for the other IP version
+        # must not affect the one being tested.
+        opt_index_1 = 1 if ip_version == 4 else 0
+        port['extra_dhcp_opts'][opt_index]['opt_value'] = 'False'
+        port['extra_dhcp_opts'][opt_index_1]['opt_value'] = 'True'
+        self.assertEqual(
+            subnet_dhcp_opts,
+            self.mech_driver._ovn_client._get_port_dhcp_options(
+                port, ip_version))
+        self.assertEqual(
+            1,
+            self.mech_driver._ovn_client._get_subnet_dhcp_options_for_port.
+            call_count)
+        self.mech_driver._nb_ovn.add_dhcp_options.assert_not_called()
+
+    def test__get_port_dhcp_options_port_dhcp_disabled_v4(self):
+        self._test__get_port_dhcp_options_port_dhcp_disabled(ip_version=4)
+
+    def test__get_port_dhcp_options_port_dhcp_disabled_v6(self):
+        self._test__get_port_dhcp_options_port_dhcp_disabled(ip_version=6)
+
+    def test__get_port_dhcp_options_port_with_invalid_device_owner(self):
+        port = {'id': 'foo-port',
+                'device_owner': 'neutron:router_interface',
+                'fixed_ips': ['fake']}
+
+        self.assertIsNone(
+            self.mech_driver._ovn_client._get_port_dhcp_options(
+                port, mock.ANY))
+
+    def _test__get_subnet_dhcp_options_for_port(self, ip_version=4,
+                                                enable_dhcp=True):
+        port = {'fixed_ips': [
+            {'ip_address': '10.0.0.4',
+             'subnet_id': 'v4_snet_id_1' if enable_dhcp else 'v4_snet_id_2'},
+            {'ip_address': '2001:dba::4',
+             'subnet_id': 'v6_snet_id_1' if enable_dhcp else 'v6_snet_id_2'},
+            {'ip_address': '2001:dbb::4', 'subnet_id': 'v6_snet_id_3'}]}
+
+        def fake(subnets):
+            # Simulate the NB API returning DHCP_Options rows only for the
+            # subnets that have them.
+            fake_rows = {
+                'v4_snet_id_1': 'foo',
+                'v6_snet_id_1': {'options': {}},
+                'v6_snet_id_3': {'options': {
+                    ovn_const.DHCPV6_STATELESS_OPT: 'true'}}}
+            return [fake_rows[row] for row in fake_rows if row in subnets]
+
+        self.mech_driver._nb_ovn.get_subnets_dhcp_options.side_effect = fake
+
+        if ip_version == 4:
+            expected_opts = 'foo' if enable_dhcp else None
+        else:
+            expected_opts = {
+                'options': {} if enable_dhcp else {
+                    ovn_const.DHCPV6_STATELESS_OPT: 'true'}}
+
+        self.assertEqual(
+            expected_opts,
+            self.mech_driver._ovn_client._get_subnet_dhcp_options_for_port(
+                port, ip_version))
+
+    def test__get_subnet_dhcp_options_for_port_v4(self):
+        self._test__get_subnet_dhcp_options_for_port()
+
+    def test__get_subnet_dhcp_options_for_port_v4_dhcp_disabled(self):
+        self._test__get_subnet_dhcp_options_for_port(enable_dhcp=False)
+
+    def test__get_subnet_dhcp_options_for_port_v6(self):
+        self._test__get_subnet_dhcp_options_for_port(ip_version=6)
+
+    def test__get_subnet_dhcp_options_for_port_v6_dhcp_disabled(self):
+        self._test__get_subnet_dhcp_options_for_port(ip_version=6,
+                                                     enable_dhcp=False)
+
+
+class TestOVNMechanismDriverSecurityGroup(
+        test_security_group.Ml2SecurityGroupsTestCase):
+    # These test cases supplement test_acl.py; they verify that the ACL
+    # methods are invoked as expected. The correctness of the arguments
+    # passed to those methods is mainly covered by test_acl.py.
+
+    def setUp(self):
+        cfg.CONF.set_override('mechanism_drivers',
+                              ['logger', 'ovn'],
+                              'ml2')
+        cfg.CONF.set_override('dns_servers', ['8.8.8.8'], group='ovn')
+        super(TestOVNMechanismDriverSecurityGroup, self).setUp()
+        mm = directory.get_plugin().mechanism_manager
+        self.mech_driver = mm.mech_drivers['ovn'].obj
+        nb_ovn = fakes.FakeOvsdbNbOvnIdl()
+        sb_ovn = fakes.FakeOvsdbSbOvnIdl()
+        self.mech_driver._nb_ovn = nb_ovn
+        self.mech_driver._sb_ovn = sb_ovn
+        self.ctx = context.get_admin_context()
+        revision_plugin.RevisionPlugin()
+
+    def _delete_default_sg_rules(self, security_group_id):
+        res = self._list(
+            'security-group-rules',
+            query_params='security_group_id=%s' % security_group_id)
+        for r in res['security_group_rules']:
+            self._delete('security-group-rules', r['id'])
+
+    def _create_sg(self, sg_name):
+        sg = self._make_security_group(self.fmt, sg_name, '')
+        return sg['security_group']
+
+    def _create_empty_sg(self, sg_name):
+        sg = self._create_sg(sg_name)
+        self._delete_default_sg_rules(sg['id'])
+        return sg
+
+    def _create_sg_rule(self, sg_id, direction, proto,
+                        port_range_min=None, port_range_max=None,
+                        remote_ip_prefix=None, remote_group_id=None,
+                        ethertype=const.IPv4):
+        r = self._build_security_group_rule(sg_id, direction, proto,
+                                            port_range_min=port_range_min,
+                                            port_range_max=port_range_max,
+                                            remote_ip_prefix=remote_ip_prefix,
+                                            remote_group_id=remote_group_id,
+                                            ethertype=ethertype)
+        res = self._create_security_group_rule(self.fmt, r)
+        rule = self.deserialize(self.fmt, res)
+        return rule['security_group_rule']
+
+    def _delete_sg_rule(self, rule_id):
+        self._delete('security-group-rules', rule_id)
+
+    def test_create_security_group_with_port_group(self):
+        self.mech_driver._nb_ovn.is_port_groups_supported.return_value = True
+        sg = self._create_sg('sg')
+
+        expected_pg_name = ovn_utils.ovn_port_group_name(sg['id'])
+        expected_pg_add_calls = [
+            mock.call(acls=[],
+                      external_ids={'neutron:security_group_id': sg['id']},
+                      name=expected_pg_name),
+        ]
+        self.mech_driver._nb_ovn.pg_add.assert_has_calls(
+            expected_pg_add_calls)
+
+    def test_delete_security_group_with_port_group(self):
+        self.mech_driver._nb_ovn.is_port_groups_supported.return_value = True
+        sg = self._create_sg('sg')
+        self._delete('security-groups', sg['id'])
+
+        expected_pg_name = ovn_utils.ovn_port_group_name(sg['id'])
+        expected_pg_del_calls = [
+            mock.call(name=expected_pg_name),
+        ]
+        self.mech_driver._nb_ovn.pg_del.assert_has_calls(
+            expected_pg_del_calls)
+
+    def test_create_port_with_port_group(self):
+        self.mech_driver._nb_ovn.is_port_groups_supported.return_value = True
+        with self.network() as n, self.subnet(n):
+            sg = self._create_empty_sg('sg')
+            self._make_port(self.fmt, n['network']['id'],
+                            security_groups=[sg['id']])
+
+            # Assert the port has been added to the right security groups
+            expected_pg_name = ovn_utils.ovn_port_group_name(sg['id'])
+            expected_pg_add_ports_calls = [
+                mock.call('neutron_pg_drop', mock.ANY),
+                mock.call(expected_pg_name, mock.ANY)
+            ]
+            self.mech_driver._nb_ovn.pg_add_ports.assert_has_calls(
+                expected_pg_add_ports_calls)
+
+            # Assert add_acl() is not used anymore
+            self.assertFalse(self.mech_driver._nb_ovn.add_acl.called)
+
+    def test_create_port_with_sg_default_rules(self):
+        with self.network() as n, self.subnet(n):
+            sg = self._create_sg('sg')
+            self._make_port(self.fmt, n['network']['id'],
+                            security_groups=[sg['id']])
+
+            # One DHCP rule, one IPv6 rule, one IPv4 rule and
+            # two default dropping rules.
+            self.assertEqual(
+                5, self.mech_driver._nb_ovn.add_acl.call_count)
+
+    def test_create_port_with_empty_sg(self):
+        with self.network() as n, self.subnet(n):
+            sg = self._create_empty_sg('sg')
+            self._make_port(self.fmt, n['network']['id'],
+                            security_groups=[sg['id']])
+            # One DHCP rule and two default dropping rules.
+            self.assertEqual(
+                3, self.mech_driver._nb_ovn.add_acl.call_count)
+
+    def test_create_port_with_multi_sgs(self):
+        with self.network() as n, self.subnet(n):
+            sg1 = self._create_empty_sg('sg1')
+            sg2 = self._create_empty_sg('sg2')
+            self._create_sg_rule(sg1['id'], 'ingress', const.PROTO_NAME_TCP,
+                                 port_range_min=22, port_range_max=23)
+            self._create_sg_rule(sg2['id'], 'egress', const.PROTO_NAME_UDP,
+                                 remote_ip_prefix='0.0.0.0/0')
+            self._make_port(self.fmt, n['network']['id'],
+                            security_groups=[sg1['id'], sg2['id']])
+
+            # One DHCP rule, one TCP rule, one UDP rule and
+            # two default dropping rules.
+            self.assertEqual(
+                5, self.mech_driver._nb_ovn.add_acl.call_count)
+
+    def test_create_port_with_multi_sgs_duplicate_rules(self):
+        with self.network() as n, self.subnet(n):
+            sg1 = self._create_empty_sg('sg1')
+            sg2 = self._create_empty_sg('sg2')
+            self._create_sg_rule(sg1['id'], 'ingress', const.PROTO_NAME_TCP,
+                                 port_range_min=22, port_range_max=23,
+                                 remote_ip_prefix='20.0.0.0/24')
+            self._create_sg_rule(sg2['id'], 'ingress', const.PROTO_NAME_TCP,
+                                 port_range_min=22, port_range_max=23,
+                                 remote_ip_prefix='20.0.0.0/24')
+            self._make_port(self.fmt, n['network']['id'],
+                            security_groups=[sg1['id'], sg2['id']])
+
+            # One DHCP rule, two TCP rules and two default dropping rules.
+            self.assertEqual(
+                5, self.mech_driver._nb_ovn.add_acl.call_count)
+
+    def test_update_port_with_sgs(self):
+        with self.network() as n, self.subnet(n):
+            sg1 = self._create_empty_sg('sg1')
+            self._create_sg_rule(sg1['id'], 'ingress', const.PROTO_NAME_TCP,
+                                 ethertype=const.IPv6)
+
+            p = self._make_port(self.fmt, n['network']['id'],
+                                security_groups=[sg1['id']])['port']
+            # One DHCP rule, one TCP rule and two default dropping rules.
+            self.assertEqual(
+                4, self.mech_driver._nb_ovn.add_acl.call_count)
+
+            sg2 = self._create_empty_sg('sg2')
+            self._create_sg_rule(sg2['id'], 'egress', const.PROTO_NAME_UDP,
+                                 remote_ip_prefix='30.0.0.0/24')
+            data = {'port': {'security_groups': [sg1['id'], sg2['id']]}}
+            req = self.new_update_request('ports', data, p['id'])
+            req.get_response(self.api)
+            self.assertEqual(
+                1, self.mech_driver._nb_ovn.update_acls.call_count)
+
+    def test_update_sg_change_rule(self):
+        with self.network() as n, self.subnet(n):
+            sg = self._create_empty_sg('sg')
+
+            self._make_port(self.fmt, n['network']['id'],
+                            security_groups=[sg['id']])
+            # One DHCP rule and two default dropping rules.
+            self.assertEqual(
+                3, self.mech_driver._nb_ovn.add_acl.call_count)
+
+            sg_r = self._create_sg_rule(sg['id'], 'ingress',
+                                        const.PROTO_NAME_UDP,
+                                        ethertype=const.IPv6)
+            self.assertEqual(
+                1, self.mech_driver._nb_ovn.update_acls.call_count)
+
+            self._delete_sg_rule(sg_r['id'])
+            self.assertEqual(
+                2, self.mech_driver._nb_ovn.update_acls.call_count)
+
+    def test_update_sg_change_rule_unrelated_port(self):
+        with self.network() as n, self.subnet(n):
+            sg1 = self._create_empty_sg('sg1')
+            sg2 = self._create_empty_sg('sg2')
+            self._create_sg_rule(sg1['id'], 'ingress', const.PROTO_NAME_TCP,
+                                 remote_group_id=sg2['id'])
+
+            self._make_port(self.fmt, n['network']['id'],
+                            security_groups=[sg1['id']])
+            # One DHCP rule, one TCP rule and two default dropping rules.
+            self.assertEqual(
+                4, self.mech_driver._nb_ovn.add_acl.call_count)
+
+            sg2_r = self._create_sg_rule(sg2['id'], 'egress',
+                                         const.PROTO_NAME_UDP)
+            self.mech_driver._nb_ovn.update_acls.assert_not_called()
+
+            self._delete_sg_rule(sg2_r['id'])
+            self.mech_driver._nb_ovn.update_acls.assert_not_called()
+
+    def test_update_sg_duplicate_rule(self):
+        with self.network() as n, self.subnet(n):
+            sg1 = self._create_empty_sg('sg1')
+            sg2 = self._create_empty_sg('sg2')
+            self._create_sg_rule(sg1['id'], 'ingress',
+                                 const.PROTO_NAME_UDP,
+                                 port_range_min=22, port_range_max=23)
+            self._make_port(self.fmt, n['network']['id'],
+                            security_groups=[sg1['id'], sg2['id']])
+            # One DHCP rule, one UDP rule and two default dropping rules.
+            self.assertEqual(
+                4, self.mech_driver._nb_ovn.add_acl.call_count)
+
+            # Add a new duplicate rule to sg2. It's expected to be added.
+            sg2_r = self._create_sg_rule(sg2['id'], 'ingress',
+                                         const.PROTO_NAME_UDP,
+                                         port_range_min=22, port_range_max=23)
+            self.assertEqual(
+                1, self.mech_driver._nb_ovn.update_acls.call_count)
+
+            # Delete the duplicate rule. It's expected to be deleted.
+            self._delete_sg_rule(sg2_r['id'])
+            self.assertEqual(
+                2, self.mech_driver._nb_ovn.update_acls.call_count)
+
+    def test_update_sg_duplicate_rule_multi_ports(self):
+        with self.network() as n, self.subnet(n):
+            sg1 = self._create_empty_sg('sg1')
+            sg2 = self._create_empty_sg('sg2')
+            sg3 = self._create_empty_sg('sg3')
+            self._create_sg_rule(sg1['id'], 'ingress',
+                                 const.PROTO_NAME_UDP,
+                                 remote_group_id=sg3['id'])
+            self._create_sg_rule(sg2['id'], 'egress', const.PROTO_NAME_TCP,
+                                 port_range_min=60, port_range_max=70)
+
+            self._make_port(self.fmt, n['network']['id'],
+                            security_groups=[sg1['id'], sg2['id']])
+            self._make_port(self.fmt, n['network']['id'],
+                            security_groups=[sg1['id'], sg2['id']])
+            self._make_port(self.fmt, n['network']['id'],
+                            security_groups=[sg2['id'], sg3['id']])
+            # The three ports get 5, 5 and 4 ACLs respectively.
+            self.assertEqual(
+                14, self.mech_driver._nb_ovn.add_acl.call_count)
+
+            # Add a rule to sg1 that duplicates one in sg2. It's expected
+            # to be added.
+            sg1_r = self._create_sg_rule(sg1['id'], 'egress',
+                                         const.PROTO_NAME_TCP,
+                                         port_range_min=60, port_range_max=70)
+            self.assertEqual(
+                1, self.mech_driver._nb_ovn.update_acls.call_count)
+
+            # Add a rule to sg2 that duplicates one in sg1 but none in sg3.
+            # It's expected to be added as well.
+            sg2_r = self._create_sg_rule(sg2['id'], 'ingress',
+                                         const.PROTO_NAME_UDP,
+                                         remote_group_id=sg3['id'])
+            self.assertEqual(
+                2, self.mech_driver._nb_ovn.update_acls.call_count)
+
+            # Delete the duplicate rule in sg1. It's expected to be deleted.
+            self._delete_sg_rule(sg1_r['id'])
+            self.assertEqual(
+                3, self.mech_driver._nb_ovn.update_acls.call_count)
+
+            # Delete the duplicate rule in sg2. It's expected to be deleted.
+            self._delete_sg_rule(sg2_r['id'])
+            self.assertEqual(
+                4, self.mech_driver._nb_ovn.update_acls.call_count)
+
+
+class TestOVNMechanismDriverMetadataPort(test_plugin.Ml2PluginV2TestCase):
+
+    _mechanism_drivers = ['logger', 'ovn']
+
+    def setUp(self):
+        super(TestOVNMechanismDriverMetadataPort, self).setUp()
+        mm = directory.get_plugin().mechanism_manager
+        self.mech_driver = mm.mech_drivers['ovn'].obj
+        self.mech_driver._nb_ovn = fakes.FakeOvsdbNbOvnIdl()
+        self.mech_driver._sb_ovn = fakes.FakeOvsdbSbOvnIdl()
+        self.nb_ovn = self.mech_driver._nb_ovn
+        self.sb_ovn = self.mech_driver._sb_ovn
+        self.ctx = context.get_admin_context()
+        ovn_conf.cfg.CONF.set_override('ovn_metadata_enabled', True,
+                                       group='ovn')
+        p = mock.patch.object(ovn_utils, 'get_revision_number',
+                              return_value=1)
+        p.start()
+        self.addCleanup(p.stop)
+
+    def _create_fake_dhcp_port(self, device_id):
+        return {'network_id': 'fake', 'device_owner': const.DEVICE_OWNER_DHCP,
+                'device_id': device_id}
+
+    @mock.patch.object(db_base_plugin_v2.NeutronDbPluginV2, 'get_ports')
+    def test__find_metadata_port(self, mock_get_ports):
+        ports = [
+            self._create_fake_dhcp_port('dhcp-0'),
+            self._create_fake_dhcp_port('dhcp-1'),
+            self._create_fake_dhcp_port(const.DEVICE_ID_RESERVED_DHCP_PORT),
+            self._create_fake_dhcp_port('ovnmeta-0')]
+        mock_get_ports.return_value = ports
+
+        md_port = self.mech_driver._ovn_client._find_metadata_port(
+            self.ctx, 'fake-net-id')
+        self.assertEqual('ovnmeta-0', md_port['device_id'])
+
+    def test_metadata_port_on_network_create(self):
+        """Check metadata port create.
+
+        Check that a localport is created when a neutron network is
+        created.
+        """
+        with self.network():
+            self.assertEqual(1, self.nb_ovn.create_lswitch_port.call_count)
+            args, kwargs = self.nb_ovn.create_lswitch_port.call_args
+            self.assertEqual('localport', kwargs['type'])
+
+    def test_metadata_port_not_created_if_exists(self):
+        """Check that the metadata port is not created if it already exists.
+
+        In the event of a sync, it might happen that a metadata port exists
+        already. When we are creating the logical switch in OVN we don't want
+        this port to be created again.
+        """
+        with mock.patch.object(
+                self.mech_driver._ovn_client, '_find_metadata_port',
+                return_value={'port': {'id': 'metadata_port1'}}):
+            with self.network():
+                self.assertEqual(0, self.nb_ovn.create_lswitch_port.call_count)
+
+    def test_metadata_ip_on_subnet_create(self):
+        """Check metadata port update.
+
+        Check that the metadata port is updated with a new IP address when a
+        subnet is created.
+        """
+        with self.network(set_context=True, tenant_id='test') as net1:
+            with self.subnet(network=net1, cidr='10.0.0.0/24') as subnet1:
+                # Create a network:dhcp owner port just as the Neutron DHCP
+                # agent would.
+                with self.port(subnet=subnet1,
+                               device_owner=const.DEVICE_OWNER_DHCP,
+                               device_id='dhcpxxxx',
+                               set_context=True, tenant_id='test'):
+                    with self.subnet(network=net1, cidr='20.0.0.0/24'):
+                        self.assertEqual(
+                            2, self.nb_ovn.set_lswitch_port.call_count)
+                        args, kwargs = self.nb_ovn.set_lswitch_port.call_args
+                        self.assertEqual('localport', kwargs['type'])
+                        self.assertEqual('10.0.0.2/24 20.0.0.2/24',
+                                         kwargs['external_ids'].get(
+                                             ovn_const.OVN_CIDRS_EXT_ID_KEY,
+                                             ''))
+
+    def test_metadata_port_on_network_delete(self):
+        """Check metadata port delete.
+
+        Check that the metadata port is deleted when a network is deleted.
+ """ + net = self._make_network(self.fmt, name="net1", admin_state_up=True) + network_id = net['network']['id'] + req = self.new_delete_request('networks', network_id) + res = req.get_response(self.api) + self.assertEqual(exc.HTTPNoContent.code, + res.status_int) + self.assertEqual(1, self.nb_ovn.delete_lswitch_port.call_count) + + +class TestOVNParentTagPortBinding(OVNMechanismDriverTestCase): + def test_create_port_with_invalid_parent(self): + binding = {OVN_PROFILE: {"parent_name": 'invalid', 'tag': 1}} + with self.network() as n: + with self.subnet(n): + self._create_port( + self.fmt, n['network']['id'], + expected_res_status=404, + arg_list=(OVN_PROFILE,), + **binding) + + @mock.patch.object(db_base_plugin_v2.NeutronDbPluginV2, 'get_port') + def test_create_port_with_parent_and_tag(self, mock_get_port): + binding = {OVN_PROFILE: {"parent_name": '', 'tag': 1}} + with self.network() as n: + with self.subnet(n) as s: + with self.port(s) as p: + binding[OVN_PROFILE]['parent_name'] = p['port']['id'] + res = self._create_port(self.fmt, n['network']['id'], + arg_list=(OVN_PROFILE,), + **binding) + port = self.deserialize(self.fmt, res) + self.assertEqual(port['port'][OVN_PROFILE], + binding[OVN_PROFILE]) + mock_get_port.assert_called_with(mock.ANY, p['port']['id']) + + def test_create_port_with_invalid_tag(self): + binding = {OVN_PROFILE: {"parent_name": '', 'tag': 'a'}} + with self.network() as n: + with self.subnet(n) as s: + with self.port(s) as p: + binding[OVN_PROFILE]['parent_name'] = p['port']['id'] + self._create_port(self.fmt, n['network']['id'], + arg_list=(OVN_PROFILE,), + expected_res_status=400, + **binding) + + +class TestOVNVtepPortBinding(OVNMechanismDriverTestCase): + + def test_create_port_with_vtep_options(self): + binding = {OVN_PROFILE: {"vtep-physical-switch": 'psw1', + "vtep-logical-switch": 'lsw1'}} + with self.network() as n: + with self.subnet(n): + res = self._create_port(self.fmt, n['network']['id'], + arg_list=(OVN_PROFILE,), + **binding) + port = self.deserialize(self.fmt, res) + self.assertEqual(binding[OVN_PROFILE], + port['port'][OVN_PROFILE]) + + def test_create_port_with_only_vtep_physical_switch(self): + binding = {OVN_PROFILE: {"vtep-physical-switch": 'psw'}} + with self.network() as n: + with self.subnet(n): + self._create_port(self.fmt, n['network']['id'], + arg_list=(OVN_PROFILE,), + expected_res_status=400, + **binding) + + def test_create_port_with_only_vtep_logical_switch(self): + binding = {OVN_PROFILE: {"vtep-logical-switch": 'lsw1'}} + with self.network() as n: + with self.subnet(n): + self._create_port(self.fmt, n['network']['id'], + arg_list=(OVN_PROFILE,), + expected_res_status=400, + **binding) + + def test_create_port_with_invalid_vtep_logical_switch(self): + binding = {OVN_PROFILE: {"vtep-logical-switch": 1234, + "vtep-physical-switch": "psw1"}} + with self.network() as n: + with self.subnet(n): + self._create_port(self.fmt, n['network']['id'], + arg_list=(OVN_PROFILE,), + expected_res_status=400, + **binding) + + def test_create_port_with_vtep_options_and_parent_name_tag(self): + binding = {OVN_PROFILE: {"vtep-logical-switch": "lsw1", + "vtep-physical-switch": "psw1", + "parent_name": "pname", "tag": 22}} + with self.network() as n: + with self.subnet(n): + self._create_port(self.fmt, n['network']['id'], + arg_list=(OVN_PROFILE,), + expected_res_status=400, + **binding) + + def test_create_port_with_vtep_options_and_check_vtep_keys(self): + port = { + 'id': 'foo-port', + 'device_owner': 'compute:None', + 'fixed_ips': [{'subnet_id': 
'foo-subnet', + 'ip_address': '10.0.0.11'}], + OVN_PROFILE: {"vtep-logical-switch": "lsw1", + "vtep-physical-switch": "psw1"} + } + ovn_port_info = ( + self.mech_driver._ovn_client._get_port_options(port)) + self.assertEqual(port[OVN_PROFILE]["vtep-physical-switch"], + ovn_port_info.options["vtep-physical-switch"]) + self.assertEqual(port[OVN_PROFILE]["vtep-logical-switch"], + ovn_port_info.options["vtep-logical-switch"]) + + +@mock.patch.object(ovn_client.OVNClient, '_is_virtual_port_supported', + lambda *args: True) +class TestOVNVVirtualPort(OVNMechanismDriverTestCase): + + def setUp(self): + super(TestOVNVVirtualPort, self).setUp() + self.context = context.get_admin_context() + self.nb_idl = self.mech_driver._ovn_client._nb_idl + self.net = self._make_network( + self.fmt, name='net1', admin_state_up=True)['network'] + self.subnet = self._make_subnet( + self.fmt, {'network': self.net}, + '10.0.0.1', '10.0.0.0/24')['subnet'] + + @mock.patch.object(ovn_client.OVNClient, 'get_virtual_port_parents') + def test_create_port_with_virtual_type_and_options(self, mock_get_parents): + fake_parents = ['parent-0', 'parent-1'] + mock_get_parents.return_value = fake_parents + port = {'id': 'virt-port', + 'mac_address': '00:00:00:00:00:00', + 'device_owner': '', + 'network_id': self.net['id'], + 'fixed_ips': [{'subnet_id': self.subnet['id'], + 'ip_address': '10.0.0.55'}]} + port_info = self.mech_driver._ovn_client._get_port_options( + port) + self.assertEqual(ovn_const.LSP_TYPE_VIRTUAL, port_info.type) + self.assertEqual( + '10.0.0.55', + port_info.options[ovn_const.LSP_OPTIONS_VIRTUAL_IP_KEY]) + self.assertIn( + 'parent-0', + port_info.options[ + ovn_const.LSP_OPTIONS_VIRTUAL_PARENTS_KEY]) + self.assertIn( + 'parent-1', + port_info.options[ + ovn_const.LSP_OPTIONS_VIRTUAL_PARENTS_KEY]) + + @mock.patch.object(db_base_plugin_v2.NeutronDbPluginV2, 'get_ports') + def _test_set_unset_virtual_port_type(self, mock_get_ports, unset=False): + cmd = self.nb_idl.set_lswitch_port_to_virtual_type + if unset: + cmd = self.nb_idl.unset_lswitch_port_to_virtual_type + + fake_txn = mock.Mock() + parent_port = {'id': 'parent-port', 'network_id': 'fake-network'} + port = {'id': 'virt-port'} + mock_get_ports.return_value = [port] + self.mech_driver._ovn_client._set_unset_virtual_port_type( + self.context, fake_txn, parent_port, ['10.0.0.55'], unset=unset) + + args = {'lport_name': 'virt-port', + 'virtual_parent': 'parent-port', + 'if_exists': True} + if not unset: + args['vip'] = '10.0.0.55' + + cmd.assert_called_once_with(**args) + + def test__set_unset_virtual_port_type_set(self): + self._test_set_unset_virtual_port_type(unset=False) + + def test__set_unset_virtual_port_type_unset(self): + self._test_set_unset_virtual_port_type(unset=True) + + def test_delete_virtual_port_parent(self): + self.nb_idl.ls_get.return_value.execute.return_value = ( + fakes.FakeOvsdbRow.create_one_ovsdb_row(attrs={'ports': []})) + virt_port = self._make_port(self.fmt, self.net['id'])['port'] + virt_ip = virt_port['fixed_ips'][0]['ip_address'] + parent = self._make_port( + self.fmt, self.net['id'], + allowed_address_pairs=[{'ip_address': virt_ip}])['port'] + fake_row = fakes.FakeOvsdbRow.create_one_ovsdb_row( + attrs={'name': virt_port['id'], + 'type': ovn_const.LSP_TYPE_VIRTUAL, + 'options': {ovn_const.LSP_OPTIONS_VIRTUAL_PARENTS_KEY: + parent['id']}}) + self.nb_idl.ls_get.return_value.execute.return_value = ( + mock.Mock(ports=[fake_row])) + + self.mech_driver._ovn_client.delete_port(self.context, parent['id']) + 
self.nb_idl.unset_lswitch_port_to_virtual_type.assert_called_once_with( + virt_port['id'], parent['id'], if_exists=True) diff --git a/setup.cfg b/setup.cfg index 941015f7a51..8c5f17d100d 100644 --- a/setup.cfg +++ b/setup.cfg @@ -95,6 +95,7 @@ neutron.ml2.mechanism_drivers = openvswitch = neutron.plugins.ml2.drivers.openvswitch.mech_driver.mech_openvswitch:OpenvswitchMechanismDriver l2population = neutron.plugins.ml2.drivers.l2pop.mech_driver:L2populationMechanismDriver sriovnicswitch = neutron.plugins.ml2.drivers.mech_sriov.mech_driver.mech_driver:SriovNicSwitchMechanismDriver + ovn = neutron.plugins.ml2.drivers.ovn.mech_driver.mech_driver:OVNMechanismDriver fake_agent = neutron.tests.unit.plugins.ml2.drivers.mech_fake_agent:FakeAgentMechanismDriver fake_agent_l3 = neutron.tests.unit.plugins.ml2.drivers.mech_fake_agent:FakeAgentMechanismDriverL3 another_fake_agent = neutron.tests.unit.plugins.ml2.drivers.mech_fake_agent:AnotherFakeAgentMechanismDriver