[OVN] Migrate the OVN mech driver

This patch moves the OVN ML2 mechanism driver, together with its OVN DB
synchronization and maintenance-task modules, into the Neutron tree.

Previous paths in networking-ovn tree:
./networking_ovn/ovn_db_sync.py ->
  ./neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/ovn_db_sync.py
./networking_ovn/ml2/mech_driver ->
  ./neutron/plugins/ml2/drivers/ovn/mech_driver/mech_driver.py
./networking_ovn/common/maintenance.py ->
  ./neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/maintenance.py

Co-Authored-By: Amitabha Biswas <abiswas@us.ibm.com>
Co-Authored-By: Aaron Rosen <aaronorosen@gmail.com>
Co-Authored-By: Andrew Austin <aaustin@redhat.com>
Co-Authored-By: Armando Migliaccio <armamig@gmail.com>
Co-Authored-By: Arslan Qadeer <arslanq@xgrid.co>
Co-Authored-By: Boden R <bodenvmw@gmail.com>
Co-Authored-By: Brian Haley <bhaley@redhat.com>
Co-Authored-By: Daniel Alvarez <dalvarez@redhat.com>
Co-Authored-By: Dong Jun <dongj@dtdream.com>
Co-Authored-By: Gal Sagie <gal.sagie@huawei.com>
Co-Authored-By: Gary Kotton <gkotton@vmware.com>
Co-Authored-By: Guoshuai Li <ligs@dtdream.com>
Co-Authored-By: Han Zhou <zhouhan@gmail.com>
Co-Authored-By: Hong Hui Xiao <xiaohhui@cn.ibm.com>
Co-Authored-By: Ihar Hrachyshka <ihrachys@redhat.com>
Co-Authored-By: Jakub Libosvar <libosvar@redhat.com>
Co-Authored-By: John Kasperski <jckasper@us.ibm.com>
Co-Authored-By: Kyle Mestery <mestery@mestery.com>
Co-Authored-By: Lucas Alvares Gomes <lucasagomes@gmail.com>
Co-Authored-By: Maciej Józefczyk <mjozefcz@redhat.com>
Co-Authored-By: Miguel Angel Ajo <majopela@redhat.com>
Co-Authored-By: Nirapada Ghosh <nghosh@us.ibm.com>
Co-Authored-By: Numan Siddique <nusiddiq@redhat.com>
Co-Authored-By: Richard Theis <rtheis@us.ibm.com>
Co-Authored-By: Rodolfo Alonso Hernandez <ralonsoh@redhat.com>
Co-Authored-By: Sean Mooney <work@seanmooney.info>
Co-Authored-By: Terry Wilson <twilson@redhat.com>
Co-Authored-By: venkata anil <anilvenkata@redhat.com>
Co-Authored-By: xurong00037997 <xu.rong@zte.com.cn>
Co-Authored-By: zhufl <zhu.fanglei@zte.com.cn>

Related-Blueprint: neutron-ovn-merge
Change-Id: I3ed2ff4008a3fed8534ee1eb42f0be2b6b779e83
This commit is contained in:
Rodolfo Alonso Hernandez 2019-12-06 11:43:10 +00:00 committed by Lucas Alvares Gomes
parent ac63c570a1
commit 331a195972
8 changed files with 7104 additions and 0 deletions

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,469 @@
# Copyright 2019 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import inspect
import threading
from futurist import periodics
from neutron_lib.api.definitions import external_net
from neutron_lib import constants as n_const
from neutron_lib import context as n_context
from neutron_lib import exceptions as n_exc
from oslo_config import cfg
from oslo_log import log
from oslo_utils import timeutils
from neutron.common.ovn import constants as ovn_const
from neutron.conf.plugins.ml2.drivers.ovn import ovn_conf
from neutron.db import ovn_hash_ring_db as hash_ring_db
from neutron.db import ovn_revision_numbers_db as revision_numbers_db
from neutron.plugins.ml2.drivers.ovn.mech_driver.ovsdb import ovn_db_sync
# Global oslo.config registry shared with the rest of neutron-server.
CONF = cfg.CONF

LOG = log.getLogger(__name__)

# How often (in seconds) the Neutron/OVN consistency check runs.
DB_CONSISTENCY_CHECK_INTERVAL = 300  # 5 minutes

# Labels used when logging the two categories of inconsistencies.
INCONSISTENCY_TYPE_CREATE_UPDATE = 'create/update'
INCONSISTENCY_TYPE_DELETE = 'delete'
class MaintenanceThread(object):
    """Runs registered futurist periodic tasks in a daemon thread.

    Usage: call :meth:`add_periodics` with objects whose methods are
    decorated with ``@periodics.periodic``, then :meth:`start`. Both
    :meth:`start` and :meth:`stop` are idempotent.
    """

    def __init__(self):
        self._callables = []
        self._thread = None
        self._worker = None

    def add_periodics(self, obj):
        """Register every periodic-task method found on ``obj``."""
        for name, member in inspect.getmembers(obj):
            if periodics.is_periodic(member):
                LOG.debug('Periodic task found: %(owner)s.%(member)s',
                          {'owner': obj.__class__.__name__, 'member': name})
                self._callables.append((member, (), {}))

    def start(self):
        """Start the worker thread; no-op if it is already running."""
        if self._thread is None:
            self._worker = periodics.PeriodicWorker(self._callables)
            self._thread = threading.Thread(target=self._worker.start)
            # Daemon thread so it never blocks interpreter shutdown.
            self._thread.daemon = True
            self._thread.start()

    def stop(self):
        """Stop the worker and join its thread; no-op if never started.

        Fix: previously an unconditional ``self._worker.stop()`` raised
        AttributeError when stop() was called before start().
        """
        if self._thread is None:
            return
        self._worker.stop()
        self._worker.wait()
        self._thread.join()
        self._worker = self._thread = None
class DBInconsistenciesPeriodics(object):
    """Periodic tasks that detect and repair Neutron/OVN DB divergence.

    Inconsistencies are tracked via the revision-numbers table; each
    resource type is mapped to the Neutron getter and the OVN
    create/update/delete helpers needed to repair it.  Repair tasks
    only run on the worker currently holding the OVSDB lock, so a
    single neutron-server worker performs fixes at any given time.
    """

    def __init__(self, ovn_client):
        self._ovn_client = ovn_client
        # FIXME(lucasagomes): We should not be accessing private
        # attributes like that, perhaps we should extend the OVNClient
        # class and create an interface for the locks ?
        self._nb_idl = self._ovn_client._nb_idl
        self._idl = self._nb_idl.idl
        # Contend for the OVSDB lock; only the holder runs fix-ups.
        self._idl.set_lock('ovn_db_inconsistencies_periodics')
        self._sync_timer = timeutils.StopWatch()

        # Per-resource-type dispatch table: how to fetch the resource
        # from Neutron and how to read/create/update/delete it in OVN.
        self._resources_func_map = {
            ovn_const.TYPE_NETWORKS: {
                'neutron_get': self._ovn_client._plugin.get_network,
                'ovn_get': self._nb_idl.get_lswitch,
                'ovn_create': self._ovn_client.create_network,
                'ovn_update': self._ovn_client.update_network,
                'ovn_delete': self._ovn_client.delete_network,
            },
            ovn_const.TYPE_PORTS: {
                'neutron_get': self._ovn_client._plugin.get_port,
                'ovn_get': self._nb_idl.get_lswitch_port,
                'ovn_create': self._ovn_client.create_port,
                'ovn_update': self._ovn_client.update_port,
                'ovn_delete': self._ovn_client.delete_port,
            },
            ovn_const.TYPE_FLOATINGIPS: {
                'neutron_get': self._ovn_client._l3_plugin.get_floatingip,
                'ovn_get': self._nb_idl.get_floatingip,
                'ovn_create': self._ovn_client.create_floatingip,
                'ovn_update': self._ovn_client.update_floatingip,
                'ovn_delete': self._ovn_client.delete_floatingip,
            },
            ovn_const.TYPE_ROUTERS: {
                'neutron_get': self._ovn_client._l3_plugin.get_router,
                'ovn_get': self._nb_idl.get_lrouter,
                'ovn_create': self._ovn_client.create_router,
                'ovn_update': self._ovn_client.update_router,
                'ovn_delete': self._ovn_client.delete_router,
            },
            # No 'ovn_update' entry: SG updates are handled by bumping
            # the revision number only (see _fix_create_update).
            ovn_const.TYPE_SECURITY_GROUPS: {
                'neutron_get': self._ovn_client._plugin.get_security_group,
                'ovn_get': self._get_security_group,
                'ovn_create': self._ovn_client.create_security_group,
                'ovn_delete': self._ovn_client.delete_security_group,
            },
            # No 'ovn_update' entry: SG rules do not support updates.
            ovn_const.TYPE_SECURITY_GROUP_RULES: {
                'neutron_get':
                    self._ovn_client._plugin.get_security_group_rule,
                'ovn_get': self._nb_idl.get_acl_by_id,
                'ovn_create': self._ovn_client.create_security_group_rule,
                'ovn_delete': self._ovn_client.delete_security_group_rule,
            },
            ovn_const.TYPE_ROUTER_PORTS: {
                'neutron_get':
                    self._ovn_client._plugin.get_port,
                'ovn_get': self._nb_idl.get_lrouter_port,
                'ovn_create': self._create_lrouter_port,
                'ovn_update': self._ovn_client.update_router_port,
                'ovn_delete': self._ovn_client.delete_router_port,
            },
        }

    def _get_security_group(self, uuid):
        # A security group may be backed either by an Address Set or a
        # Port Group in the NB DB; try both representations.
        return (self._nb_idl.get_address_set(uuid) or
                self._nb_idl.get_port_group(uuid))

    @property
    def has_lock(self):
        # True when this worker currently holds the OVSDB lock.
        return not self._idl.is_lock_contended

    def _fix_create_update(self, context, row):
        """Re-create or update in OVN the resource referenced by ``row``.

        :param context: admin Neutron request context.
        :param row: revision-numbers table row describing the
                    inconsistent resource.
        """
        res_map = self._resources_func_map[row.resource_type]
        try:
            # Get the latest version of the resource in Neutron DB
            n_obj = res_map['neutron_get'](context, row.resource_uuid)
        except n_exc.NotFound:
            LOG.warning('Skip fixing resource %(res_uuid)s (type: '
                        '%(res_type)s). Resource does not exist in Neutron '
                        'database anymore', {'res_uuid': row.resource_uuid,
                                             'res_type': row.resource_type})
            return

        ovn_obj = res_map['ovn_get'](row.resource_uuid)
        if not ovn_obj:
            # Missing from OVN entirely: re-create it from Neutron data.
            res_map['ovn_create'](n_obj)
        else:
            if row.resource_type == ovn_const.TYPE_SECURITY_GROUP_RULES:
                LOG.error("SG rule %s found with a revision number while "
                          "this resource doesn't support updates",
                          row.resource_uuid)
            elif row.resource_type == ovn_const.TYPE_SECURITY_GROUPS:
                # In OVN, we don't care about updates to security groups,
                # so just bump the revision number to whatever it's
                # supposed to be.
                revision_numbers_db.bump_revision(context, n_obj,
                                                  row.resource_type)
            else:
                ext_ids = getattr(ovn_obj, 'external_ids', {})
                ovn_revision = int(ext_ids.get(
                    ovn_const.OVN_REV_NUM_EXT_ID_KEY, -1))
                # If the resource exists in the OVN DB but its revision
                # number differs from the Neutron DB, update it.
                if ovn_revision != n_obj['revision_number']:
                    res_map['ovn_update'](n_obj)
                else:
                    # If the resource exists and the revision number
                    # is equal on both databases just bump the revision on
                    # the cache table.
                    revision_numbers_db.bump_revision(context, n_obj,
                                                      row.resource_type)

    def _fix_delete(self, context, row):
        """Delete from OVN the resource referenced by ``row``.

        If the resource is already gone from OVN, only the revision
        bookkeeping entry is removed.
        """
        res_map = self._resources_func_map[row.resource_type]
        ovn_obj = res_map['ovn_get'](row.resource_uuid)
        if not ovn_obj:
            revision_numbers_db.delete_revision(
                context, row.resource_uuid, row.resource_type)
        else:
            res_map['ovn_delete'](row.resource_uuid)

    def _fix_create_update_subnet(self, context, row):
        """Create or update in OVN the subnet referenced by ``row``."""
        # Get the latest version of the subnet in the Neutron DB
        sn_db_obj = self._ovn_client._plugin.get_subnet(
            context, row.resource_uuid)
        n_db_obj = self._ovn_client._plugin.get_network(
            context, sn_db_obj['network_id'])

        # INITIAL_REV_NUM means the subnet was never created in OVN.
        if row.revision_number == ovn_const.INITIAL_REV_NUM:
            self._ovn_client.create_subnet(sn_db_obj, n_db_obj)
        else:
            self._ovn_client.update_subnet(sn_db_obj, n_db_obj)

    # The migration will run just once per neutron-server instance. If the
    # lock is held by some other neutron-server instance in the cloud, we'll
    # attempt to perform the migration every 10 seconds until completed.
    @periodics.periodic(spacing=10, run_immediately=True)
    def migrate_to_port_groups(self):
        """Perform the migration from Address Sets to Port Groups. """
        # TODO(dalvarez): Remove this in U cycle when we're sure that all
        # versions are running using Port Groups (and OVS >= 2.10).

        # If Port Groups are not supported or we've already migrated, we
        # don't need to attempt to migrate again.
        if (not self._nb_idl.is_port_groups_supported() or
                not self._nb_idl.get_address_sets()):
            raise periodics.NeverAgain()

        # Only the worker holding a valid lock within OVSDB will perform the
        # migration.
        if not self.has_lock:
            return

        admin_context = n_context.get_admin_context()
        nb_sync = ovn_db_sync.OvnNbSynchronizer(
            self._ovn_client._plugin, self._nb_idl, self._ovn_client._sb_idl,
            None, None)
        nb_sync.migrate_to_port_groups(admin_context)
        raise periodics.NeverAgain()

    def _log_maintenance_inconsistencies(self, create_update_inconsistencies,
                                         delete_inconsistencies):
        """Log, at debug level, a per-type count of inconsistencies."""
        if not CONF.debug:
            # Counting is pointless when debug logging is disabled.
            return

        def _log(inconsistencies, type_):
            if not inconsistencies:
                return

            # Tally inconsistencies per resource type.
            c = {}
            for f in inconsistencies:
                if f.resource_type not in c:
                    c[f.resource_type] = 1
                else:
                    c[f.resource_type] += 1

            fail_str = ', '.join('{}={}'.format(k, v) for k, v in c.items())
            LOG.debug('Maintenance task: Number of inconsistencies '
                      'found at %(type_)s: %(fail_str)s',
                      {'type_': type_, 'fail_str': fail_str})

        _log(create_update_inconsistencies, INCONSISTENCY_TYPE_CREATE_UPDATE)
        _log(delete_inconsistencies, INCONSISTENCY_TYPE_DELETE)

    @periodics.periodic(spacing=DB_CONSISTENCY_CHECK_INTERVAL,
                        run_immediately=True)
    def check_for_inconsistencies(self):
        """Find and fix create/update and delete inconsistencies."""
        # Only the worker holding a valid lock within OVSDB will run
        # this periodic
        if not self.has_lock:
            return

        admin_context = n_context.get_admin_context()
        create_update_inconsistencies = (
            revision_numbers_db.get_inconsistent_resources(admin_context))
        delete_inconsistencies = (
            revision_numbers_db.get_deleted_resources(admin_context))
        if not any([create_update_inconsistencies, delete_inconsistencies]):
            LOG.debug('Maintenance task: No inconsistencies found. Skipping')
            return

        LOG.debug('Maintenance task: Synchronizing Neutron '
                  'and OVN databases')
        self._log_maintenance_inconsistencies(create_update_inconsistencies,
                                              delete_inconsistencies)
        self._sync_timer.restart()

        dbg_log_msg = ('Maintenance task: Fixing resource %(res_uuid)s '
                       '(type: %(res_type)s) at %(type_)s')
        # Fix the create/update resources inconsistencies
        for row in create_update_inconsistencies:
            LOG.debug(dbg_log_msg, {'res_uuid': row.resource_uuid,
                                    'res_type': row.resource_type,
                                    'type_': INCONSISTENCY_TYPE_CREATE_UPDATE})
            try:
                # NOTE(lucasagomes): The way to fix subnets is bit
                # different than other resources. A subnet in OVN language
                # is just a DHCP rule but, this rule only exist if the
                # subnet in Neutron has the "enable_dhcp" attribute set
                # to True. So, it's possible to have a consistent subnet
                # resource even when it does not exist in the OVN database.
                if row.resource_type == ovn_const.TYPE_SUBNETS:
                    self._fix_create_update_subnet(admin_context, row)
                else:
                    self._fix_create_update(admin_context, row)
            except Exception:
                # Best-effort: log and continue with the next resource.
                LOG.exception('Maintenance task: Failed to fix resource '
                              '%(res_uuid)s (type: %(res_type)s)',
                              {'res_uuid': row.resource_uuid,
                               'res_type': row.resource_type})

        # Fix the deleted resources inconsistencies
        for row in delete_inconsistencies:
            LOG.debug(dbg_log_msg, {'res_uuid': row.resource_uuid,
                                    'res_type': row.resource_type,
                                    'type_': INCONSISTENCY_TYPE_DELETE})
            try:
                if row.resource_type == ovn_const.TYPE_SUBNETS:
                    self._ovn_client.delete_subnet(row.resource_uuid)
                else:
                    self._fix_delete(admin_context, row)
            except Exception:
                LOG.exception('Maintenance task: Failed to fix deleted '
                              'resource %(res_uuid)s (type: %(res_type)s)',
                              {'res_uuid': row.resource_uuid,
                               'res_type': row.resource_type})

        self._sync_timer.stop()
        LOG.info('Maintenance task: Synchronization finished '
                 '(took %.2f seconds)', self._sync_timer.elapsed())

    def _create_lrouter_port(self, port):
        """Attach the given Neutron port to its router in OVN."""
        admin_context = n_context.get_admin_context()
        router_id = port['device_id']
        # may_exist=True makes the call idempotent if the interface was
        # already (partially) added.
        self._ovn_client._l3_plugin.add_router_interface(
            admin_context, router_id, {'port_id': port['id']}, may_exist=True)

    def _check_subnet_global_dhcp_opts(self):
        """Return DHCP-enabled subnets whose OVN options drifted from the
        configured global DHCPv4/v6 options."""
        inconsistent_subnets = []
        admin_context = n_context.get_admin_context()
        subnet_filter = {'enable_dhcp': [True]}
        neutron_subnets = self._ovn_client._plugin.get_subnets(
            admin_context, subnet_filter)
        global_v4_opts = ovn_conf.get_global_dhcpv4_opts()
        global_v6_opts = ovn_conf.get_global_dhcpv6_opts()
        LOG.debug('Checking %s subnets for global DHCP option consistency',
                  len(neutron_subnets))

        for subnet in neutron_subnets:
            ovn_dhcp_opts = self._nb_idl.get_subnet_dhcp_options(
                subnet['id'])['subnet']
            inconsistent_opts = []
            if ovn_dhcp_opts:
                # Compare each configured global option against the value
                # currently stored in OVN for this subnet.
                if subnet['ip_version'] == n_const.IP_VERSION_4:
                    for opt, value in global_v4_opts.items():
                        if value != ovn_dhcp_opts['options'].get(opt, None):
                            inconsistent_opts.append(opt)
                if subnet['ip_version'] == n_const.IP_VERSION_6:
                    for opt, value in global_v6_opts.items():
                        if value != ovn_dhcp_opts['options'].get(opt, None):
                            inconsistent_opts.append(opt)
            if inconsistent_opts:
                LOG.debug('Subnet %s has inconsistent DHCP opts: %s',
                          subnet['id'], inconsistent_opts)
                inconsistent_subnets.append(subnet)
        return inconsistent_subnets

    # A static spacing value is used here, but this method will only run
    # once per lock due to the use of periodics.NeverAgain().
    @periodics.periodic(spacing=600,
                        run_immediately=True)
    def check_global_dhcp_opts(self):
        """One-shot reconciliation of global DHCP options on all subnets."""
        # This periodic task is included in DBInconsistenciesPeriodics since
        # it uses the lock to ensure only one worker is executing
        if not self.has_lock:
            return

        if (not ovn_conf.get_global_dhcpv4_opts() and
                not ovn_conf.get_global_dhcpv6_opts()):
            # No need to scan the subnets if the settings are unset.
            raise periodics.NeverAgain()

        LOG.debug('Maintenance task: Checking DHCP options on subnets')
        self._sync_timer.restart()
        fix_subnets = self._check_subnet_global_dhcp_opts()
        if fix_subnets:
            admin_context = n_context.get_admin_context()
            LOG.debug('Triggering update for %s subnets', len(fix_subnets))
            for subnet in fix_subnets:
                neutron_net = self._ovn_client._plugin.get_network(
                    admin_context, subnet['network_id'])
                try:
                    self._ovn_client.update_subnet(subnet, neutron_net)
                except Exception:
                    # Best-effort: keep fixing the remaining subnets.
                    LOG.exception('Failed to update subnet %s',
                                  subnet['id'])

        self._sync_timer.stop()
        LOG.info('Maintenance task: DHCP options check finished '
                 '(took %.2f seconds)', self._sync_timer.elapsed())
        raise periodics.NeverAgain()

    # A static spacing value is used here, but this method will only run
    # once per lock due to the use of periodics.NeverAgain().
    @periodics.periodic(spacing=1800, run_immediately=True)
    def check_metadata_ports(self):
        """One-shot creation of the OVN metadata port on every network."""
        # If OVN metadata is disabled do not run this task again
        if not ovn_conf.is_ovn_metadata_enabled():
            raise periodics.NeverAgain()

        # Make sure that only one worker is executing this
        if not self.has_lock:
            return

        admin_context = n_context.get_admin_context()
        for n in self._ovn_client._plugin.get_networks(admin_context):
            self._ovn_client.create_metadata_port(admin_context, n)

        raise periodics.NeverAgain()

    # TODO(lucasagomes): Remove this in the U cycle
    # A static spacing value is used here, but this method will only run
    # once per lock due to the use of periodics.NeverAgain().
    @periodics.periodic(spacing=600, run_immediately=True)
    def check_for_port_security_unknown_address(self):
        """One-shot fix of the 'unknown' address on logical switch ports.

        Ports without port security and with an empty type must carry the
        'unknown' address; all other combinations must not.
        """
        if not self.has_lock:
            return

        for port in self._nb_idl.lsp_list().execute(check_error=True):
            # Localnet ports are not subject to port security handling.
            if port.type == ovn_const.LSP_TYPE_LOCALNET:
                continue

            addresses = port.addresses
            type_ = port.type.strip()
            if not port.port_security:
                if not type_ and ovn_const.UNKNOWN_ADDR not in addresses:
                    addresses.append(ovn_const.UNKNOWN_ADDR)
                elif type_ and ovn_const.UNKNOWN_ADDR in addresses:
                    addresses.remove(ovn_const.UNKNOWN_ADDR)
            else:
                # Port security enabled: 'unknown' is never allowed,
                # regardless of the port type.
                if type_ and ovn_const.UNKNOWN_ADDR in addresses:
                    addresses.remove(ovn_const.UNKNOWN_ADDR)
                elif not type_ and ovn_const.UNKNOWN_ADDR in addresses:
                    addresses.remove(ovn_const.UNKNOWN_ADDR)

            # NOTE(review): the address list is written back for every
            # port, even when nothing changed above — presumably harmless
            # but worth confirming against OVSDB update cost.
            self._nb_idl.lsp_set_addresses(
                port.name, addresses=addresses).execute(check_error=True)

        raise periodics.NeverAgain()

    # A static spacing value is used here, but this method will only run
    # once per lock due to the use of periodics.NeverAgain().
    @periodics.periodic(spacing=600, run_immediately=True)
    def check_for_fragmentation_support(self):
        """One-shot refresh of the gateway MTU on all external networks."""
        if not self.has_lock:
            return

        context = n_context.get_admin_context()
        for net in self._ovn_client._plugin.get_networks(
                context, {external_net.EXTERNAL: [True]}):
            self._ovn_client.set_gateway_mtu(context, net)

        raise periodics.NeverAgain()
class HashRingHealthCheckPeriodics(object):
    """Keeps this host's OVN hash-ring entries marked as alive."""

    def __init__(self, group):
        # One admin context is created up front and reused on every touch.
        self.ctx = n_context.get_admin_context()
        self._group = group

    @periodics.periodic(spacing=ovn_const.HASH_RING_TOUCH_INTERVAL)
    def touch_hash_ring_nodes(self):
        """Refresh the heartbeat of this host's nodes in the hash ring.

        Deliberately not guarded by the OVSDB lock: every maintenance
        worker on every instance must report its own liveness, not just
        the current lock holder.
        """
        hash_ring_db.touch_nodes_from_host(self.ctx, self._group)

File diff suppressed because it is too large Load Diff

View File

@ -164,6 +164,7 @@ class FakeOvsdbSbOvnIdl(object):
self.get_gateway_chassis_from_cms_options = mock.Mock()
self.is_col_present = mock.Mock()
self.is_col_present.return_value = False
self.db_set = mock.Mock()
class FakeOvsdbTransaction(object):

View File

@ -0,0 +1,271 @@
# Copyright 2019 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from futurist import periodics
from neutron_lib import context
from neutron.common.ovn import constants
from neutron.common.ovn import utils
from neutron.conf.plugins.ml2.drivers.ovn import ovn_conf
from neutron.db import ovn_revision_numbers_db
from neutron.plugins.ml2.drivers.ovn.mech_driver.ovsdb import maintenance
from neutron.plugins.ml2.drivers.ovn.mech_driver.ovsdb import ovn_db_sync
from neutron.tests.unit.plugins.ml2 import test_security_group as test_sg
from neutron.tests.unit import testlib_api
@mock.patch.object(maintenance.DBInconsistenciesPeriodics,
                   'has_lock', mock.PropertyMock(return_value=True))
class TestDBInconsistenciesPeriodics(testlib_api.SqlTestCaseLight,
                                     test_sg.Ml2SecurityGroupsTestCase):
    """Tests for maintenance.DBInconsistenciesPeriodics.

    The class-level patch makes every test behave as if this worker
    holds the OVSDB lock, unless a test overrides it explicitly.
    """

    def setUp(self):
        super(TestDBInconsistenciesPeriodics, self).setUp()
        # Real network/port rows in the test DB; the OVN side is mocked.
        self.net = self._make_network(
            self.fmt, name='net1', admin_state_up=True)['network']
        self.port = self._make_port(
            self.fmt, self.net['id'], name='port1')['port']
        self.fake_ovn_client = mock.Mock()
        self.periodic = maintenance.DBInconsistenciesPeriodics(
            self.fake_ovn_client)
        self.ctx = context.get_admin_context()

    @mock.patch.object(maintenance.DBInconsistenciesPeriodics,
                       '_fix_create_update')
    @mock.patch.object(ovn_revision_numbers_db, 'get_inconsistent_resources')
    def test_check_for_inconsistencies(self, mock_get_incon_res, mock_fix_net):
        # One inconsistent network must trigger exactly one fix call.
        fake_row = mock.Mock(resource_type=constants.TYPE_NETWORKS)
        mock_get_incon_res.return_value = [fake_row, ]
        self.periodic.check_for_inconsistencies()
        mock_fix_net.assert_called_once_with(mock.ANY, fake_row)

    def _test_migrate_to_port_groups_helper(self, pg_supported, a_sets,
                                            migration_expected, never_again):
        """Drive migrate_to_port_groups() with the given NB DB state and
        assert whether the migration ran and whether it reschedules."""
        self.fake_ovn_client._nb_idl.is_port_groups_supported.return_value = (
            pg_supported)
        self.fake_ovn_client._nb_idl.get_address_sets.return_value = a_sets
        with mock.patch.object(ovn_db_sync.OvnNbSynchronizer,
                               'migrate_to_port_groups') as mtpg:
            if never_again:
                self.assertRaises(periodics.NeverAgain,
                                  self.periodic.migrate_to_port_groups)
            else:
                self.periodic.migrate_to_port_groups()

            if migration_expected:
                mtpg.assert_called_once_with(mock.ANY)
            else:
                mtpg.assert_not_called()

    def test_migrate_to_port_groups_port_groups_not_supported(self):
        self._test_migrate_to_port_groups_helper(pg_supported=False,
                                                 a_sets=None,
                                                 migration_expected=False,
                                                 never_again=True)

    def test_migrate_to_port_groups_not_needed(self):
        self._test_migrate_to_port_groups_helper(pg_supported=True,
                                                 a_sets=None,
                                                 migration_expected=False,
                                                 never_again=True)

    def test_migrate_to_port_groups(self):
        # Check normal migration path: if port groups are supported by the
        # schema and the migration has to be done, it will take place and
        # won't be attempted in the future.
        self._test_migrate_to_port_groups_helper(pg_supported=True,
                                                 a_sets=['as1', 'as2'],
                                                 migration_expected=True,
                                                 never_again=True)

    def test_migrate_to_port_groups_no_lock(self):
        with mock.patch.object(maintenance.DBInconsistenciesPeriodics,
                               'has_lock', mock.PropertyMock(
                                   return_value=False)):
            # Check that if this worker doesn't have the lock, it won't
            # perform the migration and it will try again later.
            self._test_migrate_to_port_groups_helper(pg_supported=True,
                                                     a_sets=['as1', 'as2'],
                                                     migration_expected=False,
                                                     never_again=False)

    def _test_fix_create_update_network(self, ovn_rev, neutron_rev):
        """Exercise _fix_create_update() for a network at the given OVN
        and Neutron revision numbers."""
        self.net['revision_number'] = neutron_rev

        # Create an entry to the revision_numbers table and assert the
        # initial revision_number for our test object is the expected
        ovn_revision_numbers_db.create_initial_revision(
            self.ctx, self.net['id'], constants.TYPE_NETWORKS,
            revision_number=ovn_rev)
        row = ovn_revision_numbers_db.get_revision_row(self.ctx,
                                                       self.net['id'])
        self.assertEqual(ovn_rev, row.revision_number)

        if ovn_rev < 0:
            # Simulate the network missing from the OVN NB DB.
            self.fake_ovn_client._nb_idl.get_lswitch.return_value = None
        else:
            fake_ls = mock.Mock(external_ids={
                constants.OVN_REV_NUM_EXT_ID_KEY: ovn_rev})
            self.fake_ovn_client._nb_idl.get_lswitch.return_value = fake_ls

        self.fake_ovn_client._plugin.get_network.return_value = self.net
        self.periodic._fix_create_update(self.ctx, row)

        # Since the revision number was < 0, make sure create_network()
        # is invoked with the latest version of the object in the neutron
        # database
        if ovn_rev < 0:
            self.fake_ovn_client.create_network.assert_called_once_with(
                self.net)
        # If the revision number is > 0 it means that the object already
        # exist and we just need to update to match the latest in the
        # neutron database so, update_network() should be called.
        else:
            self.fake_ovn_client.update_network.assert_called_once_with(
                self.net)

    def test_fix_network_create(self):
        self._test_fix_create_update_network(ovn_rev=-1, neutron_rev=2)

    def test_fix_network_update(self):
        self._test_fix_create_update_network(ovn_rev=5, neutron_rev=7)

    def _test_fix_create_update_port(self, ovn_rev, neutron_rev):
        """Exercise _fix_create_update() for a port at the given OVN and
        Neutron revision numbers."""
        self.port['revision_number'] = neutron_rev

        # Create an entry to the revision_numbers table and assert the
        # initial revision_number for our test object is the expected
        ovn_revision_numbers_db.create_initial_revision(
            self.ctx, self.port['id'], constants.TYPE_PORTS,
            revision_number=ovn_rev)
        row = ovn_revision_numbers_db.get_revision_row(self.ctx,
                                                       self.port['id'])
        self.assertEqual(ovn_rev, row.revision_number)

        if ovn_rev < 0:
            # Simulate the port missing from the OVN NB DB.
            self.fake_ovn_client._nb_idl.get_lswitch_port.return_value = None
        else:
            fake_lsp = mock.Mock(external_ids={
                constants.OVN_REV_NUM_EXT_ID_KEY: ovn_rev})
            self.fake_ovn_client._nb_idl.get_lswitch_port.return_value = (
                fake_lsp)

        self.fake_ovn_client._plugin.get_port.return_value = self.port
        self.periodic._fix_create_update(self.ctx, row)

        # Since the revision number was < 0, make sure create_port()
        # is invoked with the latest version of the object in the neutron
        # database
        if ovn_rev < 0:
            self.fake_ovn_client.create_port.assert_called_once_with(
                self.port)
        # If the revision number is > 0 it means that the object already
        # exist and we just need to update to match the latest in the
        # neutron database so, update_port() should be called.
        else:
            self.fake_ovn_client.update_port.assert_called_once_with(
                self.port)

    def test_fix_port_create(self):
        self._test_fix_create_update_port(ovn_rev=-1, neutron_rev=2)

    def test_fix_port_update(self):
        self._test_fix_create_update_port(ovn_rev=5, neutron_rev=7)

    @mock.patch.object(ovn_revision_numbers_db, 'bump_revision')
    def _test_fix_security_group_create(self, mock_bump, revision_number):
        """Exercise _fix_create_update() for a security group; SGs are
        never updated in OVN, only created or revision-bumped."""
        sg_name = utils.ovn_addrset_name('fake_id', 'ip4')
        sg = self._make_security_group(self.fmt, sg_name, '')['security_group']

        ovn_revision_numbers_db.create_initial_revision(
            self.ctx, sg['id'], constants.TYPE_SECURITY_GROUPS,
            revision_number=revision_number)
        row = ovn_revision_numbers_db.get_revision_row(self.ctx, sg['id'])
        self.assertEqual(revision_number, row.revision_number)

        if revision_number < 0:
            # Simulate the SG missing from both OVN representations.
            self.fake_ovn_client._nb_idl.get_address_set.return_value = None
            self.fake_ovn_client._nb_idl.get_port_group.return_value = None
        else:
            self.fake_ovn_client._nb_idl.get_address_set.return_value = (
                mock.sentinel.AddressSet)

        self.fake_ovn_client._plugin.get_security_group.return_value = sg
        self.periodic._fix_create_update(self.ctx, row)

        if revision_number < 0:
            self.fake_ovn_client.create_security_group.assert_called_once_with(
                sg)
        else:
            # If the object already exist let's make sure we just bump
            # the revision number in the ovn_revision_numbers table
            self.assertFalse(self.fake_ovn_client.create_security_group.called)
            mock_bump.assert_called_once_with(
                self.ctx, sg, constants.TYPE_SECURITY_GROUPS)

    def test_fix_security_group_create_doesnt_exist(self):
        self._test_fix_security_group_create(revision_number=-1)

    def test_fix_security_group_create_version_mismatch(self):
        self._test_fix_security_group_create(revision_number=2)

    def test__create_lrouter_port(self):
        port = {'id': 'port-id',
                'device_id': 'router-id'}
        self.periodic._create_lrouter_port(port)
        l3_mock = self.periodic._ovn_client._l3_plugin
        l3_mock.add_router_interface.assert_called_once_with(
            mock.ANY, port['device_id'], {'port_id': port['id']},
            may_exist=True)

    @mock.patch.object(maintenance.LOG, 'debug')
    def test__log_maintenance_inconsistencies(self, mock_log):
        ovn_conf.cfg.CONF.set_override('debug', True)

        # Create fake inconsistencies: 2 networks, 4 subnets and 8 ports
        incst = []
        incst += [mock.Mock(resource_type=constants.TYPE_NETWORKS)] * 2
        incst += [mock.Mock(resource_type=constants.TYPE_SUBNETS)] * 4
        incst += [mock.Mock(resource_type=constants.TYPE_PORTS)] * 8

        # Create fake inconsistencies for delete: 3 routers and 6 router ports
        incst_del = []
        incst_del += [mock.Mock(resource_type=constants.TYPE_ROUTERS)] * 3
        incst_del += [mock.Mock(resource_type=constants.TYPE_ROUTER_PORTS)] * 6

        self.periodic._log_maintenance_inconsistencies(incst, incst_del)

        # Assert LOG.debug was called twice
        self.assertEqual(2, len(mock_log.call_args_list))

        # Assert the log matches the number of inconsistencies
        fail_str_create_update = mock_log.call_args_list[0][0][1]['fail_str']
        self.assertIn('networks=2', fail_str_create_update)
        self.assertIn('subnets=4', fail_str_create_update)
        self.assertIn('ports=8', fail_str_create_update)

        fail_str_delete = mock_log.call_args_list[1][0][1]['fail_str']
        self.assertIn('routers=3', fail_str_delete)
        self.assertIn('router_ports=6', fail_str_delete)

    @mock.patch.object(maintenance.LOG, 'debug')
    def test__log_maintenance_inconsistencies_debug_disabled(self, mock_log):
        # With debug logging off, nothing should be counted or logged.
        ovn_conf.cfg.CONF.set_override('debug', False)

        incst = [mock.Mock(resource_type=constants.TYPE_NETWORKS)] * 2
        self.periodic._log_maintenance_inconsistencies(incst, [])
        self.assertFalse(mock_log.called)

View File

@ -0,0 +1,984 @@
# Copyright 2019 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import mock
from neutron_lib import constants as const
from neutron.common.ovn import acl
from neutron.common.ovn import constants as ovn_const
from neutron.plugins.ml2.drivers.ovn.mech_driver.ovsdb import impl_idl_ovn
from neutron.plugins.ml2.drivers.ovn.mech_driver.ovsdb import ovn_client
from neutron.plugins.ml2.drivers.ovn.mech_driver.ovsdb import ovn_db_sync
from neutron.services.ovn_l3 import plugin as ovn_plugin
from neutron.tests.unit.plugins.ml2.drivers.ovn.mech_driver import \
test_mech_driver
# Minimal stand-in for an OVN port record; the tests only need 'name'.
OvnPortInfo = collections.namedtuple('OvnPortInfo', 'name')
@mock.patch.object(ovn_plugin.OVNL3RouterPlugin, '_sb_ovn', mock.Mock())
class TestOvnNbSyncML2(test_mech_driver.OVNMechanismDriverTestCase):
    """Tests for the OVN northbound database synchronizer.

    setUp() builds canned Neutron-side resources (networks, subnets,
    ports, security groups, routers, floating IPs) together with
    deliberately out-of-sync OVN-side counterparts; the helpers below run
    OvnNbSynchronizer.do_sync() against them and assert exactly which
    create/update/delete calls the synchronizer issues.
    """

    # Use the OVN L3 service plugin so the router sync paths are exercised.
    l3_plugin = 'ovn-router'

    def setUp(self):
        super(TestOvnNbSyncML2, self).setUp()
        # Single subnet returned by the patched acl._get_subnet_from_cache.
        self.subnet = {'cidr': '10.0.0.0/24',
                       'id': 'subnet1',
                       'subnetpool_id': None,
                       'name': 'private-subnet',
                       'enable_dhcp': True,
                       'network_id': 'n1',
                       'tenant_id': 'tenant1',
                       'gateway_ip': '10.0.0.1',
                       'ip_version': 4,
                       'shared': False}
        # Canned side_effect values for the patched acl.acl_remote_group_id.
        self.matches = ["", "", "", ""]
        # Neutron networks; n1 and n4 carry provider attributes.
        self.networks = [{'id': 'n1',
                          'mtu': 1450,
                          'provider:physical_network': 'physnet1',
                          'provider:segmentation_id': 1000},
                         {'id': 'n2',
                          'mtu': 1450},
                         {'id': 'n4',
                          'mtu': 1450,
                          'provider:physical_network': 'physnet2'}]
        self.subnets = [{'id': 'n1-s1',
                         'network_id': 'n1',
                         'enable_dhcp': True,
                         'cidr': '10.0.0.0/24',
                         'tenant_id': 'tenant1',
                         'gateway_ip': '10.0.0.1',
                         'dns_nameservers': [],
                         'host_routes': [],
                         'ip_version': 4},
                        {'id': 'n1-s2',
                         'network_id': 'n1',
                         'enable_dhcp': True,
                         'cidr': 'fd79:e1c:a55::/64',
                         'tenant_id': 'tenant1',
                         'gateway_ip': 'fd79:e1c:a55::1',
                         'dns_nameservers': [],
                         'host_routes': [],
                         'ip_version': 6},
                        {'id': 'n2',
                         'network_id': 'n2',
                         'enable_dhcp': True,
                         'cidr': '20.0.0.0/24',
                         'tenant_id': 'tenant1',
                         'gateway_ip': '20.0.0.1',
                         'dns_nameservers': [],
                         'host_routes': [],
                         'ip_version': 4}]
        self.security_groups = [
            {'id': 'sg1', 'tenant_id': 'tenant1',
             'security_group_rules': [{'remote_group_id': None,
                                       'direction': 'ingress',
                                       'remote_ip_prefix': '0.0.0.0/0',
                                       'protocol': 'tcp',
                                       'ethertype': 'IPv4',
                                       'tenant_id': 'tenant1',
                                       'port_range_max': 65535,
                                       'port_range_min': 1,
                                       'id': 'ruleid1',
                                       'security_group_id': 'sg1'}],
             'name': 'all-tcp'},
            {'id': 'sg2', 'tenant_id': 'tenant1',
             'security_group_rules': [{'remote_group_id': 'sg2',
                                       'direction': 'egress',
                                       'remote_ip_prefix': '0.0.0.0/0',
                                       'protocol': 'tcp',
                                       'ethertype': 'IPv4',
                                       'tenant_id': 'tenant1',
                                       'port_range_max': 65535,
                                       'port_range_min': 1,
                                       'id': 'ruleid1',
                                       'security_group_id': 'sg2'}],
             'name': 'all-tcpe'}]
        # OVN-side Port Groups: pg_sg1 maps to sg1, pg_unknown_del does
        # not match any expected Neutron port group name, and
        # neutron_pg_drop has no Neutron external_ids at all.
        self.port_groups_ovn = [mock.Mock(), mock.Mock(), mock.Mock()]
        self.port_groups_ovn[0].configure_mock(
            name='pg_sg1',
            external_ids={ovn_const.OVN_SG_EXT_ID_KEY: 'sg1'},
            ports=[],
            acls=[])
        self.port_groups_ovn[1].configure_mock(
            name='pg_unknown_del',
            external_ids={ovn_const.OVN_SG_EXT_ID_KEY: 'sg2'},
            ports=[],
            acls=[])
        self.port_groups_ovn[2].configure_mock(
            name='neutron_pg_drop',
            external_ids=[],
            ports=[],
            acls=[])
        # Neutron ports; fp1 is a floating IP port on the external network.
        self.ports = [
            {'id': 'p1n1',
             'device_owner': 'compute:None',
             'fixed_ips':
                 [{'subnet_id': 'b142f5e3-d434-4740-8e88-75e8e5322a40',
                   'ip_address': '10.0.0.4'},
                  {'subnet_id': 'subnet1',
                   'ip_address': 'fd79:e1c:a55::816:eff:eff:ff2'}],
             'security_groups': ['sg1'],
             'network_id': 'n1'},
            {'id': 'p2n1',
             'device_owner': 'compute:None',
             'fixed_ips':
                 [{'subnet_id': 'b142f5e3-d434-4740-8e88-75e8e5322a40',
                   'ip_address': '10.0.0.4'},
                  {'subnet_id': 'subnet1',
                   'ip_address': 'fd79:e1c:a55::816:eff:eff:ff2'}],
             'security_groups': ['sg2'],
             'network_id': 'n1',
             'extra_dhcp_opts': [{'ip_version': 6,
                                  'opt_name': 'domain-search',
                                  'opt_value': 'foo-domain'}]},
            {'id': 'p1n2',
             'device_owner': 'compute:None',
             'fixed_ips':
                 [{'subnet_id': 'b142f5e3-d434-4740-8e88-75e8e5322a40',
                   'ip_address': '10.0.0.4'},
                  {'subnet_id': 'subnet1',
                   'ip_address': 'fd79:e1c:a55::816:eff:eff:ff2'}],
             'security_groups': ['sg1'],
             'network_id': 'n2',
             'extra_dhcp_opts': [{'ip_version': 4,
                                  'opt_name': 'tftp-server',
                                  'opt_value': '20.0.0.20'},
                                 {'ip_version': 4,
                                  'opt_name': 'dns-server',
                                  'opt_value': '8.8.8.8'},
                                 {'ip_version': 6,
                                  'opt_name': 'domain-search',
                                  'opt_value': 'foo-domain'}]},
            {'id': 'p2n2',
             'device_owner': 'compute:None',
             'fixed_ips':
                 [{'subnet_id': 'b142f5e3-d434-4740-8e88-75e8e5322a40',
                   'ip_address': '10.0.0.4'},
                  {'subnet_id': 'subnet1',
                   'ip_address': 'fd79:e1c:a55::816:eff:eff:ff2'}],
             'security_groups': ['sg2'],
             'network_id': 'n2'},
            {'id': 'fp1',
             'device_owner': 'network:floatingip',
             'fixed_ips':
                 [{'subnet_id': 'ext-subnet',
                   'ip_address': '90.0.0.10'}],
             'network_id': 'ext-net'}]
        # Logical switch ports already present on the OVN side.
        self.ports_ovn = [OvnPortInfo('p1n1'), OvnPortInfo('p1n2'),
                          OvnPortInfo('p2n1'), OvnPortInfo('p2n2'),
                          OvnPortInfo('p3n1'), OvnPortInfo('p3n3')]
        self.acls_ovn = {
            'lport1':
            # ACLs need to be removed by the sync tool
            [{'id': 'acl1', 'priority': 00, 'policy': 'allow',
              'lswitch': 'lswitch1', 'lport': 'lport1'}],
            'lport2':
            [{'id': 'acl2', 'priority': 00, 'policy': 'drop',
              'lswitch': 'lswitch2', 'lport': 'lport2'}],
            # ACLs need to be kept as-is by the sync tool
            'p2n2':
            [{'lport': 'p2n2', 'direction': 'to-lport',
              'log': False, 'lswitch': 'neutron-n2',
              'priority': 1001, 'action': 'drop',
              'external_ids': {'neutron:lport': 'p2n2'},
              'match': 'outport == "p2n2" && ip'},
             {'lport': 'p2n2', 'direction': 'to-lport',
              'log': False, 'lswitch': 'neutron-n2',
              'priority': 1002, 'action': 'allow',
              'external_ids': {'neutron:lport': 'p2n2'},
              'match': 'outport == "p2n2" && ip4 && '
                       'ip4.src == 10.0.0.0/24 && udp && '
                       'udp.src == 67 && udp.dst == 68'}]}
        # OVN-side Address Sets; as_ip4_del has no Neutron counterpart.
        self.address_sets_ovn = {
            'as_ip4_sg1': {'external_ids': {ovn_const.OVN_SG_EXT_ID_KEY:
                                            'all-tcp'},
                           'name': 'as_ip4_sg1',
                           'addresses': ['10.0.0.4']},
            'as_ip4_sg2': {'external_ids': {ovn_const.OVN_SG_EXT_ID_KEY:
                                            'all-tcpe'},
                           'name': 'as_ip4_sg2',
                           'addresses': []},
            'as_ip6_sg2': {'external_ids': {ovn_const.OVN_SG_EXT_ID_KEY:
                                            'all-tcpe'},
                           'name': 'as_ip6_sg2',
                           'addresses': ['fd79:e1c:a55::816:eff:eff:ff2',
                                         'fd79:e1c:a55::816:eff:eff:ff3']},
            'as_ip4_del': {'external_ids': {ovn_const.OVN_SG_EXT_ID_KEY:
                                            'all-delete'},
                           'name': 'as_ip4_delete',
                           'addresses': ['10.0.0.4']},
        }
        self.routers = [{'id': 'r1', 'routes': [{'nexthop': '20.0.0.100',
                                                 'destination':
                                                 '11.0.0.0/24'},
                                                {'nexthop': '20.0.0.101',
                                                 'destination':
                                                 '12.0.0.0/24'}],
                         'gw_port_id': 'gpr1',
                         'external_gateway_info': {
                             'network_id': "ext-net", 'enable_snat': True,
                             'external_fixed_ips': [
                                 {'subnet_id': 'ext-subnet',
                                  'ip_address': '90.0.0.2'}]}},
                        {'id': 'r2', 'routes': [{'nexthop': '40.0.0.100',
                                                 'destination':
                                                 '30.0.0.0/24'}],
                         'gw_port_id': 'gpr2',
                         'external_gateway_info': {
                             'network_id': "ext-net", 'enable_snat': True,
                             'external_fixed_ips': [
                                 {'subnet_id': 'ext-subnet',
                                  'ip_address': '100.0.0.2'}]}},
                        {'id': 'r4', 'routes': []}]
        self.get_sync_router_ports = [
            {'fixed_ips': [{'subnet_id': 'subnet1',
                            'ip_address': '192.168.1.1'}],
             'id': 'p1r1',
             'device_id': 'r1',
             'mac_address': 'fa:16:3e:d7:fd:5f'},
            {'fixed_ips': [{'subnet_id': 'subnet2',
                            'ip_address': '192.168.2.1'}],
             'id': 'p1r2',
             'device_id': 'r2',
             'mac_address': 'fa:16:3e:d6:8b:ce'},
            {'fixed_ips': [{'subnet_id': 'subnet4',
                            'ip_address': '192.168.4.1'}],
             'id': 'p1r4',
             'device_id': 'r4',
             'mac_address': 'fa:16:3e:12:34:56'}]
        self.floating_ips = [{'id': 'fip1', 'router_id': 'r1',
                              'floating_ip_address': '90.0.0.10',
                              'fixed_ip_address': '172.16.0.10'},
                             {'id': 'fip2', 'router_id': 'r1',
                              'floating_ip_address': '90.0.0.12',
                              'fixed_ip_address': '172.16.2.12'},
                             {'id': 'fip3', 'router_id': 'r2',
                              'floating_ip_address': '100.0.0.10',
                              'fixed_ip_address': '192.168.2.10'},
                             {'id': 'fip4', 'router_id': 'r2',
                              'floating_ip_address': '100.0.0.11',
                              'fixed_ip_address': '192.168.2.11'}]
        # Logical routers already present on the OVN side.
        self.lrouters_with_rports = [{'name': 'r3',
                                      'ports': {'p1r3': ['fake']},
                                      'static_routes': [],
                                      'snats': [],
                                      'dnat_and_snats': []},
                                     {'name': 'r4',
                                      'ports': {'p1r4':
                                                ['fdad:123:456::1/64',
                                                 'fdad:789:abc::1/64']},
                                      'static_routes': [],
                                      'snats': [],
                                      'dnat_and_snats': []},
                                     {'name': 'r1',
                                      'ports': {'p3r1': ['fake']},
                                      'static_routes':
                                          [{'nexthop': '20.0.0.100',
                                            'destination': '11.0.0.0/24'},
                                           {'nexthop': '20.0.0.100',
                                            'destination': '10.0.0.0/24'}],
                                      'snats':
                                          [{'logical_ip': '172.16.0.0/24',
                                            'external_ip': '90.0.0.2',
                                            'type': 'snat'},
                                           {'logical_ip': '172.16.1.0/24',
                                            'external_ip': '90.0.0.2',
                                            'type': 'snat'}],
                                      'dnat_and_snats':
                                          [{'logical_ip': '172.16.0.10',
                                            'external_ip': '90.0.0.10',
                                            'type': 'dnat_and_snat'},
                                           {'logical_ip': '172.16.1.11',
                                            'external_ip': '90.0.0.11',
                                            'type': 'dnat_and_snat'},
                                           {'logical_ip': '192.168.2.11',
                                            'external_ip': '100.0.0.11',
                                            'type': 'dnat_and_snat',
                                            'external_mac':
                                            '01:02:03:04:05:06',
                                            'logical_port': 'vm1'}]}]
        # Logical switches already present on the OVN side.
        self.lswitches_with_ports = [{'name': 'neutron-n1',
                                      'ports': ['p1n1', 'p3n1'],
                                      'provnet_port': None},
                                     {'name': 'neutron-n3',
                                      'ports': ['p1n3', 'p2n3'],
                                      'provnet_port': None},
                                     {'name': 'neutron-n4',
                                      'ports': [],
                                      'provnet_port': 'provnet-n4'}]
        self.lrport_networks = ['fdad:123:456::1/64', 'fdad:cafe:a1b2::1/64']
def _fake_get_ovn_dhcp_options(self, subnet, network, server_mac=None):
if subnet['id'] == 'n1-s1':
return {'cidr': '10.0.0.0/24',
'options': {'server_id': '10.0.0.1',
'server_mac': '01:02:03:04:05:06',
'lease_time': str(12 * 60 * 60),
'mtu': '1450',
'router': '10.0.0.1'},
'external_ids': {'subnet_id': 'n1-s1'}}
return {'cidr': '', 'options': '', 'external_ids': {}}
def _fake_get_gw_info(self, ctx, router):
    """Canned replacement for OVNClient._get_gw_info.

    Routers r1 and r2 get one IPv4 gateway entry each; any other
    router id yields an empty list.
    """
    gw_info_by_router = {
        'r1': [ovn_client.GW_INFO(router_ip='90.0.0.2',
                                  gateway_ip='90.0.0.1',
                                  network_id='', subnet_id='',
                                  ip_version=4,
                                  ip_prefix=const.IPv4_ANY)],
        'r2': [ovn_client.GW_INFO(router_ip='100.0.0.2',
                                  gateway_ip='100.0.0.1',
                                  network_id='', subnet_id='',
                                  ip_version=4,
                                  ip_prefix=const.IPv4_ANY)],
    }
    return gw_info_by_router.get(router['id'], [])
def _fake_get_v4_network_of_all_router_ports(self, ctx, router_id):
return {'r1': ['172.16.0.0/24', '172.16.2.0/24'],
'r2': ['192.168.2.0/24']}.get(router_id, [])
def _test_mocks_helper(self, ovn_nb_synchronizer):
    """Replace every Neutron plugin and OVN API call the synchronizer
    makes with mocks returning the canned fixtures built in setUp().

    Mutates the synchronizer's core_plugin, l3_plugin, ovn_api,
    ovn_driver and _ovn_client attributes in place.
    """
    core_plugin = ovn_nb_synchronizer.core_plugin
    ovn_api = ovn_nb_synchronizer.ovn_api
    ovn_driver = ovn_nb_synchronizer.ovn_driver
    l3_plugin = ovn_nb_synchronizer.l3_plugin

    core_plugin.get_networks = mock.Mock()
    core_plugin.get_networks.return_value = self.networks
    core_plugin.get_subnets = mock.Mock()
    core_plugin.get_subnets.return_value = self.subnets
    # following block is used for acl syncing unit-test
    # With the given set of values in the unit testing,
    # 19 neutron acls should have been there,
    # 4 acls are returned as current ovn acls,
    # two of which will match with neutron.
    # So, in this example 17 will be added, 2 removed
    core_plugin.get_ports = mock.Mock()
    core_plugin.get_ports.return_value = self.ports
    mock.patch.object(acl, '_get_subnet_from_cache',
                      return_value=self.subnet).start()
    mock.patch.object(acl, 'acl_remote_group_id',
                      side_effect=self.matches).start()
    core_plugin.get_security_group = mock.MagicMock(
        side_effect=self.security_groups)
    ovn_nb_synchronizer.get_acls = mock.Mock()
    ovn_nb_synchronizer.get_acls.return_value = self.acls_ovn
    core_plugin.get_security_groups = mock.MagicMock(
        return_value=self.security_groups)
    ovn_nb_synchronizer.get_address_sets = mock.Mock()
    ovn_nb_synchronizer.get_address_sets.return_value = (
        self.address_sets_ovn)
    get_port_groups = mock.MagicMock()
    get_port_groups.execute.return_value = self.port_groups_ovn
    ovn_api.db_list_rows.return_value = get_port_groups
    ovn_api.lsp_list.execute.return_value = self.ports_ovn
    # end of acl-sync block

    # The following block is used for router and router port syncing tests
    # With the give set of values in the unit test,
    # The Neutron db has Routers r1 and r2 present.
    # The OVN db has Routers r1 and r3 present.
    # During the sync r2 will need to be created and r3 will need
    # to be deleted from the OVN db. When Router r3 is deleted, all LRouter
    # ports associated with r3 is deleted too.
    #
    # Neutron db has Router ports p1r1 in Router r1 and p1r2 in Router r2
    # OVN db has p1r3 in Router 3 and p3r1 in Router 1.
    # During the sync p1r1 and p1r2 will be added and p1r3 and p3r1
    # will be deleted from the OVN db
    l3_plugin.get_routers = mock.Mock()
    l3_plugin.get_routers.return_value = self.routers
    l3_plugin._get_sync_interfaces = mock.Mock()
    l3_plugin._get_sync_interfaces.return_value = (
        self.get_sync_router_ports)
    ovn_nb_synchronizer._ovn_client = mock.Mock()
    ovn_nb_synchronizer._ovn_client.\
        _get_nets_and_ipv6_ra_confs_for_router_port.return_value = (
            self.lrport_networks, {})
    ovn_nb_synchronizer._ovn_client._get_v4_network_of_all_router_ports. \
        side_effect = self._fake_get_v4_network_of_all_router_ports
    ovn_nb_synchronizer._ovn_client._get_gw_info = mock.Mock()
    ovn_nb_synchronizer._ovn_client._get_gw_info.side_effect = (
        self._fake_get_gw_info)
    # end of router-sync block

    l3_plugin.get_floatingips = mock.Mock()
    l3_plugin.get_floatingips.return_value = self.floating_ips
    ovn_api.get_all_logical_switches_with_ports = mock.Mock()
    ovn_api.get_all_logical_switches_with_ports.return_value = (
        self.lswitches_with_ports)
    ovn_api.get_all_logical_routers_with_rports = mock.Mock()
    ovn_api.get_all_logical_routers_with_rports.return_value = (
        self.lrouters_with_rports)
    ovn_api.transaction = mock.MagicMock()
    ovn_nb_synchronizer._ovn_client.create_network = mock.Mock()
    ovn_driver.validate_and_get_data_from_binding_profile = mock.Mock()
    # NOTE: create_port used to be assigned a fresh mock.Mock() twice in
    # a row; a single assignment has exactly the same effect.
    ovn_nb_synchronizer._ovn_client.create_port = mock.Mock()
    ovn_nb_synchronizer._ovn_client.create_port.return_value = mock.ANY
    ovn_nb_synchronizer._ovn_client._create_provnet_port = mock.Mock()
    ovn_api.ls_del = mock.Mock()
    ovn_api.delete_lswitch_port = mock.Mock()
    ovn_api.delete_lrouter = mock.Mock()
    ovn_api.delete_lrouter_port = mock.Mock()
    ovn_api.add_static_route = mock.Mock()
    ovn_api.delete_static_route = mock.Mock()
    # Pre-existing DHCP_Options rows on the OVN side.  NOTE(review): the
    # repair test expects UUID2, UUID4 and UUID5 to be removed.
    ovn_api.get_all_dhcp_options.return_value = {
        'subnets': {'n1-s1': {'cidr': '10.0.0.0/24',
                              'options':
                              {'server_id': '10.0.0.1',
                               'server_mac': '01:02:03:04:05:06',
                               'lease_time': str(12 * 60 * 60),
                               'mtu': '1450',
                               'router': '10.0.0.1'},
                              'external_ids': {'subnet_id': 'n1-s1'},
                              'uuid': 'UUID1'},
                    'n1-s3': {'cidr': '30.0.0.0/24',
                              'options':
                              {'server_id': '30.0.0.1',
                               'server_mac': '01:02:03:04:05:06',
                               'lease_time': str(12 * 60 * 60),
                               'mtu': '1450',
                               'router': '30.0.0.1'},
                              'external_ids': {'subnet_id': 'n1-s3'},
                              'uuid': 'UUID2'}},
        'ports_v4': {'p1n2': {'cidr': '10.0.0.0/24',
                              'options': {'server_id': '10.0.0.1',
                                          'server_mac':
                                          '01:02:03:04:05:06',
                                          'lease_time': '1000',
                                          'mtu': '1400',
                                          'router': '10.0.0.1'},
                              'external_ids': {'subnet_id': 'n1-s1',
                                               'port_id': 'p1n2'},
                              'uuid': 'UUID3'},
                     'p5n2': {'cidr': '10.0.0.0/24',
                              'options': {'server_id': '10.0.0.1',
                                          'server_mac':
                                          '01:02:03:04:05:06',
                                          'lease_time': '1000',
                                          'mtu': '1400',
                                          'router': '10.0.0.1'},
                              'external_ids': {'subnet_id': 'n1-s1',
                                               'port_id': 'p5n2'},
                              'uuid': 'UUID4'}},
        'ports_v6': {'p1n1': {'cidr': 'fd79:e1c:a55::/64',
                              'options': {'server_id':
                                          '01:02:03:04:05:06',
                                          'mtu': '1450'},
                              'external_ids': {'subnet_id': 'fake',
                                               'port_id': 'p1n1'},
                              'uuid': 'UUID5'},
                     'p1n2': {'cidr': 'fd79:e1c:a55::/64',
                              'options': {'server_id':
                                          '01:02:03:04:05:06',
                                          'mtu': '1450'},
                              'external_ids': {'subnet_id': 'fake',
                                               'port_id': 'p1n2'},
                              'uuid': 'UUID6'}}}
    ovn_api.create_address_set = mock.Mock()
    ovn_api.delete_address_set = mock.Mock()
    ovn_api.update_address_set = mock.Mock()
    ovn_nb_synchronizer._ovn_client._add_subnet_dhcp_options = mock.Mock()
    ovn_nb_synchronizer._ovn_client._get_ovn_dhcp_options = mock.Mock()
    ovn_nb_synchronizer._ovn_client._get_ovn_dhcp_options.side_effect = (
        self._fake_get_ovn_dhcp_options)
    ovn_api.delete_dhcp_options = mock.Mock()
    ovn_nb_synchronizer._ovn_client.get_port_dns_records = mock.Mock()
    ovn_nb_synchronizer._ovn_client.get_port_dns_records.return_value = {}
def _test_ovn_nb_sync_helper(self, ovn_nb_synchronizer,
                             networks, ports,
                             routers, router_ports,
                             create_router_list, create_router_port_list,
                             update_router_port_list,
                             del_router_list, del_router_port_list,
                             create_network_list, create_port_list,
                             create_provnet_port_list,
                             del_network_list, del_port_list,
                             add_static_route_list, del_static_route_list,
                             add_snat_list, del_snat_list,
                             add_floating_ip_list, del_floating_ip_list,
                             add_address_set_list, del_address_set_list,
                             update_address_set_list,
                             del_port_groups_list=None,
                             add_subnet_dhcp_options_list=None,
                             delete_dhcp_options_list=None,
                             add_port_groups_list=None,
                             port_groups_supported=False):
    """Run do_sync() and verify the exact set of create/update/delete
    calls it issues.

    Each *_list argument enumerates the calls expected for one resource
    type; an empty list means that no call of that kind is expected
    (which is the case for every list in 'log' mode).
    """
    self._test_mocks_helper(ovn_nb_synchronizer)
    core_plugin = ovn_nb_synchronizer.core_plugin
    ovn_api = ovn_nb_synchronizer.ovn_api
    ovn_api.is_port_groups_supported.return_value = port_groups_supported
    mock.patch.object(impl_idl_ovn, 'get_connection').start()

    ovn_nb_synchronizer.do_sync()

    # Per-security-group lookups are only asserted on the Address-Set
    # (no Port Groups) code path.
    if not ovn_api.is_port_groups_supported():
        get_security_group_calls = [mock.call(mock.ANY, sg['id'])
                                    for sg in self.security_groups]
        self.assertEqual(len(self.security_groups),
                         core_plugin.get_security_group.call_count)
        core_plugin.get_security_group.assert_has_calls(
            get_security_group_calls, any_order=True)

    # Address Sets.
    create_address_set_calls = [mock.call(**a)
                                for a in add_address_set_list]
    self.assertEqual(
        len(add_address_set_list),
        ovn_api.create_address_set.call_count)
    ovn_api.create_address_set.assert_has_calls(
        create_address_set_calls, any_order=True)
    del_address_set_calls = [mock.call(**d)
                             for d in del_address_set_list]
    self.assertEqual(
        len(del_address_set_list),
        ovn_api.delete_address_set.call_count)
    ovn_api.delete_address_set.assert_has_calls(
        del_address_set_calls, any_order=True)
    update_address_set_calls = [mock.call(**u)
                                for u in update_address_set_list]
    self.assertEqual(
        len(update_address_set_list),
        ovn_api.update_address_set.call_count)
    ovn_api.update_address_set.assert_has_calls(
        update_address_set_calls, any_order=True)
    # Port Groups.
    create_port_groups_calls = [mock.call(**a)
                                for a in add_port_groups_list]
    self.assertEqual(
        len(add_port_groups_list),
        ovn_api.pg_add.call_count)
    ovn_api.pg_add.assert_has_calls(
        create_port_groups_calls, any_order=True)
    del_port_groups_calls = [mock.call(d)
                             for d in del_port_groups_list]
    self.assertEqual(
        len(del_port_groups_list),
        ovn_api.pg_del.call_count)
    ovn_api.pg_del.assert_has_calls(
        del_port_groups_calls, any_order=True)
    # Networks, switch ports and provider network ports.
    self.assertEqual(
        len(create_network_list),
        ovn_nb_synchronizer._ovn_client.create_network.call_count)
    create_network_calls = [mock.call(net['net'])
                            for net in create_network_list]
    ovn_nb_synchronizer._ovn_client.create_network.assert_has_calls(
        create_network_calls, any_order=True)
    self.assertEqual(
        len(create_port_list),
        ovn_nb_synchronizer._ovn_client.create_port.call_count)
    create_port_calls = [mock.call(port) for port in create_port_list]
    ovn_nb_synchronizer._ovn_client.create_port.assert_has_calls(
        create_port_calls, any_order=True)
    create_provnet_port_calls = [
        mock.call(mock.ANY, mock.ANY,
                  network['provider:physical_network'],
                  network['provider:segmentation_id'])
        for network in create_provnet_port_list]
    self.assertEqual(
        len(create_provnet_port_list),
        ovn_nb_synchronizer._ovn_client._create_provnet_port.call_count)
    ovn_nb_synchronizer._ovn_client._create_provnet_port.assert_has_calls(
        create_provnet_port_calls, any_order=True)
    self.assertEqual(len(del_network_list),
                     ovn_api.ls_del.call_count)
    ls_del_calls = [mock.call(net_name)
                    for net_name in del_network_list]
    ovn_api.ls_del.assert_has_calls(
        ls_del_calls, any_order=True)
    self.assertEqual(len(del_port_list),
                     ovn_api.delete_lswitch_port.call_count)
    delete_lswitch_port_calls = [mock.call(lport_name=port['id'],
                                           lswitch_name=port['lswitch'])
                                 for port in del_port_list]
    ovn_api.delete_lswitch_port.assert_has_calls(
        delete_lswitch_port_calls, any_order=True)
    # Static routes.
    add_route_calls = [mock.call(mock.ANY, ip_prefix=route['destination'],
                                 nexthop=route['nexthop'])
                       for route in add_static_route_list]
    ovn_api.add_static_route.assert_has_calls(add_route_calls,
                                              any_order=True)
    self.assertEqual(len(add_static_route_list),
                     ovn_api.add_static_route.call_count)
    del_route_calls = [mock.call(mock.ANY, ip_prefix=route['destination'],
                                 nexthop=route['nexthop'])
                       for route in del_static_route_list]
    ovn_api.delete_static_route.assert_has_calls(del_route_calls,
                                                 any_order=True)
    self.assertEqual(len(del_static_route_list),
                     ovn_api.delete_static_route.call_count)
    # NAT rules and floating IPs.
    add_nat_calls = [mock.call(mock.ANY, **nat) for nat in add_snat_list]
    ovn_api.add_nat_rule_in_lrouter.assert_has_calls(add_nat_calls,
                                                     any_order=True)
    self.assertEqual(len(add_snat_list),
                     ovn_api.add_nat_rule_in_lrouter.call_count)
    add_fip_calls = [mock.call(nat, txn=mock.ANY)
                     for nat in add_floating_ip_list]
    (ovn_nb_synchronizer._ovn_client._create_or_update_floatingip.
        assert_has_calls(add_fip_calls))
    self.assertEqual(
        len(add_floating_ip_list),
        ovn_nb_synchronizer._ovn_client._create_or_update_floatingip.
        call_count)
    del_nat_calls = [mock.call(mock.ANY, **nat) for nat in del_snat_list]
    ovn_api.delete_nat_rule_in_lrouter.assert_has_calls(del_nat_calls,
                                                        any_order=True)
    self.assertEqual(len(del_snat_list),
                     ovn_api.delete_nat_rule_in_lrouter.call_count)
    del_fip_calls = [mock.call(nat, mock.ANY, txn=mock.ANY) for nat in
                     del_floating_ip_list]
    ovn_nb_synchronizer._ovn_client._delete_floatingip.assert_has_calls(
        del_fip_calls, any_order=True)
    self.assertEqual(
        len(del_floating_ip_list),
        ovn_nb_synchronizer._ovn_client._delete_floatingip.call_count)
    # Routers and router ports.
    create_router_calls = [mock.call(r, add_external_gateway=False)
                           for r in create_router_list]
    self.assertEqual(
        len(create_router_list),
        ovn_nb_synchronizer._ovn_client.create_router.call_count)
    ovn_nb_synchronizer._ovn_client.create_router.assert_has_calls(
        create_router_calls, any_order=True)
    create_router_port_calls = [mock.call(p['device_id'],
                                          mock.ANY)
                                for p in create_router_port_list]
    self.assertEqual(
        len(create_router_port_list),
        ovn_nb_synchronizer._ovn_client._create_lrouter_port.call_count)
    ovn_nb_synchronizer._ovn_client._create_lrouter_port.assert_has_calls(
        create_router_port_calls,
        any_order=True)
    self.assertEqual(len(del_router_list),
                     ovn_api.delete_lrouter.call_count)
    update_router_port_calls = [mock.call(p)
                                for p in update_router_port_list]
    self.assertEqual(
        len(update_router_port_list),
        ovn_nb_synchronizer._ovn_client.update_router_port.call_count)
    ovn_nb_synchronizer._ovn_client.update_router_port.assert_has_calls(
        update_router_port_calls,
        any_order=True)
    delete_lrouter_calls = [mock.call(r['router'])
                            for r in del_router_list]
    ovn_api.delete_lrouter.assert_has_calls(
        delete_lrouter_calls, any_order=True)
    self.assertEqual(
        len(del_router_port_list),
        ovn_api.delete_lrouter_port.call_count)
    delete_lrouter_port_calls = [mock.call(port['id'],
                                           port['router'], if_exists=False)
                                 for port in del_router_port_list]
    ovn_api.delete_lrouter_port.assert_has_calls(
        delete_lrouter_port_calls, any_order=True)
    # Subnet DHCP options.
    self.assertEqual(
        len(add_subnet_dhcp_options_list),
        ovn_nb_synchronizer._ovn_client._add_subnet_dhcp_options.
        call_count)
    add_subnet_dhcp_options_calls = [
        mock.call(subnet, net, mock.ANY)
        for (subnet, net) in add_subnet_dhcp_options_list]
    ovn_nb_synchronizer._ovn_client._add_subnet_dhcp_options. \
        assert_has_calls(add_subnet_dhcp_options_calls, any_order=True)
    self.assertEqual(ovn_api.delete_dhcp_options.call_count,
                     len(delete_dhcp_options_list))
    delete_dhcp_options_calls = [
        mock.call(dhcp_opt_uuid)
        for dhcp_opt_uuid in delete_dhcp_options_list]
    ovn_api.delete_dhcp_options.assert_has_calls(
        delete_dhcp_options_calls, any_order=True)
def _test_ovn_nb_sync_mode_repair_helper(self, port_groups_supported=True):
    """Run do_sync() in 'repair' mode and verify that every detected
    inconsistency between the canned Neutron data and the canned OVN
    data results in the corresponding create/update/delete call.
    """
    create_network_list = [{'net': {'id': 'n2', 'mtu': 1450},
                           'ext_ids': {}}]
    del_network_list = ['neutron-n3']
    del_port_list = [{'id': 'p3n1', 'lswitch': 'neutron-n1'},
                     {'id': 'p1n1', 'lswitch': 'neutron-n1'}]
    create_port_list = self.ports
    # Iterate over a copy: removing from the list being iterated skips
    # the element following each removal.  NOTE(review): this still
    # mutates self.ports in place (create_port_list aliases it), which
    # the mocked get_ports relies on -- kept as-is.
    for port in list(create_port_list):
        if port['id'] in ['p1n1', 'fp1']:
            # this will be skipped by the logic,
            # because p1n1 is already in lswitch-port list
            # and fp1 is a floating IP port
            create_port_list.remove(port)
    create_provnet_port_list = [{'id': 'n1', 'mtu': 1450,
                                 'provider:physical_network': 'physnet1',
                                 'provider:segmentation_id': 1000}]
    create_router_list = [{
        'id': 'r2', 'routes': [
            {'nexthop': '40.0.0.100', 'destination': '30.0.0.0/24'}],
        'gw_port_id': 'gpr2',
        'external_gateway_info': {
            'network_id': "ext-net", 'enable_snat': True,
            'external_fixed_ips': [{
                'subnet_id': 'ext-subnet',
                'ip_address': '100.0.0.2'}]}}]
    # Test adding and deleting routes snats fips behaviors for router r1
    # existing in both neutron DB and OVN DB.
    # Test adding behaviors for router r2 only existing in neutron DB.
    # Static routes with destination 0.0.0.0/0 are default gateway routes
    add_static_route_list = [{'nexthop': '20.0.0.101',
                              'destination': '12.0.0.0/24'},
                             {'nexthop': '90.0.0.1',
                              'destination': '0.0.0.0/0'},
                             {'nexthop': '40.0.0.100',
                              'destination': '30.0.0.0/24'},
                             {'nexthop': '100.0.0.1',
                              'destination': '0.0.0.0/0'}]
    del_static_route_list = [{'nexthop': '20.0.0.100',
                              'destination': '10.0.0.0/24'}]
    add_snat_list = [{'logical_ip': '172.16.2.0/24',
                      'external_ip': '90.0.0.2',
                      'type': 'snat'},
                     {'logical_ip': '192.168.2.0/24',
                      'external_ip': '100.0.0.2',
                      'type': 'snat'}]
    del_snat_list = [{'logical_ip': '172.16.1.0/24',
                      'external_ip': '90.0.0.2',
                      'type': 'snat'}]
    # fip 100.0.0.11 exists in OVN with distributed type and in Neutron
    # with centralized type. This fip is used to test
    # enable_distributed_floating_ip switch and migration
    add_floating_ip_list = [{'id': 'fip2', 'router_id': 'r1',
                             'floating_ip_address': '90.0.0.12',
                             'fixed_ip_address': '172.16.2.12'},
                            {'id': 'fip3', 'router_id': 'r2',
                             'floating_ip_address': '100.0.0.10',
                             'fixed_ip_address': '192.168.2.10'},
                            {'id': 'fip4', 'router_id': 'r2',
                             'floating_ip_address': '100.0.0.11',
                             'fixed_ip_address': '192.168.2.11'}]
    del_floating_ip_list = [{'logical_ip': '172.16.1.11',
                             'external_ip': '90.0.0.11',
                             'type': 'dnat_and_snat'},
                            {'logical_ip': '192.168.2.11',
                             'external_ip': '100.0.0.11',
                             'type': 'dnat_and_snat',
                             'external_mac': '01:02:03:04:05:06',
                             'logical_port': 'vm1'}]
    del_router_list = [{'router': 'neutron-r3'}]
    del_router_port_list = [{'id': 'lrp-p3r1', 'router': 'neutron-r1'}]
    create_router_port_list = self.get_sync_router_ports[:2]
    update_router_port_list = [self.get_sync_router_ports[2]]
    update_router_port_list[0].update(
        {'networks': self.lrport_networks})
    if not port_groups_supported:
        add_address_set_list = [
            {'external_ids': {ovn_const.OVN_SG_EXT_ID_KEY: 'sg1'},
             'name': 'as_ip6_sg1',
             'addresses': ['fd79:e1c:a55::816:eff:eff:ff2']}]
        del_address_set_list = [{'name': 'as_ip4_del'}]
        update_address_set_list = [
            {'addrs_remove': [],
             'addrs_add': ['10.0.0.4'],
             'name': 'as_ip4_sg2'},
            {'addrs_remove': ['fd79:e1c:a55::816:eff:eff:ff3'],
             'addrs_add': [],
             'name': 'as_ip6_sg2'}]
        # If Port Groups are not supported, we don't expect any of those
        # to be created/deleted.
        add_port_groups_list = []
        del_port_groups_list = []
    else:
        add_port_groups_list = [
            {'external_ids': {ovn_const.OVN_SG_EXT_ID_KEY: 'sg2'},
             'name': 'pg_sg2',
             'acls': []}]
        del_port_groups_list = ['pg_unknown_del']
        # If using Port Groups, no Address Set shall be created/updated
        # and all the existing ones have to be removed.
        add_address_set_list = []
        update_address_set_list = []
        del_address_set_list = [{'name': 'as_ip4_sg1'},
                                {'name': 'as_ip4_sg2'},
                                {'name': 'as_ip6_sg2'},
                                {'name': 'as_ip4_del'}]
    add_subnet_dhcp_options_list = [(self.subnets[2], self.networks[1]),
                                    (self.subnets[1], self.networks[0])]
    delete_dhcp_options_list = ['UUID2', 'UUID4', 'UUID5']
    ovn_nb_synchronizer = ovn_db_sync.OvnNbSynchronizer(
        self.plugin, self.mech_driver._nb_ovn, self.mech_driver._sb_ovn,
        'repair', self.mech_driver)
    self._test_ovn_nb_sync_helper(ovn_nb_synchronizer,
                                  self.networks,
                                  self.ports,
                                  self.routers,
                                  self.get_sync_router_ports,
                                  create_router_list,
                                  create_router_port_list,
                                  update_router_port_list,
                                  del_router_list, del_router_port_list,
                                  create_network_list, create_port_list,
                                  create_provnet_port_list,
                                  del_network_list, del_port_list,
                                  add_static_route_list,
                                  del_static_route_list,
                                  add_snat_list,
                                  del_snat_list,
                                  add_floating_ip_list,
                                  del_floating_ip_list,
                                  add_address_set_list,
                                  del_address_set_list,
                                  update_address_set_list,
                                  add_subnet_dhcp_options_list,
                                  delete_dhcp_options_list,
                                  add_port_groups_list,
                                  del_port_groups_list,
                                  port_groups_supported)
def test_ovn_nb_sync_mode_repair_no_pgs(self):
    """Repair-mode sync when the OVN schema lacks Port Groups."""
    self._test_ovn_nb_sync_mode_repair_helper(False)
def test_ovn_nb_sync_mode_repair_pgs(self):
    """Repair-mode sync when the OVN schema supports Port Groups."""
    self._test_ovn_nb_sync_mode_repair_helper(True)
def _test_ovn_nb_sync_mode_log_helper(self, port_groups_supported=True):
    """Run do_sync() in 'log' mode: inconsistencies are only reported,
    so no create/update/delete call is expected on any OVN API.
    """
    ovn_nb_synchronizer = ovn_db_sync.OvnNbSynchronizer(
        self.plugin, self.mech_driver._nb_ovn, self.mech_driver._sb_ovn,
        'log', self.mech_driver)
    # One empty expected-call list per resource kind, in the helper's
    # positional order (create/update/delete router, network, port,
    # route, snat, fip, address set, dhcp option and port group lists).
    no_expected_calls = [[] for _ in range(23)]
    self._test_ovn_nb_sync_helper(ovn_nb_synchronizer,
                                  self.networks,
                                  self.ports,
                                  self.routers,
                                  self.get_sync_router_ports,
                                  *no_expected_calls,
                                  port_groups_supported=port_groups_supported)
def test_ovn_nb_sync_mode_log_pgs(self):
    """Log-mode sync when the OVN schema supports Port Groups."""
    self._test_ovn_nb_sync_mode_log_helper(True)
def test_ovn_nb_sync_mode_log_no_pgs(self):
    """Log-mode sync when the OVN schema lacks Port Groups."""
    self._test_ovn_nb_sync_mode_log_helper(False)
class TestOvnSbSyncML2(test_mech_driver.OVNMechanismDriverTestCase):
    """Tests for the OVN southbound database synchronizer."""

    def test_ovn_sb_sync(self):
        """Chassis hostname/physnet mappings from the SB database are
        pushed to the mech driver for every host known to either side;
        hosts present only in Neutron get an empty mapping.
        """
        ovn_sb_synchronizer = ovn_db_sync.OvnSbSynchronizer(
            self.plugin,
            self.mech_driver._sb_ovn,
            self.mech_driver)
        ovn_api = ovn_sb_synchronizer.ovn_api
        hostname_with_physnets = {'hostname1': ['physnet1', 'physnet2'],
                                  'hostname2': ['physnet1']}
        ovn_api.get_chassis_hostname_and_physnets.return_value = (
            hostname_with_physnets)
        ovn_driver = ovn_sb_synchronizer.ovn_driver
        ovn_driver.update_segment_host_mapping = mock.Mock()
        # hostname3 is known to Neutron only and must be mapped to [].
        hosts_in_neutron = {'hostname2', 'hostname3'}
        with mock.patch.object(ovn_db_sync.segments_db,
                               'get_hosts_mapped_with_segments',
                               return_value=hosts_in_neutron):
            ovn_sb_synchronizer.sync_hostname_and_physical_networks(mock.ANY)
            all_hosts = set(hostname_with_physnets.keys()) | hosts_in_neutron
            self.assertEqual(
                len(all_hosts),
                ovn_driver.update_segment_host_mapping.call_count)
            update_segment_host_mapping_calls = [mock.call(
                host, hostname_with_physnets[host])
                for host in hostname_with_physnets]
            update_segment_host_mapping_calls += [
                mock.call(host, []) for host in
                hosts_in_neutron - set(hostname_with_physnets.keys())]
            ovn_driver.update_segment_host_mapping.assert_has_calls(
                update_segment_host_mapping_calls, any_order=True)

(File diff suppressed because it is too large — load the full diff to view it.)

View File

@ -95,6 +95,7 @@ neutron.ml2.mechanism_drivers =
openvswitch = neutron.plugins.ml2.drivers.openvswitch.mech_driver.mech_openvswitch:OpenvswitchMechanismDriver
l2population = neutron.plugins.ml2.drivers.l2pop.mech_driver:L2populationMechanismDriver
sriovnicswitch = neutron.plugins.ml2.drivers.mech_sriov.mech_driver.mech_driver:SriovNicSwitchMechanismDriver
ovn = neutron.plugins.ml2.drivers.ovn.mech_driver.mech_driver:OVNMechanismDriver
fake_agent = neutron.tests.unit.plugins.ml2.drivers.mech_fake_agent:FakeAgentMechanismDriver
fake_agent_l3 = neutron.tests.unit.plugins.ml2.drivers.mech_fake_agent:FakeAgentMechanismDriverL3
another_fake_agent = neutron.tests.unit.plugins.ml2.drivers.mech_fake_agent:AnotherFakeAgentMechanismDriver