From f564dcad4d8c072767ae235353a982653b156c76 Mon Sep 17 00:00:00 2001
From: Hong Hui Xiao
Date: Sun, 31 Jul 2016 09:01:33 +0800
Subject: [PATCH] Enable create and delete segments in ML2

Basic CRUD for segments was merged in [1]. This patch enables creating
and deleting segments in ML2. It does the following:

a) When creating a segment, reserve the segment in ML2.
b) When deleting a segment, release the segment in ML2.
c) Prevent deleting a segment that is in use.
d) Add segment_index.

[1] https://review.openstack.org/#/c/296603

Change-Id: Ie8beeccf2294f1af8baa758eba230da2de1fff28
Partially-Implements: blueprint routed-networks
---
 neutron/plugins/ml2/db.py                     |  33 ++++-
 neutron/plugins/ml2/managers.py               |  37 ++++-
 neutron/plugins/ml2/plugin.py                 |  59 +++++++-
 neutron/services/segments/db.py               |  62 +++++++-
 neutron/services/segments/exceptions.py       |   4 +
 neutron/services/segments/plugin.py           |  22 +++
 neutron/tests/unit/extensions/test_segment.py | 111 +++++++++++++--
 .../plugins/ml2/drivers/mechanism_test.py     |   1 -
 neutron/tests/unit/plugins/ml2/test_plugin.py | 132 ++++++++++++++++++
 .../scheduler/test_dhcp_agent_scheduler.py    |  14 +-
 10 files changed, 442 insertions(+), 33 deletions(-)

diff --git a/neutron/plugins/ml2/db.py b/neutron/plugins/ml2/db.py
index 39b17a35574..cd47d577dce 100644
--- a/neutron/plugins/ml2/db.py
+++ b/neutron/plugins/ml2/db.py
@@ -22,13 +22,17 @@ import six
 from sqlalchemy import or_
 from sqlalchemy.orm import exc
 
-from neutron._i18n import _LE
+from neutron._i18n import _, _LE
+from neutron.callbacks import events
+from neutron.callbacks import registry
+from neutron.callbacks import resources
 from neutron.db.models import securitygroup as sg_models
 from neutron.db import models_v2
 from neutron.db import segments_db
 from neutron.extensions import portbindings
 from neutron import manager
 from neutron.plugins.ml2 import models
+from neutron.services.segments import exceptions as seg_exc
 
 LOG = log.getLogger(__name__)
 
@@ -305,3 +309,30 @@ def is_dhcp_active_on_any_subnet(context, subnet_ids):
     return bool(context.session.query(models_v2.Subnet).
                 enable_eagerloads(False).filter_by(enable_dhcp=True).
                 filter(models_v2.Subnet.id.in_(subnet_ids)).count())
+
+
+def _prevent_segment_delete_with_port_bound(resource, event, trigger,
+                                            context, segment):
+    """Raise exception if there are any ports bound with segment_id."""
+    segment_id = segment['id']
+    query = context.session.query(models_v2.Port)
+    query = query.join(
+        models.PortBindingLevel,
+        models.PortBindingLevel.port_id == models_v2.Port.id)
+    query = query.filter(models.PortBindingLevel.segment_id == segment_id)
+    port_ids = [p.id for p in query]
+
+    # There are still ports bound to the segment, so it should not be deleted
+    # TODO(xiaohhui): Should we delete the dhcp port automatically here?
+ if port_ids: + reason = _("The segment is still bound with port(s) " + "%s") % ", ".join(port_ids) + raise seg_exc.SegmentInUse(segment_id=segment_id, reason=reason) + + +def subscribe(): + registry.subscribe(_prevent_segment_delete_with_port_bound, + resources.SEGMENT, + events.BEFORE_DELETE) + +subscribe() diff --git a/neutron/plugins/ml2/managers.py b/neutron/plugins/ml2/managers.py index 8a0527a833a..60489212f3c 100644 --- a/neutron/plugins/ml2/managers.py +++ b/neutron/plugins/ml2/managers.py @@ -208,6 +208,26 @@ class TypeManager(stevedore.named.NamedExtensionManager): segment = self._allocate_tenant_net_segment(session) self._add_network_segment(context, network_id, segment) + def reserve_network_segment(self, session, segment_data): + """Call type drivers to reserve a network segment.""" + # Validate the data of segment + if not validators.is_attr_set(segment_data[api.NETWORK_TYPE]): + msg = _("network_type required") + raise exc.InvalidInput(error_message=msg) + + net_type = self._get_attribute(segment_data, api.NETWORK_TYPE) + phys_net = self._get_attribute(segment_data, api.PHYSICAL_NETWORK) + seg_id = self._get_attribute(segment_data, api.SEGMENTATION_ID) + segment = {api.NETWORK_TYPE: net_type, + api.PHYSICAL_NETWORK: phys_net, + api.SEGMENTATION_ID: seg_id} + + self.validate_provider_segment(segment) + + # Reserve segment in type driver + with session.begin(subtransactions=True): + return self.reserve_provider_segment(session, segment) + def is_partial_segment(self, segment): network_type = segment[api.NETWORK_TYPE] driver = self.drivers.get(network_type) @@ -254,13 +274,16 @@ class TypeManager(stevedore.named.NamedExtensionManager): filter_dynamic=None) for segment in segments: - network_type = segment.get(api.NETWORK_TYPE) - driver = self.drivers.get(network_type) - if driver: - driver.obj.release_segment(session, segment) - else: - LOG.error(_LE("Failed to release segment '%s' because " - "network type is not supported."), segment) + self.release_network_segment(session, segment) + + def release_network_segment(self, session, segment): + network_type = segment.get(api.NETWORK_TYPE) + driver = self.drivers.get(network_type) + if driver: + driver.obj.release_segment(session, segment) + else: + LOG.error(_LE("Failed to release segment '%s' because " + "network type is not supported."), segment) def allocate_dynamic_segment(self, context, network_id, segment): """Allocate a dynamic segment using a partial or full segment dict.""" diff --git a/neutron/plugins/ml2/plugin.py b/neutron/plugins/ml2/plugin.py index 1c7a32ea32d..0832f5b8a24 100644 --- a/neutron/plugins/ml2/plugin.py +++ b/neutron/plugins/ml2/plugin.py @@ -84,6 +84,7 @@ from neutron.plugins.ml2 import models from neutron.plugins.ml2 import rpc from neutron.quota import resource_registry from neutron.services.qos import qos_consts +from neutron.services.segments import plugin as segments_plugin LOG = log.getLogger(__name__) @@ -163,6 +164,14 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2, self.mechanism_manager.initialize() registry.subscribe(self._port_provisioned, resources.PORT, provisioning_blocks.PROVISIONING_COMPLETE) + registry.subscribe(self._handle_segment_change, resources.SEGMENT, + events.PRECOMMIT_CREATE) + registry.subscribe(self._handle_segment_change, resources.SEGMENT, + events.PRECOMMIT_DELETE) + registry.subscribe(self._handle_segment_change, resources.SEGMENT, + events.AFTER_CREATE) + registry.subscribe(self._handle_segment_change, resources.SEGMENT, + events.AFTER_DELETE) 
         self._setup_dhcp()
         self._start_rpc_notifiers()
         self.add_agent_status_check_worker(self.agent_health_check)
@@ -927,7 +936,11 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
                 self.mechanism_manager.delete_network_precommit(
                     mech_context)
 
-                self.type_manager.release_network_segments(session, id)
+                registry.notify(resources.NETWORK,
+                                events.PRECOMMIT_DELETE,
+                                self,
+                                context=context,
+                                network_id=id)
                 record = self._get_network(context, id)
                 LOG.debug("Deleting network record %s", record)
                 session.delete(record)
@@ -1797,3 +1810,47 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
             if mech_driver.obj.check_segment_for_agent(segment, agent):
                 return True
         return False
+
+    def _handle_segment_change(self, rtype, event, trigger, context, segment):
+        if (event == events.PRECOMMIT_CREATE and
+            not isinstance(trigger, segments_plugin.Plugin)):
+            # TODO(xiaohhui): Currently, when a network is created, ML2
+            # reserves the segment and triggers this event handler, which
+            # would then reserve the segment again and fail because it is
+            # already reserved. This check could be removed by unifying
+            # the segment creation procedure.
+            return
+
+        session = context.session
+        network_id = segment.get('network_id')
+
+        if event == events.PRECOMMIT_CREATE:
+            updated_segment = self.type_manager.reserve_network_segment(
+                session, segment)
+            # The segmentation ID might come from the ML2 type driver, so
+            # update it in the original segment.
+            segment[api.SEGMENTATION_ID] = updated_segment[api.SEGMENTATION_ID]
+        elif event == events.PRECOMMIT_DELETE:
+            self.type_manager.release_network_segment(session, segment)
+
+        try:
+            self._notify_mechanism_driver_for_segment_change(
+                event, context, network_id)
+        except ml2_exc.MechanismDriverError:
+            with excutils.save_and_reraise_exception():
+                LOG.error(_LE("mechanism_manager error occurred when "
+                              "handling event %(event)s for segment "
+                              "'%(segment)s'"),
+                          {'event': event, 'segment': segment['id']})
+
+    def _notify_mechanism_driver_for_segment_change(self, event,
+                                                    context, network_id):
+        network_with_segments = self.get_network(context, network_id)
+        mech_context = driver_context.NetworkContext(
+            self, context, network_with_segments,
+            original_network=network_with_segments)
+        if (event == events.PRECOMMIT_CREATE or
+                event == events.PRECOMMIT_DELETE):
+            self.mechanism_manager.update_network_precommit(mech_context)
+        elif event == events.AFTER_CREATE or event == events.AFTER_DELETE:
+            self.mechanism_manager.update_network_postcommit(mech_context)
diff --git a/neutron/services/segments/db.py b/neutron/services/segments/db.py
index 4a37b63324f..33a9b894d0f 100644
--- a/neutron/services/segments/db.py
+++ b/neutron/services/segments/db.py
@@ -69,7 +69,8 @@ class SegmentDbMixin(common_db_mixin.CommonDbMixin):
                db.NETWORK_TYPE: segment_db[db.NETWORK_TYPE],
                db.SEGMENTATION_ID: segment_db[db.SEGMENTATION_ID],
                'hosts': [mapping.host for mapping in
-                         segment_db.segment_host_mapping]}
+                         segment_db.segment_host_mapping],
+               'segment_index': segment_db['segment_index']}
         return self._fields(res, fields)
 
     def _get_segment(self, context, segment_id):
@@ -98,15 +99,33 @@ class SegmentDbMixin(common_db_mixin.CommonDbMixin):
                     db.PHYSICAL_NETWORK: physical_network,
                     db.NETWORK_TYPE: network_type,
                     db.SEGMENTATION_ID: segmentation_id}
+            # Calculate the index of the segment
+            segment_index = 0
+            segments = self.get_segments(
+                context,
+                filters={'network_id': [network_id]},
+                fields=['segment_index'],
+                sorts=[('segment_index', True)])
+            if segments:
+                # NOTE(xiaohhui): The new index is the last index + 1. This
+                # may cause discontinuous segment_index values, but
+                # segment_index still works as an ordering index for segments.
+                segment_index = (segments[-1].get('segment_index') + 1)
+            args['segment_index'] = segment_index
+
             new_segment = db.NetworkSegment(**args)
             try:
                 context.session.add(new_segment)
                 context.session.flush([new_segment])
             except db_exc.DBReferenceError:
                 raise n_exc.NetworkNotFound(net_id=network_id)
+            # Notify subscribers before committing the segment to the DB
             registry.notify(resources.SEGMENT,
                             events.PRECOMMIT_CREATE,
                             self,
                             context=context,
                             segment=new_segment)
+        registry.notify(resources.SEGMENT, events.AFTER_CREATE, self,
+                        context=context, segment=new_segment)
+
         return self._make_segment_dict(new_segment)
@@ -156,11 +175,26 @@ class SegmentDbMixin(common_db_mixin.CommonDbMixin):
     @log_helpers.log_method_call
     def delete_segment(self, context, uuid):
         """Delete an existing segment."""
+        segment = self.get_segment(context, uuid)
+        # Notify interested parties before deleting the segment
+        registry.notify(resources.SEGMENT, events.BEFORE_DELETE,
+                        self.delete_segment, context=context,
+                        segment=segment)
+
+        # Delete the segment from the DB
         with context.session.begin(subtransactions=True):
             query = self._model_query(context, db.NetworkSegment)
             query = query.filter(db.NetworkSegment.id == uuid)
             if 0 == query.delete():
                 raise exceptions.SegmentNotFound(segment_id=uuid)
+            # Notify within the transaction so errors roll back the deletion
+            registry.notify(resources.SEGMENT, events.PRECOMMIT_DELETE,
+                            self.delete_segment, context=context,
+                            segment=segment)
+
+        registry.notify(resources.SEGMENT, events.AFTER_DELETE,
+                        self.delete_segment, context=context,
+                        segment=segment)
 
 
 def update_segment_host_mapping(context, host, current_segment_ids):
@@ -199,6 +233,10 @@ def _get_phys_nets(agent):
 
 reported_hosts = set()
 
+# NOTE: Module-level variable for the segments plugin. It should be removed
+# once segments becomes a default plugin.
+segments_plugin = None
+
 
 def get_segments_with_phys_nets(context, phys_nets):
     """Get segments from physical networks.
@@ -245,6 +283,12 @@ def _update_segment_host_mapping_for_agent(resource, event, trigger,
 
 
 def _add_segment_host_mapping_for_segment(resource, event, trigger,
                                           context, segment):
+    if not context.session.is_active:
+        # The session might be in a partial-rollback state due to an error
+        # in a peer callback; in that case there is no need to add the
+        # mapping, so just return here.
+ return + if not segment.physical_network: return cp = manager.NeutronManager.get_plugin() @@ -259,6 +303,19 @@ def _add_segment_host_mapping_for_segment(resource, event, trigger, map_segment_to_hosts(context, segment.id, hosts) +def _delete_segments_for_network(resource, event, trigger, + context, network_id): + admin_ctx = context.elevated() + global segments_plugin + if not segments_plugin: + segments_plugin = manager.NeutronManager.load_class_for_provider( + 'neutron.service_plugins', 'segments')() + segments = segments_plugin.get_segments( + admin_ctx, filters={'network_id': [network_id]}) + for segment in segments: + segments_plugin.delete_segment(admin_ctx, segment['id']) + + def subscribe(): registry.subscribe(_update_segment_host_mapping_for_agent, resources.AGENT, @@ -268,5 +325,8 @@ def subscribe(): events.AFTER_UPDATE) registry.subscribe(_add_segment_host_mapping_for_segment, resources.SEGMENT, events.PRECOMMIT_CREATE) + registry.subscribe(_delete_segments_for_network, + resources.NETWORK, + events.PRECOMMIT_DELETE) subscribe() diff --git a/neutron/services/segments/exceptions.py b/neutron/services/segments/exceptions.py index 72c2b965aa5..ef7c13bb41e 100644 --- a/neutron/services/segments/exceptions.py +++ b/neutron/services/segments/exceptions.py @@ -54,3 +54,7 @@ class HostNotCompatibleWithFixedIps(exceptions.Conflict): message = _("Host %(host)s is not connected to a segment where the " "existing fixed_ips on port %(port_id)s will function given " "the routed network topology.") + + +class SegmentInUse(exceptions.InUse): + message = _("Segment '%(segment_id)s' cannot be deleted: %(reason)s.") diff --git a/neutron/services/segments/plugin.py b/neutron/services/segments/plugin.py index 7dcc77c12f6..26df22b451a 100644 --- a/neutron/services/segments/plugin.py +++ b/neutron/services/segments/plugin.py @@ -16,7 +16,11 @@ from sqlalchemy.orm import session +from neutron._i18n import _ from neutron.api.v2 import attributes +from neutron.callbacks import events +from neutron.callbacks import registry +from neutron.callbacks import resources from neutron.db import common_db_mixin from neutron.db import models_v2 from neutron.extensions import ip_allocation @@ -24,6 +28,7 @@ from neutron.extensions import l2_adjacency from neutron.extensions import segment from neutron import manager from neutron.services.segments import db +from neutron.services.segments import exceptions def _extend_network_dict_binding(plugin, network_res, network_db): @@ -71,8 +76,25 @@ class Plugin(db.SegmentDbMixin, segment.SegmentPluginBase): common_db_mixin.CommonDbMixin.register_dict_extend_funcs( attributes.PORTS, [_extend_port_dict_binding]) + registry.subscribe( + self._prevent_segment_delete_with_subnet_associated, + resources.SEGMENT, + events.BEFORE_DELETE) + @classmethod def get_instance(cls): if cls._instance is None: cls._instance = cls() return cls._instance + + def _prevent_segment_delete_with_subnet_associated( + self, resource, event, trigger, context, segment): + """Raise exception if there are any subnets associated with segment.""" + segment_id = segment['id'] + query = context.session.query(models_v2.Subnet.id) + query = query.filter(models_v2.Subnet.segment_id == segment_id) + subnet_ids = [s[0] for s in query] + if subnet_ids: + reason = _("The segment is still associated with subnet(s) " + "%s") % ", ".join(subnet_ids) + raise exceptions.SegmentInUse(segment_id=segment_id, reason=reason) diff --git a/neutron/tests/unit/extensions/test_segment.py 
b/neutron/tests/unit/extensions/test_segment.py index 93b14215340..fc358e19241 100644 --- a/neutron/tests/unit/extensions/test_segment.py +++ b/neutron/tests/unit/extensions/test_segment.py @@ -21,6 +21,7 @@ import webob.exc from neutron.api.v2 import attributes from neutron.callbacks import events +from neutron.callbacks import exceptions from neutron.callbacks import registry from neutron.callbacks import resources from neutron import context @@ -175,24 +176,82 @@ class TestSegment(SegmentTestCase): expected=expected_segment) def test_create_segment_no_segmentation_id(self): + + def _mock_reserve_segmentation_id(rtype, event, trigger, + context, segment): + if not segment.get('segmentation_id'): + segment['segmentation_id'] = 200 + with self.network() as network: network = network['network'] + + registry.subscribe(_mock_reserve_segmentation_id, resources.SEGMENT, + events.PRECOMMIT_CREATE) expected_segment = {'network_id': network['id'], 'physical_network': 'phys_net', 'network_type': 'net_type', - 'segmentation_id': None} + 'segmentation_id': 200} self._test_create_segment(network_id=network['id'], physical_network='phys_net', expected=expected_segment) + def test_create_segment_with_exception_in_core_plugin(self): + cxt = context.get_admin_context() + with self.network() as network: + network = network['network'] + + with mock.patch.object(registry, 'notify') as notify: + notify.side_effect = exceptions.CallbackFailure(errors=Exception) + self.assertRaises(webob.exc.HTTPClientError, + self.segment, + network_id=network['id'], + segmentation_id=200) + + network_segments = segments_db.get_network_segments(cxt.session, + network['id']) + self.assertEqual([], network_segments) + + def test_create_segments_in_certain_order(self): + cxt = context.get_admin_context() + with self.network() as network: + network = network['network'] + segment1 = self.segment( + network_id=network['id'], segmentation_id=200) + segment2 = self.segment( + network_id=network['id'], segmentation_id=201) + segment3 = self.segment( + network_id=network['id'], segmentation_id=202) + network_segments = segments_db.get_network_segments(cxt.session, + network['id']) + self.assertEqual(segment1['segment']['id'], + network_segments[0]['id']) + self.assertEqual(segment2['segment']['id'], + network_segments[1]['id']) + self.assertEqual(segment3['segment']['id'], + network_segments[2]['id']) + def test_delete_segment(self): with self.network() as network: network = network['network'] - segment = self.segment(network_id=network['id']) + self.segment(network_id=network['id'], segmentation_id=200) + segment = self.segment(network_id=network['id'], segmentation_id=201) self._delete('segments', segment['segment']['id']) self._show('segments', segment['segment']['id'], expected_code=webob.exc.HTTPNotFound.code) + def test_delete_segment_failed_with_subnet_associated(self): + with self.network() as network: + net = network['network'] + + segment = self._test_create_segment(network_id=net['id'], + segmentation_id=200) + segment_id = segment['segment']['id'] + with self.subnet(network=network, segment_id=segment_id): + self._delete('segments', segment_id, + expected_code=webob.exc.HTTPConflict.code) + exist_segment = self._show('segments', segment_id) + self.assertEqual(segment_id, exist_segment['segment']['id']) + def test_get_segment(self): with self.network() as network: network = network['network'] @@ -211,10 +270,19 @@ class TestSegment(SegmentTestCase): segmentation_id=200) self._test_create_segment(network_id=network['id'], 
physical_network='phys_net2', - segmentation_id=200) + segmentation_id=201) res = self._list('segments') self.assertEqual(2, len(res['segments'])) + def test_update_segments(self): + with self.network() as network: + net = network['network'] + segment = self._test_create_segment(network_id=net['id'], + segmentation_id=200) + segment['segment']['segmentation_id'] = '201' + self._update('segments', segment['segment']['id'], segment, + expected_code=webob.exc.HTTPClientError.code) + class TestSegmentML2(SegmentTestCase): def setUp(self): @@ -236,7 +304,8 @@ class TestSegmentSubnetAssociation(SegmentTestCase): with self.network() as network: net = network['network'] - segment = self._test_create_segment(network_id=net['id']) + segment = self._test_create_segment(network_id=net['id'], + segmentation_id=200) segment_id = segment['segment']['id'] with self.subnet(network=network, segment_id=segment_id) as subnet: @@ -253,7 +322,8 @@ class TestSegmentSubnetAssociation(SegmentTestCase): with self.network() as network2: net = network1['network'] - segment = self._test_create_segment(network_id=net['id']) + segment = self._test_create_segment(network_id=net['id'], + segmentation_id=200) res = self._create_subnet(self.fmt, net_id=network2['network']['id'], @@ -282,7 +352,8 @@ class TestSegmentSubnetAssociation(SegmentTestCase): with self.subnet(network=network): net = network['network'] - segment = self._test_create_segment(network_id=net['id']) + segment = self._test_create_segment(network_id=net['id'], + segmentation_id=200) res = self._create_subnet(self.fmt, net_id=net['id'], @@ -322,6 +393,9 @@ class HostSegmentMappingTestCase(SegmentTestCase): config.cfg.CONF.set_override('mechanism_drivers', self._mechanism_drivers, group='ml2') + config.cfg.CONF.set_override('network_vlan_ranges', + ['phys_net1', 'phys_net2'], + group='ml2_type_vlan') if not plugin: plugin = 'ml2' super(HostSegmentMappingTestCase, self).setUp(plugin=plugin) @@ -511,7 +585,7 @@ class TestMl2HostSegmentMappingOVS(HostSegmentMappingTestCase): network = network['network'] segment2 = self._test_create_segment( network_id=network['id'], physical_network=physical_network, - segmentation_id=200, network_type=p_constants.TYPE_VLAN)['segment'] + segmentation_id=201, network_type=p_constants.TYPE_VLAN)['segment'] segments_host_db = self._get_segments_for_host(host1) self.assertEqual(set((segment['id'], segment2['id'])), set(segments_host_db)) @@ -663,7 +737,8 @@ class TestSegmentAwareIpam(SegmentTestCase): segment = self._test_create_segment( network_id=network['network']['id'], - physical_network=physnet) + physical_network=physnet, + network_type=p_constants.TYPE_VLAN) ip_version = netaddr.IPNetwork(cidr).version if cidr else None with self.subnet(network=network, @@ -730,7 +805,8 @@ class TestSegmentAwareIpam(SegmentTestCase): with self.network() as network: segment = self._test_create_segment( network_id=network['network']['id'], - physical_network='physnet') + physical_network='physnet', + network_type=p_constants.TYPE_VLAN) # Map the host to the segment self._setup_host_mappings([(segment['segment']['id'], 'fakehost')]) @@ -753,7 +829,8 @@ class TestSegmentAwareIpam(SegmentTestCase): cidr='2001:db8:0:0::/64') as subnet: segment = self._test_create_segment( network_id=network['network']['id'], - physical_network='physnet') + physical_network='physnet', + network_type=p_constants.TYPE_VLAN) self._validate_l2_adjacency(network['network']['id'], is_adjacent=True) @@ -823,7 +900,8 @@ class TestSegmentAwareIpam(SegmentTestCase): with 
self.network() as network: segment = self._test_create_segment( network_id=network['network']['id'], - physical_network='physnet') + physical_network='physnet', + network_type=p_constants.TYPE_VLAN) # Create a port with no IP address (since there is no subnet) port = self._create_deferred_ip_port(network) @@ -864,7 +942,8 @@ class TestSegmentAwareIpam(SegmentTestCase): with self.network() as network: segment = self._test_create_segment( network_id=network['network']['id'], - physical_network='physnet') + physical_network='physnet', + network_type=p_constants.TYPE_VLAN) # Map the host to the segment self._setup_host_mappings([(segment['segment']['id'], 'fakehost')]) @@ -1155,6 +1234,10 @@ class TestSegmentAwareIpam(SegmentTestCase): class TestSegmentAwareIpamML2(TestSegmentAwareIpam): def setUp(self): + config.cfg.CONF.set_override('network_vlan_ranges', + ['physnet:200:209', 'physnet0:200:209', + 'physnet1:200:209', 'physnet2:200:209'], + group='ml2_type_vlan') super(TestSegmentAwareIpamML2, self).setUp(plugin='ml2') @@ -1174,7 +1257,7 @@ class TestDhcpAgentSegmentScheduling(HostSegmentMappingTestCase): segment = self._test_create_segment(network_id=network['id'], physical_network=phys_net, segmentation_id=200, - network_type='vxlan') + network_type='vlan') dhcp_agents = self.dhcp_agent_db.get_dhcp_agents_hosting_networks( self.ctx, [network['id']]) self.assertEqual(0, len(dhcp_agents)) @@ -1230,7 +1313,7 @@ class TestDhcpAgentSegmentScheduling(HostSegmentMappingTestCase): segment2 = self._test_create_segment(network_id=network['id'], physical_network=phys_net2, segmentation_id=200, - network_type='vxlan')['segment'] + network_type='vlan')['segment'] self._register_agent(DHCP_HOSTA, mappings={phys_net1: 'br-eth-1'}, plugin=self.plugin) diff --git a/neutron/tests/unit/plugins/ml2/drivers/mechanism_test.py b/neutron/tests/unit/plugins/ml2/drivers/mechanism_test.py index 1ec73073430..e7c33caf0c3 100644 --- a/neutron/tests/unit/plugins/ml2/drivers/mechanism_test.py +++ b/neutron/tests/unit/plugins/ml2/drivers/mechanism_test.py @@ -34,7 +34,6 @@ class TestMechanismDriver(api.MechanismDriver): assert(context.current['id'] == context.original['id']) else: assert(not context.original) - assert(context.network_segments) def create_network_precommit(self, context): self._check_network_context(context, False) diff --git a/neutron/tests/unit/plugins/ml2/test_plugin.py b/neutron/tests/unit/plugins/ml2/test_plugin.py index 91b063f6a6d..061bb051840 100644 --- a/neutron/tests/unit/plugins/ml2/test_plugin.py +++ b/neutron/tests/unit/plugins/ml2/test_plugin.py @@ -30,6 +30,7 @@ from sqlalchemy.orm import exc as sqla_exc from neutron._i18n import _ from neutron.callbacks import events +from neutron.callbacks import exceptions as c_exc from neutron.callbacks import registry from neutron.callbacks import resources from neutron.common import utils @@ -58,6 +59,8 @@ from neutron.plugins.ml2 import managers from neutron.plugins.ml2 import models from neutron.plugins.ml2 import plugin as ml2_plugin from neutron.services.qos import qos_consts +from neutron.services.segments import db as segments_plugin_db +from neutron.services.segments import plugin as segments_plugin from neutron.tests import base from neutron.tests.common import helpers from neutron.tests.unit import _test_extension_portbindings as test_bindings @@ -1840,6 +1843,7 @@ class TestMultiSegmentNetworks(Ml2PluginV2TestCase): with mock.patch.object(type_vlan.VlanTypeDriver, 'release_segment') as rs: + segments_plugin_db.subscribe() req = 
self.new_delete_request('networks', network_id) res = req.get_response(self.api) self.assertEqual(2, rs.call_count) @@ -2474,3 +2478,131 @@ class TestTransactionGuard(Ml2PluginV2TestCase): with ctx.session.begin(subtransactions=True): with testtools.ExpectedException(RuntimeError): plugin.delete_subnet(ctx, 'id') + + +class TestML2Segments(Ml2PluginV2TestCase): + + def _reserve_segment(self, network, seg_id=None): + segment = {'id': 'fake_id', + 'network_id': network['network']['id'], + 'tenant_id': network['network']['tenant_id'], + driver_api.NETWORK_TYPE: 'vlan', + driver_api.PHYSICAL_NETWORK: self.physnet} + if seg_id: + segment[driver_api.SEGMENTATION_ID] = seg_id + + self.driver._handle_segment_change( + mock.ANY, events.PRECOMMIT_CREATE, segments_plugin.Plugin(), + self.context, segment) + + if seg_id: + # Assert it is not changed + self.assertEqual(seg_id, segment[driver_api.SEGMENTATION_ID]) + else: + self.assertTrue(segment[driver_api.SEGMENTATION_ID] > 0) + + return segment + + def test_reserve_segment_success_with_partial_segment(self): + with self.network() as network: + self._reserve_segment(network) + + def test_reserve_segment_fail_with_duplicate_param(self): + with self.network() as network: + self._reserve_segment(network, 10) + + self.assertRaises( + exc.VlanIdInUse, self._reserve_segment, network, 10) + + def test_reserve_segment_update_network_mtu(self): + with self.network() as network: + network_id = network['network']['id'] + with mock.patch.object( + self.driver, '_get_network_mtu') as mtu: + mtu.return_value = 100 + self._reserve_segment(network) + updated_network = self.driver.get_network(self.context, + network_id) + self.assertEqual(100, updated_network[driver_api.MTU]) + + mtu.return_value = 200 + self._reserve_segment(network) + updated_network = self.driver.get_network(self.context, + network_id) + self.assertEqual(200, updated_network[driver_api.MTU]) + + def _test_nofity_mechanism_manager(self, event): + seg1 = {driver_api.NETWORK_TYPE: 'vlan', + driver_api.PHYSICAL_NETWORK: self.physnet, + driver_api.SEGMENTATION_ID: 1000} + seg2 = {driver_api.NETWORK_TYPE: 'vlan', + driver_api.PHYSICAL_NETWORK: self.physnet, + driver_api.SEGMENTATION_ID: 1001} + seg3 = {driver_api.NETWORK_TYPE: 'vlan', + driver_api.PHYSICAL_NETWORK: self.physnet, + driver_api.SEGMENTATION_ID: 1002} + with self.network() as network: + network = network['network'] + + for stale_seg in segments_db.get_network_segments(self.context.session, + network['id']): + segments_db.delete_network_segment(self.context.session, + stale_seg['id']) + + for seg in [seg1, seg2, seg3]: + seg['network_id'] = network['id'] + segments_db.add_network_segment(self.context, network['id'], seg) + + self.net_context = None + + def record_network_context(net_context): + self.net_context = net_context + + with mock.patch.object(managers.MechanismManager, + 'update_network_precommit', + side_effect=record_network_context): + self.driver._handle_segment_change( + mock.ANY, event, segments_plugin.Plugin(), self.context, seg1) + # Make sure the mechanism manager can get the right amount of + # segments of network + self.assertEqual(3, len(self.net_context.current[mpnet.SEGMENTS])) + + def test_reserve_segment_nofity_mechanism_manager(self): + self._test_nofity_mechanism_manager(events.PRECOMMIT_CREATE) + + def test_release_segment(self): + with self.network() as network: + segment = self._reserve_segment(network, 10) + segment['network_id'] = network['network']['id'] + self.driver._handle_segment_change( + mock.ANY, 
events.PRECOMMIT_DELETE, mock.ANY, + self.context, segment) + # Check that the segment_id is not reserved + segment = self._reserve_segment( + network, segment[driver_api.SEGMENTATION_ID]) + + def test_release_segment_nofity_mechanism_manager(self): + self._test_nofity_mechanism_manager(events.PRECOMMIT_DELETE) + + def test_prevent_delete_segment_with_tenant_port(self): + fake_owner_compute = constants.DEVICE_OWNER_COMPUTE_PREFIX + 'fake' + ml2_db.subscribe() + plugin = manager.NeutronManager.get_plugin() + with self.port(device_owner=fake_owner_compute) as port: + binding = ml2_db.get_locked_port_and_binding(self.context.session, + port['port']['id'])[1] + binding['host'] = 'host-ovs-no_filter' + mech_context = driver_context.PortContext( + plugin, self.context, port['port'], + plugin.get_network(self.context, port['port']['network_id']), + binding, None) + plugin._bind_port_if_needed(mech_context) + segment = segments_db.get_network_segments( + self.context.session, port['port']['network_id'])[0] + segment['network_id'] = port['port']['network_id'] + self.assertRaises(c_exc.CallbackFailure, registry.notify, + resources.SEGMENT, events.BEFORE_DELETE, + mock.ANY, + context=self.context, segment=segment) + exist_port = self._show('ports', port['port']['id']) + self.assertEqual(port['port']['id'], exist_port['port']['id']) diff --git a/neutron/tests/unit/scheduler/test_dhcp_agent_scheduler.py b/neutron/tests/unit/scheduler/test_dhcp_agent_scheduler.py index 2dfbe9bd492..f309f9e208d 100644 --- a/neutron/tests/unit/scheduler/test_dhcp_agent_scheduler.py +++ b/neutron/tests/unit/scheduler/test_dhcp_agent_scheduler.py @@ -298,8 +298,7 @@ class TestAutoScheduleSegments(test_plugin.Ml2PluginV2TestCase, def setUp(self): super(TestAutoScheduleSegments, self).setUp() - self.plugin = importutils.import_object('neutron.plugins.ml2.plugin.' - 'Ml2Plugin') + self.plugin = self.driver self.segments_plugin = importutils.import_object( 'neutron.services.segments.plugin.Plugin') self.ctx = context.get_admin_context() @@ -317,8 +316,8 @@ class TestAutoScheduleSegments(test_plugin.Ml2PluginV2TestCase, seg = self.segments_plugin.create_segment( self.ctx, {'segment': {'network_id': network_id, - 'physical_network': constants.ATTR_NOT_SPECIFIED, - 'network_type': 'meh', + 'physical_network': 'physnet1', + 'network_type': 'vlan', 'segmentation_id': constants.ATTR_NOT_SPECIFIED}}) return seg['id'] @@ -506,8 +505,7 @@ class DHCPAgentWeightSchedulerTestCase(test_plugin.Ml2PluginV2TestCase): weight_scheduler = ( 'neutron.scheduler.dhcp_agent_scheduler.WeightScheduler') cfg.CONF.set_override('network_scheduler_driver', weight_scheduler) - self.plugin = importutils.import_object('neutron.plugins.ml2.plugin.' - 'Ml2Plugin') + self.plugin = self.driver mock.patch.object( self.plugin, 'filter_hosts_with_network_access', side_effect=lambda context, network_id, hosts: hosts).start() @@ -531,8 +529,8 @@ class DHCPAgentWeightSchedulerTestCase(test_plugin.Ml2PluginV2TestCase): seg = self.segments_plugin.create_segment( self.ctx, {'segment': {'network_id': network_id, - 'physical_network': constants.ATTR_NOT_SPECIFIED, - 'network_type': 'meh', + 'physical_network': 'physnet1', + 'network_type': 'vlan', 'segmentation_id': constants.ATTR_NOT_SPECIFIED}}) return seg['id']
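Note on consuming the new SEGMENT events: the patch fires registry callbacks at
BEFORE_DELETE, PRECOMMIT_CREATE, PRECOMMIT_DELETE, AFTER_CREATE and
AFTER_DELETE. The snippet below is only an illustrative sketch, not part of the
patch; the callback name _log_segment_event and its print output are
hypothetical, while the subscribe pattern and callback signature mirror
_prevent_segment_delete_with_port_bound() in neutron/plugins/ml2/db.py above.

from neutron.callbacks import events
from neutron.callbacks import registry
from neutron.callbacks import resources


def _log_segment_event(resource, event, trigger, context, segment):
    # "context" and "segment" arrive as the keyword arguments passed to
    # registry.notify() in neutron/services/segments/db.py. Raising from a
    # BEFORE_DELETE callback aborts the deletion, which is how SegmentInUse
    # is used in this patch.
    print('segment %s: %s' % (segment['id'], event))


def subscribe():
    for event in (events.BEFORE_DELETE, events.PRECOMMIT_CREATE,
                  events.PRECOMMIT_DELETE, events.AFTER_CREATE,
                  events.AFTER_DELETE):
        registry.subscribe(_log_segment_event, resources.SEGMENT, event)


subscribe()

As in ml2/db.py, calling subscribe() at import time registers the callbacks as
soon as the module is loaded.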