Delete segment RPs when network is deleted
When a network is deleted, only one Placement call per segment is made to remove the associated resource provider, if one exists. Before this patch, the segment resource provider was updated every time a subnet was deleted, and once no subnets were left on the related segment, the associated resource provider was deleted.

This optimization improves the network deletion time (see the Launchpad bug referenced below). For example, for a network with two segments and ten subnets, the Neutron server processing time dropped from 8.2 seconds to 4.4 seconds (the poor absolute numbers are due to the modest testing environment).

Along with the segment RP optimization during network deletion, this patch also skips the router subnet update: because all subnets in the network are going to be deleted, there is no need to update them during the network deletion process.

Change-Id: Ifd50027911a9ca3508e80e0de9a6cc45b67006cf
Closes-Bug: #1878916
commit 7f40e626d6 (parent cb55643a06)
Changed files:
neutron/db/db_base_plugin_v2.py
neutron/plugins/ml2/plugin.py
neutron/services/segments/db.py
neutron/services/segments/plugin.py
neutron/tests/unit/extensions/test_segment.py
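The core mechanism of the patch is a skip flag threaded through Neutron's callback registry: the network-delete path tags each subnet AFTER_DELETE notification with for_net_delete=True, and the subscribers shown in the diff below return early instead of updating the segment resource provider once per subnet. A minimal, self-contained sketch of that publish/subscribe flag pattern — the function names here are made up for illustration and stand in for neutron_lib's registry machinery:

FOR_NET_DELETE = 'for_net_delete'  # same constant this patch adds to segments db


def notify(subscribers, subnet, **kwargs):
    # Publisher side: the network-delete path sets for_net_delete=True once
    # per subnet notification, marking it as part of a cascade delete.
    for callback in subscribers:
        callback(subnet, **kwargs)


def update_segment_rp(subnet, **kwargs):
    # Subscriber side: skip the per-subnet Placement update when the whole
    # network (and therefore the segment RP) is about to be deleted anyway.
    if kwargs.get(FOR_NET_DELETE):
        return
    print('updating resource provider of segment %s' % subnet['segment_id'])


subnet = {'id': 'subnet-1', 'segment_id': 'segment-1'}
notify([update_segment_rp], subnet)                       # plain subnet delete: RP updated
notify([update_segment_rp], subnet, for_net_delete=True)  # network delete: skipped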
neutron/db/db_base_plugin_v2.py

@@ -500,8 +500,10 @@ class NeutronDbPluginV2(db_base_plugin_common.DbBasePluginCommon,
         # cleanup if a network-owned port snuck in without failing
         for subnet in subnets:
             self._delete_subnet(context, subnet)
             # TODO(ralonsoh): use payloads
             registry.notify(resources.SUBNET, events.AFTER_DELETE,
-                            self, context=context, subnet=subnet.to_dict())
+                            self, context=context, subnet=subnet.to_dict(),
+                            for_net_delete=True)
         with db_api.CONTEXT_WRITER.using(context):
             network_db = self._get_network(context, id)
             network = self._make_network_dict(network_db, context=context)
neutron/plugins/ml2/plugin.py

@@ -33,6 +33,7 @@ from neutron.db import models_v2
 from neutron.objects import base as objects_base
 from neutron.objects import ports as port_obj
 from neutron.plugins.ml2 import models
+from neutron.services.segments import db as seg_db
 from neutron.services.segments import exceptions as seg_exc

 LOG = log.getLogger(__name__)
@@ -316,7 +317,7 @@ def is_dhcp_active_on_any_subnet(context, subnet_ids):
 def _prevent_segment_delete_with_port_bound(resource, event, trigger,
                                             payload=None):
     """Raise exception if there are any ports bound with segment_id."""
-    if payload.metadata.get('for_net_delete'):
+    if payload.metadata.get(seg_db.FOR_NET_DELETE):
         # don't check for network deletes
         return

neutron/services/segments/db.py

@@ -38,6 +38,7 @@ from neutron.services.segments import exceptions


 _USER_CONFIGURED_SEGMENT_PLUGIN = None
+FOR_NET_DELETE = 'for_net_delete'


 def check_user_configured_segment_plugin():
@@ -189,7 +190,7 @@ class SegmentDbMixin(object):
             self.delete_segment,
             payload=events.DBEventPayload(
                 context, metadata={
-                    'for_net_delete': for_net_delete},
+                    FOR_NET_DELETE: for_net_delete},
                 states=(segment_dict,),
                 resource_id=uuid))

neutron/services/segments/plugin.py

@@ -121,7 +121,7 @@ class Plugin(db.SegmentDbMixin, segment.SegmentPluginBase):
     def _prevent_segment_delete_with_subnet_associated(
             self, resource, event, trigger, payload=None):
         """Raise exception if there are any subnets associated with segment."""
-        if payload.metadata.get('for_net_delete'):
+        if payload.metadata.get(db.FOR_NET_DELETE):
         # don't check if this is a part of a network delete operation
             return
         segment_id = payload.resource_id
@@ -343,6 +343,9 @@ class NovaSegmentNotifier(object):
     @registry.receives(resources.SUBNET, [events.AFTER_DELETE])
     def _notify_subnet_deleted(self, resource, event, trigger, context,
                                subnet, **kwargs):
+        if kwargs.get(db.FOR_NET_DELETE):
+            return  # skip segment RP update if it is going to be deleted
+
         segment_id = subnet.get('segment_id')
         if not segment_id or subnet['ip_version'] != constants.IP_VERSION_4:
             return
@@ -359,23 +362,32 @@ class NovaSegmentNotifier(object):
             self._delete_nova_inventory, segment_id))

     def _get_aggregate_id(self, segment_id):
-        aggregate_uuid = self.p_client.list_aggregates(
-            segment_id)['aggregates'][0]
-        aggregates = self.n_client.aggregates.list()
-        for aggregate in aggregates:
+        try:
+            aggregate_uuid = self.p_client.list_aggregates(
+                segment_id)['aggregates'][0]
+        except placement_exc.PlacementAggregateNotFound:
+            LOG.info('Segment %s resource provider aggregate not found',
+                     segment_id)
+            return
+
+        for aggregate in self.n_client.aggregates.list():
             nc_aggregate_uuid = self._get_nova_aggregate_uuid(aggregate)
             if nc_aggregate_uuid == aggregate_uuid:
                 return aggregate.id

     def _delete_nova_inventory(self, event):
         aggregate_id = self._get_aggregate_id(event.segment_id)
-        aggregate = self.n_client.aggregates.get_details(
-            aggregate_id)
-        for host in aggregate.hosts:
-            self.n_client.aggregates.remove_host(aggregate_id,
-                                                 host)
-        self.n_client.aggregates.delete(aggregate_id)
-        self.p_client.delete_resource_provider(event.segment_id)
+        if aggregate_id:
+            aggregate = self.n_client.aggregates.get_details(aggregate_id)
+            for host in aggregate.hosts:
+                self.n_client.aggregates.remove_host(aggregate_id, host)
+            self.n_client.aggregates.delete(aggregate_id)
+
+        try:
+            self.p_client.delete_resource_provider(event.segment_id)
+        except placement_exc.PlacementClientError as exc:
+            LOG.info('Segment %s resource provider not found; error: %s',
+                     event.segment_id, str(exc))

     @registry.receives(resources.SEGMENT_HOST_MAPPING, [events.AFTER_CREATE])
     def _notify_host_addition_to_aggregate(self, resource, event, trigger,
@@ -390,9 +402,8 @@ class NovaSegmentNotifier(object):

     def _add_host_to_aggregate(self, event):
         for segment_id in event.segment_ids:
-            try:
-                aggregate_id = self._get_aggregate_id(segment_id)
-            except placement_exc.PlacementAggregateNotFound:
+            aggregate_id = self._get_aggregate_id(segment_id)
+            if not aggregate_id:
                 LOG.info('When adding host %(host)s, aggregate not found '
                          'for routed network segment %(segment_id)s',
                          {'host': event.host, 'segment_id': segment_id})
@@ -472,6 +483,13 @@ class NovaSegmentNotifier(object):
             ipv4_subnet_ids.append(ip['subnet_id'])
         return ipv4_subnet_ids

+    @registry.receives(resources.SEGMENT, [events.AFTER_DELETE])
+    def _notify_segment_deleted(
+            self, resource, event, trigger, payload=None):
+        if payload:
+            self.batch_notifier.queue_event(Event(
+                self._delete_nova_inventory, payload.resource_id))
+

 @registry.has_registry_receivers
 class SegmentHostRoutes(object):
@@ -650,6 +668,9 @@ class SegmentHostRoutes(object):
                               subnet, **kwargs):
         # If this is a routed network, remove any routes to this subnet on
         # this networks remaining subnets.
+        if kwargs.get(db.FOR_NET_DELETE):
+            return  # skip subnet update if the network is going to be deleted
+
         if subnet.get('segment_id'):
             self._update_routed_network_host_routes(
                 context, subnet['network_id'], deleted_cidr=subnet['cidr'])
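Note how the reworked _delete_nova_inventory above tolerates resources that are already gone: a missing aggregate or resource provider is logged and skipped rather than raised, which keeps the single RP delete per segment idempotent. A rough sketch of that shape, using a hypothetical client stub rather than Neutron's real placement client:

# Hypothetical stand-in for the placement client; only the shape of the
# tolerant lookup mirrors the patch.
class PlacementAggregateNotFound(Exception):
    pass


class FakePlacementClient:
    def __init__(self, aggregates):
        self._aggregates = aggregates

    def list_aggregates(self, segment_id):
        if segment_id not in self._aggregates:
            raise PlacementAggregateNotFound(segment_id)
        return {'aggregates': [self._aggregates[segment_id]]}


def get_aggregate_uuid(p_client, segment_id):
    # Mirror of _get_aggregate_id: treat "not found" as "nothing to do"
    # and return None instead of letting the exception escape.
    try:
        return p_client.list_aggregates(segment_id)['aggregates'][0]
    except PlacementAggregateNotFound:
        print('aggregate for segment %s already gone, skipping' % segment_id)
        return None


client = FakePlacementClient({'seg-1': 'agg-uuid-1'})
assert get_aggregate_uuid(client, 'seg-1') == 'agg-uuid-1'
assert get_aggregate_uuid(client, 'seg-2') is None  # tolerated, not raised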
neutron/tests/unit/extensions/test_segment.py

@@ -2478,6 +2478,33 @@ class TestNovaSegmentNotifier(SegmentAwareIpamTestCase):
         self.segments_plugin.nova_updater._send_notifications([event])
         self.assertTrue(log.called)

+    def _test_create_network_and_segment(self, phys_net):
+        with self.network() as net:
+            network = net['network']
+        segment = self._test_create_segment(
+            network_id=network['id'], physical_network=phys_net,
+            segmentation_id=200, network_type='vlan')
+        return network, segment['segment']
+
+    def test_delete_network_and_owned_segments(self):
+        db.subscribe()
+        aggregate = mock.MagicMock()
+        aggregate.uuid = uuidutils.generate_uuid()
+        aggregate.id = 1
+        aggregate.hosts = ['fakehost1']
+        self.mock_p_client.list_aggregates.return_value = {
+            'aggregates': [aggregate.uuid]}
+        self.mock_n_client.aggregates.list.return_value = [aggregate]
+        self.mock_n_client.aggregates.get_details.return_value = aggregate
+        network, segment = self._test_create_network_and_segment('physnet')
+        self._delete('networks', network['id'])
+        self.mock_n_client.aggregates.remove_host.assert_has_calls(
+            [mock.call(aggregate.id, 'fakehost1')])
+        self.mock_n_client.aggregates.delete.assert_has_calls(
+            [mock.call(aggregate.id)])
+        self.mock_p_client.delete_resource_provider.assert_has_calls(
+            [mock.call(segment['id'])])
+

 class TestDhcpAgentSegmentScheduling(HostSegmentMappingTestCase):