Bulk up port context retrieval

With the switch to subquery relationships, individual get_port calls
can get expensive with large numbers of ports
(100ms per port in my dev environment). This patch bulks up the
retrieval of the port contexts so one set of queries covers all
of the devices in an RPC call.

Partial-Bug: #1665215
Change-Id: I63757e143b23c24c349be98dc5a09115b8709a25
Author: Kevin Benton
Commit: 529da4e583
Tag: 11.0.0.0b2
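
For orientation, here is a rough sketch of the before/after call pattern the commit message describes. This is illustrative only and not part of the patch; the real implementations are in the diffs below (get_bound_port_context already exists on the ML2 plugin, get_bound_ports_contexts is the method this patch adds).

# Rough before/after sketch of the query pattern this patch addresses.
# Illustrative only -- see the plugin.py and rpc.py diffs below for the
# actual code.

def per_device_pattern(plugin, rpc_context, devices, host):
    # Old pattern: one get_bound_port_context() call per device. Each call
    # issues its own port/network/binding queries (~100ms per port in the
    # author's dev environment), so cost grows linearly with the number of
    # devices in the RPC request.
    return {dev: plugin.get_bound_port_context(rpc_context, dev, host)
            for dev in devices}

def bulk_pattern(plugin, rpc_context, devices, host):
    # New pattern: a single get_bound_ports_contexts() call covers every
    # device in the RPC request; devices that cannot be resolved map to None.
    return plugin.get_bound_ports_contexts(rpc_context, devices, host)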

neutron/plugins/ml2/plugin.py (+47, -1)

@@ -893,7 +893,7 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
                                                    filters=net_filters)
         }
         segments_by_netid = segments_db.get_networks_segments(
-            context, nets_by_netid.keys())
+            context, list(nets_by_netid.keys()))
         netctxs_by_netid = {
             net_id: driver_context.NetworkContext(
                 self, context, nets_by_netid[net_id],
@@ -1539,6 +1539,52 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
 
         return self._bind_port_if_needed(port_context)
 
+    @utils.transaction_guard
+    @db_api.retry_if_session_inactive(context_var_name='plugin_context')
+    def get_bound_ports_contexts(self, plugin_context, dev_ids, host=None):
+        result = {}
+        with db_api.context_manager.reader.using(plugin_context):
+            dev_to_full_pids = db.partial_port_ids_to_full_ids(
+                plugin_context, dev_ids)
+            # get all port objects for IDs
+            port_dbs_by_id = db.get_port_db_objects(
+                plugin_context, dev_to_full_pids.values())
+            # get all networks for PortContext construction
+            netctxs_by_netid = self.get_network_contexts(
+                plugin_context,
+                {p.network_id for p in port_dbs_by_id.values()})
+            for dev_id in dev_ids:
+                port_id = dev_to_full_pids.get(dev_id)
+                port_db = port_dbs_by_id.get(port_id)
+                if (not port_id or not port_db or
+                        port_db.network_id not in netctxs_by_netid):
+                    result[dev_id] = None
+                    continue
+                port = self._make_port_dict(port_db)
+                if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE:
+                    binding = db.get_distributed_port_binding_by_host(
+                        plugin_context, port['id'], host)
+                    bindlevelhost_match = host
+                else:
+                    binding = port_db.port_binding
+                    bindlevelhost_match = binding.host if binding else None
+                if not binding:
+                    LOG.info(_LI("Binding info for port %s was not found, "
+                                 "it might have been deleted already."),
+                             port_id)
+                    result[dev_id] = None
+                    continue
+                levels = [l for l in port_db.binding_levels
+                          if l.host == bindlevelhost_match]
+                levels = sorted(levels, key=lambda l: l.level)
+                network_ctx = netctxs_by_netid.get(port_db.network_id)
+                port_context = driver_context.PortContext(
+                    self, plugin_context, port, network_ctx, binding, levels)
+                result[dev_id] = port_context
+
+        return {d: self._bind_port_if_needed(pctx) if pctx else None
+                for d, pctx in result.items()}
+
     @utils.transaction_guard
     @db_api.retry_if_session_inactive()
     def update_port_status(self, context, port_id, status, host=None,

neutron/plugins/ml2/rpc.py (+31, -8)

@@ -82,13 +82,22 @@ class RpcCallbacks(type_tunnel.TunnelRpcCallbackMixin):
                       {'device': device, 'agent_id': agent_id})
             return {'device': device}
 
-        segment = port_context.bottom_bound_segment
         port = port_context.current
         # caching information about networks for future use
         if cached_networks is not None:
             if port['network_id'] not in cached_networks:
                 cached_networks[port['network_id']] = (
                     port_context.network.current)
+        return self._get_device_details(rpc_context, agent_id=agent_id,
+                                        host=host, device=device,
+                                        port_context=port_context)
+
+    def _get_device_details(self, rpc_context, agent_id, host, device,
+                            port_context):
+        segment = port_context.bottom_bound_segment
+        port = port_context.current
+        plugin = directory.get_plugin()
+        port_id = port_context.current['id']
 
         if not segment:
             LOG.warning(_LW("Device %(device)s requested by agent "
@@ -148,17 +157,31 @@ class RpcCallbacks(type_tunnel.TunnelRpcCallbackMixin):
                                                     **kwargs):
         devices = []
         failed_devices = []
-        cached_networks = {}
-        for device in kwargs.pop('devices', []):
+        devices_to_fetch = kwargs.pop('devices', [])
+        plugin = directory.get_plugin()
+        host = kwargs.get('host')
+        bound_contexts = plugin.get_bound_ports_contexts(rpc_context,
+                                                         devices_to_fetch,
+                                                         host)
+        for device in devices_to_fetch:
+            if not bound_contexts.get(device):
+                # port was not found or could not be bound
+                LOG.debug("Device %(device)s requested by agent "
+                          "%(agent_id)s not found in database",
+                          {'device': device,
+                           'agent_id': kwargs.get('agent_id')})
+                devices.append({'device': device})
+                continue
             try:
-                devices.append(self.get_device_details(
+                devices.append(self._get_device_details(
                                rpc_context,
+                               agent_id=kwargs.get('agent_id'),
+                               host=host,
                                device=device,
-                               cached_networks=cached_networks,
-                               **kwargs))
+                               port_context=bound_contexts[device]))
             except Exception:
-                LOG.error(_LE("Failed to get details for device %s"),
-                          device)
+                LOG.exception(_LE("Failed to get details for device %s"),
+                              device)
                 failed_devices.append(device)
 
         return {'devices': devices,

neutron/tests/unit/plugins/ml2/test_port_binding.py (+0, -9)

@@ -132,15 +132,6 @@ class PortBindingTestCase(test_plugin.NeutronDbPluginV2TestCase):
                                                cached_networks=cached_networks)
             self.assertFalse(self.plugin.get_network.called)
 
-    def test_get_bound_port_context_cache_miss(self):
-        ctx = context.get_admin_context()
-        with self.port(name='name') as port:
-            some_network = {'id': u'2ac23560-7638-44e2-9875-c1888b02af72'}
-            self.plugin.get_network = mock.Mock(return_value=some_network)
-            self.plugin.get_bound_port_context(ctx, port['port']['id'],
-                                               cached_networks={})
-            self.assertEqual(1, self.plugin.get_network.call_count)
-
     def _test_update_port_binding(self, host, new_host=None):
         with mock.patch.object(self.plugin,
                                '_notify_port_updated') as notify_mock:

neutron/tests/unit/plugins/ml2/test_rpc.py (+2, -2)

@@ -166,13 +166,13 @@ class RpcCallbacksTestCase(base.BaseTestCase):
     def _test_get_devices_list(self, callback, side_effect, expected):
         devices = [1, 2, 3, 4, 5]
         kwargs = {'host': 'fake_host', 'agent_id': 'fake_agent_id'}
-        with mock.patch.object(self.callbacks, 'get_device_details',
+        with mock.patch.object(self.callbacks, '_get_device_details',
                                side_effect=side_effect) as f:
             res = callback('fake_context', devices=devices, **kwargs)
             self.assertEqual(expected, res)
             self.assertEqual(len(devices), f.call_count)
             calls = [mock.call('fake_context', device=i,
-                               cached_networks={}, **kwargs)
+                               port_context=mock.ANY, **kwargs)
                      for i in devices]
             f.assert_has_calls(calls)
 
