From a391178c218f08b0c5e7580b5a4b79513ebffcc2 Mon Sep 17 00:00:00 2001 From: Salvatore Orlando Date: Wed, 17 Jun 2015 04:36:02 -0700 Subject: [PATCH 01/54] Add policy files specific to NSX plugins This patch simply adds a 'policy' directory with a few json files into ./etc/neutron/plugins/vmware to provide default policies specific to the VMware NSX plugin family. These policy files can be loaded leveraging the policy_dirs configuration option. Change-Id: Icce41a6ee63715bc145694f27a2166a7fa884dba --- .../plugins/vmware/policy/network-gateways.json | 10 ++++++++++ etc/neutron/plugins/vmware/policy/routers.json | 7 +++++++ 2 files changed, 17 insertions(+) create mode 100644 etc/neutron/plugins/vmware/policy/network-gateways.json create mode 100644 etc/neutron/plugins/vmware/policy/routers.json diff --git a/etc/neutron/plugins/vmware/policy/network-gateways.json b/etc/neutron/plugins/vmware/policy/network-gateways.json new file mode 100644 index 00000000000..48575070898 --- /dev/null +++ b/etc/neutron/plugins/vmware/policy/network-gateways.json @@ -0,0 +1,10 @@ +{ + "create_network_gateway": "rule:admin_or_owner", + "update_network_gateway": "rule:admin_or_owner", + "delete_network_gateway": "rule:admin_or_owner", + "connect_network": "rule:admin_or_owner", + "disconnect_network": "rule:admin_or_owner", + "create_gateway_device": "rule:admin_or_owner", + "update_gateway_device": "rule:admin_or_owner", + "delete_gateway_device": "rule:admin_or_owner" +} diff --git a/etc/neutron/plugins/vmware/policy/routers.json b/etc/neutron/plugins/vmware/policy/routers.json new file mode 100644 index 00000000000..48665dba836 --- /dev/null +++ b/etc/neutron/plugins/vmware/policy/routers.json @@ -0,0 +1,7 @@ +{ + "create_router:external_gateway_info:enable_snat": "rule:admin_or_owner", + "create_router:distributed": "rule:admin_or_owner", + "get_router:distributed": "rule:admin_or_owner", + "update_router:external_gateway_info:enable_snat": "rule:admin_or_owner", + 
"update_router:distributed": "rule:admin_or_owner" +} From 9efa1fdeed86d249b2d3dde987a1fb98290140f0 Mon Sep 17 00:00:00 2001 From: Oleg Bondarev Date: Thu, 11 Jun 2015 15:40:33 +0300 Subject: [PATCH 02/54] l3 agent: do router cleanup for unknown routers The patch adds cleanup on router delete for routers which are unknown to agent. This should cover the case when router is deleted during resync on agent init. Functional tests were updated and now handle 3 cases for l3 sync: - no routers were deleted during agent downtime, - some routers were deleted during agent downtime - some routers were deleted during agent resync Closes-Bug: #1464238 Change-Id: Id98111849fa88d6807f757864187b059c491aaac --- neutron/agent/l3/agent.py | 3 +- neutron/agent/l3/namespace_manager.py | 41 +++++---- .../tests/functional/agent/test_l3_agent.py | 89 +++++++++++++------ .../unit/agent/l3/test_namespace_manager.py | 38 +++++--- 4 files changed, 113 insertions(+), 58 deletions(-) diff --git a/neutron/agent/l3/agent.py b/neutron/agent/l3/agent.py index d9e4db8de21..8e54d9617ec 100644 --- a/neutron/agent/l3/agent.py +++ b/neutron/agent/l3/agent.py @@ -335,7 +335,8 @@ class L3NATAgent(firewall_l3_agent.FWaaSL3AgentRpcCallback, ri = self.router_info.get(router_id) if ri is None: LOG.warn(_LW("Info for router %s was not found. 
" - "Skipping router removal"), router_id) + "Performing router cleanup"), router_id) + self.namespaces_manager.ensure_router_cleanup(router_id) return registry.notify(resources.ROUTER, events.BEFORE_DELETE, diff --git a/neutron/agent/l3/namespace_manager.py b/neutron/agent/l3/namespace_manager.py index e7d029fcdca..51464e4e5bd 100644 --- a/neutron/agent/l3/namespace_manager.py +++ b/neutron/agent/l3/namespace_manager.py @@ -81,24 +81,7 @@ class NamespaceManager(object): _ns_prefix, ns_id = self.get_prefix_and_id(ns) if ns_id in self._ids_to_keep: continue - if _ns_prefix == namespaces.NS_PREFIX: - ns = namespaces.RouterNamespace(ns_id, - self.agent_conf, - self.driver, - use_ipv6=False) - else: - ns = dvr_snat_ns.SnatNamespace(ns_id, - self.agent_conf, - self.driver, - use_ipv6=False) - try: - if self.metadata_driver: - # cleanup stale metadata proxy processes first - self.metadata_driver.destroy_monitored_metadata_proxy( - self.process_monitor, ns_id, self.agent_conf) - ns.delete() - except RuntimeError: - LOG.exception(_LE('Failed to destroy stale namespace %s'), ns) + self._cleanup(_ns_prefix, ns_id) return True @@ -131,3 +114,25 @@ class NamespaceManager(object): LOG.exception(_LE('RuntimeError in obtaining namespace list for ' 'namespace cleanup.')) return set() + + def ensure_router_cleanup(self, router_id): + """Performs cleanup for a router""" + for ns in self.list_all(): + if ns.endswith(router_id): + ns_prefix, ns_id = self.get_prefix_and_id(ns) + self._cleanup(ns_prefix, ns_id) + + def _cleanup(self, ns_prefix, ns_id): + if ns_prefix == namespaces.NS_PREFIX: + ns_class = namespaces.RouterNamespace + else: + ns_class = dvr_snat_ns.SnatNamespace + ns = ns_class(ns_id, self.agent_conf, self.driver, use_ipv6=False) + try: + if self.metadata_driver: + # cleanup stale metadata proxy processes first + self.metadata_driver.destroy_monitored_metadata_proxy( + self.process_monitor, ns_id, self.agent_conf) + ns.delete() + except RuntimeError: + 
LOG.exception(_LE('Failed to destroy stale namespace %s'), ns) diff --git a/neutron/tests/functional/agent/test_l3_agent.py b/neutron/tests/functional/agent/test_l3_agent.py index 7788654bf2c..4c2fd6ea1e2 100644 --- a/neutron/tests/functional/agent/test_l3_agent.py +++ b/neutron/tests/functional/agent/test_l3_agent.py @@ -497,26 +497,24 @@ class L3AgentTestCase(L3AgentTestFramework): (new_external_device_ip, external_device_name), new_config) - def test_periodic_sync_routers_task(self): - routers_to_keep = [] - routers_to_delete = [] + def _test_periodic_sync_routers_task(self, + routers_to_keep, + routers_deleted, + routers_deleted_during_resync): ns_names_to_retrieve = set() - routers_info_to_delete = [] - for i in range(2): - routers_to_keep.append(self.generate_router_info(False)) - ri = self.manage_router(self.agent, routers_to_keep[i]) + deleted_routers_info = [] + for r in routers_to_keep: + ri = self.manage_router(self.agent, r) ns_names_to_retrieve.add(ri.ns_name) - for i in range(2): - routers_to_delete.append(self.generate_router_info(False)) - ri = self.manage_router(self.agent, routers_to_delete[i]) - routers_info_to_delete.append(ri) + for r in routers_deleted + routers_deleted_during_resync: + ri = self.manage_router(self.agent, r) + deleted_routers_info.append(ri) ns_names_to_retrieve.add(ri.ns_name) - # Mock the plugin RPC API to Simulate a situation where the agent - # was handling the 4 routers created above, it went down and after - # starting up again, two of the routers were deleted via the API - self.mock_plugin_api.get_routers.return_value = routers_to_keep - # also clear agent router_info as it will be after restart + mocked_get_routers = self.mock_plugin_api.get_routers + mocked_get_routers.return_value = (routers_to_keep + + routers_deleted_during_resync) + # clear agent router_info as it will be after restart self.agent.router_info = {} # Synchonize the agent with the plug-in @@ -533,23 +531,58 @@ class 
L3AgentTestCase(L3AgentTestFramework): # Plug external_gateway_info in the routers that are not going to be # deleted by the agent when it processes the updates. Otherwise, # _process_router_if_compatible in the agent fails - for i in range(2): - routers_to_keep[i]['external_gateway_info'] = {'network_id': - external_network_id} + for r in routers_to_keep: + r['external_gateway_info'] = {'network_id': external_network_id} - # Have the agent process the update from the plug-in and verify - # expected behavior - for _ in routers_to_keep: + # while sync updates are still in the queue, higher priority + # router_deleted events may be added there as well + for r in routers_deleted_during_resync: + self.agent.router_deleted(self.agent.context, r['id']) + + # make sure all events are processed + while not self.agent._queue._queue.empty(): self.agent._process_router_update() - for i in range(2): - self.assertIn(routers_to_keep[i]['id'], self.agent.router_info) + for r in routers_to_keep: + self.assertIn(r['id'], self.agent.router_info) self.assertTrue(self._namespace_exists(namespaces.NS_PREFIX + - routers_to_keep[i]['id'])) - for i in range(2): - self.assertNotIn(routers_info_to_delete[i].router_id, + r['id'])) + for ri in deleted_routers_info: + self.assertNotIn(ri.router_id, self.agent.router_info) - self._assert_router_does_not_exist(routers_info_to_delete[i]) + self._assert_router_does_not_exist(ri) + + def test_periodic_sync_routers_task(self): + routers_to_keep = [] + for i in range(2): + routers_to_keep.append(self.generate_router_info(False)) + self._test_periodic_sync_routers_task(routers_to_keep, + routers_deleted=[], + routers_deleted_during_resync=[]) + + def test_periodic_sync_routers_task_routers_deleted_while_agent_down(self): + routers_to_keep = [] + routers_deleted = [] + for i in range(2): + routers_to_keep.append(self.generate_router_info(False)) + for i in range(2): + routers_deleted.append(self.generate_router_info(False)) + 
self._test_periodic_sync_routers_task(routers_to_keep, + routers_deleted, + routers_deleted_during_resync=[]) + + def test_periodic_sync_routers_task_routers_deleted_while_agent_sync(self): + routers_to_keep = [] + routers_deleted_during_resync = [] + for i in range(2): + routers_to_keep.append(self.generate_router_info(False)) + for i in range(2): + routers_deleted_during_resync.append( + self.generate_router_info(False)) + self._test_periodic_sync_routers_task( + routers_to_keep, + routers_deleted=[], + routers_deleted_during_resync=routers_deleted_during_resync) def _router_lifecycle(self, enable_ha, ip_version=4, dual_stack=False, v6_ext_gw_with_sub=True): diff --git a/neutron/tests/unit/agent/l3/test_namespace_manager.py b/neutron/tests/unit/agent/l3/test_namespace_manager.py index 4d219ec2c13..fb1b79eeb43 100644 --- a/neutron/tests/unit/agent/l3/test_namespace_manager.py +++ b/neutron/tests/unit/agent/l3/test_namespace_manager.py @@ -36,35 +36,36 @@ class NamespaceManagerTestCaseFramework(base.BaseTestCase): class TestNamespaceManager(NamespaceManagerTestCaseFramework): + def setUp(self): + super(TestNamespaceManager, self).setUp() + self.ns_manager = self._create_namespace_manager() + def test_get_prefix_and_id(self): - ns_manager = self._create_namespace_manager() router_id = _uuid() - ns_prefix, ns_id = ns_manager.get_prefix_and_id( + ns_prefix, ns_id = self.ns_manager.get_prefix_and_id( namespaces.NS_PREFIX + router_id) self.assertEqual(ns_prefix, namespaces.NS_PREFIX) self.assertEqual(ns_id, router_id) - ns_prefix, ns_id = ns_manager.get_prefix_and_id( + ns_prefix, ns_id = self.ns_manager.get_prefix_and_id( dvr_snat_ns.SNAT_NS_PREFIX + router_id) self.assertEqual(ns_prefix, dvr_snat_ns.SNAT_NS_PREFIX) self.assertEqual(ns_id, router_id) ns_name = 'dhcp-' + router_id - self.assertIsNone(ns_manager.get_prefix_and_id(ns_name)) + self.assertIsNone(self.ns_manager.get_prefix_and_id(ns_name)) def test_is_managed(self): - ns_manager = 
self._create_namespace_manager() router_id = _uuid() router_ns_name = namespaces.NS_PREFIX + router_id - self.assertTrue(ns_manager.is_managed(router_ns_name)) + self.assertTrue(self.ns_manager.is_managed(router_ns_name)) router_ns_name = dvr_snat_ns.SNAT_NS_PREFIX + router_id - self.assertTrue(ns_manager.is_managed(router_ns_name)) - self.assertFalse(ns_manager.is_managed('dhcp-' + router_id)) + self.assertTrue(self.ns_manager.is_managed(router_ns_name)) + self.assertFalse(self.ns_manager.is_managed('dhcp-' + router_id)) def test_list_all(self): - ns_manager = self._create_namespace_manager() ns_names = [namespaces.NS_PREFIX + _uuid(), dvr_snat_ns.SNAT_NS_PREFIX + _uuid(), 'dhcp-' + _uuid(), ] @@ -72,7 +73,7 @@ class TestNamespaceManager(NamespaceManagerTestCaseFramework): # Test the normal path with mock.patch.object(ip_lib.IPWrapper, 'get_namespaces', return_value=ns_names): - retrieved_ns_names = ns_manager.list_all() + retrieved_ns_names = self.ns_manager.list_all() self.assertEqual(len(ns_names) - 1, len(retrieved_ns_names)) for i in range(len(retrieved_ns_names)): self.assertIn(ns_names[i], retrieved_ns_names) @@ -81,5 +82,20 @@ class TestNamespaceManager(NamespaceManagerTestCaseFramework): # Test path where IPWrapper raises exception with mock.patch.object(ip_lib.IPWrapper, 'get_namespaces', side_effect=RuntimeError): - retrieved_ns_names = ns_manager.list_all() + retrieved_ns_names = self.ns_manager.list_all() self.assertFalse(retrieved_ns_names) + + def test_ensure_router_cleanup(self): + router_id = _uuid() + ns_names = [namespaces.NS_PREFIX + _uuid() for _ in range(5)] + ns_names += [dvr_snat_ns.SNAT_NS_PREFIX + _uuid() for _ in range(5)] + ns_names += [namespaces.NS_PREFIX + router_id, + dvr_snat_ns.SNAT_NS_PREFIX + router_id] + with mock.patch.object(ip_lib.IPWrapper, 'get_namespaces', + return_value=ns_names), \ + mock.patch.object(self.ns_manager, '_cleanup') as mock_cleanup: + self.ns_manager.ensure_router_cleanup(router_id) + expected = 
[mock.call(namespaces.NS_PREFIX, router_id), + mock.call(dvr_snat_ns.SNAT_NS_PREFIX, router_id)] + mock_cleanup.assert_has_calls(expected, any_order=True) + self.assertEqual(2, mock_cleanup.call_count) From aaa070868e8fb891e6ab5f8355bb03ee3e837c9e Mon Sep 17 00:00:00 2001 From: Pavel Bondar Date: Thu, 11 Jun 2015 15:04:48 +0300 Subject: [PATCH 03/54] Use sets to calculate added/original/removed ips Original algorithm to calculate added/removed ips had O(n^2) complexity. Using sets achieves O(n) for average case. After refactoring input is no longer affected, updated tests to reflect that. However, dataset is too small to get any significant performance improvement. Using sets requires additional preparation and post operations: - converting 'original_ips' and 'new_ips' to sets from ip_addresses - building map(dict) for storing reference from ip_address to 'ips' element - converting calculated add/orignal/remove sets back to list of dicts using map (dict of references). Partially-Implements: blueprint neutron-ipam Change-Id: Iecddc406f7b91cfdfb976882504113734e19b565 --- neutron/db/ipam_backend_mixin.py | 66 ++++++++++----- .../tests/unit/db/test_ipam_backend_mixin.py | 83 +++++++++++++++---- 2 files changed, 114 insertions(+), 35 deletions(-) diff --git a/neutron/db/ipam_backend_mixin.py b/neutron/db/ipam_backend_mixin.py index ea4ec26b6f9..e50ef53197c 100644 --- a/neutron/db/ipam_backend_mixin.py +++ b/neutron/db/ipam_backend_mixin.py @@ -14,6 +14,7 @@ # under the License. import collections +import itertools import netaddr from oslo_config import cfg @@ -220,6 +221,15 @@ class IpamBackendMixin(db_base_plugin_common.DbBasePluginCommon): pool=pool_range, ip_address=gateway_ip) + def _is_ip_required_by_subnet(self, context, subnet_id, device_owner): + # For ports that are not router ports, retain any automatic + # (non-optional, e.g. IPv6 SLAAC) addresses. 
+ if device_owner in constants.ROUTER_INTERFACE_OWNERS: + return True + + subnet = self._get_subnet(context, subnet_id) + return not ipv6_utils.is_auto_address_subnet(subnet) + def _get_changed_ips_for_port(self, context, original_ips, new_ips, device_owner): """Calculate changes in IPs for the port.""" @@ -228,30 +238,44 @@ class IpamBackendMixin(db_base_plugin_common.DbBasePluginCommon): msg = _('Exceeded maximum amount of fixed ips per port') raise n_exc.InvalidInput(error_message=msg) - # These ips are still on the port and haven't been removed - prev_ips = [] + add_ips = [] + remove_ips = [] + ips_map = {ip['ip_address']: ip + for ip in itertools.chain(new_ips, original_ips) + if 'ip_address' in ip} - # Remove all of the intersecting elements - for original_ip in original_ips[:]: - for new_ip in new_ips[:]: - if ('ip_address' in new_ip and - original_ip['ip_address'] == new_ip['ip_address']): - original_ips.remove(original_ip) - new_ips.remove(new_ip) - prev_ips.append(original_ip) - break + new = set() + for ip in new_ips: + if 'ip_address' in ip: + new.add(ip['ip_address']) else: - # For ports that are not router ports, retain any automatic - # (non-optional, e.g. IPv6 SLAAC) addresses. - if device_owner not in constants.ROUTER_INTERFACE_OWNERS: - subnet = self._get_subnet(context, - original_ip['subnet_id']) - if (ipv6_utils.is_auto_address_subnet(subnet)): - original_ips.remove(original_ip) - prev_ips.append(original_ip) - return self.Changes(add=new_ips, + add_ips.append(ip) + + # Convert original ip addresses to sets + orig = set(ip['ip_address'] for ip in original_ips) + + add = new - orig + unchanged = new & orig + remove = orig - new + + # Convert results back to list of dicts + add_ips += [ips_map[ip] for ip in add] + prev_ips = [ips_map[ip] for ip in unchanged] + + # Mark ip for removing if it is not found in new_ips + # and subnet requires ip to be set manually. 
+ # For auto addresses leave ip unchanged + for ip in remove: + subnet_id = ips_map[ip]['subnet_id'] + if self._is_ip_required_by_subnet(context, subnet_id, + device_owner): + remove_ips.append(ips_map[ip]) + else: + prev_ips.append(ips_map[ip]) + + return self.Changes(add=add_ips, original=prev_ips, - remove=original_ips) + remove=remove_ips) def _delete_port(self, context, port_id): query = (context.session.query(models_v2.Port). diff --git a/neutron/tests/unit/db/test_ipam_backend_mixin.py b/neutron/tests/unit/db/test_ipam_backend_mixin.py index 3488759dbd5..1a5183ddc92 100644 --- a/neutron/tests/unit/db/test_ipam_backend_mixin.py +++ b/neutron/tests/unit/db/test_ipam_backend_mixin.py @@ -30,11 +30,23 @@ class TestIpamBackendMixin(base.BaseTestCase): ('id-2', '192.168.1.2')) self.default_original_ips = (('id-1', '192.168.1.1'), ('id-5', '172.20.16.5')) + self.owner_non_router = constants.DEVICE_OWNER_DHCP + self.owner_router = constants.DEVICE_OWNER_ROUTER_INTF def _prepare_ips(self, ips): return [{'ip_address': ip[1], 'subnet_id': ip[0]} for ip in ips] + def _mock_slaac_subnet_on(self): + slaac_subnet = {'ipv6_address_mode': constants.IPV6_SLAAC, + 'ipv6_ra_mode': constants.IPV6_SLAAC} + self.mixin._get_subnet = mock.Mock(return_value=slaac_subnet) + + def _mock_slaac_subnet_off(self): + non_slaac_subnet = {'ipv6_address_mode': None, + 'ipv6_ra_mode': None} + self.mixin._get_subnet = mock.Mock(return_value=non_slaac_subnet) + def _test_get_changed_ips_for_port(self, expected_change, original_ips, new_ips, owner): change = self.mixin._get_changed_ips_for_port(self.ctx, @@ -44,36 +56,79 @@ class TestIpamBackendMixin(base.BaseTestCase): self.assertEqual(expected_change, change) def test__get_changed_ips_for_port(self): - owner_router = constants.DEVICE_OWNER_ROUTER_INTF new_ips = self._prepare_ips(self.default_new_ips) original_ips = self._prepare_ips(self.default_original_ips) - # generate changes before calling _get_changed_ips_for_port - # because new_ips and 
original_ips are affected during call expected_change = self.mixin.Changes(add=[new_ips[1]], original=[original_ips[0]], remove=[original_ips[1]]) self._test_get_changed_ips_for_port(expected_change, original_ips, - new_ips, owner_router) + new_ips, self.owner_router) def test__get_changed_ips_for_port_autoaddress(self): - owner_not_router = constants.DEVICE_OWNER_DHCP new_ips = self._prepare_ips(self.default_new_ips) original = (('id-1', '192.168.1.1'), ('id-5', '2000:1234:5678::12FF:FE34:5678')) original_ips = self._prepare_ips(original) - # mock to test auto address part - slaac_subnet = {'ipv6_address_mode': constants.IPV6_SLAAC, - 'ipv6_ra_mode': constants.IPV6_SLAAC} - self.mixin._get_subnet = mock.Mock(return_value=slaac_subnet) + self._mock_slaac_subnet_on() - # make a copy of original_ips - # since it is changed by _get_changed_ips_for_port expected_change = self.mixin.Changes(add=[new_ips[1]], - original=original_ips[:], + original=original_ips, remove=[]) - self._test_get_changed_ips_for_port(expected_change, original_ips, - new_ips, owner_not_router) + new_ips, self.owner_non_router) + + def _test_get_changed_ips_for_port_no_ip_address(self): + # IP address should be added if only subnet_id is provided, + # independently from auto_address status for subnet + new_ips = [{'subnet_id': 'id-3'}] + original_ips = [] + + expected_change = self.mixin.Changes(add=[new_ips[0]], + original=[], + remove=[]) + self._test_get_changed_ips_for_port(expected_change, original_ips, + new_ips, self.owner_non_router) + + def test__get_changed_ips_for_port_no_ip_address_no_slaac(self): + self._mock_slaac_subnet_off() + self._test_get_changed_ips_for_port_no_ip_address() + + def test__get_changed_ips_for_port_no_ip_address_slaac(self): + self._mock_slaac_subnet_on() + self._test_get_changed_ips_for_port_no_ip_address() + + def test__is_ip_required_by_subnet_for_router_port(self): + # Owner -> router: + # _get_subnet should not be called, + # expected True + 
self._mock_slaac_subnet_off() + + result = self.mixin._is_ip_required_by_subnet(self.ctx, 'id', + self.owner_router) + self.assertTrue(result) + self.assertFalse(self.mixin._get_subnet.called) + + def test__is_ip_required_by_subnet_for_non_router_port(self): + # Owner -> not router: + # _get_subnet should be called, + # expected True, because subnet is not slaac + self._mock_slaac_subnet_off() + + result = self.mixin._is_ip_required_by_subnet(self.ctx, 'id', + self.owner_non_router) + self.assertTrue(result) + self.assertTrue(self.mixin._get_subnet.called) + + def test__is_ip_required_by_subnet_for_non_router_port_and_slaac(self): + # Owner -> not router: + # _get_subnet should be called, + # expected False, because subnet is slaac + self._mock_slaac_subnet_on() + + result = self.mixin._is_ip_required_by_subnet(self.ctx, 'id', + self.owner_non_router) + self.assertFalse(result) + self.assertTrue(self.mixin._get_subnet.called) From 0eb44ca1f23ee4d031ddf2e03a1ebc6a16428d3f Mon Sep 17 00:00:00 2001 From: Gary Kotton Date: Thu, 28 May 2015 02:17:52 -0700 Subject: [PATCH 04/54] NSXv: update ini file to support dhcp_lease_time Add the variable to enable the admin to set the DHCP lease time. This was added in commit 7681e4c50afda18fd75fe7207352d1a26ee0755b DocImpact Change-Id: Ic37932c09d3b4c88363a7f1f38a687cd6e090c1f --- etc/neutron/plugins/vmware/nsx.ini | 3 +++ 1 file changed, 3 insertions(+) diff --git a/etc/neutron/plugins/vmware/nsx.ini b/etc/neutron/plugins/vmware/nsx.ini index 2eef2254eca..8566ac81cfb 100644 --- a/etc/neutron/plugins/vmware/nsx.ini +++ b/etc/neutron/plugins/vmware/nsx.ini @@ -156,6 +156,9 @@ # lock management. 
# locking_coordinator_url = +# (Optional) DHCP lease time +# dhcp_lease_time = 86400 + [nsx] # Maximum number of ports for each bridged logical switch # The recommended value for this parameter varies with NSX version From 481d9a4f356d325e60e4c208c93693d755097bcd Mon Sep 17 00:00:00 2001 From: venkata anil Date: Wed, 24 Jun 2015 07:33:09 +0000 Subject: [PATCH 05/54] dhcp fails if extra_dhcp_opts for stateless subnet enabled vm on a network having IPv4 and IPv6 dhcpv6 stateless subnets, fails to get IPv4 address, when vm uses a port with extra_dhcp_opts. neutron creates entries in dhcp host file for each subnet of a port. Each of these entries will have same mac address as first field, and may have client_id, fqdn, ipv4/ipv6 address for dhcp/dhcpv6 stateful, or tag as other fields. For dhcpv6 stateless subnet with extra_dhcp_opts, host file will have only mac address and tag. If the last entry in host file for the port with extra_dhcp_opts, is for dhcpv6 stateless subnet, then dnsmasq tries to use this entry, (as dnsmasq reads the hosts file from EOF) to resolve dhcp request even for IPv4, treats as 'no address found' and fails to send DHCPOFFER. So we sort the fixed_ips, so that ipv6 subnets for the port are added first in host file, to avoid this issue. 
Change-Id: I3bea58d86a3508e49cbac1d03c6b640836b4a7a2 Closes-bug: #1466144 --- neutron/agent/linux/dhcp.py | 43 ++++++++++++++++- neutron/tests/unit/agent/linux/test_dhcp.py | 52 +++++++++++++++++++++ 2 files changed, 94 insertions(+), 1 deletion(-) diff --git a/neutron/agent/linux/dhcp.py b/neutron/agent/linux/dhcp.py index 084a67d4171..53af635e933 100644 --- a/neutron/agent/linux/dhcp.py +++ b/neutron/agent/linux/dhcp.py @@ -434,6 +434,44 @@ class Dnsmasq(DhcpLocalProcess): LOG.debug('Reloading allocations for network: %s', self.network.id) self.device_manager.update(self.network, self.interface_name) + def _sort_fixed_ips_for_dnsmasq(self, fixed_ips, v6_nets): + """Sort fixed_ips so that stateless IPv6 subnets appear first. + + For example, If a port with v6 extra_dhcp_opts is on a network with + IPv4 and IPv6 stateless subnets. Then dhcp host file will have + below 2 entries for same MAC, + + fa:16:3e:8f:9d:65,30.0.0.5,set:aabc7d33-4874-429e-9637-436e4232d2cd + (entry for IPv4 dhcp) + fa:16:3e:8f:9d:65,set:aabc7d33-4874-429e-9637-436e4232d2cd + (entry for stateless IPv6 for v6 options) + + dnsmasq internal details for processing host file entries + 1) dnsmaq reads the host file from EOF. + 2) So it first picks up stateless IPv6 entry, + fa:16:3e:8f:9d:65,set:aabc7d33-4874-429e-9637-436e4232d2cd + 3) But dnsmasq doesn't have sufficient checks to skip this entry and + pick next entry, to process dhcp IPv4 request. + 4) So dnsmaq uses this this entry to process dhcp IPv4 request. + 5) As there is no ip in this entry, dnsmaq logs "no address available" + and fails to send DHCPOFFER message. + + As we rely on internal details of dnsmasq to understand and fix the + issue, Ihar sent a mail to dnsmasq-discuss mailing list + http://lists.thekelleys.org.uk/pipermail/dnsmasq-discuss/2015q2/ + 009650.html + + So If we reverse the order of writing entries in host file, + so that entry for stateless IPv6 comes first, + then dnsmasq can correctly fetch the IPv4 address. 
+ """ + return sorted( + fixed_ips, + key=lambda fip: ((fip.subnet_id in v6_nets) and ( + v6_nets[fip.subnet_id].ipv6_address_mode == ( + constants.DHCPV6_STATELESS))), + reverse=True) + def _iter_hosts(self): """Iterate over hosts. @@ -449,8 +487,11 @@ class Dnsmasq(DhcpLocalProcess): """ v6_nets = dict((subnet.id, subnet) for subnet in self.network.subnets if subnet.ip_version == 6) + for port in self.network.ports: - for alloc in port.fixed_ips: + fixed_ips = self._sort_fixed_ips_for_dnsmasq(port.fixed_ips, + v6_nets) + for alloc in fixed_ips: # Note(scollins) Only create entries that are # associated with the subnet being managed by this # dhcp agent diff --git a/neutron/tests/unit/agent/linux/test_dhcp.py b/neutron/tests/unit/agent/linux/test_dhcp.py index 44a017245a3..0850c27d6c9 100644 --- a/neutron/tests/unit/agent/linux/test_dhcp.py +++ b/neutron/tests/unit/agent/linux/test_dhcp.py @@ -160,6 +160,23 @@ class FakeV6PortExtraOpt(object): ip_version=6)] +class FakeDualPortWithV6ExtraOpt(object): + id = 'hhhhhhhh-hhhh-hhhh-hhhh-hhhhhhhhhhhh' + admin_state_up = True + device_owner = 'foo3' + fixed_ips = [FakeIPAllocation('192.168.0.3', + 'dddddddd-dddd-dddd-dddd-dddddddddddd'), + FakeIPAllocation('ffea:3ba5:a17a:4ba3:0216:3eff:fec2:771d', + 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee')] + mac_address = '00:16:3e:c2:77:1d' + + def __init__(self): + self.extra_dhcp_opts = [ + DhcpOpt(opt_name='dns-server', + opt_value='ffea:3ba5:a17a:4ba3::100', + ip_version=6)] + + class FakeDualPort(object): id = 'hhhhhhhh-hhhh-hhhh-hhhh-hhhhhhhhhhhh' admin_state_up = True @@ -617,6 +634,14 @@ class FakeV6NetworkStatelessDHCP(object): namespace = 'qdhcp-ns' +class FakeNetworkWithV6SatelessAndV4DHCPSubnets(object): + id = 'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb' + + subnets = [FakeV6SubnetStateless(), FakeV4Subnet()] + ports = [FakeDualPortWithV6ExtraOpt(), FakeRouterPort()] + namespace = 'qdhcp-ns' + + class LocalChild(dhcp.DhcpLocalProcess): PORTS = {4: [4], 6: [6]} @@ -1638,6 
+1663,33 @@ class TestDnsmasq(TestBase): self.safe.assert_has_calls([mock.call(exp_host_name, exp_host_data), mock.call(exp_opt_name, exp_opt_data)]) + def test_host_and_opts_file_on_net_with_V6_stateless_and_V4_subnets( + self): + exp_host_name = '/dhcp/bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb/host' + exp_host_data = ( + '00:16:3e:c2:77:1d,set:hhhhhhhh-hhhh-hhhh-hhhh-hhhhhhhhhhhh\n' + '00:16:3e:c2:77:1d,host-192-168-0-3.openstacklocal,' + '192.168.0.3,set:hhhhhhhh-hhhh-hhhh-hhhh-hhhhhhhhhhhh\n' + '00:00:0f:rr:rr:rr,' + 'host-192-168-0-1.openstacklocal,192.168.0.1\n').lstrip() + exp_opt_name = '/dhcp/bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb/opts' + exp_opt_data = ( + 'tag:tag0,option6:domain-search,openstacklocal\n' + 'tag:tag1,option:dns-server,8.8.8.8\n' + 'tag:tag1,option:classless-static-route,20.0.0.1/24,20.0.0.1,' + '169.254.169.254/32,192.168.0.1,0.0.0.0/0,192.168.0.1\n' + 'tag:tag1,249,20.0.0.1/24,20.0.0.1,169.254.169.254/32,' + '192.168.0.1,0.0.0.0/0,192.168.0.1\n' + 'tag:tag1,option:router,192.168.0.1\n' + 'tag:hhhhhhhh-hhhh-hhhh-hhhh-hhhhhhhhhhhh,' + 'option6:dns-server,ffea:3ba5:a17a:4ba3::100').lstrip() + + dm = self._get_dnsmasq(FakeNetworkWithV6SatelessAndV4DHCPSubnets()) + dm._output_hosts_file() + dm._output_opts_file() + self.safe.assert_has_calls([mock.call(exp_host_name, exp_host_data), + mock.call(exp_opt_name, exp_opt_data)]) + def test_should_enable_metadata_namespaces_disabled_returns_false(self): self.conf.set_override('use_namespaces', False) self.assertFalse(dhcp.Dnsmasq.should_enable_metadata(self.conf, From e2a99fa3c456a57e6e74e53ab04ad4899d1a9cf2 Mon Sep 17 00:00:00 2001 From: Darragh O'Reilly Date: Tue, 2 Dec 2014 18:28:38 +0000 Subject: [PATCH 06/54] lb-agent: handle security group updates in main loop Patch I1574544734865506ff5383404516cc9349c16ec4 introduced deferring firewall refreshes to the main loop of the ovs-agent to improve performance. This patch enables the same on the linuxbridge agent. 
Change-Id: Ia8fe229910d2be718da52cb341be163b86ace571 Closes-Bug: #1368281 --- .../linuxbridge/agent/linuxbridge_neutron_agent.py | 12 +++++------- .../agent/test_linuxbridge_neutron_agent.py | 9 ++++----- 2 files changed, 9 insertions(+), 12 deletions(-) diff --git a/neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py b/neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py index 66be308a29a..d539fe38f91 100644 --- a/neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py +++ b/neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py @@ -788,7 +788,7 @@ class LinuxBridgeNeutronAgentRPC(service.Service): self.plugin_rpc = agent_rpc.PluginApi(topics.PLUGIN) self.sg_plugin_rpc = sg_rpc.SecurityGroupServerRpcApi(topics.PLUGIN) self.sg_agent = sg_rpc.SecurityGroupAgentRpc(self.context, - self.sg_plugin_rpc) + self.sg_plugin_rpc, defer_refresh_firewall=True) self.setup_rpc(self.interface_mappings.values()) self.daemon_loop() @@ -859,11 +859,8 @@ class LinuxBridgeNeutronAgentRPC(service.Service): resync_a = False resync_b = False - self.sg_agent.prepare_devices_filter(device_info.get('added')) - - if device_info.get('updated'): - self.sg_agent.refresh_firewall() - + self.sg_agent.setup_port_filters(device_info.get('added'), + device_info.get('updated')) # Updated devices are processed the same as new ones, as their # admin_state_up may have changed. The set union prevents duplicating # work when a device is new and updated in the same polling iteration. @@ -1011,7 +1008,8 @@ class LinuxBridgeNeutronAgentRPC(service.Service): LOG.info(_LI("Agent out of sync with plugin!")) sync = False - if self._device_info_has_changes(device_info): + if (self._device_info_has_changes(device_info) + or self.sg_agent.firewall_refresh_needed()): LOG.debug("Agent loop found changes! 
%s", device_info) try: sync = self.process_network_devices(device_info) diff --git a/neutron/tests/unit/plugins/ml2/drivers/linuxbridge/agent/test_linuxbridge_neutron_agent.py b/neutron/tests/unit/plugins/ml2/drivers/linuxbridge/agent/test_linuxbridge_neutron_agent.py index 8651a14d8ff..134b313eef8 100644 --- a/neutron/tests/unit/plugins/ml2/drivers/linuxbridge/agent/test_linuxbridge_neutron_agent.py +++ b/neutron/tests/unit/plugins/ml2/drivers/linuxbridge/agent/test_linuxbridge_neutron_agent.py @@ -279,16 +279,15 @@ class TestLinuxBridgeAgent(base.BaseTestCase): 'added': set(['tap3', 'tap4']), 'updated': set(['tap2', 'tap3']), 'removed': set(['tap1'])} - agent.sg_agent.prepare_devices_filter = mock.Mock() - agent.sg_agent.refresh_firewall = mock.Mock() + agent.sg_agent.setup_port_filters = mock.Mock() agent.treat_devices_added_updated = mock.Mock(return_value=False) agent.treat_devices_removed = mock.Mock(return_value=False) agent.process_network_devices(device_info) - agent.sg_agent.prepare_devices_filter.assert_called_with( - set(['tap3', 'tap4'])) - self.assertTrue(agent.sg_agent.refresh_firewall.called) + agent.sg_agent.setup_port_filters.assert_called_with( + device_info['added'], + device_info['updated']) agent.treat_devices_added_updated.assert_called_with(set(['tap2', 'tap3', 'tap4'])) From b622e6538ae5a606c1bc9830a2afe816a92a2ca5 Mon Sep 17 00:00:00 2001 From: Jakub Libosvar Date: Tue, 16 Jun 2015 15:29:17 +0000 Subject: [PATCH 07/54] ip_lib: Add flush() command to IpNeigh to clean arp cache Change-Id: I938974e3d67373cd18d8a9c6538f1f8b2d09e965 --- neutron/agent/linux/ip_lib.py | 11 +++++++++++ neutron/tests/unit/agent/linux/test_ip_lib.py | 4 ++++ 2 files changed, 15 insertions(+) diff --git a/neutron/agent/linux/ip_lib.py b/neutron/agent/linux/ip_lib.py index e6d20c78348..890444a01f9 100644 --- a/neutron/agent/linux/ip_lib.py +++ b/neutron/agent/linux/ip_lib.py @@ -634,6 +634,17 @@ class IpNeighCommand(IpDeviceCommandBase): ('show', 'dev', self.name)) + 
def flush(self, ip_version, ip_address): + """Flush neighbour entries + + Given address entry is removed from neighbour cache (ARP or NDP). To + flush all entries pass string 'all' as an address. + + :param ip_version: Either 4 or 6 for IPv4 or IPv6 respectively + :param ip_address: The prefix selecting the neighbours to flush + """ + self._as_root([ip_version], ('flush', 'to', ip_address)) + class IpNetnsCommand(IpCommandBase): COMMAND = 'netns' diff --git a/neutron/tests/unit/agent/linux/test_ip_lib.py b/neutron/tests/unit/agent/linux/test_ip_lib.py index 4ed06426119..42c3befa3c9 100644 --- a/neutron/tests/unit/agent/linux/test_ip_lib.py +++ b/neutron/tests/unit/agent/linux/test_ip_lib.py @@ -1031,6 +1031,10 @@ class TestIpNeighCommand(TestIPCmdBase): 'lladdr', 'cc:dd:ee:ff:ab:cd', 'dev', 'tap0')) + def test_flush(self): + self.neigh_cmd.flush(4, '192.168.0.1') + self._assert_sudo([4], ('flush', 'to', '192.168.0.1')) + class TestArpPing(TestIPCmdBase): @mock.patch.object(ip_lib, 'IPWrapper') From b9e9cfb08bf0609dcfea46403c510607e858926a Mon Sep 17 00:00:00 2001 From: Jakub Libosvar Date: Wed, 17 Jun 2015 13:10:13 +0000 Subject: [PATCH 08/54] Move NetcatTester to common/net_helpers The NetcatTester is a testing tool that can be used also in fullstack tests so I think it should go there to avoid imports in fullstack tests from functional. Tests for original helpers module was removed. 
Change-Id: I7229eba1dbc2ca3d524a1a021256b6202f4aecee --- neutron/tests/common/net_helpers.py | 222 ++++++++++++++++++ neutron/tests/fullstack/config_fixtures.py | 5 +- .../tests/functional/agent/linux/helpers.py | 222 ------------------ .../functional/agent/linux/test_helpers.py | 34 --- .../functional/agent/linux/test_iptables.py | 23 +- .../tests/functional/agent/test_l3_agent.py | 16 +- 6 files changed, 244 insertions(+), 278 deletions(-) delete mode 100644 neutron/tests/functional/agent/linux/test_helpers.py diff --git a/neutron/tests/common/net_helpers.py b/neutron/tests/common/net_helpers.py index 2c5bb94b5a0..d0e03cb5feb 100644 --- a/neutron/tests/common/net_helpers.py +++ b/neutron/tests/common/net_helpers.py @@ -14,14 +14,23 @@ # import abc +import functools +import os +import random +import re +import select +import shlex +import subprocess import netaddr from oslo_utils import uuidutils import six +from neutron.agent.common import config from neutron.agent.common import ovs_lib from neutron.agent.linux import bridge_lib from neutron.agent.linux import ip_lib +from neutron.agent.linux import utils from neutron.common import constants as n_const from neutron.tests import base as tests_base from neutron.tests.common import base as common_base @@ -33,6 +42,16 @@ PORT_PREFIX = 'test-port' VETH0_PREFIX = 'test-veth0' VETH1_PREFIX = 'test-veth1' +SS_SOURCE_PORT_PATTERN = re.compile( + r'^.*\s+\d+\s+.*:(?P\d+)\s+[0-9:].*') + +READ_TIMEOUT = os.environ.get('OS_TEST_READ_TIMEOUT', 5) + +CHILD_PROCESS_TIMEOUT = os.environ.get('OS_TEST_CHILD_PROCESS_TIMEOUT', 20) +CHILD_PROCESS_SLEEP = os.environ.get('OS_TEST_CHILD_PROCESS_SLEEP', 0.5) + +TRANSPORT_PROTOCOLS = (n_const.PROTO_NAME_TCP, n_const.PROTO_NAME_UDP) + def get_rand_port_name(): return tests_base.get_rand_name(max_length=n_const.DEVICE_NAME_MAX_LEN, @@ -105,6 +124,209 @@ def assert_no_arping(src_namespace, dst_ip, source=None, timeout=1, count=1): {'ns': src_namespace, 'destination': dst_ip}) +def 
_get_source_ports_from_ss_output(output): + ports = set() + for line in output.splitlines(): + match = SS_SOURCE_PORT_PATTERN.match(line) + if match: + ports.add(match.group('port')) + return ports + + +def get_unused_port(used, start=1024, end=65535): + candidates = set(range(start, end + 1)) + return random.choice(list(candidates - used)) + + +def get_free_namespace_port(protocol, namespace=None): + """Return an unused port from given namespace + + WARNING: This function returns a port that is free at the execution time of + this function. If this port is used later for binding then there + is a potential danger that port will be no longer free. It's up to + the programmer to handle error if port is already in use. + + :param protocol: Return free port for given protocol. Supported protocols + are 'tcp' and 'udp'. + """ + if protocol == n_const.PROTO_NAME_TCP: + param = '-tna' + elif protocol == n_const.PROTO_NAME_UDP: + param = '-una' + else: + raise ValueError("Unsupported procotol %s" % protocol) + + ip_wrapper = ip_lib.IPWrapper(namespace=namespace) + output = ip_wrapper.netns.execute(['ss', param]) + used_ports = _get_source_ports_from_ss_output(output) + + return get_unused_port(used_ports) + + +class RootHelperProcess(subprocess.Popen): + def __init__(self, cmd, *args, **kwargs): + for arg in ('stdin', 'stdout', 'stderr'): + kwargs.setdefault(arg, subprocess.PIPE) + self.namespace = kwargs.pop('namespace', None) + self.cmd = cmd + if self.namespace is not None: + cmd = ['ip', 'netns', 'exec', self.namespace] + cmd + root_helper = config.get_root_helper(utils.cfg.CONF) + cmd = shlex.split(root_helper) + cmd + self.child_pid = None + super(RootHelperProcess, self).__init__(cmd, *args, **kwargs) + self._wait_for_child_process() + + def kill(self): + pid = self.child_pid or str(self.pid) + utils.execute(['kill', '-9', pid], run_as_root=True) + + def read_stdout(self, timeout=None): + return self._read_stream(self.stdout, timeout) + + @staticmethod + def 
_read_stream(stream, timeout): + if timeout: + poller = select.poll() + poller.register(stream.fileno()) + poll_predicate = functools.partial(poller.poll, 1) + utils.wait_until_true(poll_predicate, timeout, 0.1, + RuntimeError( + 'No output in %.2f seconds' % timeout)) + return stream.readline() + + def writeline(self, data): + self.stdin.write(data + os.linesep) + self.stdin.flush() + + def _wait_for_child_process(self, timeout=CHILD_PROCESS_TIMEOUT, + sleep=CHILD_PROCESS_SLEEP): + def child_is_running(): + child_pid = utils.get_root_helper_child_pid( + self.pid, run_as_root=True) + if utils.pid_invoked_with_cmdline(child_pid, self.cmd): + return True + + utils.wait_until_true( + child_is_running, + timeout, + exception=RuntimeError("Process %s hasn't been spawned " + "in %d seconds" % (self.cmd, timeout))) + self.child_pid = utils.get_root_helper_child_pid( + self.pid, run_as_root=True) + + +class NetcatTester(object): + TESTING_STRING = 'foo' + TCP = n_const.PROTO_NAME_TCP + UDP = n_const.PROTO_NAME_UDP + + def __init__(self, client_namespace, server_namespace, address, + dst_port, protocol, server_address='0.0.0.0', src_port=None): + """ + Tool for testing connectivity on transport layer using netcat + executable. + + The processes are spawned lazily. 
+ + :param client_namespace: Namespace in which netcat process that + connects to other netcat will be spawned + :param server_namespace: Namespace in which listening netcat process + will be spawned + :param address: Server address from client point of view + :param dst_port: Port on which netcat listens + :param protocol: Transport protocol, either 'tcp' or 'udp' + :param server_address: Address in server namespace on which netcat + should listen + :param src_port: Source port of netcat process spawned in client + namespace - packet will have src_port in TCP/UDP + header with this value + + """ + self.client_namespace = client_namespace + self.server_namespace = server_namespace + self._client_process = None + self._server_process = None + self.address = address + self.server_address = server_address + self.dst_port = str(dst_port) + self.src_port = str(src_port) if src_port else None + if protocol not in TRANSPORT_PROTOCOLS: + raise ValueError("Unsupported protocol %s" % protocol) + self.protocol = protocol + + @property + def client_process(self): + if not self._client_process: + self.establish_connection() + return self._client_process + + @property + def server_process(self): + if not self._server_process: + self._spawn_server_process() + return self._server_process + + def _spawn_server_process(self): + self._server_process = self._spawn_nc_in_namespace( + self.server_namespace, + address=self.server_address, + listen=True) + + def establish_connection(self): + if self._client_process: + raise RuntimeError('%(proto)s connection to $(ip_addr)s is already' + ' established' % + {'proto': self.protocol, + 'ip_addr': self.address}) + + if not self._server_process: + self._spawn_server_process() + self._client_process = self._spawn_nc_in_namespace( + self.client_namespace, + address=self.address) + if self.protocol == self.UDP: + # Create an entry in conntrack table for UDP packets + self.client_process.writeline(self.TESTING_STRING) + + def 
test_connectivity(self, respawn=False): + stop_required = (respawn and self._client_process and + self._client_process.poll() is not None) + if stop_required: + self.stop_processes() + + self.client_process.writeline(self.TESTING_STRING) + message = self.server_process.read_stdout(READ_TIMEOUT).strip() + self.server_process.writeline(message) + message = self.client_process.read_stdout(READ_TIMEOUT).strip() + + return message == self.TESTING_STRING + + def _spawn_nc_in_namespace(self, namespace, address, listen=False): + cmd = ['nc', address, self.dst_port] + if self.protocol == self.UDP: + cmd.append('-u') + if listen: + cmd.append('-l') + if self.protocol == self.TCP: + cmd.append('-k') + else: + cmd.extend(['-w', '20']) + if self.src_port: + cmd.extend(['-p', self.src_port]) + proc = RootHelperProcess(cmd, namespace=namespace) + return proc + + def stop_processes(self): + for proc_attr in ('_client_process', '_server_process'): + proc = getattr(self, proc_attr) + if proc: + if proc.poll() is None: + proc.kill() + proc.wait() + setattr(self, proc_attr, None) + + class NamespaceFixture(tools.SafeFixture): """Create a namespace. diff --git a/neutron/tests/fullstack/config_fixtures.py b/neutron/tests/fullstack/config_fixtures.py index ec1248d1b05..f07993cfaa2 100644 --- a/neutron/tests/fullstack/config_fixtures.py +++ b/neutron/tests/fullstack/config_fixtures.py @@ -20,7 +20,7 @@ import six from neutron.common import constants from neutron.tests import base from neutron.tests.common import helpers as c_helpers -from neutron.tests.functional.agent.linux import helpers +from neutron.tests.common import net_helpers from neutron.tests import tools @@ -140,7 +140,8 @@ class NeutronConfigFixture(ConfigFixture): This might fail if some other process occupies this port after this function finished but before the neutron-server process started. 
""" - return str(helpers.get_free_namespace_port(constants.PROTO_NAME_TCP)) + return str(net_helpers.get_free_namespace_port( + constants.PROTO_NAME_TCP)) def _generate_api_paste(self): return c_helpers.find_sample_file('api-paste.ini') diff --git a/neutron/tests/functional/agent/linux/helpers.py b/neutron/tests/functional/agent/linux/helpers.py index 92ac9c92bbd..f7dc76099e1 100644 --- a/neutron/tests/functional/agent/linux/helpers.py +++ b/neutron/tests/functional/agent/linux/helpers.py @@ -12,29 +12,10 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. -import functools import os -import random -import re -import select -import shlex -import subprocess -from neutron.agent.common import config -from neutron.agent.linux import ip_lib -from neutron.agent.linux import utils -from neutron.common import constants from neutron.tests import tools -CHILD_PROCESS_TIMEOUT = os.environ.get('OS_TEST_CHILD_PROCESS_TIMEOUT', 20) -CHILD_PROCESS_SLEEP = os.environ.get('OS_TEST_CHILD_PROCESS_SLEEP', 0.5) -READ_TIMEOUT = os.environ.get('OS_TEST_READ_TIMEOUT', 5) - -SS_SOURCE_PORT_PATTERN = re.compile( - r'^.*\s+\d+\s+.*:(?P\d+)\s+[0-9:].*') - -TRANSPORT_PROTOCOLS = (constants.PROTO_NAME_TCP, constants.PROTO_NAME_UDP) - class RecursivePermDirFixture(tools.SafeFixture): """Ensure at least perms permissions on directory and ancestors.""" @@ -54,206 +35,3 @@ class RecursivePermDirFixture(tools.SafeFixture): os.chmod(current_directory, perms | self.least_perms) previous_directory = current_directory current_directory = os.path.dirname(current_directory) - - -def get_free_namespace_port(protocol, namespace=None): - """Return an unused port from given namespace - - WARNING: This function returns a port that is free at the execution time of - this function. If this port is used later for binding then there - is a potential danger that port will be no longer free. 
It's up to - the programmer to handle error if port is already in use. - - :param protocol: Return free port for given protocol. Supported protocols - are 'tcp' and 'udp'. - """ - if protocol == constants.PROTO_NAME_TCP: - param = '-tna' - elif protocol == constants.PROTO_NAME_UDP: - param = '-una' - else: - raise ValueError("Unsupported procotol %s" % protocol) - - ip_wrapper = ip_lib.IPWrapper(namespace=namespace) - output = ip_wrapper.netns.execute(['ss', param]) - used_ports = _get_source_ports_from_ss_output(output) - - return get_unused_port(used_ports) - - -def _get_source_ports_from_ss_output(output): - ports = set() - for line in output.splitlines(): - match = SS_SOURCE_PORT_PATTERN.match(line) - if match: - ports.add(match.group('port')) - return ports - - -def get_unused_port(used, start=1024, end=65535): - candidates = set(range(start, end + 1)) - return random.choice(list(candidates - used)) - - -class RootHelperProcess(subprocess.Popen): - def __init__(self, cmd, *args, **kwargs): - for arg in ('stdin', 'stdout', 'stderr'): - kwargs.setdefault(arg, subprocess.PIPE) - self.namespace = kwargs.pop('namespace', None) - self.cmd = cmd - if self.namespace is not None: - cmd = ['ip', 'netns', 'exec', self.namespace] + cmd - root_helper = config.get_root_helper(utils.cfg.CONF) - cmd = shlex.split(root_helper) + cmd - self.child_pid = None - super(RootHelperProcess, self).__init__(cmd, *args, **kwargs) - self._wait_for_child_process() - - def kill(self): - pid = self.child_pid or str(self.pid) - utils.execute(['kill', '-9', pid], run_as_root=True) - - def read_stdout(self, timeout=None): - return self._read_stream(self.stdout, timeout) - - @staticmethod - def _read_stream(stream, timeout): - if timeout: - poller = select.poll() - poller.register(stream.fileno()) - poll_predicate = functools.partial(poller.poll, 1) - utils.wait_until_true(poll_predicate, timeout, 0.1, - RuntimeError( - 'No output in %.2f seconds' % timeout)) - return stream.readline() - - def 
writeline(self, data): - self.stdin.write(data + os.linesep) - self.stdin.flush() - - def _wait_for_child_process(self, timeout=CHILD_PROCESS_TIMEOUT, - sleep=CHILD_PROCESS_SLEEP): - def child_is_running(): - child_pid = utils.get_root_helper_child_pid( - self.pid, run_as_root=True) - if utils.pid_invoked_with_cmdline(child_pid, self.cmd): - return True - - utils.wait_until_true( - child_is_running, - timeout, - exception=RuntimeError("Process %s hasn't been spawned " - "in %d seconds" % (self.cmd, timeout))) - self.child_pid = utils.get_root_helper_child_pid( - self.pid, run_as_root=True) - - -class NetcatTester(object): - TESTING_STRING = 'foo' - TCP = constants.PROTO_NAME_TCP - UDP = constants.PROTO_NAME_UDP - - def __init__(self, client_namespace, server_namespace, address, - dst_port, protocol, server_address='0.0.0.0', src_port=None): - """ - Tool for testing connectivity on transport layer using netcat - executable. - - The processes are spawned lazily. - - :param client_namespace: Namespace in which netcat process that - connects to other netcat will be spawned - :param server_namespace: Namespace in which listening netcat process - will be spawned - :param address: Server address from client point of view - :param dst_port: Port on which netcat listens - :param protocol: Transport protocol, either 'tcp' or 'udp' - :param server_address: Address in server namespace on which netcat - should listen - :param src_port: Source port of netcat process spawned in client - namespace - packet will have src_port in TCP/UDP - header with this value - - """ - self.client_namespace = client_namespace - self.server_namespace = server_namespace - self._client_process = None - self._server_process = None - self.address = address - self.server_address = server_address - self.dst_port = str(dst_port) - self.src_port = str(src_port) if src_port else None - if protocol not in TRANSPORT_PROTOCOLS: - raise ValueError("Unsupported protocol %s" % protocol) - self.protocol = 
protocol - - @property - def client_process(self): - if not self._client_process: - self.establish_connection() - return self._client_process - - @property - def server_process(self): - if not self._server_process: - self._spawn_server_process() - return self._server_process - - def _spawn_server_process(self): - self._server_process = self._spawn_nc_in_namespace( - self.server_namespace, - address=self.server_address, - listen=True) - - def establish_connection(self): - if self._client_process: - raise RuntimeError('%(proto)s connection to $(ip_addr)s is already' - ' established' % - {'proto': self.protocol, - 'ip_addr': self.address}) - - if not self._server_process: - self._spawn_server_process() - self._client_process = self._spawn_nc_in_namespace( - self.client_namespace, - address=self.address) - if self.protocol == self.UDP: - # Create an entry in conntrack table for UDP packets - self.client_process.writeline(self.TESTING_STRING) - - def test_connectivity(self, respawn=False): - stop_required = (respawn and self._client_process and - self._client_process.poll() is not None) - if stop_required: - self.stop_processes() - - self.client_process.writeline(self.TESTING_STRING) - message = self.server_process.read_stdout(READ_TIMEOUT).strip() - self.server_process.writeline(message) - message = self.client_process.read_stdout(READ_TIMEOUT).strip() - - return message == self.TESTING_STRING - - def _spawn_nc_in_namespace(self, namespace, address, listen=False): - cmd = ['nc', address, self.dst_port] - if self.protocol == self.UDP: - cmd.append('-u') - if listen: - cmd.append('-l') - if self.protocol == self.TCP: - cmd.append('-k') - else: - cmd.extend(['-w', '20']) - if self.src_port: - cmd.extend(['-p', self.src_port]) - proc = RootHelperProcess(cmd, namespace=namespace) - return proc - - def stop_processes(self): - for proc_attr in ('_client_process', '_server_process'): - proc = getattr(self, proc_attr) - if proc: - if proc.poll() is None: - proc.kill() - 
proc.wait() - setattr(self, proc_attr, None) diff --git a/neutron/tests/functional/agent/linux/test_helpers.py b/neutron/tests/functional/agent/linux/test_helpers.py deleted file mode 100644 index a027245d4c0..00000000000 --- a/neutron/tests/functional/agent/linux/test_helpers.py +++ /dev/null @@ -1,34 +0,0 @@ -# Copyright 2014 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from neutron.tests.functional.agent.linux import helpers -from neutron.tests.functional import base - - -class TestRootHelperProcess(base.BaseSudoTestCase): - - def test_process_read_write(self): - proc = helpers.RootHelperProcess(['tee']) - proc.writeline('foo') - output = proc.read_stdout(helpers.READ_TIMEOUT) - self.assertEqual('foo\n', output) - - def test_process_kill(self): - with self.assert_max_execution_time(100): - proc = helpers.RootHelperProcess(['tee']) - proc.kill() - proc.wait() - # sudo returns 137 and - # rootwrap returns 247 (bug 1364822) - self.assertIn(proc.returncode, [137, 247]) diff --git a/neutron/tests/functional/agent/linux/test_iptables.py b/neutron/tests/functional/agent/linux/test_iptables.py index 3d78459aee0..2130ec8ccd4 100644 --- a/neutron/tests/functional/agent/linux/test_iptables.py +++ b/neutron/tests/functional/agent/linux/test_iptables.py @@ -24,7 +24,6 @@ from neutron.tests.common import machine_fixtures from neutron.tests.common import net_helpers from neutron.tests.functional.agent.linux import base as linux_base from 
neutron.tests.functional.agent.linux.bin import ipt_binname -from neutron.tests.functional.agent.linux import helpers from neutron.tests.functional import base as functional_base @@ -44,8 +43,8 @@ class IptablesManagerTestCase(functional_base.BaseSudoTestCase): self.client_fw, self.server_fw = self.create_firewalls() # The port is used in isolated namespace that precludes possibility of # port conflicts - self.port = helpers.get_free_namespace_port(constants.PROTO_NAME_TCP, - self.server.namespace) + self.port = net_helpers.get_free_namespace_port( + constants.PROTO_NAME_TCP, self.server.namespace) def create_firewalls(self): client_iptables = iptables_manager.IptablesManager( @@ -80,7 +79,7 @@ class IptablesManagerTestCase(functional_base.BaseSudoTestCase): return chain, rule def _test_with_nc(self, fw_manager, direction, port, protocol): - netcat = helpers.NetcatTester( + netcat = net_helpers.NetcatTester( self.client.namespace, self.server.namespace, self.server.ip, self.port, protocol) self.addCleanup(netcat.stop_processes) @@ -121,35 +120,35 @@ class IptablesManagerTestCase(functional_base.BaseSudoTestCase): def test_tcp_input_port(self): self._test_with_nc(self.server_fw, 'ingress', self.port, - protocol=helpers.NetcatTester.TCP) + protocol=net_helpers.NetcatTester.TCP) def test_tcp_output_port(self): self._test_with_nc(self.client_fw, 'egress', self.port, - protocol=helpers.NetcatTester.TCP) + protocol=net_helpers.NetcatTester.TCP) def test_tcp_input(self): self._test_with_nc(self.server_fw, 'ingress', port=None, - protocol=helpers.NetcatTester.TCP) + protocol=net_helpers.NetcatTester.TCP) def test_tcp_output(self): self._test_with_nc(self.client_fw, 'egress', port=None, - protocol=helpers.NetcatTester.TCP) + protocol=net_helpers.NetcatTester.TCP) def test_udp_input_port(self): self._test_with_nc(self.server_fw, 'ingress', self.port, - protocol=helpers.NetcatTester.UDP) + protocol=net_helpers.NetcatTester.UDP) def test_udp_output_port(self): 
self._test_with_nc(self.client_fw, 'egress', self.port, - protocol=helpers.NetcatTester.UDP) + protocol=net_helpers.NetcatTester.UDP) def test_udp_input(self): self._test_with_nc(self.server_fw, 'ingress', port=None, - protocol=helpers.NetcatTester.UDP) + protocol=net_helpers.NetcatTester.UDP) def test_udp_output(self): self._test_with_nc(self.client_fw, 'egress', port=None, - protocol=helpers.NetcatTester.UDP) + protocol=net_helpers.NetcatTester.UDP) class IptablesManagerNonRootTestCase(base.BaseTestCase): diff --git a/neutron/tests/functional/agent/test_l3_agent.py b/neutron/tests/functional/agent/test_l3_agent.py index b35fb074a22..00412968325 100644 --- a/neutron/tests/functional/agent/test_l3_agent.py +++ b/neutron/tests/functional/agent/test_l3_agent.py @@ -400,8 +400,8 @@ class L3AgentTestCase(L3AgentTestFramework): router_info = self.generate_router_info(enable_ha=False) router = self.manage_router(self.agent, router_info) - port = helpers.get_free_namespace_port(l3_constants.PROTO_NAME_TCP, - router.ns_name) + port = net_helpers.get_free_namespace_port(l3_constants.PROTO_NAME_TCP, + router.ns_name) client_address = '19.4.4.3' server_address = '35.4.0.4' @@ -413,9 +413,9 @@ class L3AgentTestCase(L3AgentTestFramework): router.process(self.agent) router_ns = ip_lib.IPWrapper(namespace=router.ns_name) - netcat = helpers.NetcatTester(router.ns_name, router.ns_name, - client_address, port, - protocol=helpers.NetcatTester.TCP) + netcat = net_helpers.NetcatTester( + router.ns_name, router.ns_name, client_address, port, + protocol=net_helpers.NetcatTester.TCP) self.addCleanup(netcat.stop_processes) def assert_num_of_conntrack_rules(n): @@ -705,13 +705,13 @@ class L3AgentTestCase(L3AgentTestFramework): self._add_fip(router, dst_fip, fixed_address=dst_machine.ip) router.process(self.agent) - protocol_port = helpers.get_free_namespace_port( + protocol_port = net_helpers.get_free_namespace_port( l3_constants.PROTO_NAME_TCP, dst_machine.namespace) # client sends to fip 
- netcat = helpers.NetcatTester( + netcat = net_helpers.NetcatTester( src_machine.namespace, dst_machine.namespace, dst_fip, protocol_port, - protocol=helpers.NetcatTester.TCP) + protocol=net_helpers.NetcatTester.TCP) self.addCleanup(netcat.stop_processes) self.assertTrue(netcat.test_connectivity()) From b9656509c178041f729cbaa6a1ca974f4b3c6f5d Mon Sep 17 00:00:00 2001 From: Jakub Libosvar Date: Thu, 18 Jun 2015 16:00:56 +0000 Subject: [PATCH 09/54] RootHelperProcess: kill can consume signal number The kill() method now accepts a signal parameter. Change-Id: I2eb756a73565d93c979e62eaab358a3a519aa8dd --- neutron/tests/common/net_helpers.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/neutron/tests/common/net_helpers.py b/neutron/tests/common/net_helpers.py index d0e03cb5feb..170d1b3a9b0 100644 --- a/neutron/tests/common/net_helpers.py +++ b/neutron/tests/common/net_helpers.py @@ -20,6 +20,7 @@ import random import re import select import shlex +import signal import subprocess import netaddr @@ -177,9 +178,9 @@ class RootHelperProcess(subprocess.Popen): super(RootHelperProcess, self).__init__(cmd, *args, **kwargs) self._wait_for_child_process() - def kill(self): + def kill(self, sig=signal.SIGKILL): pid = self.child_pid or str(self.pid) - utils.execute(['kill', '-9', pid], run_as_root=True) + utils.execute(['kill', '-%d' % sig, pid], run_as_root=True) def read_stdout(self, timeout=None): return self._read_stream(self.stdout, timeout) From 67658607cf69ad2274d8f32680042ca210c7db86 Mon Sep 17 00:00:00 2001 From: Assaf Muller Date: Fri, 26 Jun 2015 17:17:14 -0400 Subject: [PATCH 10/54] Revert "Fix 'router_gateway' port status can't be updated" This patch breaks multinode fullstack tests and in my opinion is generally speaking wrong. I've added a comment to explain in the patch that's being reverted. 
This reverts commit with change ID: If428eadadfd36a9b19ea75920120e48ac49659f2 Change-Id: I73b7825ccc26847ef03d60d6154d544a9145f7e5 --- .../openvswitch/agent/ovs_neutron_agent.py | 2 +- .../agent/test_ovs_neutron_agent.py | 26 ------------------- 2 files changed, 1 insertion(+), 27 deletions(-) diff --git a/neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py b/neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py index ca52b216257..054c1fb6b6d 100644 --- a/neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py +++ b/neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py @@ -858,7 +858,7 @@ class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin, br_names = [] for bridge in ovs_bridges: bridge_id = ovs.get_bridge_external_bridge_id(bridge) - if bridge_id and bridge_id != bridge: + if bridge_id != bridge: br_names.append(bridge) ovs_bridges.difference_update(br_names) ancillary_bridges = [] diff --git a/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/test_ovs_neutron_agent.py b/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/test_ovs_neutron_agent.py index b3ab4fa3efb..99621826af9 100644 --- a/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/test_ovs_neutron_agent.py +++ b/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/test_ovs_neutron_agent.py @@ -1238,32 +1238,6 @@ class AncillaryBridgesTest(object): actual = self.mock_scan_ancillary_ports(vif_port_set, registered_ports) self.assertEqual(expected, actual) - def _test_ancillary_bridges_external(self, external_bridge_id=None): - bridges = ['br-int', 'br-tun', 'br-ex'] - with mock.patch.object(self.mod_agent.OVSNeutronAgent, - 'setup_integration_br'),\ - mock.patch('neutron.agent.linux.utils.get_interface_mac', - return_value='00:00:00:00:00:01'),\ - mock.patch('neutron.agent.common.ovs_lib.BaseOVS.get_bridges', - return_value=bridges),\ - mock.patch('neutron.agent.common.ovs_lib.BaseOVS.' 
- 'get_bridge_external_bridge_id', - return_value=external_bridge_id),\ - mock.patch('neutron.agent.common.ovs_lib.OVSBridge.' - 'get_vif_ports', return_value=[]): - self.agent = self.mod_agent.OVSNeutronAgent(self._bridge_classes(), - **self.kwargs) - self.agent.enable_tunneling = True - ancillary_bridges = self.agent.setup_ancillary_bridges( - 'br-int', 'br-tun') - self.assertEqual(1, len(ancillary_bridges)) - - def test_ancillary_bridges_external_bridge_id(self): - self._test_ancillary_bridges_external('br-ex') - - def test_ancillary_bridges_external_bridge_id_none(self): - self._test_ancillary_bridges_external() - class AncillaryBridgesTestOFCtl(AncillaryBridgesTest, ovs_test_base.OVSOFCtlTestBase): From e50e1a236983e0a59b9667bc546c92555c3d0e34 Mon Sep 17 00:00:00 2001 From: Eugene Nikanorov Date: Tue, 5 May 2015 18:18:28 +0400 Subject: [PATCH 11/54] Add logging of agent heartbeats When troubleshooting problems with cluster it would be very convenient to have information about agent heartbeats logged with some searchable identifier which could create 1-to-1 mapping between events in agent's logs and server's logs. Currently agent's heartbeats are not logged at all on server side. Since on a large cluster that could create too much logging (even for troubleshooting cases), it might make sense to make this configurable both on neutron-server side and on agent-side. 
DocImpact Change-Id: I0a127ef274a84bba5de47395d47b62f48bd4be16 Closes-Bug: #1452582 --- etc/dhcp_agent.ini | 4 +++ etc/l3_agent.ini | 4 +++ etc/metadata_agent.ini | 4 +++ etc/neutron/plugins/ml2/openvswitch_agent.ini | 3 +++ neutron/agent/common/config.py | 2 ++ neutron/agent/dhcp/agent.py | 3 ++- neutron/agent/l3/agent.py | 3 ++- neutron/agent/metadata/agent.py | 1 + neutron/agent/rpc.py | 6 +++++ neutron/db/agents_db.py | 26 ++++++++++++++----- .../openvswitch/agent/ovs_neutron_agent.py | 4 ++- neutron/tests/unit/db/test_agents_db.py | 12 +++++++++ 12 files changed, 62 insertions(+), 10 deletions(-) diff --git a/etc/dhcp_agent.ini b/etc/dhcp_agent.ini index 0b3721b959e..0f56260f4c2 100644 --- a/etc/dhcp_agent.ini +++ b/etc/dhcp_agent.ini @@ -90,3 +90,7 @@ # Timeout for ovs-vsctl commands. # If the timeout expires, ovs commands will fail with ALARMCLOCK error. # ovs_vsctl_timeout = 10 + +[AGENT] +# Log agent heartbeats from this DHCP agent +# log_agent_heartbeats = False diff --git a/etc/l3_agent.ini b/etc/l3_agent.ini index b49797d02fa..310b6b59e02 100644 --- a/etc/l3_agent.ini +++ b/etc/l3_agent.ini @@ -122,3 +122,7 @@ # The advertisement interval in seconds # ha_vrrp_advert_int = 2 + +[AGENT] +# Log agent heartbeats from this L3 agent +# log_agent_heartbeats = False diff --git a/etc/metadata_agent.ini b/etc/metadata_agent.ini index 4a0331ee125..ca31c7fe976 100644 --- a/etc/metadata_agent.ini +++ b/etc/metadata_agent.ini @@ -66,3 +66,7 @@ admin_password = %SERVICE_PASSWORD% # Otherwise default_ttl specifies time in seconds a cache entry is valid for. # No cache is used in case no value is passed. 
# cache_url = memory://?default_ttl=5 + +[AGENT] +# Log agent heartbeats from this Metadata agent +# log_agent_heartbeats = False diff --git a/etc/neutron/plugins/ml2/openvswitch_agent.ini b/etc/neutron/plugins/ml2/openvswitch_agent.ini index 85586c5969b..5dd11a8ce88 100644 --- a/etc/neutron/plugins/ml2/openvswitch_agent.ini +++ b/etc/neutron/plugins/ml2/openvswitch_agent.ini @@ -58,6 +58,9 @@ # of_interface = ovs-ofctl [agent] +# Log agent heartbeats from this OVS agent +# log_agent_heartbeats = False + # Agent's polling interval in seconds # polling_interval = 2 diff --git a/neutron/agent/common/config.py b/neutron/agent/common/config.py index 7e63ea38789..b563c969ed5 100644 --- a/neutron/agent/common/config.py +++ b/neutron/agent/common/config.py @@ -44,6 +44,8 @@ AGENT_STATE_OPTS = [ help=_('Seconds between nodes reporting state to server; ' 'should be less than agent_down_time, best if it ' 'is half or less than agent_down_time.')), + cfg.BoolOpt('log_agent_heartbeats', default=False, + help=_('Log agent heartbeats')), ] INTERFACE_DRIVER_OPTS = [ diff --git a/neutron/agent/dhcp/agent.py b/neutron/agent/dhcp/agent.py index 6b5ac5ac715..2ca6da5405d 100644 --- a/neutron/agent/dhcp/agent.py +++ b/neutron/agent/dhcp/agent.py @@ -548,7 +548,8 @@ class DhcpAgentWithStateReport(DhcpAgent): 'configurations': { 'dhcp_driver': cfg.CONF.dhcp_driver, 'use_namespaces': cfg.CONF.use_namespaces, - 'dhcp_lease_duration': cfg.CONF.dhcp_lease_duration}, + 'dhcp_lease_duration': cfg.CONF.dhcp_lease_duration, + 'log_agent_heartbeats': cfg.CONF.AGENT.log_agent_heartbeats}, 'start_flag': True, 'agent_type': constants.AGENT_TYPE_DHCP} report_interval = cfg.CONF.AGENT.report_interval diff --git a/neutron/agent/l3/agent.py b/neutron/agent/l3/agent.py index a1aec148843..03a04e7b9d2 100644 --- a/neutron/agent/l3/agent.py +++ b/neutron/agent/l3/agent.py @@ -595,7 +595,8 @@ class L3NATAgentWithStateReport(L3NATAgent): 'external_network_bridge': self.conf.external_network_bridge, 
'gateway_external_network_id': self.conf.gateway_external_network_id, - 'interface_driver': self.conf.interface_driver}, + 'interface_driver': self.conf.interface_driver, + 'log_agent_heartbeats': self.conf.AGENT.log_agent_heartbeats}, 'start_flag': True, 'agent_type': l3_constants.AGENT_TYPE_L3} report_interval = self.conf.AGENT.report_interval diff --git a/neutron/agent/metadata/agent.py b/neutron/agent/metadata/agent.py index 769d8039bc0..c26626c9a81 100644 --- a/neutron/agent/metadata/agent.py +++ b/neutron/agent/metadata/agent.py @@ -289,6 +289,7 @@ class UnixDomainMetadataProxy(object): 'metadata_proxy_socket': cfg.CONF.metadata_proxy_socket, 'nova_metadata_ip': cfg.CONF.nova_metadata_ip, 'nova_metadata_port': cfg.CONF.nova_metadata_port, + 'log_agent_heartbeats': cfg.CONF.AGENT.log_agent_heartbeats, }, 'start_flag': True, 'agent_type': n_const.AGENT_TYPE_METADATA} diff --git a/neutron/agent/rpc.py b/neutron/agent/rpc.py index f71d36032e8..11bf79784c5 100644 --- a/neutron/agent/rpc.py +++ b/neutron/agent/rpc.py @@ -18,6 +18,7 @@ import itertools from oslo_log import log as logging import oslo_messaging from oslo_utils import timeutils +from oslo_utils import uuidutils from neutron.common import constants from neutron.common import rpc as n_rpc @@ -72,6 +73,11 @@ class PluginReportStateAPI(object): def report_state(self, context, agent_state, use_call=False): cctxt = self.client.prepare() + # add unique identifier to a report + # that can be logged on server side. 
+ # This create visible correspondence between events on + # the agent and on the server + agent_state['uuid'] = uuidutils.generate_uuid() kwargs = { 'agent_state': {'agent_state': agent_state}, 'time': timeutils.strtime(), diff --git a/neutron/db/agents_db.py b/neutron/db/agents_db.py index 52dccf5c411..702f2e497d1 100644 --- a/neutron/db/agents_db.py +++ b/neutron/db/agents_db.py @@ -29,7 +29,7 @@ from neutron.common import constants from neutron.db import model_base from neutron.db import models_v2 from neutron.extensions import agent as ext_agent -from neutron.i18n import _LE, _LW +from neutron.i18n import _LE, _LI, _LW from neutron import manager LOG = logging.getLogger(__name__) @@ -203,22 +203,33 @@ class AgentDbMixin(ext_agent.AgentPluginBase): agent = self._get_agent(context, id) return self._make_agent_dict(agent, fields) - def _create_or_update_agent(self, context, agent): + def _log_heartbeat(self, state, agent_db, agent_conf): + if agent_conf.get('log_agent_heartbeats'): + delta = timeutils.utcnow() - agent_db.heartbeat_timestamp + LOG.info(_LI("Heartbeat received from %(type)s agent on " + "host %(host)s, uuid %(uuid)s after %(delta)s"), + {'type': agent_db.agent_type, + 'host': agent_db.host, + 'uuid': state.get('uuid'), + 'delta': delta}) + + def _create_or_update_agent(self, context, agent_state): with context.session.begin(subtransactions=True): res_keys = ['agent_type', 'binary', 'host', 'topic'] - res = dict((k, agent[k]) for k in res_keys) + res = dict((k, agent_state[k]) for k in res_keys) - configurations_dict = agent.get('configurations', {}) + configurations_dict = agent_state.get('configurations', {}) res['configurations'] = jsonutils.dumps(configurations_dict) - res['load'] = self._get_agent_load(agent) + res['load'] = self._get_agent_load(agent_state) current_time = timeutils.utcnow() try: agent_db = self._get_agent_by_type_and_host( - context, agent['agent_type'], agent['host']) + context, agent_state['agent_type'], agent_state['host']) 
res['heartbeat_timestamp'] = current_time - if agent.get('start_flag'): + if agent_state.get('start_flag'): res['started_at'] = current_time greenthread.sleep(0) + self._log_heartbeat(agent_state, agent_db, configurations_dict) agent_db.update(res) except ext_agent.AgentNotFoundByTypeHost: greenthread.sleep(0) @@ -229,6 +240,7 @@ class AgentDbMixin(ext_agent.AgentPluginBase): agent_db = Agent(**res) greenthread.sleep(0) context.session.add(agent_db) + self._log_heartbeat(agent_state, agent_db, configurations_dict) greenthread.sleep(0) def create_or_update_agent(self, context, agent): diff --git a/neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py b/neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py index ca52b216257..b9ad0dd353f 100644 --- a/neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py +++ b/neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py @@ -200,7 +200,9 @@ class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin, 'arp_responder_enabled': self.arp_responder_enabled, 'enable_distributed_routing': - self.enable_distributed_routing}, + self.enable_distributed_routing, + 'log_agent_heartbeats': + self.conf.AGENT.log_agent_heartbeats}, 'agent_type': q_const.AGENT_TYPE_OVS, 'start_flag': True} diff --git a/neutron/tests/unit/db/test_agents_db.py b/neutron/tests/unit/db/test_agents_db.py index cda8cd33b71..a4726631458 100644 --- a/neutron/tests/unit/db/test_agents_db.py +++ b/neutron/tests/unit/db/test_agents_db.py @@ -127,6 +127,18 @@ class TestAgentsDbMixin(TestAgentsDbBase): agent = agents[0] self._assert_ref_fields_are_equal(self.agent_status, agent) + def test_create_or_update_agent_logs_heartbeat(self): + status = self.agent_status.copy() + status['configurations'] = {'log_agent_heartbeats': True} + + with mock.patch.object(agents_db.LOG, 'info') as info: + self.plugin.create_or_update_agent(self.context, status) + self.assertTrue(info.called) + status['configurations'] = 
{'log_agent_heartbeats': False} + info.reset_mock() + self.plugin.create_or_update_agent(self.context, status) + self.assertFalse(info.called) + def test_create_or_update_agent_concurrent_insert(self): # NOTE(rpodolyaka): emulate violation of the unique constraint caused # by a concurrent insert. Ensure we make another From 604101ec58d8dd6e6af4aa61c0b2f0d382f89931 Mon Sep 17 00:00:00 2001 From: Meenakshi Kaushik Date: Sun, 24 May 2015 23:30:17 -0700 Subject: [PATCH 12/54] Add documentation for Linux Bridge (previously missing) Change-Id: I092b609f43b37ed85d08bc80d1d048b945abe222 Closes-Bug: #1455979 --- doc/source/devref/linuxbridge_agent.rst | 27 ++++++++++++++++++++++++- 1 file changed, 26 insertions(+), 1 deletion(-) diff --git a/doc/source/devref/linuxbridge_agent.rst b/doc/source/devref/linuxbridge_agent.rst index 2c7b81d4f98..8dbe1578833 100644 --- a/doc/source/devref/linuxbridge_agent.rst +++ b/doc/source/devref/linuxbridge_agent.rst @@ -1,2 +1,27 @@ +=============================== L2 Networking with Linux Bridge -------------------------------- +=============================== + +This Agent uses the `Linux Bridge +`_ to +provide L2 connectivity for VM instances running on the compute node to the +public network. A graphical illustration of the deployment can be found in +`OpenStack Admin Guide Linux Bridge +`_ + +In most common deployments, there is a compute and a network node. On both the +compute and the network node, the Linux Bridge Agent will manage virtual +switches, connectivity among them, and interaction via virtual ports with other +network components such as namespaces and underlying interfaces. Additionally, +on the compute node, the Linux Bridge Agent will manage security groups. + +Three use cases and their packet flow are documented as follows: + +1. `Legacy implementation with Linux Bridge + `_ + +2. `High Availability using L3HA with Linux Bridge + `_ + +3. 
`Provider networks with Linux Bridge + `_ From 6d35f5fa91faf24694cf22bf9290f4743175b051 Mon Sep 17 00:00:00 2001 From: Tomoaki Sato Date: Mon, 29 Jun 2015 10:02:20 +0900 Subject: [PATCH 13/54] Fix subnet updating failure on valid allocation pools Currently subnet updating with both allocation-pool and gateway_ip options is failing because of wrong parameter check. The check always checks gateway_ip against allocation pools in db, even when the allocation_pool parameter is given.The fix checks if given parameter of gateway_ip option doesn't conflict with given parameters of allocation-pool. Change-Id: Ia568aa1645b3160ab90a6010efd9a2b9b0d31ac8 Closes-Bug: #1469573 --- neutron/db/db_base_plugin_v2.py | 9 +++++++-- neutron/tests/unit/db/test_db_base_plugin_v2.py | 13 ++++++++++++- 2 files changed, 19 insertions(+), 3 deletions(-) diff --git a/neutron/db/db_base_plugin_v2.py b/neutron/db/db_base_plugin_v2.py index 9ca165e5b01..7a953983ac4 100644 --- a/neutron/db/db_base_plugin_v2.py +++ b/neutron/db/db_base_plugin_v2.py @@ -586,8 +586,13 @@ class NeutronDbPluginV2(ipam_non_pluggable_backend.IpamNonPluggableBackend, self._validate_subnet(context, s, cur_subnet=db_subnet) if s.get('gateway_ip') is not None: - allocation_pools = [{'start': p['first_ip'], 'end': p['last_ip']} - for p in db_subnet.allocation_pools] + if s.get('allocation_pools') is not None: + allocation_pools = [{'start': p['start'], 'end': p['end']} + for p in s['allocation_pools']] + else: + allocation_pools = [{'start': p['first_ip'], + 'end': p['last_ip']} + for p in db_subnet.allocation_pools] self._validate_gw_out_of_pools(s["gateway_ip"], allocation_pools) with context.session.begin(subtransactions=True): diff --git a/neutron/tests/unit/db/test_db_base_plugin_v2.py b/neutron/tests/unit/db/test_db_base_plugin_v2.py index fbc20d27c05..241c6e90ad9 100644 --- a/neutron/tests/unit/db/test_db_base_plugin_v2.py +++ b/neutron/tests/unit/db/test_db_base_plugin_v2.py @@ -4070,7 +4070,7 @@ class 
TestSubnetsV2(NeutronDbPluginV2TestCase): self.assertEqual(res.status_int, webob.exc.HTTPClientError.code) - def test_update_subnet_allocation_pools(self): + def _test_update_subnet_allocation_pools(self, with_gateway_ip=False): """Test that we can successfully update with sane params. This will create a subnet with specified allocation_pools @@ -4086,6 +4086,8 @@ class TestSubnetsV2(NeutronDbPluginV2TestCase): data = {'subnet': {'allocation_pools': [ {'start': '192.168.0.10', 'end': '192.168.0.20'}, {'start': '192.168.0.30', 'end': '192.168.0.40'}]}} + if with_gateway_ip: + data['subnet']['gateway_ip'] = '192.168.0.9' req = self.new_update_request('subnets', data, subnet['subnet']['id']) #check res code but then do GET on subnet for verification @@ -4099,6 +4101,15 @@ class TestSubnetsV2(NeutronDbPluginV2TestCase): res['subnet']['allocation_pools'][1].values() for pool_val in ['10', '20', '30', '40']: self.assertTrue('192.168.0.%s' % (pool_val) in res_vals) + if with_gateway_ip: + self.assertEqual((res['subnet']['gateway_ip']), + '192.168.0.9') + + def test_update_subnet_allocation_pools(self): + self._test_update_subnet_allocation_pools() + + def test_update_subnet_allocation_pools_and_gateway_ip(self): + self._test_update_subnet_allocation_pools(with_gateway_ip=True) #updating alloc pool to something outside subnet.cidr def test_update_subnet_allocation_pools_invalid_pool_for_cidr(self): From b21a88603e369a113c8b73c3aebc05fedf8da9d3 Mon Sep 17 00:00:00 2001 From: Eugene Nikanorov Date: Mon, 29 Jun 2015 05:45:24 +0400 Subject: [PATCH 14/54] Don't access mock's attribute directly especially when it's not needed Change-Id: I0df2f7110301c096762396fb23e49a081d051f3b --- .../ml2/drivers/openvswitch/agent/test_ovs_neutron_agent.py | 1 - 1 file changed, 1 deletion(-) diff --git a/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/test_ovs_neutron_agent.py b/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/test_ovs_neutron_agent.py index 
b3ab4fa3efb..34fa8bbcdfd 100644 --- a/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/test_ovs_neutron_agent.py +++ b/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/test_ovs_neutron_agent.py @@ -668,7 +668,6 @@ class TestOvsNeutronAgent(object): self.agent.local_vlan_map["netuid12345"] = lvm self.agent.port_unbound("vif1", "netuid12345") self.assertTrue(reclvl_fn.called) - reclvl_fn.called = False lvm.vif_ports = {} self.agent.port_unbound("vif1", "netuid12345") From 6e693fc91dd79cfbf181e3b015a1816d985ad02c Mon Sep 17 00:00:00 2001 From: Elena Ezhova Date: Thu, 18 Jun 2015 10:42:57 +0300 Subject: [PATCH 15/54] Switch to oslo.service oslo.service has graduated, so neutron should consume it. Closes-Bug: #1466851 Depends-On: Ie0fd63f969f954029c3c3cf31337fbe38f59331a Depends-On: I2093b37d411df9a26958fa50ff523c258bbe06ec Depends-On: I4823d344878fc97e66ddd8fdae25c13a34dede40 Change-Id: I0155b3d8b72f6d031bf6f855488f80acebfc25d4 --- neutron/agent/dhcp/agent.py | 2 +- neutron/agent/dhcp_agent.py | 4 +- neutron/agent/l3/agent.py | 4 +- neutron/agent/l3_agent.py | 4 +- neutron/agent/metadata/agent.py | 2 +- neutron/common/rpc.py | 2 +- neutron/db/agentschedulers_db.py | 2 +- neutron/manager.py | 5 +- neutron/openstack/common/eventlet_backdoor.py | 151 ------ neutron/openstack/common/loopingcall.py | 147 ----- neutron/openstack/common/periodic_task.py | 232 -------- neutron/openstack/common/service.py | 507 ------------------ neutron/openstack/common/systemd.py | 105 ---- neutron/openstack/common/threadgroup.py | 150 ------ neutron/plugins/hyperv/agent/l2_agent.py | 2 +- .../plugins/ibm/agent/sdnve_neutron_agent.py | 2 +- .../ml2/drivers/cisco/apic/apic_sync.py | 2 +- .../ml2/drivers/cisco/apic/apic_topology.py | 6 +- .../agent/linuxbridge_neutron_agent.py | 6 +- .../mech_sriov/agent/sriov_nic_agent.py | 2 +- .../openvswitch/agent/ovs_neutron_agent.py | 2 +- neutron/service.py | 9 +- .../metering/agents/metering_agent.py | 8 +- 
neutron/tests/unit/agent/dhcp/test_agent.py | 5 +- neutron/tests/unit/agent/l3/test_agent.py | 2 +- .../unit/agent/l3/test_dvr_local_router.py | 2 +- .../tests/unit/agent/metadata/test_agent.py | 2 +- .../unit/plugins/ibm/test_sdnve_agent.py | 2 +- .../ml2/drivers/cisco/apic/test_apic_sync.py | 2 +- .../drivers/cisco/apic/test_apic_topology.py | 2 +- .../mech_sriov/agent/test_sriov_nic_agent.py | 2 +- .../agent/test_ovs_neutron_agent.py | 5 +- .../plugins/oneconvergence/test_nvsd_agent.py | 2 +- .../metering/agents/test_metering_agent.py | 6 +- neutron/tests/unit/test_wsgi.py | 2 +- neutron/wsgi.py | 9 +- openstack-common.conf | 6 - requirements.txt | 1 + 38 files changed, 56 insertions(+), 1350 deletions(-) delete mode 100644 neutron/openstack/common/eventlet_backdoor.py delete mode 100644 neutron/openstack/common/loopingcall.py delete mode 100644 neutron/openstack/common/periodic_task.py delete mode 100644 neutron/openstack/common/service.py delete mode 100644 neutron/openstack/common/systemd.py delete mode 100644 neutron/openstack/common/threadgroup.py diff --git a/neutron/agent/dhcp/agent.py b/neutron/agent/dhcp/agent.py index 6b5ac5ac715..67f855bd4b3 100644 --- a/neutron/agent/dhcp/agent.py +++ b/neutron/agent/dhcp/agent.py @@ -21,6 +21,7 @@ import eventlet from oslo_config import cfg from oslo_log import log as logging import oslo_messaging +from oslo_service import loopingcall from oslo_utils import importutils from neutron.agent.linux import dhcp @@ -36,7 +37,6 @@ from neutron.common import utils from neutron import context from neutron.i18n import _LE, _LI, _LW from neutron import manager -from neutron.openstack.common import loopingcall LOG = logging.getLogger(__name__) diff --git a/neutron/agent/dhcp_agent.py b/neutron/agent/dhcp_agent.py index 8b7bbae31ad..845259a2d5f 100644 --- a/neutron/agent/dhcp_agent.py +++ b/neutron/agent/dhcp_agent.py @@ -17,6 +17,7 @@ import sys from oslo_config import cfg +from oslo_service import service from 
neutron.agent.common import config from neutron.agent.dhcp import config as dhcp_config @@ -24,7 +25,6 @@ from neutron.agent.linux import interface from neutron.agent.metadata import config as metadata_config from neutron.common import config as common_config from neutron.common import topics -from neutron.openstack.common import service from neutron import service as neutron_service @@ -49,4 +49,4 @@ def main(): topic=topics.DHCP_AGENT, report_interval=cfg.CONF.AGENT.report_interval, manager='neutron.agent.dhcp.agent.DhcpAgentWithStateReport') - service.launch(server).wait() + service.launch(cfg.CONF, server).wait() diff --git a/neutron/agent/l3/agent.py b/neutron/agent/l3/agent.py index a1aec148843..71f64ccca3e 100644 --- a/neutron/agent/l3/agent.py +++ b/neutron/agent/l3/agent.py @@ -18,6 +18,8 @@ import netaddr from oslo_config import cfg from oslo_log import log as logging import oslo_messaging +from oslo_service import loopingcall +from oslo_service import periodic_task from oslo_utils import excutils from oslo_utils import importutils from oslo_utils import timeutils @@ -47,8 +49,6 @@ from neutron.common import topics from neutron import context as n_context from neutron.i18n import _LE, _LI, _LW from neutron import manager -from neutron.openstack.common import loopingcall -from neutron.openstack.common import periodic_task try: from neutron_fwaas.services.firewall.agents.l3reference \ diff --git a/neutron/agent/l3_agent.py b/neutron/agent/l3_agent.py index 12c152d0536..bee060181c9 100644 --- a/neutron/agent/l3_agent.py +++ b/neutron/agent/l3_agent.py @@ -17,6 +17,7 @@ import sys from oslo_config import cfg +from oslo_service import service from neutron.agent.common import config from neutron.agent.l3 import config as l3_config @@ -26,7 +27,6 @@ from neutron.agent.linux import interface from neutron.agent.metadata import config as metadata_config from neutron.common import config as common_config from neutron.common import topics -from 
neutron.openstack.common import service from neutron import service as neutron_service @@ -51,4 +51,4 @@ def main(manager='neutron.agent.l3.agent.L3NATAgentWithStateReport'): topic=topics.L3_AGENT, report_interval=cfg.CONF.AGENT.report_interval, manager=manager) - service.launch(server).wait() + service.launch(cfg.CONF, server).wait() diff --git a/neutron/agent/metadata/agent.py b/neutron/agent/metadata/agent.py index 769d8039bc0..ee5b169670b 100644 --- a/neutron/agent/metadata/agent.py +++ b/neutron/agent/metadata/agent.py @@ -20,6 +20,7 @@ from neutronclient.v2_0 import client from oslo_config import cfg from oslo_log import log as logging import oslo_messaging +from oslo_service import loopingcall import six import six.moves.urllib.parse as urlparse import webob @@ -34,7 +35,6 @@ from neutron.common import utils from neutron import context from neutron.i18n import _LE, _LW from neutron.openstack.common.cache import cache -from neutron.openstack.common import loopingcall LOG = logging.getLogger(__name__) diff --git a/neutron/common/rpc.py b/neutron/common/rpc.py index 8c4df963fbc..6fe39842b7e 100644 --- a/neutron/common/rpc.py +++ b/neutron/common/rpc.py @@ -18,10 +18,10 @@ from oslo_config import cfg from oslo_log import log as logging import oslo_messaging from oslo_messaging import serializer as om_serializer +from oslo_service import service from neutron.common import exceptions from neutron import context -from neutron.openstack.common import service LOG = logging.getLogger(__name__) diff --git a/neutron/db/agentschedulers_db.py b/neutron/db/agentschedulers_db.py index b9d9c11dbe5..3b682f1e325 100644 --- a/neutron/db/agentschedulers_db.py +++ b/neutron/db/agentschedulers_db.py @@ -19,6 +19,7 @@ import time from oslo_config import cfg from oslo_log import log as logging +from oslo_service import loopingcall from oslo_utils import timeutils import sqlalchemy as sa from sqlalchemy import orm @@ -32,7 +33,6 @@ from neutron.db import model_base from 
neutron.extensions import agent as ext_agent from neutron.extensions import dhcpagentscheduler from neutron.i18n import _LE, _LI, _LW -from neutron.openstack.common import loopingcall LOG = logging.getLogger(__name__) diff --git a/neutron/manager.py b/neutron/manager.py index fe46a5aea75..50beae09868 100644 --- a/neutron/manager.py +++ b/neutron/manager.py @@ -18,12 +18,12 @@ import weakref from oslo_config import cfg from oslo_log import log as logging import oslo_messaging +from oslo_service import periodic_task from oslo_utils import importutils import six from neutron.common import utils from neutron.i18n import _LE, _LI -from neutron.openstack.common import periodic_task from neutron.plugins.common import constants from stevedore import driver @@ -43,7 +43,8 @@ class Manager(periodic_task.PeriodicTasks): if not host: host = cfg.CONF.host self.host = host - super(Manager, self).__init__() + conf = getattr(self, "conf", cfg.CONF) + super(Manager, self).__init__(conf) def periodic_tasks(self, context, raise_on_error=False): self.run_periodic_tasks(context, raise_on_error=raise_on_error) diff --git a/neutron/openstack/common/eventlet_backdoor.py b/neutron/openstack/common/eventlet_backdoor.py deleted file mode 100644 index 113500339ed..00000000000 --- a/neutron/openstack/common/eventlet_backdoor.py +++ /dev/null @@ -1,151 +0,0 @@ -# Copyright (c) 2012 OpenStack Foundation. -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -from __future__ import print_function - -import copy -import errno -import gc -import logging -import os -import pprint -import socket -import sys -import traceback - -import eventlet.backdoor -import greenlet -from oslo_config import cfg - -from neutron.openstack.common._i18n import _LI - -help_for_backdoor_port = ( - "Acceptable values are 0, , and :, where 0 results " - "in listening on a random tcp port number; results in listening " - "on the specified port number (and not enabling backdoor if that port " - "is in use); and : results in listening on the smallest " - "unused port number within the specified range of port numbers. The " - "chosen port is displayed in the service's log file.") -eventlet_backdoor_opts = [ - cfg.StrOpt('backdoor_port', - help="Enable eventlet backdoor. %s" % help_for_backdoor_port) -] - -CONF = cfg.CONF -CONF.register_opts(eventlet_backdoor_opts) -LOG = logging.getLogger(__name__) - - -def list_opts(): - """Entry point for oslo-config-generator. - """ - return [(None, copy.deepcopy(eventlet_backdoor_opts))] - - -class EventletBackdoorConfigValueError(Exception): - def __init__(self, port_range, help_msg, ex): - msg = ('Invalid backdoor_port configuration %(range)s: %(ex)s. 
' - '%(help)s' % - {'range': port_range, 'ex': ex, 'help': help_msg}) - super(EventletBackdoorConfigValueError, self).__init__(msg) - self.port_range = port_range - - -def _dont_use_this(): - print("Don't use this, just disconnect instead") - - -def _find_objects(t): - return [o for o in gc.get_objects() if isinstance(o, t)] - - -def _print_greenthreads(): - for i, gt in enumerate(_find_objects(greenlet.greenlet)): - print(i, gt) - traceback.print_stack(gt.gr_frame) - print() - - -def _print_nativethreads(): - for threadId, stack in sys._current_frames().items(): - print(threadId) - traceback.print_stack(stack) - print() - - -def _parse_port_range(port_range): - if ':' not in port_range: - start, end = port_range, port_range - else: - start, end = port_range.split(':', 1) - try: - start, end = int(start), int(end) - if end < start: - raise ValueError - return start, end - except ValueError as ex: - raise EventletBackdoorConfigValueError(port_range, ex, - help_for_backdoor_port) - - -def _listen(host, start_port, end_port, listen_func): - try_port = start_port - while True: - try: - return listen_func((host, try_port)) - except socket.error as exc: - if (exc.errno != errno.EADDRINUSE or - try_port >= end_port): - raise - try_port += 1 - - -def initialize_if_enabled(): - backdoor_locals = { - 'exit': _dont_use_this, # So we don't exit the entire process - 'quit': _dont_use_this, # So we don't exit the entire process - 'fo': _find_objects, - 'pgt': _print_greenthreads, - 'pnt': _print_nativethreads, - } - - if CONF.backdoor_port is None: - return None - - start_port, end_port = _parse_port_range(str(CONF.backdoor_port)) - - # NOTE(johannes): The standard sys.displayhook will print the value of - # the last expression and set it to __builtin__._, which overwrites - # the __builtin__._ that gettext sets. Let's switch to using pprint - # since it won't interact poorly with gettext, and it's easier to - # read the output too. 
- def displayhook(val): - if val is not None: - pprint.pprint(val) - sys.displayhook = displayhook - - sock = _listen('localhost', start_port, end_port, eventlet.listen) - - # In the case of backdoor port being zero, a port number is assigned by - # listen(). In any case, pull the port number out here. - port = sock.getsockname()[1] - LOG.info( - _LI('Eventlet backdoor listening on %(port)s for process %(pid)d'), - {'port': port, 'pid': os.getpid()} - ) - eventlet.spawn_n(eventlet.backdoor.backdoor_server, sock, - locals=backdoor_locals) - return port diff --git a/neutron/openstack/common/loopingcall.py b/neutron/openstack/common/loopingcall.py deleted file mode 100644 index ab28ca1c8e7..00000000000 --- a/neutron/openstack/common/loopingcall.py +++ /dev/null @@ -1,147 +0,0 @@ -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# Copyright 2011 Justin Santa Barbara -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import logging -import sys -import time - -from eventlet import event -from eventlet import greenthread - -from neutron.openstack.common._i18n import _LE, _LW - -LOG = logging.getLogger(__name__) - -# NOTE(zyluo): This lambda function was declared to avoid mocking collisions -# with time.time() called in the standard logging module -# during unittests. 
-_ts = lambda: time.time() - - -class LoopingCallDone(Exception): - """Exception to break out and stop a LoopingCallBase. - - The poll-function passed to LoopingCallBase can raise this exception to - break out of the loop normally. This is somewhat analogous to - StopIteration. - - An optional return-value can be included as the argument to the exception; - this return-value will be returned by LoopingCallBase.wait() - - """ - - def __init__(self, retvalue=True): - """:param retvalue: Value that LoopingCallBase.wait() should return.""" - self.retvalue = retvalue - - -class LoopingCallBase(object): - def __init__(self, f=None, *args, **kw): - self.args = args - self.kw = kw - self.f = f - self._running = False - self.done = None - - def stop(self): - self._running = False - - def wait(self): - return self.done.wait() - - -class FixedIntervalLoopingCall(LoopingCallBase): - """A fixed interval looping call.""" - - def start(self, interval, initial_delay=None): - self._running = True - done = event.Event() - - def _inner(): - if initial_delay: - greenthread.sleep(initial_delay) - - try: - while self._running: - start = _ts() - self.f(*self.args, **self.kw) - end = _ts() - if not self._running: - break - delay = end - start - interval - if delay > 0: - LOG.warning(_LW('task %(func_name)r run outlasted ' - 'interval by %(delay).2f sec'), - {'func_name': self.f, 'delay': delay}) - greenthread.sleep(-delay if delay < 0 else 0) - except LoopingCallDone as e: - self.stop() - done.send(e.retvalue) - except Exception: - LOG.exception(_LE('in fixed duration looping call')) - done.send_exception(*sys.exc_info()) - return - else: - done.send(True) - - self.done = done - - greenthread.spawn_n(_inner) - return self.done - - -class DynamicLoopingCall(LoopingCallBase): - """A looping call which sleeps until the next known event. - - The function called should return how long to sleep for before being - called again. 
- """ - - def start(self, initial_delay=None, periodic_interval_max=None): - self._running = True - done = event.Event() - - def _inner(): - if initial_delay: - greenthread.sleep(initial_delay) - - try: - while self._running: - idle = self.f(*self.args, **self.kw) - if not self._running: - break - - if periodic_interval_max is not None: - idle = min(idle, periodic_interval_max) - LOG.debug('Dynamic looping call %(func_name)r sleeping ' - 'for %(idle).02f seconds', - {'func_name': self.f, 'idle': idle}) - greenthread.sleep(idle) - except LoopingCallDone as e: - self.stop() - done.send(e.retvalue) - except Exception: - LOG.exception(_LE('in dynamic looping call')) - done.send_exception(*sys.exc_info()) - return - else: - done.send(True) - - self.done = done - - greenthread.spawn(_inner) - return self.done diff --git a/neutron/openstack/common/periodic_task.py b/neutron/openstack/common/periodic_task.py deleted file mode 100644 index 633a1467116..00000000000 --- a/neutron/openstack/common/periodic_task.py +++ /dev/null @@ -1,232 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import copy -import logging -import random -import time - -from oslo_config import cfg -import six - -from neutron.openstack.common._i18n import _, _LE, _LI - - -periodic_opts = [ - cfg.BoolOpt('run_external_periodic_tasks', - default=True, - help='Some periodic tasks can be run in a separate process. 
' - 'Should we run them here?'), -] - -CONF = cfg.CONF -CONF.register_opts(periodic_opts) - -LOG = logging.getLogger(__name__) - -DEFAULT_INTERVAL = 60.0 - - -def list_opts(): - """Entry point for oslo-config-generator.""" - return [(None, copy.deepcopy(periodic_opts))] - - -class InvalidPeriodicTaskArg(Exception): - message = _("Unexpected argument for periodic task creation: %(arg)s.") - - -def periodic_task(*args, **kwargs): - """Decorator to indicate that a method is a periodic task. - - This decorator can be used in two ways: - - 1. Without arguments '@periodic_task', this will be run on the default - interval of 60 seconds. - - 2. With arguments: - @periodic_task(spacing=N [, run_immediately=[True|False]] - [, name=[None|"string"]) - this will be run on approximately every N seconds. If this number is - negative the periodic task will be disabled. If the run_immediately - argument is provided and has a value of 'True', the first run of the - task will be shortly after task scheduler starts. If - run_immediately is omitted or set to 'False', the first time the - task runs will be approximately N seconds after the task scheduler - starts. If name is not provided, __name__ of function is used. 
- """ - def decorator(f): - # Test for old style invocation - if 'ticks_between_runs' in kwargs: - raise InvalidPeriodicTaskArg(arg='ticks_between_runs') - - # Control if run at all - f._periodic_task = True - f._periodic_external_ok = kwargs.pop('external_process_ok', False) - if f._periodic_external_ok and not CONF.run_external_periodic_tasks: - f._periodic_enabled = False - else: - f._periodic_enabled = kwargs.pop('enabled', True) - f._periodic_name = kwargs.pop('name', f.__name__) - - # Control frequency - f._periodic_spacing = kwargs.pop('spacing', 0) - f._periodic_immediate = kwargs.pop('run_immediately', False) - if f._periodic_immediate: - f._periodic_last_run = None - else: - f._periodic_last_run = time.time() - return f - - # NOTE(sirp): The `if` is necessary to allow the decorator to be used with - # and without parenthesis. - # - # In the 'with-parenthesis' case (with kwargs present), this function needs - # to return a decorator function since the interpreter will invoke it like: - # - # periodic_task(*args, **kwargs)(f) - # - # In the 'without-parenthesis' case, the original function will be passed - # in as the first argument, like: - # - # periodic_task(f) - if kwargs: - return decorator - else: - return decorator(args[0]) - - -class _PeriodicTasksMeta(type): - def _add_periodic_task(cls, task): - """Add a periodic task to the list of periodic tasks. - - The task should already be decorated by @periodic_task. - - :return: whether task was actually enabled - """ - name = task._periodic_name - - if task._periodic_spacing < 0: - LOG.info(_LI('Skipping periodic task %(task)s because ' - 'its interval is negative'), - {'task': name}) - return False - if not task._periodic_enabled: - LOG.info(_LI('Skipping periodic task %(task)s because ' - 'it is disabled'), - {'task': name}) - return False - - # A periodic spacing of zero indicates that this task should - # be run on the default interval to avoid running too - # frequently. 
- if task._periodic_spacing == 0: - task._periodic_spacing = DEFAULT_INTERVAL - - cls._periodic_tasks.append((name, task)) - cls._periodic_spacing[name] = task._periodic_spacing - return True - - def __init__(cls, names, bases, dict_): - """Metaclass that allows us to collect decorated periodic tasks.""" - super(_PeriodicTasksMeta, cls).__init__(names, bases, dict_) - - # NOTE(sirp): if the attribute is not present then we must be the base - # class, so, go ahead an initialize it. If the attribute is present, - # then we're a subclass so make a copy of it so we don't step on our - # parent's toes. - try: - cls._periodic_tasks = cls._periodic_tasks[:] - except AttributeError: - cls._periodic_tasks = [] - - try: - cls._periodic_spacing = cls._periodic_spacing.copy() - except AttributeError: - cls._periodic_spacing = {} - - for value in cls.__dict__.values(): - if getattr(value, '_periodic_task', False): - cls._add_periodic_task(value) - - -def _nearest_boundary(last_run, spacing): - """Find nearest boundary which is in the past, which is a multiple of the - spacing with the last run as an offset. - - Eg if last run was 10 and spacing was 7, the new last run could be: 17, 24, - 31, 38... - - 0% to 5% of the spacing value will be added to this value to ensure tasks - do not synchronize. This jitter is rounded to the nearest second, this - means that spacings smaller than 20 seconds will not have jitter. 
- """ - current_time = time.time() - if last_run is None: - return current_time - delta = current_time - last_run - offset = delta % spacing - # Add up to 5% jitter - jitter = int(spacing * (random.random() / 20)) - return current_time - offset + jitter - - -@six.add_metaclass(_PeriodicTasksMeta) -class PeriodicTasks(object): - def __init__(self): - super(PeriodicTasks, self).__init__() - self._periodic_last_run = {} - for name, task in self._periodic_tasks: - self._periodic_last_run[name] = task._periodic_last_run - - def add_periodic_task(self, task): - """Add a periodic task to the list of periodic tasks. - - The task should already be decorated by @periodic_task. - """ - if self.__class__._add_periodic_task(task): - self._periodic_last_run[task._periodic_name] = ( - task._periodic_last_run) - - def run_periodic_tasks(self, context, raise_on_error=False): - """Tasks to be run at a periodic interval.""" - idle_for = DEFAULT_INTERVAL - for task_name, task in self._periodic_tasks: - full_task_name = '.'.join([self.__class__.__name__, task_name]) - - spacing = self._periodic_spacing[task_name] - last_run = self._periodic_last_run[task_name] - - # Check if due, if not skip - idle_for = min(idle_for, spacing) - if last_run is not None: - delta = last_run + spacing - time.time() - if delta > 0: - idle_for = min(idle_for, delta) - continue - - LOG.debug("Running periodic task %(full_task_name)s", - {"full_task_name": full_task_name}) - self._periodic_last_run[task_name] = _nearest_boundary( - last_run, spacing) - - try: - task(self, context) - except Exception: - if raise_on_error: - raise - LOG.exception(_LE("Error during %(full_task_name)s"), - {"full_task_name": full_task_name}) - time.sleep(0) - - return idle_for diff --git a/neutron/openstack/common/service.py b/neutron/openstack/common/service.py deleted file mode 100644 index b757c008e22..00000000000 --- a/neutron/openstack/common/service.py +++ /dev/null @@ -1,507 +0,0 @@ -# Copyright 2010 United States 
Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# Copyright 2011 Justin Santa Barbara -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Generic Node base class for all workers that run on hosts.""" - -import errno -import io -import logging -import os -import random -import signal -import sys -import time - -import eventlet -from eventlet import event -from oslo_config import cfg - -from neutron.openstack.common import eventlet_backdoor -from neutron.openstack.common._i18n import _LE, _LI, _LW -from neutron.openstack.common import systemd -from neutron.openstack.common import threadgroup - - -CONF = cfg.CONF -LOG = logging.getLogger(__name__) - - -def _sighup_supported(): - return hasattr(signal, 'SIGHUP') - - -def _is_daemon(): - # The process group for a foreground process will match the - # process group of the controlling terminal. If those values do - # not match, or ioctl() fails on the stdout file handle, we assume - # the process is running in the background as a daemon. - # http://www.gnu.org/software/bash/manual/bashref.html#Job-Control-Basics - try: - is_daemon = os.getpgrp() != os.tcgetpgrp(sys.stdout.fileno()) - except io.UnsupportedOperation: - # Could not get the fileno for stdout, so we must be a daemon. - is_daemon = True - except OSError as err: - if err.errno == errno.ENOTTY: - # Assume we are a daemon because there is no terminal. 
- is_daemon = True - else: - raise - return is_daemon - - -def _is_sighup_and_daemon(signo): - if not (_sighup_supported() and signo == signal.SIGHUP): - # Avoid checking if we are a daemon, because the signal isn't - # SIGHUP. - return False - return _is_daemon() - - -def _signo_to_signame(signo): - signals = {signal.SIGTERM: 'SIGTERM', - signal.SIGINT: 'SIGINT'} - if _sighup_supported(): - signals[signal.SIGHUP] = 'SIGHUP' - return signals[signo] - - -def _set_signals_handler(handler): - signal.signal(signal.SIGTERM, handler) - signal.signal(signal.SIGINT, handler) - if _sighup_supported(): - signal.signal(signal.SIGHUP, handler) - - -class Launcher(object): - """Launch one or more services and wait for them to complete.""" - - def __init__(self): - """Initialize the service launcher. - - :returns: None - - """ - self.services = Services() - self.backdoor_port = eventlet_backdoor.initialize_if_enabled() - - def launch_service(self, service): - """Load and start the given service. - - :param service: The service you would like to start. - :returns: None - - """ - service.backdoor_port = self.backdoor_port - self.services.add(service) - - def stop(self): - """Stop all services which are currently running. - - :returns: None - - """ - self.services.stop() - - def wait(self): - """Waits until all services have been stopped, and then returns. - - :returns: None - - """ - self.services.wait() - - def restart(self): - """Reload config files and restart service. 
- - :returns: None - - """ - cfg.CONF.reload_config_files() - self.services.restart() - - -class SignalExit(SystemExit): - def __init__(self, signo, exccode=1): - super(SignalExit, self).__init__(exccode) - self.signo = signo - - -class ServiceLauncher(Launcher): - def _handle_signal(self, signo, frame): - # Allow the process to be killed again and die from natural causes - _set_signals_handler(signal.SIG_DFL) - raise SignalExit(signo) - - def handle_signal(self): - _set_signals_handler(self._handle_signal) - - def _wait_for_exit_or_signal(self, ready_callback=None): - status = None - signo = 0 - - LOG.debug('Full set of CONF:') - CONF.log_opt_values(LOG, logging.DEBUG) - - try: - if ready_callback: - ready_callback() - super(ServiceLauncher, self).wait() - except SignalExit as exc: - signame = _signo_to_signame(exc.signo) - LOG.info(_LI('Caught %s, exiting'), signame) - status = exc.code - signo = exc.signo - except SystemExit as exc: - status = exc.code - finally: - self.stop() - - return status, signo - - def wait(self, ready_callback=None): - systemd.notify_once() - while True: - self.handle_signal() - status, signo = self._wait_for_exit_or_signal(ready_callback) - if not _is_sighup_and_daemon(signo): - return status - self.restart() - - -class ServiceWrapper(object): - def __init__(self, service, workers): - self.service = service - self.workers = workers - self.children = set() - self.forktimes = [] - - -class ProcessLauncher(object): - _signal_handlers_set = set() - - @classmethod - def _handle_class_signals(cls, *args, **kwargs): - for handler in cls._signal_handlers_set: - handler(*args, **kwargs) - - def __init__(self, wait_interval=0.01): - """Constructor. - - :param wait_interval: The interval to sleep for between checks - of child process exit. 
- """ - self.children = {} - self.sigcaught = None - self.running = True - self.wait_interval = wait_interval - rfd, self.writepipe = os.pipe() - self.readpipe = eventlet.greenio.GreenPipe(rfd, 'r') - self.handle_signal() - - def handle_signal(self): - self._signal_handlers_set.add(self._handle_signal) - _set_signals_handler(self._handle_class_signals) - - def _handle_signal(self, signo, frame): - self.sigcaught = signo - self.running = False - - # Allow the process to be killed again and die from natural causes - _set_signals_handler(signal.SIG_DFL) - - def _pipe_watcher(self): - # This will block until the write end is closed when the parent - # dies unexpectedly - self.readpipe.read(1) - - LOG.info(_LI('Parent process has died unexpectedly, exiting')) - - sys.exit(1) - - def _child_process_handle_signal(self): - # Setup child signal handlers differently - def _sighup(*args): - signal.signal(signal.SIGHUP, signal.SIG_DFL) - raise SignalExit(signal.SIGHUP) - - # Parent signals with SIGTERM when it wants us to go away. - signal.signal(signal.SIGTERM, signal.SIG_DFL) - if _sighup_supported(): - signal.signal(signal.SIGHUP, _sighup) - # Block SIGINT and let the parent send us a SIGTERM - signal.signal(signal.SIGINT, signal.SIG_IGN) - - def _child_wait_for_exit_or_signal(self, launcher): - status = 0 - signo = 0 - - # NOTE(johannes): All exceptions are caught to ensure this - # doesn't fallback into the loop spawning children. It would - # be bad for a child to spawn more children. 
- try: - launcher.wait() - except SignalExit as exc: - signame = _signo_to_signame(exc.signo) - LOG.info(_LI('Child caught %s, exiting'), signame) - status = exc.code - signo = exc.signo - except SystemExit as exc: - status = exc.code - except BaseException: - LOG.exception(_LE('Unhandled exception')) - status = 2 - finally: - launcher.stop() - - return status, signo - - def _child_process(self, service): - self._child_process_handle_signal() - - # Reopen the eventlet hub to make sure we don't share an epoll - # fd with parent and/or siblings, which would be bad - eventlet.hubs.use_hub() - - # Close write to ensure only parent has it open - os.close(self.writepipe) - # Create greenthread to watch for parent to close pipe - eventlet.spawn_n(self._pipe_watcher) - - # Reseed random number generator - random.seed() - - launcher = Launcher() - launcher.launch_service(service) - return launcher - - def _start_child(self, wrap): - if len(wrap.forktimes) > wrap.workers: - # Limit ourselves to one process a second (over the period of - # number of workers * 1 second). This will allow workers to - # start up quickly but ensure we don't fork off children that - # die instantly too quickly. 
- if time.time() - wrap.forktimes[0] < wrap.workers: - LOG.info(_LI('Forking too fast, sleeping')) - time.sleep(1) - - wrap.forktimes.pop(0) - - wrap.forktimes.append(time.time()) - - pid = os.fork() - if pid == 0: - launcher = self._child_process(wrap.service) - while True: - self._child_process_handle_signal() - status, signo = self._child_wait_for_exit_or_signal(launcher) - if not _is_sighup_and_daemon(signo): - break - launcher.restart() - - os._exit(status) - - LOG.info(_LI('Started child %d'), pid) - - wrap.children.add(pid) - self.children[pid] = wrap - - return pid - - def launch_service(self, service, workers=1): - wrap = ServiceWrapper(service, workers) - - LOG.info(_LI('Starting %d workers'), wrap.workers) - while self.running and len(wrap.children) < wrap.workers: - self._start_child(wrap) - - def _wait_child(self): - try: - # Don't block if no child processes have exited - pid, status = os.waitpid(0, os.WNOHANG) - if not pid: - return None - except OSError as exc: - if exc.errno not in (errno.EINTR, errno.ECHILD): - raise - return None - - if os.WIFSIGNALED(status): - sig = os.WTERMSIG(status) - LOG.info(_LI('Child %(pid)d killed by signal %(sig)d'), - dict(pid=pid, sig=sig)) - else: - code = os.WEXITSTATUS(status) - LOG.info(_LI('Child %(pid)s exited with status %(code)d'), - dict(pid=pid, code=code)) - - if pid not in self.children: - LOG.warning(_LW('pid %d not in child list'), pid) - return None - - wrap = self.children.pop(pid) - wrap.children.remove(pid) - return wrap - - def _respawn_children(self): - while self.running: - wrap = self._wait_child() - if not wrap: - # Yield to other threads if no children have exited - # Sleep for a short time to avoid excessive CPU usage - # (see bug #1095346) - eventlet.greenthread.sleep(self.wait_interval) - continue - while self.running and len(wrap.children) < wrap.workers: - self._start_child(wrap) - - def wait(self): - """Loop waiting on children to die and respawning as necessary.""" - - 
systemd.notify_once() - LOG.debug('Full set of CONF:') - CONF.log_opt_values(LOG, logging.DEBUG) - - try: - while True: - self.handle_signal() - self._respawn_children() - # No signal means that stop was called. Don't clean up here. - if not self.sigcaught: - return - - signame = _signo_to_signame(self.sigcaught) - LOG.info(_LI('Caught %s, stopping children'), signame) - if not _is_sighup_and_daemon(self.sigcaught): - break - - cfg.CONF.reload_config_files() - for service in set( - [wrap.service for wrap in self.children.values()]): - service.reset() - - for pid in self.children: - os.kill(pid, signal.SIGHUP) - - self.running = True - self.sigcaught = None - except eventlet.greenlet.GreenletExit: - LOG.info(_LI("Wait called after thread killed. Cleaning up.")) - - self.stop() - - def stop(self): - """Terminate child processes and wait on each.""" - self.running = False - for pid in self.children: - try: - os.kill(pid, signal.SIGTERM) - except OSError as exc: - if exc.errno != errno.ESRCH: - raise - - # Wait for children to die - if self.children: - LOG.info(_LI('Waiting on %d children to exit'), len(self.children)) - while self.children: - self._wait_child() - - -class Service(object): - """Service object for binaries running on hosts.""" - - def __init__(self, threads=1000): - self.tg = threadgroup.ThreadGroup(threads) - - # signal that the service is done shutting itself down: - self._done = event.Event() - - def reset(self): - # NOTE(Fengqian): docs for Event.reset() recommend against using it - self._done = event.Event() - - def start(self): - pass - - def stop(self, graceful=False): - self.tg.stop(graceful) - self.tg.wait() - # Signal that service cleanup is done: - if not self._done.ready(): - self._done.send() - - def wait(self): - self._done.wait() - - -class Services(object): - - def __init__(self): - self.services = [] - self.tg = threadgroup.ThreadGroup() - self.done = event.Event() - - def add(self, service): - self.services.append(service) - 
self.tg.add_thread(self.run_service, service, self.done) - - def stop(self): - # wait for graceful shutdown of services: - for service in self.services: - service.stop() - service.wait() - - # Each service has performed cleanup, now signal that the run_service - # wrapper threads can now die: - if not self.done.ready(): - self.done.send() - - # reap threads: - self.tg.stop() - - def wait(self): - self.tg.wait() - - def restart(self): - self.stop() - self.done = event.Event() - for restart_service in self.services: - restart_service.reset() - self.tg.add_thread(self.run_service, restart_service, self.done) - - @staticmethod - def run_service(service, done): - """Service start wrapper. - - :param service: service to run - :param done: event to wait on until a shutdown is triggered - :returns: None - - """ - service.start() - done.wait() - - -def launch(service, workers=1): - if workers is None or workers == 1: - launcher = ServiceLauncher() - launcher.launch_service(service) - else: - launcher = ProcessLauncher() - launcher.launch_service(service, workers=workers) - - return launcher diff --git a/neutron/openstack/common/systemd.py b/neutron/openstack/common/systemd.py deleted file mode 100644 index 36243b342ab..00000000000 --- a/neutron/openstack/common/systemd.py +++ /dev/null @@ -1,105 +0,0 @@ -# Copyright 2012-2014 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Helper module for systemd service readiness notification. 
-""" - -import logging -import os -import socket -import sys - - -LOG = logging.getLogger(__name__) - - -def _abstractify(socket_name): - if socket_name.startswith('@'): - # abstract namespace socket - socket_name = '\0%s' % socket_name[1:] - return socket_name - - -def _sd_notify(unset_env, msg): - notify_socket = os.getenv('NOTIFY_SOCKET') - if notify_socket: - sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM) - try: - sock.connect(_abstractify(notify_socket)) - sock.sendall(msg) - if unset_env: - del os.environ['NOTIFY_SOCKET'] - except EnvironmentError: - LOG.debug("Systemd notification failed", exc_info=True) - finally: - sock.close() - - -def notify(): - """Send notification to Systemd that service is ready. - - For details see - http://www.freedesktop.org/software/systemd/man/sd_notify.html - """ - _sd_notify(False, 'READY=1') - - -def notify_once(): - """Send notification once to Systemd that service is ready. - - Systemd sets NOTIFY_SOCKET environment variable with the name of the - socket listening for notifications from services. - This method removes the NOTIFY_SOCKET environment variable to ensure - notification is sent only once. - """ - _sd_notify(True, 'READY=1') - - -def onready(notify_socket, timeout): - """Wait for systemd style notification on the socket. 
- - :param notify_socket: local socket address - :type notify_socket: string - :param timeout: socket timeout - :type timeout: float - :returns: 0 service ready - 1 service not ready - 2 timeout occurred - """ - sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM) - sock.settimeout(timeout) - sock.bind(_abstractify(notify_socket)) - try: - msg = sock.recv(512) - except socket.timeout: - return 2 - finally: - sock.close() - if 'READY=1' in msg: - return 0 - else: - return 1 - - -if __name__ == '__main__': - # simple CLI for testing - if len(sys.argv) == 1: - notify() - elif len(sys.argv) >= 2: - timeout = float(sys.argv[1]) - notify_socket = os.getenv('NOTIFY_SOCKET') - if notify_socket: - retval = onready(notify_socket, timeout) - sys.exit(retval) diff --git a/neutron/openstack/common/threadgroup.py b/neutron/openstack/common/threadgroup.py deleted file mode 100644 index a3f74cabda7..00000000000 --- a/neutron/openstack/common/threadgroup.py +++ /dev/null @@ -1,150 +0,0 @@ -# Copyright 2012 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -import logging -import threading - -import eventlet -from eventlet import greenpool - -from neutron.openstack.common._i18n import _LE -from neutron.openstack.common import loopingcall - - -LOG = logging.getLogger(__name__) - - -def _thread_done(gt, *args, **kwargs): - """Callback function to be passed to GreenThread.link() when we spawn() - Calls the :class:`ThreadGroup` to notify if. 
- - """ - kwargs['group'].thread_done(kwargs['thread']) - - -class Thread(object): - """Wrapper around a greenthread, that holds a reference to the - :class:`ThreadGroup`. The Thread will notify the :class:`ThreadGroup` when - it has done so it can be removed from the threads list. - """ - def __init__(self, thread, group): - self.thread = thread - self.thread.link(_thread_done, group=group, thread=self) - - def stop(self): - self.thread.kill() - - def wait(self): - return self.thread.wait() - - def link(self, func, *args, **kwargs): - self.thread.link(func, *args, **kwargs) - - -class ThreadGroup(object): - """The point of the ThreadGroup class is to: - - * keep track of timers and greenthreads (making it easier to stop them - when need be). - * provide an easy API to add timers. - """ - def __init__(self, thread_pool_size=10): - self.pool = greenpool.GreenPool(thread_pool_size) - self.threads = [] - self.timers = [] - - def add_dynamic_timer(self, callback, initial_delay=None, - periodic_interval_max=None, *args, **kwargs): - timer = loopingcall.DynamicLoopingCall(callback, *args, **kwargs) - timer.start(initial_delay=initial_delay, - periodic_interval_max=periodic_interval_max) - self.timers.append(timer) - - def add_timer(self, interval, callback, initial_delay=None, - *args, **kwargs): - pulse = loopingcall.FixedIntervalLoopingCall(callback, *args, **kwargs) - pulse.start(interval=interval, - initial_delay=initial_delay) - self.timers.append(pulse) - - def add_thread(self, callback, *args, **kwargs): - gt = self.pool.spawn(callback, *args, **kwargs) - th = Thread(gt, self) - self.threads.append(th) - return th - - def thread_done(self, thread): - self.threads.remove(thread) - - def _stop_threads(self): - current = threading.current_thread() - - # Iterate over a copy of self.threads so thread_done doesn't - # modify the list while we're iterating - for x in self.threads[:]: - if x is current: - # don't kill the current thread. 
- continue - try: - x.stop() - except eventlet.greenlet.GreenletExit: - pass - except Exception: - LOG.exception(_LE('Error stopping thread.')) - - def stop_timers(self): - for x in self.timers: - try: - x.stop() - except Exception: - LOG.exception(_LE('Error stopping timer.')) - self.timers = [] - - def stop(self, graceful=False): - """stop function has the option of graceful=True/False. - - * In case of graceful=True, wait for all threads to be finished. - Never kill threads. - * In case of graceful=False, kill threads immediately. - """ - self.stop_timers() - if graceful: - # In case of graceful=True, wait for all threads to be - # finished, never kill threads - self.wait() - else: - # In case of graceful=False(Default), kill threads - # immediately - self._stop_threads() - - def wait(self): - for x in self.timers: - try: - x.wait() - except eventlet.greenlet.GreenletExit: - pass - except Exception: - LOG.exception(_LE('Error waiting on ThreadGroup.')) - current = threading.current_thread() - - # Iterate over a copy of self.threads so thread_done doesn't - # modify the list while we're iterating - for x in self.threads[:]: - if x is current: - continue - try: - x.wait() - except eventlet.greenlet.GreenletExit: - pass - except Exception as ex: - LOG.exception(ex) diff --git a/neutron/plugins/hyperv/agent/l2_agent.py b/neutron/plugins/hyperv/agent/l2_agent.py index 5818b2f00aa..5b6a8f31dec 100644 --- a/neutron/plugins/hyperv/agent/l2_agent.py +++ b/neutron/plugins/hyperv/agent/l2_agent.py @@ -20,6 +20,7 @@ from hyperv.neutron import hyperv_neutron_agent from oslo_config import cfg from oslo_log import log as logging import oslo_messaging +from oslo_service import loopingcall from neutron.agent import rpc as agent_rpc from neutron.agent import securitygroups_rpc as sg_rpc @@ -28,7 +29,6 @@ from neutron.common import rpc as n_rpc from neutron.common import topics from neutron import context from neutron.i18n import _LE -from neutron.openstack.common import 
loopingcall LOG = logging.getLogger(__name__) CONF = cfg.CONF diff --git a/neutron/plugins/ibm/agent/sdnve_neutron_agent.py b/neutron/plugins/ibm/agent/sdnve_neutron_agent.py index f00c9aca484..a9827c52e14 100644 --- a/neutron/plugins/ibm/agent/sdnve_neutron_agent.py +++ b/neutron/plugins/ibm/agent/sdnve_neutron_agent.py @@ -25,6 +25,7 @@ eventlet.monkey_patch() from oslo_config import cfg from oslo_log import log as logging import oslo_messaging +from oslo_service import loopingcall import six from neutron.agent.common import ovs_lib @@ -36,7 +37,6 @@ from neutron.common import topics from neutron.common import utils as n_utils from neutron.i18n import _LE, _LI from neutron import context -from neutron.openstack.common import loopingcall from neutron.plugins.ibm.common import constants diff --git a/neutron/plugins/ml2/drivers/cisco/apic/apic_sync.py b/neutron/plugins/ml2/drivers/cisco/apic/apic_sync.py index 08873d70219..fca4e2c1188 100644 --- a/neutron/plugins/ml2/drivers/cisco/apic/apic_sync.py +++ b/neutron/plugins/ml2/drivers/cisco/apic/apic_sync.py @@ -14,12 +14,12 @@ # under the License. 
from oslo_log import log +from oslo_service import loopingcall from neutron.common import constants as n_constants from neutron import context from neutron.i18n import _LW from neutron import manager -from neutron.openstack.common import loopingcall from neutron.plugins.ml2 import db as l2_db from neutron.plugins.ml2 import driver_context diff --git a/neutron/plugins/ml2/drivers/cisco/apic/apic_topology.py b/neutron/plugins/ml2/drivers/cisco/apic/apic_topology.py index d4901bcf84c..8a1be65a1b0 100644 --- a/neutron/plugins/ml2/drivers/cisco/apic/apic_topology.py +++ b/neutron/plugins/ml2/drivers/cisco/apic/apic_topology.py @@ -23,6 +23,8 @@ from oslo_concurrency import lockutils from oslo_config import cfg from oslo_log import log as logging import oslo_messaging +from oslo_service import periodic_task +from oslo_service import service as svc from neutron.agent.common import config from neutron.agent.linux import ip_lib @@ -33,8 +35,6 @@ from neutron.common import utils as neutron_utils from neutron.db import agents_db from neutron.i18n import _LE, _LI from neutron import manager -from neutron.openstack.common import periodic_task -from neutron.openstack.common import service as svc from neutron.plugins.ml2.drivers.cisco.apic import mechanism_apic as ma from neutron.plugins.ml2.drivers import type_vlan # noqa @@ -325,7 +325,7 @@ def launch(binary, manager, topic=None): server = service.Service.create( binary=binary, manager=manager, topic=topic, report_interval=report_period, periodic_interval=poll_period) - svc.launch(server).wait() + svc.launch(cfg.CONF, server).wait() def service_main(): diff --git a/neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py b/neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py index 66be308a29a..69da70c79bd 100644 --- a/neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py +++ b/neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py @@ -29,6 +29,8 @@ 
eventlet.monkey_patch() from oslo_config import cfg from oslo_log import log as logging import oslo_messaging +from oslo_service import loopingcall +from oslo_service import service from six import moves from neutron.agent.linux import ip_lib @@ -42,8 +44,6 @@ from neutron.common import topics from neutron.common import utils as q_utils from neutron import context from neutron.i18n import _LE, _LI, _LW -from neutron.openstack.common import loopingcall -from neutron.openstack.common import service from neutron.plugins.common import constants as p_const from neutron.plugins.ml2.drivers.l2pop.rpc_manager \ import l2population_rpc as l2pop_rpc @@ -1055,7 +1055,7 @@ def main(): polling_interval, quitting_rpc_timeout) LOG.info(_LI("Agent initialized successfully, now running... ")) - launcher = service.launch(agent) + launcher = service.launch(cfg.CONF, agent) launcher.wait() diff --git a/neutron/plugins/ml2/drivers/mech_sriov/agent/sriov_nic_agent.py b/neutron/plugins/ml2/drivers/mech_sriov/agent/sriov_nic_agent.py index c98e94dc4a7..45124fd42c5 100644 --- a/neutron/plugins/ml2/drivers/mech_sriov/agent/sriov_nic_agent.py +++ b/neutron/plugins/ml2/drivers/mech_sriov/agent/sriov_nic_agent.py @@ -24,6 +24,7 @@ eventlet.monkey_patch() from oslo_config import cfg from oslo_log import log as logging import oslo_messaging +from oslo_service import loopingcall from neutron.agent import rpc as agent_rpc from neutron.agent import securitygroups_rpc as sg_rpc @@ -33,7 +34,6 @@ from neutron.common import topics from neutron.common import utils as q_utils from neutron import context from neutron.i18n import _LE, _LI -from neutron.openstack.common import loopingcall from neutron.plugins.ml2.drivers.mech_sriov.agent.common import config # noqa from neutron.plugins.ml2.drivers.mech_sriov.agent.common \ import exceptions as exc diff --git a/neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py b/neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py index 
ca52b216257..0c294638680 100644 --- a/neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py +++ b/neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py @@ -23,6 +23,7 @@ import netaddr from oslo_config import cfg from oslo_log import log as logging import oslo_messaging +from oslo_service import loopingcall import six from six import moves @@ -40,7 +41,6 @@ from neutron.common import topics from neutron.common import utils as q_utils from neutron import context from neutron.i18n import _LE, _LI, _LW -from neutron.openstack.common import loopingcall from neutron.plugins.common import constants as p_const from neutron.plugins.ml2.drivers.l2pop.rpc_manager import l2population_rpc from neutron.plugins.ml2.drivers.openvswitch.agent.common \ diff --git a/neutron/service.py b/neutron/service.py index 76b0fd90d2e..ee8432dea50 100644 --- a/neutron/service.py +++ b/neutron/service.py @@ -22,6 +22,8 @@ from oslo_concurrency import processutils from oslo_config import cfg from oslo_log import log as logging from oslo_messaging import server as rpc_server +from oslo_service import loopingcall +from oslo_service import service as common_service from oslo_utils import excutils from oslo_utils import importutils @@ -31,8 +33,6 @@ from neutron import context from neutron.db import api as session from neutron.i18n import _LE, _LI from neutron import manager -from neutron.openstack.common import loopingcall -from neutron.openstack.common import service as common_service from neutron import wsgi @@ -111,7 +111,7 @@ def serve_wsgi(cls): return service -class RpcWorker(object): +class RpcWorker(common_service.ServiceBase): """Wraps a worker to be handled by ProcessLauncher""" def __init__(self, plugin): self._plugin = plugin @@ -161,7 +161,8 @@ def serve_rpc(): # be shared DB connections in child processes which may cause # DB errors. 
session.dispose() - launcher = common_service.ProcessLauncher(wait_interval=1.0) + launcher = common_service.ProcessLauncher(cfg.CONF, + wait_interval=1.0) launcher.launch_service(rpc, workers=cfg.CONF.rpc_workers) return launcher except Exception: diff --git a/neutron/services/metering/agents/metering_agent.py b/neutron/services/metering/agents/metering_agent.py index f30569e136e..7531f41e959 100644 --- a/neutron/services/metering/agents/metering_agent.py +++ b/neutron/services/metering/agents/metering_agent.py @@ -18,6 +18,9 @@ import time from oslo_config import cfg from oslo_log import log as logging import oslo_messaging +from oslo_service import loopingcall +from oslo_service import periodic_task +from oslo_service import service from oslo_utils import importutils from neutron.agent.common import config @@ -30,9 +33,6 @@ from neutron.common import utils from neutron import context from neutron.i18n import _LE, _LI, _LW from neutron import manager -from neutron.openstack.common import loopingcall -from neutron.openstack.common import periodic_task -from neutron.openstack.common import service from neutron import service as neutron_service @@ -298,4 +298,4 @@ def main(): report_interval=cfg.CONF.AGENT.report_interval, manager='neutron.services.metering.agents.' 
'metering_agent.MeteringAgentWithStateReport') - service.launch(server).wait() + service.launch(cfg.CONF, server).wait() diff --git a/neutron/tests/unit/agent/dhcp/test_agent.py b/neutron/tests/unit/agent/dhcp/test_agent.py index 910a834ba43..876bf8db424 100644 --- a/neutron/tests/unit/agent/dhcp/test_agent.py +++ b/neutron/tests/unit/agent/dhcp/test_agent.py @@ -261,7 +261,7 @@ class TestDhcpAgent(base.BaseTestCase): def test_dhcp_agent_main_agent_manager(self): logging_str = 'neutron.agent.common.config.setup_logging' - launcher_str = 'neutron.openstack.common.service.ServiceLauncher' + launcher_str = 'oslo_service.service.ServiceLauncher' with mock.patch(logging_str): with mock.patch.object(sys, 'argv') as sys_argv: with mock.patch(launcher_str) as launcher: @@ -269,7 +269,8 @@ class TestDhcpAgent(base.BaseTestCase): base.etcdir('neutron.conf')] entry.main() launcher.assert_has_calls( - [mock.call(), mock.call().launch_service(mock.ANY), + [mock.call(cfg.CONF), + mock.call().launch_service(mock.ANY), mock.call().wait()]) def test_run_completes_single_pass(self): diff --git a/neutron/tests/unit/agent/l3/test_agent.py b/neutron/tests/unit/agent/l3/test_agent.py index 0b28fd5993c..234e91cbe64 100644 --- a/neutron/tests/unit/agent/l3/test_agent.py +++ b/neutron/tests/unit/agent/l3/test_agent.py @@ -132,7 +132,7 @@ class BasicRouterOperationsFramework(base.BaseTestCase): l3pluginApi_cls.return_value = self.plugin_api self.looping_call_p = mock.patch( - 'neutron.openstack.common.loopingcall.FixedIntervalLoopingCall') + 'oslo_service.loopingcall.FixedIntervalLoopingCall') self.looping_call_p.start() subnet_id_1 = _uuid() diff --git a/neutron/tests/unit/agent/l3/test_dvr_local_router.py b/neutron/tests/unit/agent/l3/test_dvr_local_router.py index 2b5b5a6d366..51e89802f95 100644 --- a/neutron/tests/unit/agent/l3/test_dvr_local_router.py +++ b/neutron/tests/unit/agent/l3/test_dvr_local_router.py @@ -116,7 +116,7 @@ class TestDvrRouterOperations(base.BaseTestCase): 
l3pluginApi_cls.return_value = self.plugin_api self.looping_call_p = mock.patch( - 'neutron.openstack.common.loopingcall.FixedIntervalLoopingCall') + 'oslo_service.loopingcall.FixedIntervalLoopingCall') self.looping_call_p.start() subnet_id_1 = _uuid() diff --git a/neutron/tests/unit/agent/metadata/test_agent.py b/neutron/tests/unit/agent/metadata/test_agent.py index eaa5a773fe8..9bef96864c8 100644 --- a/neutron/tests/unit/agent/metadata/test_agent.py +++ b/neutron/tests/unit/agent/metadata/test_agent.py @@ -517,7 +517,7 @@ class TestUnixDomainMetadataProxy(base.BaseTestCase): self.cfg_p = mock.patch.object(agent, 'cfg') self.cfg = self.cfg_p.start() looping_call_p = mock.patch( - 'neutron.openstack.common.loopingcall.FixedIntervalLoopingCall') + 'oslo_service.loopingcall.FixedIntervalLoopingCall') self.looping_mock = looping_call_p.start() self.cfg.CONF.metadata_proxy_socket = '/the/path' self.cfg.CONF.metadata_workers = 0 diff --git a/neutron/tests/unit/plugins/ibm/test_sdnve_agent.py b/neutron/tests/unit/plugins/ibm/test_sdnve_agent.py index a170704c826..08d689e127d 100644 --- a/neutron/tests/unit/plugins/ibm/test_sdnve_agent.py +++ b/neutron/tests/unit/plugins/ibm/test_sdnve_agent.py @@ -68,7 +68,7 @@ class TestSdnveNeutronAgent(base.BaseTestCase): with mock.patch('neutron.plugins.ibm.agent.sdnve_neutron_agent.' 'SdnveNeutronAgent.setup_integration_br', return_value=mock.Mock()),\ - mock.patch('neutron.openstack.common.loopingcall.' + mock.patch('oslo_service.loopingcall.' 
'FixedIntervalLoopingCall', new=MockFixedIntervalLoopingCall): self.agent = sdnve_neutron_agent.SdnveNeutronAgent(**kwargs) diff --git a/neutron/tests/unit/plugins/ml2/drivers/cisco/apic/test_apic_sync.py b/neutron/tests/unit/plugins/ml2/drivers/cisco/apic/test_apic_sync.py index b687f478883..47584710105 100644 --- a/neutron/tests/unit/plugins/ml2/drivers/cisco/apic/test_apic_sync.py +++ b/neutron/tests/unit/plugins/ml2/drivers/cisco/apic/test_apic_sync.py @@ -22,7 +22,7 @@ sys.modules["apicapi"] = mock.Mock() from neutron.plugins.ml2.drivers.cisco.apic import apic_sync from neutron.tests import base -LOOPING_CALL = 'neutron.openstack.common.loopingcall.FixedIntervalLoopingCall' +LOOPING_CALL = 'oslo_service.loopingcall.FixedIntervalLoopingCall' GET_PLUGIN = 'neutron.manager.NeutronManager.get_plugin' GET_ADMIN_CONTEXT = 'neutron.context.get_admin_context' L2_DB = 'neutron.plugins.ml2.db.get_locked_port_and_binding' diff --git a/neutron/tests/unit/plugins/ml2/drivers/cisco/apic/test_apic_topology.py b/neutron/tests/unit/plugins/ml2/drivers/cisco/apic/test_apic_topology.py index 016f16453f2..292cb54e0ff 100644 --- a/neutron/tests/unit/plugins/ml2/drivers/cisco/apic/test_apic_topology.py +++ b/neutron/tests/unit/plugins/ml2/drivers/cisco/apic/test_apic_topology.py @@ -28,7 +28,7 @@ NOTIFIER = ('neutron.plugins.ml2.drivers.cisco.apic.' 
'apic_topology.ApicTopologyServiceNotifierApi') RPC_CONNECTION = 'neutron.common.rpc.Connection' AGENTS_DB = 'neutron.db.agents_db' -PERIODIC_TASK = 'neutron.openstack.common.periodic_task' +PERIODIC_TASK = 'oslo_service.periodic_task' DEV_EXISTS = 'neutron.agent.linux.ip_lib.device_exists' IP_DEVICE = 'neutron.agent.linux.ip_lib.IPDevice' EXECUTE = 'neutron.agent.linux.utils.execute' diff --git a/neutron/tests/unit/plugins/ml2/drivers/mech_sriov/agent/test_sriov_nic_agent.py b/neutron/tests/unit/plugins/ml2/drivers/mech_sriov/agent/test_sriov_nic_agent.py index 5e16c15b3a9..9729d3fb7be 100644 --- a/neutron/tests/unit/plugins/ml2/drivers/mech_sriov/agent/test_sriov_nic_agent.py +++ b/neutron/tests/unit/plugins/ml2/drivers/mech_sriov/agent/test_sriov_nic_agent.py @@ -43,7 +43,7 @@ class TestSriovAgent(base.BaseTestCase): def start(self, interval=0): self.f() - mock.patch('neutron.openstack.common.loopingcall.' + mock.patch('oslo_service.loopingcall.' 'FixedIntervalLoopingCall', new=MockFixedIntervalLoopingCall) diff --git a/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/test_ovs_neutron_agent.py b/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/test_ovs_neutron_agent.py index b3ab4fa3efb..ebf02ec6c5a 100644 --- a/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/test_ovs_neutron_agent.py +++ b/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/test_ovs_neutron_agent.py @@ -124,8 +124,7 @@ class TestOvsNeutronAgent(object): return_value='00:00:00:00:00:01'),\ mock.patch( 'neutron.agent.common.ovs_lib.BaseOVS.get_bridges'),\ - mock.patch('neutron.openstack.common.loopingcall.' - 'FixedIntervalLoopingCall', + mock.patch('oslo_service.loopingcall.FixedIntervalLoopingCall', new=MockFixedIntervalLoopingCall),\ mock.patch( 'neutron.agent.common.ovs_lib.OVSBridge.' 
'get_vif_ports', @@ -1292,7 +1291,7 @@ class TestOvsDvrNeutronAgent(object): return_value='00:00:00:00:00:01'),\ mock.patch( 'neutron.agent.common.ovs_lib.BaseOVS.get_bridges'),\ - mock.patch('neutron.openstack.common.loopingcall.' + mock.patch('oslo_service.loopingcall.' 'FixedIntervalLoopingCall', new=MockFixedIntervalLoopingCall),\ mock.patch( diff --git a/neutron/tests/unit/plugins/oneconvergence/test_nvsd_agent.py b/neutron/tests/unit/plugins/oneconvergence/test_nvsd_agent.py index 769bf4f421f..e840537bd0a 100644 --- a/neutron/tests/unit/plugins/oneconvergence/test_nvsd_agent.py +++ b/neutron/tests/unit/plugins/oneconvergence/test_nvsd_agent.py @@ -36,7 +36,7 @@ class TestOneConvergenceAgentBase(base.BaseTestCase): cfg.CONF.set_default('firewall_driver', 'neutron.agent.firewall.NoopFirewallDriver', group='SECURITYGROUP') - with mock.patch('neutron.openstack.common.loopingcall.' + with mock.patch('oslo_service.loopingcall.' 'FixedIntervalLoopingCall') as loopingcall: kwargs = {'integ_br': 'integration_bridge', 'polling_interval': 5} diff --git a/neutron/tests/unit/services/metering/agents/test_metering_agent.py b/neutron/tests/unit/services/metering/agents/test_metering_agent.py index 9b50dde1e27..11601873c51 100644 --- a/neutron/tests/unit/services/metering/agents/test_metering_agent.py +++ b/neutron/tests/unit/services/metering/agents/test_metering_agent.py @@ -67,7 +67,7 @@ class TestMeteringOperations(base.BaseTestCase): self.driver_patch.start() loopingcall_patch = mock.patch( - 'neutron.openstack.common.loopingcall.FixedIntervalLoopingCall') + 'oslo_service.loopingcall.FixedIntervalLoopingCall') loopingcall_patch.start() self.agent = metering_agent.MeteringAgent('my agent', cfg.CONF) @@ -172,7 +172,7 @@ class TestMeteringDriver(base.BaseTestCase): 'add_metering_label'}) def test_init_chain(self): - with mock.patch('neutron.openstack.common.' + with mock.patch('oslo_service.' 
'periodic_task.PeriodicTasks.__init__') as init: metering_agent.MeteringAgent('my agent', cfg.CONF) - init.assert_called_once_with() + init.assert_called_once_with(cfg.CONF) diff --git a/neutron/tests/unit/test_wsgi.py b/neutron/tests/unit/test_wsgi.py index 0f94a14ca23..b64e03937aa 100644 --- a/neutron/tests/unit/test_wsgi.py +++ b/neutron/tests/unit/test_wsgi.py @@ -95,7 +95,7 @@ class TestWSGIServer(base.BaseTestCase): server.stop() server.wait() - @mock.patch('neutron.openstack.common.service.ProcessLauncher') + @mock.patch('oslo_service.service.ProcessLauncher') def test_start_multiple_workers(self, ProcessLauncher): launcher = ProcessLauncher.return_value diff --git a/neutron/wsgi.py b/neutron/wsgi.py index 2bde31369b4..a207c35d24f 100644 --- a/neutron/wsgi.py +++ b/neutron/wsgi.py @@ -31,6 +31,8 @@ import oslo_i18n from oslo_log import log as logging from oslo_log import loggers from oslo_serialization import jsonutils +from oslo_service import service as common_service +from oslo_service import systemd from oslo_utils import excutils import routes.middleware import six @@ -42,8 +44,6 @@ from neutron.common import exceptions as exception from neutron import context from neutron.db import api from neutron.i18n import _LE, _LI -from neutron.openstack.common import service as common_service -from neutron.openstack.common import systemd socket_opts = [ cfg.IntOpt('backlog', @@ -92,7 +92,7 @@ CONF.register_opts(socket_opts) LOG = logging.getLogger(__name__) -class WorkerService(object): +class WorkerService(common_service.ServiceBase): """Wraps a worker to be handled by ProcessLauncher""" def __init__(self, service, application): self._service = service @@ -248,7 +248,8 @@ class Server(object): # The API service runs in a number of child processes. # Minimize the cost of checking for child exit by extending the # wait interval past the default of 0.01s. 
- self._server = common_service.ProcessLauncher(wait_interval=1.0) + self._server = common_service.ProcessLauncher(cfg.CONF, + wait_interval=1.0) self._server.launch_service(service, workers=workers) @property diff --git a/openstack-common.conf b/openstack-common.conf index 12270514017..fbc952c8fae 100644 --- a/openstack-common.conf +++ b/openstack-common.conf @@ -1,17 +1,11 @@ [DEFAULT] # The list of modules to copy from oslo-incubator.git module=cache -module=eventlet_backdoor module=fileutils # The following module is not synchronized by update.sh script since it's # located in tools/ not neutron/openstack/common/. Left here to make it # explicit that we still ship code from incubator here #module=install_venv_common -module=loopingcall -module=periodic_task -module=service -module=systemd -module=threadgroup # The base module to hold the copy of openstack.common base=neutron diff --git a/requirements.txt b/requirements.txt index 9d4e1c260bd..0d9d0127996 100644 --- a/requirements.txt +++ b/requirements.txt @@ -33,6 +33,7 @@ oslo.middleware!=2.0.0,>=1.2.0 # Apache-2.0 oslo.policy>=0.5.0 # Apache-2.0 oslo.rootwrap>=2.0.0 # Apache-2.0 oslo.serialization>=1.4.0 # Apache-2.0 +oslo.service>=0.1.0 # Apache-2.0 oslo.utils>=1.6.0 # Apache-2.0 python-novaclient>=2.22.0 From 4dc68ea88bf4f07b13253bf9eeedffe22b1f8013 Mon Sep 17 00:00:00 2001 From: Kevin Benton Date: Thu, 28 May 2015 23:13:19 -0700 Subject: [PATCH 16/54] Read vif port information in bulk During startup, the agent was making many calls per port to read information about the current VLAN, external ID, etc. This resulted in hundreds of calls just to read information about a relatively small number of ports. This patch addresses that by converting a few key functions to lookup information for all of the ports at once. 
Performance improvement on dev laptop for 250 ports from agent start to port ACTIVE status: before: 1m21s after: 1m06s Closes-Bug: #1460233 Change-Id: Ic80c85a07fee3e5651dc19819c6cebdc2048dda7 --- neutron/agent/common/ovs_lib.py | 71 ++++++++++++++----- .../openvswitch/agent/ovs_neutron_agent.py | 22 ++++-- .../tests/functional/agent/test_ovs_lib.py | 9 +++ .../tests/unit/agent/common/test_ovs_lib.py | 44 +++++++++--- .../agent/test_ovs_neutron_agent.py | 27 +++++-- .../openvswitch/agent/test_ovs_tunnel.py | 5 +- 6 files changed, 138 insertions(+), 40 deletions(-) diff --git a/neutron/agent/common/ovs_lib.py b/neutron/agent/common/ovs_lib.py index 81340c59888..6db93474cec 100644 --- a/neutron/agent/common/ovs_lib.py +++ b/neutron/agent/common/ovs_lib.py @@ -141,6 +141,12 @@ class BaseOVS(object): return self.ovsdb.db_get(table, record, column).execute( check_error=check_error, log_errors=log_errors) + def db_list(self, table, records=None, columns=None, + check_error=True, log_errors=True, if_exists=False): + return (self.ovsdb.db_list(table, records=records, columns=columns, + if_exists=if_exists). 
+ execute(check_error=check_error, log_errors=log_errors)) + class OVSBridge(BaseOVS): def __init__(self, br_name): @@ -319,11 +325,12 @@ class OVSBridge(BaseOVS): def get_vif_ports(self): edge_ports = [] port_names = self.get_port_name_list() + port_info = self.db_list( + 'Interface', columns=['name', 'external_ids', 'ofport']) + by_name = {x['name']: x for x in port_info} for name in port_names: - external_ids = self.db_get_val("Interface", name, "external_ids", - check_error=True) - ofport = self.db_get_val("Interface", name, "ofport", - check_error=True) + external_ids = by_name[name]['external_ids'] + ofport = by_name[name]['ofport'] if "iface-id" in external_ids and "attached-mac" in external_ids: p = VifPort(name, ofport, external_ids["iface-id"], external_ids["attached-mac"], self) @@ -341,10 +348,9 @@ class OVSBridge(BaseOVS): def get_vif_port_to_ofport_map(self): port_names = self.get_port_name_list() - cmd = self.ovsdb.db_list( - 'Interface', port_names, - columns=['name', 'external_ids', 'ofport'], if_exists=True) - results = cmd.execute(check_error=True) + results = self.db_list( + 'Interface', port_names, ['name', 'external_ids', 'ofport'], + if_exists=True) port_map = {} for r in results: # fall back to basic interface name @@ -359,10 +365,9 @@ class OVSBridge(BaseOVS): def get_vif_port_set(self): edge_ports = set() port_names = self.get_port_name_list() - cmd = self.ovsdb.db_list( - 'Interface', port_names, - columns=['name', 'external_ids', 'ofport'], if_exists=True) - results = cmd.execute(check_error=True) + results = self.db_list( + 'Interface', port_names, ['name', 'external_ids', 'ofport'], + if_exists=True) for result in results: if result['ofport'] == UNASSIGNED_OFPORT: LOG.warn(_LW("Found not yet ready openvswitch port: %s"), @@ -400,11 +405,42 @@ class OVSBridge(BaseOVS): """ port_names = self.get_port_name_list() - cmd = self.ovsdb.db_list('Port', port_names, columns=['name', 'tag'], - if_exists=True) - results = 
cmd.execute(check_error=True) + results = self.db_list('Port', port_names, ['name', 'tag'], + if_exists=True) return {p['name']: p['tag'] for p in results} + def get_vifs_by_ids(self, port_ids): + interface_info = self.db_list( + "Interface", columns=["name", "external_ids", "ofport"]) + by_id = {x['external_ids'].get('iface-id'): x for x in interface_info} + intfs_on_bridge = self.ovsdb.list_ports(self.br_name).execute( + check_error=True) + result = {} + for port_id in port_ids: + result[port_id] = None + if (port_id not in by_id or + by_id[port_id]['name'] not in intfs_on_bridge): + LOG.info(_LI("Port %(port_id)s not present in bridge " + "%(br_name)s"), + {'port_id': port_id, 'br_name': self.br_name}) + continue + pinfo = by_id[port_id] + if not self._check_ofport(port_id, pinfo): + continue + mac = pinfo['external_ids'].get('attached-mac') + result[port_id] = VifPort(pinfo['name'], pinfo['ofport'], + port_id, mac, self) + return result + + @staticmethod + def _check_ofport(port_id, port_info): + if port_info['ofport'] in [UNASSIGNED_OFPORT, INVALID_OFPORT]: + LOG.warn(_LW("ofport: %(ofport)s for VIF: %(vif)s is not a" + " positive integer"), + {'ofport': port_info['ofport'], 'vif': port_id}) + return False + return True + def get_vif_port_by_id(self, port_id): ports = self.ovsdb.db_find( 'Interface', ('external_ids', '=', {'iface-id': port_id}), @@ -413,10 +449,7 @@ class OVSBridge(BaseOVS): for port in ports: if self.br_name != self.get_bridge_for_iface(port['name']): continue - if port['ofport'] in [UNASSIGNED_OFPORT, INVALID_OFPORT]: - LOG.warn(_LW("ofport: %(ofport)s for VIF: %(vif)s is not a" - " positive integer"), - {'ofport': port['ofport'], 'vif': port_id}) + if not self._check_ofport(port_id, port): continue mac = port['external_ids'].get('attached-mac') return VifPort(port['name'], port['ofport'], port_id, mac, self) diff --git a/neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py 
b/neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py index c07daadb121..60940e5b5b2 100644 --- a/neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py +++ b/neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py @@ -312,10 +312,17 @@ class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin, def _restore_local_vlan_map(self): cur_ports = self.int_br.get_vif_ports() + port_info = self.int_br.db_list( + "Port", columns=["name", "other_config", "tag"]) + by_name = {x['name']: x for x in port_info} for port in cur_ports: - local_vlan_map = self.int_br.db_get_val("Port", port.port_name, - "other_config") - local_vlan = self.int_br.db_get_val("Port", port.port_name, "tag") + # if a port was deleted between get_vif_ports and db_lists, we + # will get a KeyError + try: + local_vlan_map = by_name[port.port_name]['other_config'] + local_vlan = by_name[port.port_name]['tag'] + except KeyError: + continue if not local_vlan: continue net_uuid = local_vlan_map.get('net_uuid') @@ -730,6 +737,9 @@ class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin, port_other_config) def _bind_devices(self, need_binding_ports): + port_info = self.int_br.db_list( + "Port", columns=["name", "tag"]) + tags_by_name = {x['name']: x['tag'] for x in port_info} for port_detail in need_binding_ports: lvm = self.local_vlan_map.get(port_detail['network_id']) if not lvm: @@ -739,7 +749,7 @@ class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin, port = port_detail['vif_port'] device = port_detail['device'] # Do not bind a port if it's already bound - cur_tag = self.int_br.db_get_val("Port", port.port_name, "tag") + cur_tag = tags_by_name.get(port.port_name) if cur_tag != lvm.vlan: self.int_br.set_db_attribute( "Port", port.port_name, "tag", lvm.vlan) @@ -1196,10 +1206,12 @@ class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin, self.conf.host) except Exception as e: raise DeviceListRetrievalError(devices=devices, error=e) + 
vif_by_id = self.int_br.get_vifs_by_ids( + [vif['device'] for vif in devices_details_list]) for details in devices_details_list: device = details['device'] LOG.debug("Processing port: %s", device) - port = self.int_br.get_vif_port_by_id(device) + port = vif_by_id.get(device) if not port: # The port disappeared and cannot be processed LOG.info(_LI("Port %s was not found on the integration bridge " diff --git a/neutron/tests/functional/agent/test_ovs_lib.py b/neutron/tests/functional/agent/test_ovs_lib.py index f430481899b..d90e7fbfa25 100644 --- a/neutron/tests/functional/agent/test_ovs_lib.py +++ b/neutron/tests/functional/agent/test_ovs_lib.py @@ -209,6 +209,15 @@ class OVSBridgeTestCase(OVSBridgeTestBase): self.assertEqual(self.br.get_vif_port_by_id(vif.vif_id).vif_id, vif.vif_id) + def test_get_vifs_by_ids(self): + for i in range(2): + self.create_ovs_port() + vif_ports = [self.create_ovs_vif_port() for i in range(3)] + by_id = self.br.get_vifs_by_ids([v.vif_id for v in vif_ports]) + # convert to str for comparison of VifPorts + by_id = {vid: str(vport) for vid, vport in by_id.items()} + self.assertEqual({v.vif_id: str(v) for v in vif_ports}, by_id) + def test_delete_ports(self): # TODO(twilson) I intensely dislike the current delete_ports function # as the default behavior is really delete_vif_ports(), then it acts diff --git a/neutron/tests/unit/agent/common/test_ovs_lib.py b/neutron/tests/unit/agent/common/test_ovs_lib.py index 9e9c514dcd8..3f0b12b3b85 100644 --- a/neutron/tests/unit/agent/common/test_ovs_lib.py +++ b/neutron/tests/unit/agent/common/test_ovs_lib.py @@ -470,23 +470,22 @@ class OVS_Lib_Test(base.BaseTestCase): def _test_get_vif_ports(self, is_xen=False): pname = "tap99" ofport = 6 - ofport_data = self._encode_ovs_json(['ofport'], [[ofport]]) vif_id = uuidutils.generate_uuid() mac = "ca:fe:de:ad:be:ef" id_field = 'xs-vif-uuid' if is_xen else 'iface-id' external_ids = ('{"data":[[["map",[["attached-mac","%(mac)s"],' '["%(id_field)s","%(vif)s"],' 
- '["iface-status","active"]]]]],' - '"headings":["external_ids"]}' % { - 'mac': mac, 'vif': vif_id, 'id_field': id_field}) + '["iface-status","active"]]], ' + '"%(name)s", %(ofport)s]],' + '"headings":["external_ids", "name", "ofport"]}' % { + 'mac': mac, 'vif': vif_id, 'id_field': id_field, + 'name': pname, 'ofport': ofport}) # Each element is a tuple of (expected mock call, return_value) expected_calls_and_values = [ (self._vsctl_mock("list-ports", self.BR_NAME), "%s\n" % pname), - (self._vsctl_mock("--columns=external_ids", "list", - "Interface", pname), external_ids), - (self._vsctl_mock("--columns=ofport", "list", "Interface", pname), - ofport_data), + (self._vsctl_mock("--columns=name,external_ids,ofport", "list", + "Interface"), external_ids), ] if is_xen: expected_calls_and_values.append( @@ -724,6 +723,35 @@ class OVS_Lib_Test(base.BaseTestCase): with testtools.ExpectedException(Exception): self.br.get_local_port_mac() + def test_get_vifs_by_ids(self): + db_list_res = [ + {'name': 'qvo1', 'ofport': 1, + 'external_ids': {'iface-id': 'pid1', 'attached-mac': '11'}}, + {'name': 'qvo2', 'ofport': 2, + 'external_ids': {'iface-id': 'pid2', 'attached-mac': '22'}}, + {'name': 'qvo3', 'ofport': 3, + 'external_ids': {'iface-id': 'pid3', 'attached-mac': '33'}}, + {'name': 'qvo4', 'ofport': -1, + 'external_ids': {'iface-id': 'pid4', 'attached-mac': '44'}}, + ] + self.br.db_list = mock.Mock(return_value=db_list_res) + self.br.ovsdb = mock.Mock() + self.br.ovsdb.list_ports.return_value.execute.return_value = [ + 'qvo1', 'qvo2', 'qvo4'] + by_id = self.br.get_vifs_by_ids(['pid1', 'pid2', 'pid3', + 'pid4', 'pid5']) + # pid3 isn't on bridge and pid4 doesn't have a valid ofport and pid5 + # isn't present in the db + self.assertIsNone(by_id['pid3']) + self.assertIsNone(by_id['pid4']) + self.assertIsNone(by_id['pid5']) + self.assertEqual('pid1', by_id['pid1'].vif_id) + self.assertEqual('qvo1', by_id['pid1'].port_name) + self.assertEqual(1, by_id['pid1'].ofport) + 
self.assertEqual('pid2', by_id['pid2'].vif_id) + self.assertEqual('qvo2', by_id['pid2'].port_name) + self.assertEqual(2, by_id['pid2'].ofport) + def _test_get_vif_port_by_id(self, iface_id, data, br_name=None, extra_calls_and_values=None): headings = ['external_ids', 'name', 'ofport'] diff --git a/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/test_ovs_neutron_agent.py b/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/test_ovs_neutron_agent.py index 61423751d28..d0a2a344811 100644 --- a/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/test_ovs_neutron_agent.py +++ b/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/test_ovs_neutron_agent.py @@ -114,6 +114,8 @@ class TestOvsNeutronAgent(object): cfg.CONF.set_default('quitting_rpc_timeout', 10, 'AGENT') cfg.CONF.set_default('prevent_arp_spoofing', False, 'AGENT') kwargs = self.mod_agent.create_agent_config_map(cfg.CONF) + mock.patch('neutron.agent.common.ovs_lib.OVSBridge.db_list', + return_value=[]).start() with mock.patch.object(self.mod_agent.OVSNeutronAgent, 'setup_integration_br'),\ @@ -172,7 +174,10 @@ class TestOvsNeutronAgent(object): mock.patch.object(self.agent, 'provision_local_vlan') as \ provision_local_vlan: int_br.get_vif_ports.return_value = [port] - int_br.db_get_val.side_effect = [local_vlan_map, tag] + int_br.db_list.return_value = [{ + 'name': port.port_name, 'other_config': local_vlan_map, + 'tag': tag + }] self.agent._restore_local_vlan_map() if tag: self.assertTrue(provision_local_vlan.called) @@ -341,8 +346,8 @@ class TestOvsNeutronAgent(object): 'get_devices_details_list', return_value=[details]),\ mock.patch.object(self.agent.int_br, - 'get_vif_port_by_id', - return_value=port),\ + 'get_vifs_by_ids', + return_value={details['device']: port}),\ mock.patch.object(self.agent, func_name) as func: skip_devs, need_bound_devices = ( self.agent.treat_devices_added_or_updated([{}], False)) @@ -383,8 +388,8 @@ class TestOvsNeutronAgent(object): 
'get_devices_details_list', return_value=[dev_mock]),\ mock.patch.object(self.agent.int_br, - 'get_vif_port_by_id', - return_value=None),\ + 'get_vifs_by_ids', + return_value={}),\ mock.patch.object(self.agent, 'treat_vif_port') as treat_vif_port: skip_devs = self.agent.treat_devices_added_or_updated([{}], False) @@ -410,8 +415,8 @@ class TestOvsNeutronAgent(object): 'get_devices_details_list', return_value=[fake_details_dict]),\ mock.patch.object(self.agent.int_br, - 'get_vif_port_by_id', - return_value=mock.MagicMock()),\ + 'get_vifs_by_ids', + return_value={'xxx': mock.MagicMock()}),\ mock.patch.object(self.agent, 'treat_vif_port') as treat_vif_port: skip_devs, need_bound_devices = ( @@ -449,6 +454,8 @@ class TestOvsNeutronAgent(object): self.agent, "treat_devices_added_or_updated", return_value=([], [])) as device_added_updated,\ + mock.patch.object(self.agent.int_br, "db_list", + return_value=[]),\ mock.patch.object(self.agent, "treat_devices_removed", return_value=False) as device_removed: @@ -1179,6 +1186,9 @@ class AncillaryBridgesTest(object): mock.patch('neutron.agent.common.ovs_lib.BaseOVS.' 'get_bridge_external_bridge_id', side_effect=pullup_side_effect),\ + mock.patch( + 'neutron.agent.common.ovs_lib.OVSBridge.' 'db_list', + return_value=[]),\ mock.patch( 'neutron.agent.common.ovs_lib.OVSBridge.' 'get_vif_ports', return_value=[]): @@ -1268,6 +1278,9 @@ class TestOvsDvrNeutronAgent(object): mock.patch('neutron.openstack.common.loopingcall.' 'FixedIntervalLoopingCall', new=MockFixedIntervalLoopingCall),\ + mock.patch( + 'neutron.agent.common.ovs_lib.OVSBridge.' 'db_list', + return_value=[]),\ mock.patch( 'neutron.agent.common.ovs_lib.OVSBridge.' 
'get_vif_ports', return_value=[]): diff --git a/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/test_ovs_tunnel.py b/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/test_ovs_tunnel.py index 6973842aab6..e0c8df7ef66 100644 --- a/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/test_ovs_tunnel.py +++ b/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/test_ovs_tunnel.py @@ -121,6 +121,7 @@ class TunnelTest(object): self.mock_int_bridge.add_patch_port.side_effect = ( lambda tap, peer: self.ovs_int_ofports[tap]) self.mock_int_bridge.get_vif_ports.return_value = [] + self.mock_int_bridge.db_list.return_value = [] self.mock_int_bridge.db_get_val.return_value = {} self.mock_map_tun_bridge = self.ovs_bridges[self.MAP_TUN_BRIDGE] @@ -208,6 +209,7 @@ class TunnelTest(object): ] self.mock_int_bridge_expected += [ mock.call.get_vif_ports(), + mock.call.db_list('Port', columns=['name', 'other_config', 'tag']) ] self.mock_tun_bridge_expected += [ @@ -246,7 +248,7 @@ class TunnelTest(object): def _verify_mock_call(self, mock_obj, expected): mock_obj.assert_has_calls(expected) - self.assertEqual(len(mock_obj.mock_calls), len(expected)) + self.assertEqual(expected, mock_obj.mock_calls) def _verify_mock_calls(self): self._verify_mock_call(self.mock_int_bridge_cls, @@ -599,6 +601,7 @@ class TunnelTestUseVethInterco(TunnelTest): ] self.mock_int_bridge_expected += [ mock.call.get_vif_ports(), + mock.call.db_list('Port', columns=['name', 'other_config', 'tag']) ] self.mock_tun_bridge_expected += [ mock.call.delete_flows(), From 9b23617111706ef6a89e8ba45457238acaea26e2 Mon Sep 17 00:00:00 2001 From: Kevin Benton Date: Mon, 29 Jun 2015 22:24:22 -0700 Subject: [PATCH 17/54] Increase ping count on ARP spoof test The other IPv4 tests all have a count of 2 to tolerate ping failures due to slow ARP response/interface setup/etc. This patch increases test_arp_spoof_allowed_address_pairs_0cidr to 2 to match. 
Closes-Bug: #1470234 Change-Id: I82bd8397672194f6162eef5392d4f19d57450552 --- neutron/tests/functional/agent/test_ovs_flows.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/neutron/tests/functional/agent/test_ovs_flows.py b/neutron/tests/functional/agent/test_ovs_flows.py index 0a1a932a78e..5d73ea1a5f3 100644 --- a/neutron/tests/functional/agent/test_ovs_flows.py +++ b/neutron/tests/functional/agent/test_ovs_flows.py @@ -166,7 +166,7 @@ class _ARPSpoofTestCase(object): '1.2.3.4']) self.src_p.addr.add('%s/24' % self.src_addr) self.dst_p.addr.add('%s/24' % self.dst_addr) - net_helpers.assert_ping(self.src_namespace, self.dst_addr) + net_helpers.assert_ping(self.src_namespace, self.dst_addr, count=2) def test_arp_spoof_disable_port_security(self): # block first and then disable port security to make sure old rules From 1ac7581c6b7d343d2ee22e6c562871c0465d9735 Mon Sep 17 00:00:00 2001 From: Livnat Peer Date: Tue, 30 Jun 2015 16:25:57 +0300 Subject: [PATCH 18/54] fix spelling mistakes Change-Id: If063f111fa42a6644a1dadc7f0c0b9bbfb359294 --- doc/source/devref/callbacks.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/source/devref/callbacks.rst b/doc/source/devref/callbacks.rst index baaa98a83b7..4c6e6e71f24 100644 --- a/doc/source/devref/callbacks.rst +++ b/doc/source/devref/callbacks.rst @@ -69,7 +69,7 @@ do whatever they are supposed to do. In a callback-less world this would work li C->my_random_very_difficult_to_remember_method_about_router_created() If B and/or C change, things become sour. In a callback-based world, things become a lot -more uniform and straightward: +more uniform and straightforward: :: @@ -319,7 +319,7 @@ Is the registry thread-safe? Short answer is no: it is not safe to make mutations while callbacks are being called (more details as to why can be found `here `_). 
- A mutation could happen if a 'subscribe'/'unsuscribe' operation interleaves with the execution + A mutation could happen if a 'subscribe'/'unsubscribe' operation interleaves with the execution of the notify loop. Albeit there is a possibility that things may end up in a bad state, the registry works correctly under the assumption that subscriptions happen at the very beginning of the life of the process and that the unsubscriptions (if any) take place at the very end. From 21ff82d9d33313bb88e5970c7b1829a65f195d33 Mon Sep 17 00:00:00 2001 From: Rossella Sblendido Date: Fri, 5 Dec 2014 17:34:23 +0100 Subject: [PATCH 19/54] Adds base in-tree functional testing of the ovs_neutron_agent Base setup and utility methods for functional testing of the OVS L2 agent. Partially-Implements: blueprint restructure-l2-agent Co-Authored-By: Rossella Sblendido Change-Id: I5b3149b2b8502b9b9a36d3e20d909872cc17f8e8 --- .../functional/agent/test_l2_ovs_agent.py | 298 ++++++++++++++++++ 1 file changed, 298 insertions(+) create mode 100644 neutron/tests/functional/agent/test_l2_ovs_agent.py diff --git a/neutron/tests/functional/agent/test_l2_ovs_agent.py b/neutron/tests/functional/agent/test_l2_ovs_agent.py new file mode 100644 index 00000000000..75614b20f3c --- /dev/null +++ b/neutron/tests/functional/agent/test_l2_ovs_agent.py @@ -0,0 +1,298 @@ +# Copyright (c) 2015 Red Hat, Inc. +# Copyright (c) 2015 SUSE Linux Products GmbH +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +import eventlet +import mock +import random + +from oslo_config import cfg +from oslo_log import log as logging +from oslo_utils import uuidutils + +from neutron.agent.common import config as agent_config +from neutron.agent.common import ovs_lib +from neutron.agent.linux import interface +from neutron.agent.linux import polling +from neutron.agent.linux import utils as agent_utils +from neutron.common import config as common_config +from neutron.common import constants as n_const +from neutron.common import utils +from neutron.plugins.common import constants as p_const +from neutron.plugins.ml2.drivers.openvswitch.agent.common import config \ + as ovs_config +from neutron.plugins.ml2.drivers.openvswitch.agent.common import constants +from neutron.plugins.ml2.drivers.openvswitch.agent.openflow.ovs_ofctl \ + import br_int +from neutron.plugins.ml2.drivers.openvswitch.agent.openflow.ovs_ofctl \ + import br_phys +from neutron.plugins.ml2.drivers.openvswitch.agent.openflow.ovs_ofctl \ + import br_tun +from neutron.plugins.ml2.drivers.openvswitch.agent import ovs_neutron_agent \ + as ovs_agent +from neutron.tests.functional.agent.linux import base + +LOG = logging.getLogger(__name__) + + +class OVSAgentTestFramework(base.BaseOVSLinuxTestCase): + + def setUp(self): + super(OVSAgentTestFramework, self).setUp() + agent_rpc = ('neutron.plugins.ml2.drivers.openvswitch.agent.' 
+ 'ovs_neutron_agent.OVSPluginApi') + mock.patch(agent_rpc).start() + mock.patch('neutron.agent.rpc.PluginReportStateAPI').start() + self.br_int = base.get_rand_name(n_const.DEVICE_NAME_MAX_LEN, + prefix='br-int') + self.br_tun = base.get_rand_name(n_const.DEVICE_NAME_MAX_LEN, + prefix='br-tun') + patch_name_len = n_const.DEVICE_NAME_MAX_LEN - len("-patch-tun") + self.patch_tun = "%s-patch-tun" % self.br_int[:patch_name_len] + self.patch_int = "%s-patch-int" % self.br_tun[:patch_name_len] + self.ovs = ovs_lib.BaseOVS() + self.config = self._configure_agent() + self.driver = interface.OVSInterfaceDriver(self.config) + + def _get_config_opts(self): + config = cfg.ConfigOpts() + config.register_opts(common_config.core_opts) + config.register_opts(interface.OPTS) + config.register_opts(ovs_config.ovs_opts, "OVS") + config.register_opts(ovs_config.agent_opts, "AGENT") + agent_config.register_interface_driver_opts_helper(config) + agent_config.register_agent_state_opts_helper(config) + return config + + def _configure_agent(self): + config = self._get_config_opts() + config.set_override( + 'interface_driver', + 'neutron.agent.linux.interface.OVSInterfaceDriver') + config.set_override('integration_bridge', self.br_int, "OVS") + config.set_override('ovs_integration_bridge', self.br_int) + config.set_override('tunnel_bridge', self.br_tun, "OVS") + config.set_override('int_peer_patch_port', self.patch_tun, "OVS") + config.set_override('tun_peer_patch_port', self.patch_int, "OVS") + config.set_override('host', 'ovs-agent') + return config + + def _bridge_classes(self): + return { + 'br_int': br_int.OVSIntegrationBridge, + 'br_phys': br_phys.OVSPhysicalBridge, + 'br_tun': br_tun.OVSTunnelBridge + } + + def create_agent(self, create_tunnels=True): + if create_tunnels: + tunnel_types = [p_const.TYPE_VXLAN] + else: + tunnel_types = None + local_ip = '192.168.10.1' + bridge_mappings = {'physnet': self.br_int} + agent = ovs_agent.OVSNeutronAgent(self._bridge_classes(), + 
self.br_int, self.br_tun, + local_ip, bridge_mappings, + polling_interval=1, + tunnel_types=tunnel_types, + prevent_arp_spoofing=False, + conf=self.config) + self.addCleanup(self.ovs.delete_bridge, self.br_int) + if tunnel_types: + self.addCleanup(self.ovs.delete_bridge, self.br_tun) + agent.sg_agent = mock.Mock() + return agent + + def start_agent(self, agent): + polling_manager = polling.InterfacePollingMinimizer() + self.addCleanup(polling_manager.stop) + polling_manager.start() + agent_utils.wait_until_true( + polling_manager._monitor.is_active) + agent.check_ovs_status = mock.Mock( + return_value=constants.OVS_NORMAL) + t = eventlet.spawn(agent.rpc_loop, polling_manager) + + def stop_agent(agent, rpc_loop_thread): + agent.run_daemon_loop = False + rpc_loop_thread.wait() + + self.addCleanup(stop_agent, agent, t) + + def _bind_ports(self, ports, network, agent): + devices = [] + for port in ports: + dev = OVSAgentTestFramework._get_device_details(port, network) + vif_name = port.get('vif_name') + vif_id = uuidutils.generate_uuid(), + vif_port = ovs_lib.VifPort( + vif_name, "%s" % vif_id, 'id-%s' % vif_id, + port.get('mac_address'), agent.int_br) + dev['vif_port'] = vif_port + devices.append(dev) + agent._bind_devices(devices) + + def _create_test_port_dict(self): + return {'id': uuidutils.generate_uuid(), + 'mac_address': utils.get_random_mac( + 'fa:16:3e:00:00:00'.split(':')), + 'fixed_ips': [{ + 'ip_address': '10.%d.%d.%d' % ( + random.randint(3, 254), + random.randint(3, 254), + random.randint(3, 254))}], + 'vif_name': base.get_rand_name( + self.driver.DEV_NAME_LEN, self.driver.DEV_NAME_PREFIX)} + + def _create_test_network_dict(self): + return {'id': uuidutils.generate_uuid(), + 'tenant_id': uuidutils.generate_uuid()} + + def _plug_ports(self, network, ports, agent, ip_len=24): + for port in ports: + self.driver.plug( + network.get('id'), port.get('id'), port.get('vif_name'), + port.get('mac_address'), + agent.int_br.br_name, namespace=None) + ip_cidrs = 
["%s/%s" % (port.get('fixed_ips')[0][ + 'ip_address'], ip_len)] + self.driver.init_l3(port.get('vif_name'), ip_cidrs, namespace=None) + + @staticmethod + def _get_device_details(port, network): + dev = {'device': port['id'], + 'port_id': port['id'], + 'network_id': network['id'], + 'network_type': 'vlan', + 'physical_network': 'physnet', + 'segmentation_id': 1, + 'fixed_ips': port['fixed_ips'], + 'device_owner': 'compute', + 'admin_state_up': True} + return dev + + def assert_bridge(self, br, exists=True): + self.assertEqual(exists, self.ovs.bridge_exists(br)) + + def assert_patch_ports(self, agent): + + def get_peer(port): + return agent.int_br.db_get_val( + 'Interface', port, 'options', check_error=True) + + agent_utils.wait_until_true( + lambda: get_peer(self.patch_int) == {'peer': self.patch_tun}) + agent_utils.wait_until_true( + lambda: get_peer(self.patch_tun) == {'peer': self.patch_int}) + + def assert_bridge_ports(self): + for port in [self.patch_tun, self.patch_int]: + self.assertTrue(self.ovs.port_exists(port)) + + def assert_no_vlan_tags(self, ports, agent): + for port in ports: + res = agent.int_br.db_get_val('Port', port.get('vif_name'), 'tag') + self.assertEqual([], res) + + def assert_vlan_tags(self, ports, agent): + for port in ports: + res = agent.int_br.db_get_val('Port', port.get('vif_name'), 'tag') + self.assertTrue(res) + + +class TestOVSAgent(OVSAgentTestFramework): + + def _expected_plugin_rpc_call(self, call, expected_devices): + """Helper to check expected rpc call are received + :param call: The call to check + :param expected_devices The device for which call is expected + """ + args = (args[0][1] for args in call.call_args_list) + return not (set(expected_devices) - set(args)) + + def _create_ports(self, network, agent): + ports = [] + for x in range(3): + ports.append(self._create_test_port_dict()) + + def mock_device_details(context, devices, agent_id, host=None): + details = [] + for port in ports: + if port['id'] in devices: + dev = 
OVSAgentTestFramework._get_device_details( + port, network) + details.append(dev) + return details + + agent.plugin_rpc.get_devices_details_list.side_effect = ( + mock_device_details) + return ports + + def test_port_creation_and_deletion(self): + agent = self.create_agent() + self.start_agent(agent) + network = self._create_test_network_dict() + ports = self._create_ports(network, agent) + self._plug_ports(network, ports, agent) + up_ports_ids = [p['id'] for p in ports] + agent_utils.wait_until_true( + lambda: self._expected_plugin_rpc_call( + agent.plugin_rpc.update_device_up, up_ports_ids)) + down_ports_ids = [p['id'] for p in ports] + for port in ports: + agent.int_br.delete_port(port['vif_name']) + agent_utils.wait_until_true( + lambda: self._expected_plugin_rpc_call( + agent.plugin_rpc.update_device_down, down_ports_ids)) + + def test_resync_devices_set_up_after_exception(self): + agent = self.create_agent() + self.start_agent(agent) + network = self._create_test_network_dict() + ports = self._create_ports(network, agent) + agent.plugin_rpc.update_device_up.side_effect = [ + Exception('Exception to trigger resync'), + None, None, None] + self._plug_ports(network, ports, agent) + ports_ids = [p['id'] for p in ports] + agent_utils.wait_until_true( + lambda: self._expected_plugin_rpc_call( + agent.plugin_rpc.update_device_up, ports_ids)) + + def test_port_vlan_tags(self): + agent = self.create_agent() + self.start_agent(agent) + ports = [] + for x in range(3): + ports.append(self._create_test_port_dict()) + network = self._create_test_network_dict() + self._plug_ports(network, ports, agent) + agent.provision_local_vlan(network['id'], 'vlan', 'physnet', 1) + self.assert_no_vlan_tags(ports, agent) + self._bind_ports(ports, network, agent) + self.assert_vlan_tags(ports, agent) + + def test_assert_bridges_ports_vxlan(self): + agent = self.create_agent() + self.assertTrue(self.ovs.bridge_exists(self.br_int)) + self.assertTrue(self.ovs.bridge_exists(self.br_tun)) + 
self.assert_bridge_ports() + self.assert_patch_ports(agent) + + def test_assert_bridges_ports_no_tunnel(self): + self.create_agent(create_tunnels=False) + self.assertTrue(self.ovs.bridge_exists(self.br_int)) + self.assertFalse(self.ovs.bridge_exists(self.br_tun)) From 2bbfe6f8253659ebf6951b6426ffc446baacd420 Mon Sep 17 00:00:00 2001 From: Russell Bryant Date: Tue, 26 May 2015 17:07:37 -0400 Subject: [PATCH 20/54] Move windows requirements to requirements.txt Commit 276028cca26af573c14938255e40c58358eabd4a added these requirements to setup.py from a custom build hook. These requirements can now be expressed in requirements.txt. We need to move them there so that the global requirements sync job can continue to keep setup.py in sync with the global version. Depends-on: I2369971d306c10dc39a1b89698cec95cf7551d07 Change-Id: I3c07c279d33f6aed46c3a97dd9ba81251e51429a --- requirements.txt | 4 ++++ setup.py | 8 +------- 2 files changed, 5 insertions(+), 7 deletions(-) diff --git a/requirements.txt b/requirements.txt index 9d4e1c260bd..ffac4b9eeec 100644 --- a/requirements.txt +++ b/requirements.txt @@ -36,3 +36,7 @@ oslo.serialization>=1.4.0 # Apache-2.0 oslo.utils>=1.6.0 # Apache-2.0 python-novaclient>=2.22.0 + +# Windows-only requirements +pywin32;sys_platform=='win32' +wmi;sys_platform=='win32' diff --git a/setup.py b/setup.py index 09b206ed561..056c16c2b8f 100644 --- a/setup.py +++ b/setup.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python # Copyright (c) 2013 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -27,9 +26,4 @@ except ImportError: setuptools.setup( setup_requires=['pbr'], - pbr=True, - # TODO(lifeless): Once pbr supports markers in requirements.txt, move this - # there, so that update.py can see it. - extras_require={ - ':sys_platform=="win32"': ['pywin32', 'wmi'] - }) + pbr=True) From 8dd8a7d93564168b98fa2350eedf56acede42b0f Mon Sep 17 00:00:00 2001 From: "Sean M. 
Collins" Date: Tue, 30 Jun 2015 12:06:07 -0400 Subject: [PATCH 21/54] Remove bridge cleanup call Remove the bridge cleanup call to delete bridges, since we are seeing race conditions where bridges are deleted, then new interfaces are created and are attempting to plug into the bridge before it is recreated. Change-Id: I4ccc96566a5770384eacbbdc492bf09a514f5b31 Related-Bug: #1328546 --- .../ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py | 1 - 1 file changed, 1 deletion(-) diff --git a/neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py b/neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py index 66be308a29a..1e21db74c7a 100644 --- a/neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py +++ b/neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py @@ -948,7 +948,6 @@ class LinuxBridgeNeutronAgentRPC(service.Service): LOG.info(_LI("Port %s updated."), device) else: LOG.debug("Device %s not defined on plugin", device) - self.br_mgr.remove_empty_bridges() return resync def scan_devices(self, previous, sync): From 3da491cf5fe629559281507f65f12a0e34eaedf7 Mon Sep 17 00:00:00 2001 From: Assaf Muller Date: Tue, 30 Jun 2015 13:22:17 -0400 Subject: [PATCH 22/54] Disable pylint job Disabling pylint until it gets unbroken. Pylint 1.4.1 is using logilab-common, which had a release on the 30th, breaking pylint. Pylint developers are planning a logilab-common release tomorrow which should unbreak pylint once again, at which point I'll re-enable pylint. 
Change-Id: I5d8aaab8192168946c2a0b74abc1a56848ca51a2 Related-Bug: #1470186 --- tox.ini | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tox.ini b/tox.ini index dd78040a24f..12f2b91fbf3 100644 --- a/tox.ini +++ b/tox.ini @@ -85,7 +85,7 @@ commands= {toxinidir}/tools/check_unit_test_structure.sh # Checks for coding and style guidelines flake8 - sh ./tools/coding-checks.sh --pylint '{posargs}' + # sh ./tools/coding-checks.sh --pylint '{posargs}' neutron-db-manage --config-file neutron/tests/etc/neutron.conf check_migration whitelist_externals = sh From 7344e3ab8e3d4fd8af5b6f85184a0c093d88b6a4 Mon Sep 17 00:00:00 2001 From: Robert Collins Date: Tue, 30 Jun 2015 09:40:17 +1200 Subject: [PATCH 23/54] Improve fixture usage. There were two broad issues with fixtures. Firstly, the 'SafeFixture' workaround for resource leaks in fixtures <1.3 is not needed if we depend on fixtures>=1.3.1. While testtools may raise a TypeError when trying to query a fixture that failed to setup, this is only ever a cascading failure - it will not cause tests to fail, cause leaks, or cause tests to incorrectly pass. That will be fixed in testtools soon to stop it happening (but as it cannot affect whether a test passes or fails or leaks happen there is no reason to wait for that). Leaks are seen with fixtures 1.3.0 still because eventlet raises a BaseException subclass rather than an Exception subclass, and fixtures 1.3.0 didn't handle that - 1.3.1 does. Secondly, some of the fixtures had race conditions where things were started and then cleanups scheduled. Where possible I've fixed those, but some of them require more significant work to fully address. 
Change-Id: I3290712f7274970defda19263f4955e3c78e5ed6 Depends-On: I8c01506894ec0a92b53bc0e4ad14767f2dd6a6b3 Closes-bug: #1453888 --- neutron/tests/base.py | 11 ++-- neutron/tests/common/machine_fixtures.py | 13 +++-- neutron/tests/common/net_helpers.py | 47 ++++++++-------- neutron/tests/fullstack/config_fixtures.py | 12 ++--- neutron/tests/fullstack/fullstack_fixtures.py | 40 +++++--------- neutron/tests/fullstack/test_l3_agent.py | 4 +- .../tests/functional/agent/linux/helpers.py | 7 ++- neutron/tests/retargetable/client_fixtures.py | 8 +-- neutron/tests/tools.py | 53 +++---------------- neutron/tests/unit/plugins/ml2/test_plugin.py | 8 +-- neutron/tests/unit/testlib_api.py | 7 ++- test-requirements.txt | 2 +- 12 files changed, 73 insertions(+), 139 deletions(-) diff --git a/neutron/tests/base.py b/neutron/tests/base.py index 5e777804b84..4cb79914aaa 100644 --- a/neutron/tests/base.py +++ b/neutron/tests/base.py @@ -244,10 +244,10 @@ class DietTestCase(testtools.TestCase): {'key': k, 'exp': v, 'act': actual_superset[k]}) -class ProcessMonitorFixture(tools.SafeFixture): +class ProcessMonitorFixture(fixtures.Fixture): """Test fixture to capture and cleanup any spawn process monitor.""" - def setUp(self): - super(ProcessMonitorFixture, self).setUp() + + def _setUp(self): self.old_callable = ( external_process.ProcessMonitor._spawn_checking_thread) p = mock.patch("neutron.agent.linux.external_process.ProcessMonitor." @@ -410,14 +410,13 @@ class BaseTestCase(DietTestCase): cfg.CONF.set_override("notification_driver", notification_driver) -class PluginFixture(tools.SafeFixture): +class PluginFixture(fixtures.Fixture): def __init__(self, core_plugin=None): super(PluginFixture, self).__init__() self.core_plugin = core_plugin - def setUp(self): - super(PluginFixture, self).setUp() + def _setUp(self): self.dhcp_periodic_p = mock.patch( 'neutron.db.agentschedulers_db.DhcpAgentSchedulerDbMixin.' 
'start_periodic_dhcp_agent_status_check') diff --git a/neutron/tests/common/machine_fixtures.py b/neutron/tests/common/machine_fixtures.py index de1089d1aa5..6e46c879b79 100644 --- a/neutron/tests/common/machine_fixtures.py +++ b/neutron/tests/common/machine_fixtures.py @@ -13,12 +13,13 @@ # under the License. # +import fixtures + from neutron.agent.linux import ip_lib from neutron.tests.common import net_helpers -from neutron.tests import tools -class FakeMachine(tools.SafeFixture): +class FakeMachine(fixtures.Fixture): """Create a fake machine. :ivar bridge: bridge on which the fake machine is bound @@ -42,8 +43,7 @@ class FakeMachine(tools.SafeFixture): self.ip = self.ip_cidr.partition('/')[0] self.gateway_ip = gateway_ip - def setUp(self): - super(FakeMachine, self).setUp() + def _setUp(self): ns_fixture = self.useFixture( net_helpers.NamespaceFixture()) self.namespace = ns_fixture.name @@ -66,7 +66,7 @@ class FakeMachine(tools.SafeFixture): net_helpers.assert_no_ping(self.namespace, dst_ip) -class PeerMachines(tools.SafeFixture): +class PeerMachines(fixtures.Fixture): """Create 'amount' peered machines on an ip_cidr. :ivar bridge: bridge on which peer machines are bound @@ -85,8 +85,7 @@ class PeerMachines(tools.SafeFixture): self.ip_cidr = ip_cidr or self.CIDR self.gateway_ip = gateway_ip - def setUp(self): - super(PeerMachines, self).setUp() + def _setUp(self): self.machines = [] for index in range(self.AMOUNT): diff --git a/neutron/tests/common/net_helpers.py b/neutron/tests/common/net_helpers.py index 170d1b3a9b0..3fb50838dee 100644 --- a/neutron/tests/common/net_helpers.py +++ b/neutron/tests/common/net_helpers.py @@ -23,6 +23,7 @@ import shlex import signal import subprocess +import fixtures import netaddr from oslo_utils import uuidutils import six @@ -328,7 +329,7 @@ class NetcatTester(object): setattr(self, proc_attr, None) -class NamespaceFixture(tools.SafeFixture): +class NamespaceFixture(fixtures.Fixture): """Create a namespace. 
:ivar ip_wrapper: created namespace @@ -341,27 +342,25 @@ class NamespaceFixture(tools.SafeFixture): super(NamespaceFixture, self).__init__() self.prefix = prefix - def setUp(self): - super(NamespaceFixture, self).setUp() + def _setUp(self): ip = ip_lib.IPWrapper() self.name = self.prefix + uuidutils.generate_uuid() - self.ip_wrapper = ip.ensure_namespace(self.name) self.addCleanup(self.destroy) + self.ip_wrapper = ip.ensure_namespace(self.name) def destroy(self): if self.ip_wrapper.netns.exists(self.name): self.ip_wrapper.netns.delete(self.name) -class VethFixture(tools.SafeFixture): +class VethFixture(fixtures.Fixture): """Create a veth. :ivar ports: created veth ports :type ports: IPDevice 2-uplet """ - def setUp(self): - super(VethFixture, self).setUp() + def _setUp(self): ip_wrapper = ip_lib.IPWrapper() self.ports = common_base.create_resource( @@ -392,7 +391,7 @@ class VethFixture(tools.SafeFixture): @six.add_metaclass(abc.ABCMeta) -class PortFixture(tools.SafeFixture): +class PortFixture(fixtures.Fixture): """Create a port. :ivar port: created port @@ -410,8 +409,8 @@ class PortFixture(tools.SafeFixture): pass @abc.abstractmethod - def setUp(self): - super(PortFixture, self).setUp() + def _setUp(self): + super(PortFixture, self)._setUp() if not self.bridge: self.bridge = self.useFixture(self._create_bridge_fixture()).bridge @@ -427,7 +426,7 @@ class PortFixture(tools.SafeFixture): tools.fail('Unexpected bridge type: %s' % type(bridge)) -class OVSBridgeFixture(tools.SafeFixture): +class OVSBridgeFixture(fixtures.Fixture): """Create an OVS bridge. 
:ivar prefix: bridge name prefix @@ -440,8 +439,7 @@ class OVSBridgeFixture(tools.SafeFixture): super(OVSBridgeFixture, self).__init__() self.prefix = prefix - def setUp(self): - super(OVSBridgeFixture, self).setUp() + def _setUp(self): ovs = ovs_lib.BaseOVS() self.bridge = common_base.create_resource(self.prefix, ovs.add_bridge) self.addCleanup(self.bridge.destroy) @@ -458,8 +456,8 @@ class OVSPortFixture(PortFixture): def _create_bridge_fixture(self): return OVSBridgeFixture() - def setUp(self): - super(OVSPortFixture, self).setUp() + def _setUp(self): + super(OVSPortFixture, self)._setUp() port_name = common_base.create_resource(PORT_PREFIX, self.create_port) self.addCleanup(self.bridge.delete_port, port_name) @@ -475,7 +473,7 @@ class OVSPortFixture(PortFixture): return name -class LinuxBridgeFixture(tools.SafeFixture): +class LinuxBridgeFixture(fixtures.Fixture): """Create a linux bridge. :ivar bridge: created bridge @@ -484,9 +482,7 @@ class LinuxBridgeFixture(tools.SafeFixture): :type namespace: str """ - def setUp(self): - super(LinuxBridgeFixture, self).setUp() - + def _setUp(self): self.namespace = self.useFixture(NamespaceFixture()).name self.bridge = common_base.create_resource( BR_PREFIX, @@ -509,8 +505,8 @@ class LinuxBridgePortFixture(PortFixture): def _create_bridge_fixture(self): return LinuxBridgeFixture() - def setUp(self): - super(LinuxBridgePortFixture, self).setUp() + def _setUp(self): + super(LinuxBridgePortFixture, self)._setUp() self.port, self.br_port = self.useFixture(VethFixture()).ports # bridge side @@ -539,15 +535,14 @@ class VethBridge(object): len(self.ports)) -class VethBridgeFixture(tools.SafeFixture): +class VethBridgeFixture(fixtures.Fixture): """Simulate a bridge with a veth. 
:ivar bridge: created bridge :type bridge: FakeBridge """ - def setUp(self): - super(VethBridgeFixture, self).setUp() + def _setUp(self): ports = self.useFixture(VethFixture()).ports self.bridge = VethBridge(ports) @@ -562,8 +557,8 @@ class VethPortFixture(PortFixture): def _create_bridge_fixture(self): return VethBridgeFixture() - def setUp(self): - super(VethPortFixture, self).setUp() + def _setUp(self): + super(VethPortFixture, self)._setUp() self.port = self.bridge.allocate_port() ns_ip_wrapper = ip_lib.IPWrapper(self.namespace) diff --git a/neutron/tests/fullstack/config_fixtures.py b/neutron/tests/fullstack/config_fixtures.py index f07993cfaa2..9ab6f1c5306 100644 --- a/neutron/tests/fullstack/config_fixtures.py +++ b/neutron/tests/fullstack/config_fixtures.py @@ -15,13 +15,13 @@ import os.path import tempfile +import fixtures import six from neutron.common import constants from neutron.tests import base from neutron.tests.common import helpers as c_helpers from neutron.tests.common import net_helpers -from neutron.tests import tools class ConfigDict(base.AttributeDict): @@ -41,7 +41,7 @@ class ConfigDict(base.AttributeDict): self.convert_to_attr_dict(value) -class ConfigFileFixture(tools.SafeFixture): +class ConfigFileFixture(fixtures.Fixture): """A fixture that knows how to translate configurations to files. :param base_filename: the filename to use on disk. @@ -55,8 +55,7 @@ class ConfigFileFixture(tools.SafeFixture): self.config = config self.temp_dir = temp_dir - def setUp(self): - super(ConfigFileFixture, self).setUp() + def _setUp(self): config_parser = self.dict_to_config_parser(self.config) # Need to randomly generate a unique folder to put the file in self.filename = os.path.join(self.temp_dir, self.base_filename) @@ -74,7 +73,7 @@ class ConfigFileFixture(tools.SafeFixture): return config_parser -class ConfigFixture(tools.SafeFixture): +class ConfigFixture(fixtures.Fixture): """A fixture that holds an actual Neutron configuration. 
Note that 'self.config' is intended to only be updated once, during @@ -88,8 +87,7 @@ class ConfigFixture(tools.SafeFixture): self.temp_dir = temp_dir self.base_filename = base_filename - def setUp(self): - super(ConfigFixture, self).setUp() + def _setUp(self): cfg_fixture = ConfigFileFixture( self.base_filename, self.config, self.temp_dir) self.useFixture(cfg_fixture) diff --git a/neutron/tests/fullstack/fullstack_fixtures.py b/neutron/tests/fullstack/fullstack_fixtures.py index a1b539b492a..690891cd550 100644 --- a/neutron/tests/fullstack/fullstack_fixtures.py +++ b/neutron/tests/fullstack/fullstack_fixtures.py @@ -28,7 +28,6 @@ from neutron.agent.linux import utils from neutron.tests import base from neutron.tests.common import net_helpers from neutron.tests.fullstack import config_fixtures -from neutron.tests import tools LOG = logging.getLogger(__name__) @@ -36,7 +35,7 @@ LOG = logging.getLogger(__name__) DEFAULT_LOG_DIR = '/tmp/fullstack-logs/' -class ProcessFixture(tools.SafeFixture): +class ProcessFixture(fixtures.Fixture): def __init__(self, test_name, process_name, exec_name, config_filenames): super(ProcessFixture, self).__init__() self.test_name = test_name @@ -45,8 +44,8 @@ class ProcessFixture(tools.SafeFixture): self.config_filenames = config_filenames self.process = None - def setUp(self): - super(ProcessFixture, self).setUp() + def _setUp(self): + self.addCleanup(self.stop) self.start() def start(self): @@ -65,15 +64,10 @@ class ProcessFixture(tools.SafeFixture): def stop(self): self.process.stop(block=True) - def cleanUp(self, *args, **kwargs): - self.stop() - super(ProcessFixture, self).cleanUp(*args, **kwargs) +class RabbitmqEnvironmentFixture(fixtures.Fixture): -class RabbitmqEnvironmentFixture(tools.SafeFixture): - def setUp(self): - super(RabbitmqEnvironmentFixture, self).setUp() - + def _setUp(self): self.user = base.get_rand_name(prefix='user') self.password = base.get_rand_name(prefix='pass') self.vhost = base.get_rand_name(prefix='vhost') 
@@ -93,14 +87,12 @@ class RabbitmqEnvironmentFixture(tools.SafeFixture): utils.execute(cmd, run_as_root=True) -class FullstackFixture(tools.SafeFixture): +class FullstackFixture(fixtures.Fixture): def __init__(self): super(FullstackFixture, self).__init__() self.test_name = None - def setUp(self): - super(FullstackFixture, self).setUp() - + def _setUp(self): self.temp_dir = self.useFixture(fixtures.TempDir()).path rabbitmq_environment = self.useFixture(RabbitmqEnvironmentFixture()) @@ -120,7 +112,7 @@ class FullstackFixture(tools.SafeFixture): return False -class NeutronServerFixture(tools.SafeFixture): +class NeutronServerFixture(fixtures.Fixture): NEUTRON_SERVER = "neutron-server" @@ -130,9 +122,7 @@ class NeutronServerFixture(tools.SafeFixture): self.temp_dir = temp_dir self.rabbitmq_environment = rabbitmq_environment - def setUp(self): - super(NeutronServerFixture, self).setUp() - + def _setUp(self): self.neutron_cfg_fixture = config_fixtures.NeutronConfigFixture( self.temp_dir, cfg.CONF.database.connection, self.rabbitmq_environment) @@ -169,7 +159,7 @@ class NeutronServerFixture(tools.SafeFixture): return client.Client(auth_strategy="noauth", endpoint_url=url) -class OVSAgentFixture(tools.SafeFixture): +class OVSAgentFixture(fixtures.Fixture): NEUTRON_OVS_AGENT = "neutron-openvswitch-agent" @@ -182,9 +172,7 @@ class OVSAgentFixture(tools.SafeFixture): self.neutron_config = self.neutron_cfg_fixture.config self.plugin_config = self.plugin_cfg_fixture.config - def setUp(self): - super(OVSAgentFixture, self).setUp() - + def _setUp(self): self.useFixture(net_helpers.OVSBridgeFixture(self._get_br_int_name())) self.useFixture(net_helpers.OVSBridgeFixture(self._get_br_phys_name())) @@ -204,7 +192,7 @@ class OVSAgentFixture(tools.SafeFixture): return self.plugin_config.ovs.bridge_mappings.split(':')[1] -class L3AgentFixture(tools.SafeFixture): +class L3AgentFixture(fixtures.Fixture): NEUTRON_L3_AGENT = "neutron-l3-agent" @@ -217,9 +205,7 @@ class 
L3AgentFixture(tools.SafeFixture): self.neutron_config = self.neutron_cfg_fixture.config self.integration_bridge_name = integration_bridge_name - def setUp(self): - super(L3AgentFixture, self).setUp() - + def _setUp(self): self.plugin_cfg_fixture = config_fixtures.L3ConfigFixture( self.temp_dir, self.integration_bridge_name) self.useFixture(self.plugin_cfg_fixture) diff --git a/neutron/tests/fullstack/test_l3_agent.py b/neutron/tests/fullstack/test_l3_agent.py index e7a37457efb..e12e9410df7 100644 --- a/neutron/tests/fullstack/test_l3_agent.py +++ b/neutron/tests/fullstack/test_l3_agent.py @@ -23,8 +23,8 @@ from neutron.tests.fullstack import fullstack_fixtures as f_fixtures class SingleNodeEnvironment(f_fixtures.FullstackFixture): - def setUp(self): - super(SingleNodeEnvironment, self).setUp() + def _setUp(self): + super(SingleNodeEnvironment, self)._setUp() neutron_config = self.neutron_server.neutron_cfg_fixture ml2_config = self.neutron_server.plugin_cfg_fixture diff --git a/neutron/tests/functional/agent/linux/helpers.py b/neutron/tests/functional/agent/linux/helpers.py index f7dc76099e1..9980293aa75 100644 --- a/neutron/tests/functional/agent/linux/helpers.py +++ b/neutron/tests/functional/agent/linux/helpers.py @@ -14,10 +14,10 @@ # under the License. 
import os -from neutron.tests import tools +import fixtures -class RecursivePermDirFixture(tools.SafeFixture): +class RecursivePermDirFixture(fixtures.Fixture): """Ensure at least perms permissions on directory and ancestors.""" def __init__(self, directory, perms): @@ -25,8 +25,7 @@ class RecursivePermDirFixture(tools.SafeFixture): self.directory = directory self.least_perms = perms - def setUp(self): - super(RecursivePermDirFixture, self).setUp() + def _setUp(self): previous_directory = None current_directory = self.directory while previous_directory != current_directory: diff --git a/neutron/tests/retargetable/client_fixtures.py b/neutron/tests/retargetable/client_fixtures.py index c9c7cbc5803..1161842331e 100644 --- a/neutron/tests/retargetable/client_fixtures.py +++ b/neutron/tests/retargetable/client_fixtures.py @@ -17,18 +17,18 @@ Neutron API via different methods. import abc +import fixtures import six from neutron.common import exceptions as q_exc from neutron import context from neutron import manager from neutron.tests import base -from neutron.tests import tools from neutron.tests.unit import testlib_api @six.add_metaclass(abc.ABCMeta) -class AbstractClientFixture(tools.SafeFixture): +class AbstractClientFixture(fixtures.Fixture): """ Base class for a client that can interact the neutron api in some manner. 
@@ -71,8 +71,8 @@ class PluginClientFixture(AbstractClientFixture): super(PluginClientFixture, self).__init__() self.plugin_conf = plugin_conf - def setUp(self): - super(PluginClientFixture, self).setUp() + def _setUp(self): + super(PluginClientFixture, self)._setUp() self.useFixture(testlib_api.SqlFixture()) self.useFixture(self.plugin_conf) self.useFixture(base.PluginFixture(self.plugin_conf.plugin_name)) diff --git a/neutron/tests/tools.py b/neutron/tests/tools.py index f28bc4983f8..1a5183489d6 100644 --- a/neutron/tests/tools.py +++ b/neutron/tests/tools.py @@ -16,52 +16,12 @@ import warnings import fixtures -from oslo_utils import excutils import six from neutron.api.v2 import attributes -class SafeFixture(fixtures.Fixture): - """Base Fixture ensuring cleanups are done even if setUp fails. - - Required until testtools/fixtures bugs #1456353 #1456370 are solved. - """ - - def __init__(self): - unsafe_setup = self.setUp - self.setUp = lambda: self.safe_setUp(unsafe_setup) - self.initialized = True - - def setUp(self): - assert getattr(self, 'initialized', True) - super(SafeFixture, self).setUp() - - def safe_setUp(self, unsafe_setup): - """Ensure cleanup is done even if setUp fails.""" - try: - unsafe_setup() - except Exception: - with excutils.save_and_reraise_exception(): - self.safe_cleanUp() - - def safe_cleanUp(self): - """Perform cleanUp if required. - - Fixture.addCleanup/cleanUp can be called only after Fixture.setUp - successful call. It implies we cannot and don't need to call cleanUp - if Fixture.setUp fails or is not called. - - This method assumes Fixture.setUp was called successfully if - self._detail_sources is defined (Fixture.setUp last action). - """ - root_setup_succeed = hasattr(self, '_detail_sources') - - if root_setup_succeed: - self.cleanUp() - - -class AttributeMapMemento(SafeFixture): +class AttributeMapMemento(fixtures.Fixture): """Create a copy of the resource attribute map so it can be restored during test cleanup. 
@@ -75,13 +35,13 @@ class AttributeMapMemento(SafeFixture): - Inheritance is a bit of overkill for this facility and it's a stretch to rationalize the "is a" criteria. """ - def setUp(self): + + def _setUp(self): # Shallow copy is not a proper choice for keeping a backup copy as # the RESOURCE_ATTRIBUTE_MAP map is modified in place through the # 0th level keys. Ideally deepcopy() would be used but this seems # to result in test failures. A compromise is to copy one level # deeper than a shallow copy. - super(AttributeMapMemento, self).setUp() self.contents_backup = {} for res, attrs in six.iteritems(attributes.RESOURCE_ATTRIBUTE_MAP): self.contents_backup[res] = attrs.copy() @@ -91,19 +51,18 @@ class AttributeMapMemento(SafeFixture): attributes.RESOURCE_ATTRIBUTE_MAP = self.contents_backup -class WarningsFixture(SafeFixture): +class WarningsFixture(fixtures.Fixture): """Filters out warnings during test runs.""" warning_types = ( DeprecationWarning, PendingDeprecationWarning, ImportWarning ) - def setUp(self): - super(WarningsFixture, self).setUp() + def _setUp(self): + self.addCleanup(warnings.resetwarnings) for wtype in self.warning_types: warnings.filterwarnings( "always", category=wtype, module='^neutron\\.') - self.addCleanup(warnings.resetwarnings) """setup_mock_calls and verify_mock_calls are convenient methods diff --git a/neutron/tests/unit/plugins/ml2/test_plugin.py b/neutron/tests/unit/plugins/ml2/test_plugin.py index abb857b3e3a..dc4f9ea3fa3 100644 --- a/neutron/tests/unit/plugins/ml2/test_plugin.py +++ b/neutron/tests/unit/plugins/ml2/test_plugin.py @@ -14,6 +14,8 @@ # under the License. 
import functools + +import fixtures import mock import six import testtools @@ -48,7 +50,6 @@ from neutron.plugins.ml2.drivers import type_vlan from neutron.plugins.ml2 import models from neutron.plugins.ml2 import plugin as ml2_plugin from neutron.tests import base -from neutron.tests import tools from neutron.tests.unit import _test_extension_portbindings as test_bindings from neutron.tests.unit.agent import test_securitygroups_rpc as test_sg_rpc from neutron.tests.unit.db import test_allowedaddresspairs_db as test_pair @@ -71,7 +72,7 @@ HOST = 'fake_host' # TODO(marun) - Move to somewhere common for reuse -class PluginConfFixture(tools.SafeFixture): +class PluginConfFixture(fixtures.Fixture): """Plugin configuration shared across the unit and functional tests.""" def __init__(self, plugin_name, parent_setup=None): @@ -79,8 +80,7 @@ class PluginConfFixture(tools.SafeFixture): self.plugin_name = plugin_name self.parent_setup = parent_setup - def setUp(self): - super(PluginConfFixture, self).setUp() + def _setUp(self): if self.parent_setup: self.parent_setup() diff --git a/neutron/tests/unit/testlib_api.py b/neutron/tests/unit/testlib_api.py index 702192d0191..30955cc221c 100644 --- a/neutron/tests/unit/testlib_api.py +++ b/neutron/tests/unit/testlib_api.py @@ -13,6 +13,7 @@ # License for the specific language governing permissions and limitations # under the License. 
+import fixtures import six import testtools @@ -21,7 +22,6 @@ from neutron.db import api as db_api from neutron.db.migration.models import head # noqa from neutron.db import model_base from neutron.tests import base -from neutron.tests import tools from neutron import wsgi @@ -57,13 +57,12 @@ def create_request(path, body, content_type, method='GET', return req -class SqlFixture(tools.SafeFixture): +class SqlFixture(fixtures.Fixture): # flag to indicate that the models have been loaded _TABLES_ESTABLISHED = False - def setUp(self): - super(SqlFixture, self).setUp() + def _setUp(self): # Register all data models engine = db_api.get_engine() if not SqlFixture._TABLES_ESTABLISHED: diff --git a/test-requirements.txt b/test-requirements.txt index 9bea5ff02f2..6693ab22eed 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -5,7 +5,7 @@ hacking<0.11,>=0.10.0 cliff>=1.13.0 # Apache-2.0 coverage>=3.6 -fixtures>=0.3.14 +fixtures>=1.3.1 mock>=1.0 python-subunit>=0.0.18 requests-mock>=0.6.0 # Apache-2.0 From cf8c9e40c8720036bd0c06bd8370f88a472e3e6f Mon Sep 17 00:00:00 2001 From: Fawad Khaliq Date: Tue, 30 Jun 2015 02:17:19 -0700 Subject: [PATCH 24/54] Update PLUMgrid plugin information README was quite oudated and created confusion among users. Updated the information after decomposition. Change-Id: I78bf8dec20ba2ceb644d4565035d29bbf53cb3b5 --- neutron/plugins/plumgrid/README | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/neutron/plugins/plumgrid/README b/neutron/plugins/plumgrid/README index e7118307d3e..5fc4050e4cb 100644 --- a/neutron/plugins/plumgrid/README +++ b/neutron/plugins/plumgrid/README @@ -1,8 +1,14 @@ -PLUMgrid Neutron Plugin for Virtual Network Infrastructure (VNI) +PLUMgrid Neutron Plugin +======================== -This plugin implements Neutron v2 APIs and helps configure -L2/L3 virtual networks consisting of PLUMgrid Platform. 
-Implements External Networks and Port Binding Extension +PLUMgrid Neutron Plugin for PLUMgrid Open Networking Suite -For more details on use please refer to: -http://wiki.openstack.org/PLUMgrid-Neutron +* Full plugin code is available at: + * https://github.com/stackforge/networking-plumgrid + +* PyPI package location: + * https://pypi.python.org/pypi/networking-plumgrid + +* For config, install and other details, please refer to + wiki page: + * http://wiki.openstack.org/PLUMgrid-Neutron From fc472397016c6958e7e02808ac3bc43216e21a62 Mon Sep 17 00:00:00 2001 From: Pavel Bondar Date: Wed, 24 Jun 2015 12:25:22 +0300 Subject: [PATCH 25/54] Fixing indentation and typo in comments - Fix strange indentation - Fix typo in comment Change-Id: I70893bc751c16265a8c3b3214524ab2553f4f30f --- neutron/db/ipam_backend_mixin.py | 2 +- neutron/ipam/__init__.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/neutron/db/ipam_backend_mixin.py b/neutron/db/ipam_backend_mixin.py index d4468e40e78..cb82b4ede6b 100644 --- a/neutron/db/ipam_backend_mixin.py +++ b/neutron/db/ipam_backend_mixin.py @@ -226,7 +226,7 @@ class IpamBackendMixin(db_base_plugin_common.DbBasePluginCommon): """ for subnet in network.subnets: if (subnet.ip_version == ip_version and - new_subnetpool_id != subnet.subnetpool_id): + new_subnetpool_id != subnet.subnetpool_id): raise n_exc.NetworkSubnetPoolAffinityError() def _validate_allocation_pools(self, ip_pools, subnet_cidr): diff --git a/neutron/ipam/__init__.py b/neutron/ipam/__init__.py index 6e1e438177d..16c8151e358 100644 --- a/neutron/ipam/__init__.py +++ b/neutron/ipam/__init__.py @@ -44,7 +44,7 @@ class SubnetRequest(object): :param tenant_id: The tenant id who will own the subnet :type tenant_id: str uuid :param subnet_id: Neutron's subnet ID - :type subnet_id: srt uuid + :type subnet_id: str uuid :param gateway_ip: An IP to reserve for the subnet gateway. 
:type gateway_ip: None or convertible to netaddr.IPAddress :param allocation_pools: The pool from which IPAM should allocate From abb7124a518823616c22afbd6bb5fe412b395bcd Mon Sep 17 00:00:00 2001 From: Assaf Muller Date: Mon, 29 Jun 2015 14:02:29 -0400 Subject: [PATCH 26/54] Remove unused linux bridge agent configuration options This is cruft left from the Linux bridge monolithic plugin, or from pre-Havana versions of the code. Change-Id: Id7bb7d7860859283b53f588a940ca21c94fd0e6a --- etc/neutron/plugins/ml2/linuxbridge_agent.ini | 28 ------------------- .../linuxbridge/agent/common/config.py | 14 ---------- 2 files changed, 42 deletions(-) diff --git a/etc/neutron/plugins/ml2/linuxbridge_agent.ini b/etc/neutron/plugins/ml2/linuxbridge_agent.ini index b25d02916d5..d1a01ba72ee 100644 --- a/etc/neutron/plugins/ml2/linuxbridge_agent.ini +++ b/etc/neutron/plugins/ml2/linuxbridge_agent.ini @@ -1,25 +1,3 @@ -[vlans] -# (StrOpt) Type of network to allocate for tenant networks. The -# default value 'local' is useful only for single-box testing and -# provides no connectivity between hosts. You MUST change this to -# 'vlan' and configure network_vlan_ranges below in order for tenant -# networks to provide connectivity between hosts. Set to 'none' to -# disable creation of tenant networks. -# -# tenant_network_type = local -# Example: tenant_network_type = vlan - -# (ListOpt) Comma-separated list of -# [::] tuples enumerating ranges -# of VLAN IDs on named physical networks that are available for -# allocation. All physical networks listed are available for flat and -# VLAN provider network creation. Specified ranges of VLAN IDs are -# available for tenant network allocation if tenant_network_type is -# 'vlan'. If empty, only local networks may be created. 
-# -# network_vlan_ranges = -# Example: network_vlan_ranges = physnet1:1000:2999 - [linux_bridge] # (ListOpt) Comma-separated list of # : tuples mapping physical @@ -62,12 +40,6 @@ # Agent's polling interval in seconds # polling_interval = 2 -# (BoolOpt) Enable server RPC compatibility with old (pre-havana) -# agents. -# -# rpc_support_old_agents = False -# Example: rpc_support_old_agents = True - # (IntOpt) Set new timeout in seconds for new rpc calls after agent receives # SIGTERM. If value is set to 0, rpc timeout won't be changed. # diff --git a/neutron/plugins/ml2/drivers/linuxbridge/agent/common/config.py b/neutron/plugins/ml2/drivers/linuxbridge/agent/common/config.py index fa1487c6b49..5656b740977 100644 --- a/neutron/plugins/ml2/drivers/linuxbridge/agent/common/config.py +++ b/neutron/plugins/ml2/drivers/linuxbridge/agent/common/config.py @@ -16,21 +16,10 @@ from oslo_config import cfg from neutron.agent.common import config -DEFAULT_VLAN_RANGES = [] DEFAULT_INTERFACE_MAPPINGS = [] DEFAULT_VXLAN_GROUP = '224.0.0.1' -vlan_opts = [ - cfg.StrOpt('tenant_network_type', default='local', - help=_("Network type for tenant networks " - "(local, vlan, or none)")), - cfg.ListOpt('network_vlan_ranges', - default=DEFAULT_VLAN_RANGES, - help=_("List of :: " - "or ")), -] - vxlan_opts = [ cfg.BoolOpt('enable_vxlan', default=True, help=_("Enable VXLAN on the agent. Can be enabled when " @@ -60,8 +49,6 @@ agent_opts = [ cfg.IntOpt('polling_interval', default=2, help=_("The number of seconds the agent will wait between " "polling for local device changes.")), - cfg.BoolOpt('rpc_support_old_agents', default=False, - help=_("Enable server RPC compatibility with old agents")), cfg.IntOpt('quitting_rpc_timeout', default=10, help=_("Set new timeout in seconds for new rpc calls after " "agent receives SIGTERM. 
If value is set to 0, rpc " @@ -69,7 +56,6 @@ agent_opts = [ ] -cfg.CONF.register_opts(vlan_opts, "VLANS") cfg.CONF.register_opts(vxlan_opts, "VXLAN") cfg.CONF.register_opts(bridge_opts, "LINUX_BRIDGE") cfg.CONF.register_opts(agent_opts, "AGENT") From 49569327c20d8a10ba3d426833ff28d68b1b7a27 Mon Sep 17 00:00:00 2001 From: armando-migliaccio Date: Wed, 1 Jul 2015 12:00:14 -0700 Subject: [PATCH 27/54] Fix log traces induced by retry decorator Patch 4e77442d5 added a retry decorator to the API layer to catch DB deadlock errors. However, when they occur, the retried operation ends up being ineffective because the original body has been altered, which leads the notification and validation layers to barf exceptions due to unrecognized/unserializable elements. This ultimately results to an error reported to the user. To address this, let's make a deep copy of the request body, before we pass it down to the lower layers. This allows the decorator to work on a pristine copy of the body on every attempt. The performance impact for this should be negligible. 
Closes-bug: #1470615 Change-Id: I82a2a002612d28fa8f97b0afbd4f7ba1e8830377 --- neutron/api/v2/base.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/neutron/api/v2/base.py b/neutron/api/v2/base.py index d0f2aa8f156..48dea6bf6d0 100644 --- a/neutron/api/v2/base.py +++ b/neutron/api/v2/base.py @@ -391,7 +391,8 @@ class Controller(object): self._notifier.info(request.context, self._resource + '.create.start', body) - body = Controller.prepare_request_body(request.context, body, True, + body = Controller.prepare_request_body(request.context, + copy.deepcopy(body), True, self._resource, self._attr_info, allow_bulk=self._allow_bulk) action = self._plugin_handlers[self.CREATE] From f1771131a85a2fe633126f354364205554ef71d1 Mon Sep 17 00:00:00 2001 From: Kevin Benton Date: Wed, 1 Jul 2015 13:06:38 -0700 Subject: [PATCH 28/54] Change the half of the bridge name used for ports The code to generate the names of the patch ports was based on a chunk of the bridge name starting from the beginning. With the long suffix, this ended up excluding all of the random characters in the name. (e.g. br-int374623235 would create an interface br-in-patch-tun). This meant that if two tests using patch interfaces ran together, they would have a name collision and one would fail. This patch updates the patch port name generation to use the randomized back portion of the name. 
Change-Id: I172e0b2c0b53e8c7151bd92f0915773ea62c0c6a Closes-Bug: #1470637 --- neutron/tests/functional/agent/test_l2_ovs_agent.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/neutron/tests/functional/agent/test_l2_ovs_agent.py b/neutron/tests/functional/agent/test_l2_ovs_agent.py index 75614b20f3c..6deaab64e2e 100644 --- a/neutron/tests/functional/agent/test_l2_ovs_agent.py +++ b/neutron/tests/functional/agent/test_l2_ovs_agent.py @@ -60,8 +60,8 @@ class OVSAgentTestFramework(base.BaseOVSLinuxTestCase): self.br_tun = base.get_rand_name(n_const.DEVICE_NAME_MAX_LEN, prefix='br-tun') patch_name_len = n_const.DEVICE_NAME_MAX_LEN - len("-patch-tun") - self.patch_tun = "%s-patch-tun" % self.br_int[:patch_name_len] - self.patch_int = "%s-patch-int" % self.br_tun[:patch_name_len] + self.patch_tun = "%s-patch-tun" % self.br_int[patch_name_len:] + self.patch_int = "%s-patch-int" % self.br_tun[patch_name_len:] self.ovs = ovs_lib.BaseOVS() self.config = self._configure_agent() self.driver = interface.OVSInterfaceDriver(self.config) From 5e11769e498f210b1c84a6addaffecb7db9c5fed Mon Sep 17 00:00:00 2001 From: armando-migliaccio Date: Wed, 1 Jul 2015 18:01:10 -0700 Subject: [PATCH 29/54] Use EXT_TO_SERVICE_MAPPING instead of ALLOWED_SERVICES We can derive the services from EXT_TO_SERVICE_MAPPING, therefore there is no need for duplicating the service labels into ALLOWED_SERVICES. Change-Id: If92e0ea3dea4480588141a2819ea4036c527c9bc --- neutron/plugins/common/constants.py | 7 +------ neutron/services/provider_configuration.py | 5 +++-- 2 files changed, 4 insertions(+), 8 deletions(-) diff --git a/neutron/plugins/common/constants.py b/neutron/plugins/common/constants.py index 5c562dc3b7b..809a1399e85 100644 --- a/neutron/plugins/common/constants.py +++ b/neutron/plugins/common/constants.py @@ -13,7 +13,7 @@ # License for the specific language governing permissions and limitations # under the License. 
-# Service type constants: +# Neutron well-known service type constants: CORE = "CORE" DUMMY = "DUMMY" LOADBALANCER = "LOADBALANCER" @@ -23,7 +23,6 @@ VPN = "VPN" METERING = "METERING" L3_ROUTER_NAT = "L3_ROUTER_NAT" - # Maps extension alias to service type EXT_TO_SERVICE_MAPPING = { 'dummy': DUMMY, @@ -35,10 +34,6 @@ EXT_TO_SERVICE_MAPPING = { 'router': L3_ROUTER_NAT } -# TODO(salvatore-orlando): Move these (or derive them) from conf file -ALLOWED_SERVICES = [CORE, DUMMY, LOADBALANCER, FIREWALL, VPN, METERING, - L3_ROUTER_NAT, LOADBALANCERV2] - COMMON_PREFIXES = { CORE: "", DUMMY: "/dummy_svc", diff --git a/neutron/services/provider_configuration.py b/neutron/services/provider_configuration.py index 9a247bfc311..cc406e74193 100644 --- a/neutron/services/provider_configuration.py +++ b/neutron/services/provider_configuration.py @@ -111,11 +111,12 @@ def parse_service_provider_opt(): prov_def) LOG.error(msg) raise n_exc.Invalid(msg) - if svc_type not in constants.ALLOWED_SERVICES: + ALLOWED_SERVICES = constants.EXT_TO_SERVICE_MAPPING.values() + if svc_type not in ALLOWED_SERVICES: msg = (_("Service type '%(svc_type)s' is not allowed, " "allowed types: %(allowed)s") % {'svc_type': svc_type, - 'allowed': constants.ALLOWED_SERVICES}) + 'allowed': ALLOWED_SERVICES}) LOG.error(msg) raise n_exc.Invalid(msg) driver = get_provider_driver_class(driver) From cbd95318ad6c44e72a3aa163f7a399353c8b4458 Mon Sep 17 00:00:00 2001 From: "vikram.choudhary" Date: Tue, 9 Jun 2015 19:55:59 +0530 Subject: [PATCH 30/54] Support Basic Address Scope CRUD as extensions This patch adds the support for basic address scope CRUD. Subsequent patches will be added to use this address scope on subnet pools. 
DocImpact APIImpact Co-Authored-By: Ryan Tidwell Co-Authored-By: Numan Siddique Change-Id: Icabdd22577cfda0e1fbf6042e4b05b8080e54fdb Partially-implements: blueprint address-scopes --- etc/policy.json | 8 + neutron/db/address_scope_db.py | 105 ++++++++ .../versions/52c5312f6baf_address_scopes.py | 36 +++ .../alembic_migrations/versions/HEAD | 2 +- neutron/extensions/address_scope.py | 138 ++++++++++ neutron/plugins/ml2/plugin.py | 7 +- neutron/tests/etc/policy.json | 8 + .../unit/extensions/test_address_scope.py | 239 ++++++++++++++++++ 8 files changed, 540 insertions(+), 3 deletions(-) create mode 100644 neutron/db/address_scope_db.py create mode 100644 neutron/db/migration/alembic_migrations/versions/52c5312f6baf_address_scopes.py create mode 100644 neutron/extensions/address_scope.py create mode 100644 neutron/tests/unit/extensions/test_address_scope.py diff --git a/etc/policy.json b/etc/policy.json index 87f6b266897..eaf6d685ffe 100644 --- a/etc/policy.json +++ b/etc/policy.json @@ -9,6 +9,7 @@ "shared_firewalls": "field:firewalls:shared=True", "shared_firewall_policies": "field:firewall_policies:shared=True", "shared_subnetpools": "field:subnetpools:shared=True", + "shared_address_scopes": "field:address_scopes:shared=True", "external": "field:networks:router:external=True", "default": "rule:admin_or_owner", @@ -23,6 +24,13 @@ "update_subnetpool": "rule:admin_or_owner", "delete_subnetpool": "rule:admin_or_owner", + "create_address_scope": "", + "create_address_scope:shared": "rule:admin_only", + "get_address_scope": "rule:admin_or_owner or rule:shared_address_scopes", + "update_address_scope": "rule:admin_or_owner", + "update_address_scope:shared": "rule:admin_only", + "delete_address_scope": "rule:admin_or_owner", + "create_network": "", "get_network": "rule:admin_or_owner or rule:shared or rule:external or rule:context_is_advsvc", "get_network:router:external": "rule:regular_user", diff --git a/neutron/db/address_scope_db.py b/neutron/db/address_scope_db.py 
new file mode 100644 index 00000000000..c5c7a8469f4 --- /dev/null +++ b/neutron/db/address_scope_db.py @@ -0,0 +1,105 @@ +# Copyright (c) 2015 Huawei Technologies Co.,LTD. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo_log import log as logging +from oslo_utils import uuidutils +import sqlalchemy as sa +from sqlalchemy.orm import exc + +from neutron.db import model_base +from neutron.db import models_v2 +from neutron.extensions import address_scope as ext_address_scope + +LOG = logging.getLogger(__name__) + + +class AddressScope(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant): + """Represents a neutron address scope.""" + + __tablename__ = "address_scopes" + + name = sa.Column(sa.String(255), nullable=False) + shared = sa.Column(sa.Boolean, nullable=False) + + +class AddressScopeDbMixin(ext_address_scope.AddressScopePluginBase): + """Mixin class to add address scope to db_base_plugin_v2.""" + + __native_bulk_support = True + + def _make_address_scope_dict(self, address_scope, fields=None): + res = {'id': address_scope['id'], + 'name': address_scope['name'], + 'tenant_id': address_scope['tenant_id'], + 'shared': address_scope['shared']} + return self._fields(res, fields) + + def _get_address_scope(self, context, id): + try: + return self._get_by_id(context, AddressScope, id) + except exc.NoResultFound: + raise ext_address_scope.AddressScopeNotFound(address_scope_id=id) + + def create_address_scope(self, context, address_scope): + """Create a 
address scope.""" + a_s = address_scope['address_scope'] + tenant_id = self._get_tenant_id_for_create(context, a_s) + address_scope_id = a_s.get('id') or uuidutils.generate_uuid() + with context.session.begin(subtransactions=True): + pool_args = {'tenant_id': tenant_id, + 'id': address_scope_id, + 'name': a_s['name'], + 'shared': a_s['shared']} + address_scope = AddressScope(**pool_args) + context.session.add(address_scope) + + return self._make_address_scope_dict(address_scope) + + def update_address_scope(self, context, id, address_scope): + a_s = address_scope['address_scope'] + with context.session.begin(subtransactions=True): + address_scope = self._get_address_scope(context, id) + if address_scope.shared and not a_s.get('shared', True): + reason = _("Shared address scope can't be unshared") + raise ext_address_scope.AddressScopeUpdateError( + address_scope_id=id, reason=reason) + address_scope.update(a_s) + + return self._make_address_scope_dict(address_scope) + + def get_address_scope(self, context, id, fields=None): + address_scope = self._get_address_scope(context, id) + return self._make_address_scope_dict(address_scope, fields) + + def get_address_scopes(self, context, filters=None, fields=None, + sorts=None, limit=None, marker=None, + page_reverse=False): + marker_obj = self._get_marker_obj(context, 'addrscope', limit, marker) + collection = self._get_collection(context, AddressScope, + self._make_address_scope_dict, + filters=filters, fields=fields, + sorts=sorts, + limit=limit, + marker_obj=marker_obj, + page_reverse=page_reverse) + return collection + + def get_address_scopes_count(self, context, filters=None): + return self._get_collection_count(context, AddressScope, + filters=filters) + + def delete_address_scope(self, context, id): + with context.session.begin(subtransactions=True): + address_scope = self._get_address_scope(context, id) + context.session.delete(address_scope) diff --git 
a/neutron/db/migration/alembic_migrations/versions/52c5312f6baf_address_scopes.py b/neutron/db/migration/alembic_migrations/versions/52c5312f6baf_address_scopes.py new file mode 100644 index 00000000000..9fa1466e52b --- /dev/null +++ b/neutron/db/migration/alembic_migrations/versions/52c5312f6baf_address_scopes.py @@ -0,0 +1,36 @@ +# Copyright (c) 2015 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""Initial operations in support of address scopes + +""" + +# revision identifiers, used by Alembic. 
+revision = '52c5312f6baf' +down_revision = '599c6a226151' + +from alembic import op +import sqlalchemy as sa + + +def upgrade(): + op.create_table( + 'address_scopes', + sa.Column('id', sa.String(length=36), nullable=False), + sa.Column('name', sa.String(length=255), nullable=False), + sa.Column('tenant_id', sa.String(length=255), nullable=True, + index=True), + sa.Column('shared', sa.Boolean(), nullable=False), + sa.PrimaryKeyConstraint('id')) diff --git a/neutron/db/migration/alembic_migrations/versions/HEAD b/neutron/db/migration/alembic_migrations/versions/HEAD index 054926f3afd..5d2bcdc22c2 100644 --- a/neutron/db/migration/alembic_migrations/versions/HEAD +++ b/neutron/db/migration/alembic_migrations/versions/HEAD @@ -1 +1 @@ -599c6a226151 +52c5312f6baf diff --git a/neutron/extensions/address_scope.py b/neutron/extensions/address_scope.py new file mode 100644 index 00000000000..63829920bf3 --- /dev/null +++ b/neutron/extensions/address_scope.py @@ -0,0 +1,138 @@ +# Copyright (c) 2015 Huawei Technologies Co.,LTD. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import abc + +from neutron.api import extensions +from neutron.api.v2 import attributes as attr +from neutron.api.v2 import base +from neutron.common import exceptions as nexception +from neutron import manager +import six + +ADDRESS_SCOPE = 'address_scope' +ADDRESS_SCOPES = '%ss' % ADDRESS_SCOPE + + +# Attribute Map +RESOURCE_ATTRIBUTE_MAP = { + ADDRESS_SCOPES: { + 'id': {'allow_post': False, + 'allow_put': False, + 'validate': {'type:uuid': None}, + 'is_visible': True, + 'primary_key': True}, + 'name': {'allow_post': True, + 'allow_put': True, + 'default': '', + 'validate': {'type:string': attr.NAME_MAX_LEN}, + 'is_visible': True}, + 'tenant_id': {'allow_post': True, + 'allow_put': False, + 'validate': {'type:string': attr.TENANT_ID_MAX_LEN}, + 'required_by_policy': True, + 'is_visible': True}, + attr.SHARED: {'allow_post': True, + 'allow_put': True, + 'default': False, + 'convert_to': attr.convert_to_boolean, + 'is_visible': True, + 'required_by_policy': True, + 'enforce_policy': True}, + } +} + + +class AddressScopeNotFound(nexception.NotFound): + message = _("Address scope %(address_scope_id)s could not be found") + + +class AddressScopeDeleteError(nexception.BadRequest): + message = _("Unable to delete address scope %(address_scope_id)s : " + "%(reason)s") + + +class AddressScopeUpdateError(nexception.BadRequest): + message = _("Unable to update address scope %(address_scope_id)s : " + "%(reason)s") + + +class Address_scope(extensions.ExtensionDescriptor): + """Extension class supporting Address Scopes.""" + + @classmethod + def get_name(cls): + return "Address scope" + + @classmethod + def get_alias(cls): + return "address-scope" + + @classmethod + def get_description(cls): + return "Address scopes extension." 
+ + @classmethod + def get_updated(cls): + return "2015-07-26T10:00:00-00:00" + + @classmethod + def get_resources(cls): + """Returns Ext Resources.""" + my_plurals = [(key, key[:-1]) for key in RESOURCE_ATTRIBUTE_MAP.keys()] + attr.PLURALS.update(dict(my_plurals)) + plugin = manager.NeutronManager.get_plugin() + collection_name = ADDRESS_SCOPES.replace('_', '-') + params = RESOURCE_ATTRIBUTE_MAP.get(ADDRESS_SCOPES, dict()) + controller = base.create_resource(collection_name, + ADDRESS_SCOPE, + plugin, params, allow_bulk=True, + allow_pagination=True, + allow_sorting=True) + + ex = extensions.ResourceExtension(collection_name, controller, + attr_map=params) + return [ex] + + def get_extended_resources(self, version): + return {} + + +@six.add_metaclass(abc.ABCMeta) +class AddressScopePluginBase(object): + + @abc.abstractmethod + def create_address_scope(self, context, adress_scope): + pass + + @abc.abstractmethod + def update_address_scope(self, context, id, address_scope): + pass + + @abc.abstractmethod + def get_address_scope(self, context, id, fields=None): + pass + + @abc.abstractmethod + def get_address_scopes(self, context, filters=None, fields=None, + sorts=None, limit=None, marker=None, + page_reverse=False): + pass + + @abc.abstractmethod + def delete_address_scope(self, context, id): + pass + + def get_address_scopes_count(self, context, filters=None): + raise NotImplementedError() diff --git a/neutron/plugins/ml2/plugin.py b/neutron/plugins/ml2/plugin.py index a56039d4548..c61a717e393 100644 --- a/neutron/plugins/ml2/plugin.py +++ b/neutron/plugins/ml2/plugin.py @@ -44,6 +44,7 @@ from neutron.common import log as neutron_log from neutron.common import rpc as n_rpc from neutron.common import topics from neutron.common import utils +from neutron.db import address_scope_db from neutron.db import agents_db from neutron.db import agentschedulers_db from neutron.db import allowedaddresspairs_db as addr_pair_db @@ -88,7 +89,8 @@ class 
Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2, addr_pair_db.AllowedAddressPairsMixin, vlantransparent_db.Vlantransparent_db_mixin, extradhcpopt_db.ExtraDhcpOptMixin, - netmtu_db.Netmtu_db_mixin): + netmtu_db.Netmtu_db_mixin, + address_scope_db.AddressScopeDbMixin): """Implement the Neutron L2 abstractions using modules. @@ -112,7 +114,8 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2, "dhcp_agent_scheduler", "multi-provider", "allowed-address-pairs", "extra_dhcp_opt", "subnet_allocation", - "net-mtu", "vlan-transparent"] + "net-mtu", "vlan-transparent", + "address-scope"] @property def supported_extension_aliases(self): diff --git a/neutron/tests/etc/policy.json b/neutron/tests/etc/policy.json index 87f6b266897..eaf6d685ffe 100644 --- a/neutron/tests/etc/policy.json +++ b/neutron/tests/etc/policy.json @@ -9,6 +9,7 @@ "shared_firewalls": "field:firewalls:shared=True", "shared_firewall_policies": "field:firewall_policies:shared=True", "shared_subnetpools": "field:subnetpools:shared=True", + "shared_address_scopes": "field:address_scopes:shared=True", "external": "field:networks:router:external=True", "default": "rule:admin_or_owner", @@ -23,6 +24,13 @@ "update_subnetpool": "rule:admin_or_owner", "delete_subnetpool": "rule:admin_or_owner", + "create_address_scope": "", + "create_address_scope:shared": "rule:admin_only", + "get_address_scope": "rule:admin_or_owner or rule:shared_address_scopes", + "update_address_scope": "rule:admin_or_owner", + "update_address_scope:shared": "rule:admin_only", + "delete_address_scope": "rule:admin_or_owner", + "create_network": "", "get_network": "rule:admin_or_owner or rule:shared or rule:external or rule:context_is_advsvc", "get_network:router:external": "rule:regular_user", diff --git a/neutron/tests/unit/extensions/test_address_scope.py b/neutron/tests/unit/extensions/test_address_scope.py new file mode 100644 index 00000000000..df46e6bce65 --- /dev/null +++ b/neutron/tests/unit/extensions/test_address_scope.py @@ -0,0 
+1,239 @@ +# Copyright (c) 2015 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import contextlib + +import webob.exc + +from neutron.api.v2 import attributes as attr +from neutron import context +from neutron.db import address_scope_db +from neutron.db import db_base_plugin_v2 +from neutron.extensions import address_scope as ext_address_scope +from neutron.tests.unit.db import test_db_base_plugin_v2 + +DB_PLUGIN_KLASS = ('neutron.tests.unit.extensions.test_address_scope.' + 'AddressScopeTestPlugin') + + +class AddressScopeTestExtensionManager(object): + + def get_resources(self): + # Add the resources to the global attribute map + # This is done here as the setup process won't + # initialize the main API router which extends + # the global attribute map + attr.RESOURCE_ATTRIBUTE_MAP.update( + ext_address_scope.RESOURCE_ATTRIBUTE_MAP) + return ext_address_scope.Address_scope.get_resources() + + def get_actions(self): + return [] + + def get_request_extensions(self): + return [] + + +class AddressScopeTestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase): + + def _create_address_scope(self, fmt, expected_res_status=None, + admin=False, **kwargs): + address_scope = {'address_scope': {}} + for k, v in kwargs.items(): + address_scope['address_scope'][k] = str(v) + + address_scope_req = self.new_create_request('address-scopes', + address_scope, fmt) + + if not admin: + neutron_context = context.Context('', kwargs.get('tenant_id', + self._tenant_id)) + 
address_scope_req.environ['neutron.context'] = neutron_context + + address_scope_res = address_scope_req.get_response(self.ext_api) + if expected_res_status: + self.assertEqual(address_scope_res.status_int, expected_res_status) + return address_scope_res + + def _make_address_scope(self, fmt, admin=False, **kwargs): + res = self._create_address_scope(fmt, admin=admin, **kwargs) + if res.status_int >= webob.exc.HTTPClientError.code: + raise webob.exc.HTTPClientError(code=res.status_int) + return self.deserialize(fmt, res) + + @contextlib.contextmanager + def address_scope(self, admin=False, **kwargs): + addr_scope = self._make_address_scope(self.fmt, admin, **kwargs) + yield addr_scope + + def _test_create_address_scope(self, admin=False, expected=None, **kwargs): + keys = kwargs.copy() + keys.setdefault('tenant_id', self._tenant_id) + with self.address_scope(admin=admin, **keys) as addr_scope: + self._validate_resource(addr_scope, keys, 'address_scope') + if expected: + self._compare_resource(addr_scope, expected, 'address_scope') + return addr_scope + + def _test_update_address_scope(self, addr_scope_id, data, admin=False, + expected=None, tenant_id=None): + update_req = self.new_update_request( + 'address-scopes', data, addr_scope_id) + if not admin: + neutron_context = context.Context('', tenant_id or self._tenant_id) + update_req.environ['neutron.context'] = neutron_context + + update_res = update_req.get_response(self.ext_api) + if expected: + addr_scope = self.deserialize(self.fmt, update_res) + self._compare_resource(addr_scope, expected, 'address_scope') + return addr_scope + + return update_res + + +class AddressScopeTestPlugin(db_base_plugin_v2.NeutronDbPluginV2, + address_scope_db.AddressScopeDbMixin): + __native_pagination_support = True + __native_sorting_support = True + + supported_extension_aliases = ["address-scope"] + + +class TestAddressScope(AddressScopeTestCase): + + def setUp(self): + plugin = DB_PLUGIN_KLASS + ext_mgr = 
AddressScopeTestExtensionManager() + super(TestAddressScope, self).setUp(plugin=plugin, ext_mgr=ext_mgr) + + def test_create_address_scope(self): + expected_addr_scope = {'name': 'foo-address-scope', + 'tenant_id': self._tenant_id, + 'shared': False} + self._test_create_address_scope(name='foo-address-scope', + expected=expected_addr_scope) + + def test_create_address_scope_empty_name(self): + expected_addr_scope = {'name': '', + 'tenant_id': self._tenant_id, + 'shared': False} + self._test_create_address_scope(name='', expected=expected_addr_scope) + + # no name specified + self._test_create_address_scope(expected=expected_addr_scope) + + def test_create_address_scope_shared_admin(self): + expected_addr_scope = {'name': 'foo-address-scope', 'shared': True} + self._test_create_address_scope(name='foo-address-scope', admin=True, + shared=True, + expected=expected_addr_scope) + + def test_created_address_scope_shared_non_admin(self): + res = self._create_address_scope(self.fmt, name='foo-address-scope', + tenant_id=self._tenant_id, + admin=False, shared=True) + self.assertEqual(webob.exc.HTTPForbidden.code, res.status_int) + + def test_created_address_scope_specify_id(self): + res = self._create_address_scope(self.fmt, name='foo-address-scope', + id='foo-id') + self.assertEqual(webob.exc.HTTPClientError.code, res.status_int) + + def test_delete_address_scope(self): + with self.address_scope(name='foo-address-scope') as addr_scope: + self._delete('address-scopes', addr_scope['address_scope']['id']) + self._show('address-scopes', addr_scope['address_scope']['id'], + expected_code=webob.exc.HTTPNotFound.code) + + def test_update_address_scope(self): + addr_scope = self._test_create_address_scope(name='foo-address-scope') + data = {'address_scope': {'name': 'bar-address-scope'}} + self._test_update_address_scope(addr_scope['address_scope']['id'], + data, expected=data['address_scope']) + + def test_update_address_scope_shared_true_admin(self): + addr_scope = 
self._test_create_address_scope(name='foo-address-scope') + data = {'address_scope': {'shared': True}} + self._test_update_address_scope(addr_scope['address_scope']['id'], + data, admin=True, + expected=data['address_scope']) + + def test_update_address_scope_shared_true_non_admin(self): + addr_scope = self._test_create_address_scope(name='foo-address-scope') + data = {'address_scope': {'shared': True}} + res = self._test_update_address_scope( + addr_scope['address_scope']['id'], data, admin=False) + self.assertEqual(webob.exc.HTTPForbidden.code, res.status_int) + + def test_update_address_scope_shared_false_admin(self): + addr_scope = self._test_create_address_scope(name='foo-address-scope', + admin=True, shared=True) + data = {'address_scope': {'shared': False}} + res = self._test_update_address_scope( + addr_scope['address_scope']['id'], data, admin=True) + self.assertEqual(webob.exc.HTTPClientError.code, res.status_int) + + def test_get_address_scope(self): + addr_scope = self._test_create_address_scope(name='foo-address-scope') + req = self.new_show_request('address-scopes', + addr_scope['address_scope']['id']) + res = self.deserialize(self.fmt, req.get_response(self.ext_api)) + self.assertEqual(addr_scope['address_scope']['id'], + res['address_scope']['id']) + + def test_get_address_scope_different_tenants_not_shared(self): + addr_scope = self._test_create_address_scope(name='foo-address-scope') + req = self.new_show_request('address-scopes', + addr_scope['address_scope']['id']) + neutron_context = context.Context('', 'not-the-owner') + req.environ['neutron.context'] = neutron_context + res = req.get_response(self.ext_api) + self.assertEqual(webob.exc.HTTPNotFound.code, res.status_int) + + def test_get_address_scope_different_tenants_shared(self): + addr_scope = self._test_create_address_scope(name='foo-address-scope', + shared=True, admin=True) + req = self.new_show_request('address-scopes', + addr_scope['address_scope']['id']) + neutron_context = 
context.Context('', 'test-tenant-2') + req.environ['neutron.context'] = neutron_context + res = self.deserialize(self.fmt, req.get_response(self.ext_api)) + self.assertEqual(addr_scope['address_scope']['id'], + res['address_scope']['id']) + + def test_list_address_scopes(self): + self._test_create_address_scope(name='foo-address-scope') + self._test_create_address_scope(name='bar-address-scope') + res = self._list('address-scopes') + self.assertEqual(2, len(res['address_scopes'])) + + def test_list_address_scopes_different_tenants_shared(self): + self._test_create_address_scope(name='foo-address-scope', shared=True, + admin=True) + admin_res = self._list('address-scopes') + mortal_res = self._list( + 'address-scopes', + neutron_context=context.Context('', 'not-the-owner')) + self.assertEqual(1, len(admin_res['address_scopes'])) + self.assertEqual(1, len(mortal_res['address_scopes'])) + + def test_list_address_scopes_different_tenants_not_shared(self): + self._test_create_address_scope(name='foo-address-scope') + admin_res = self._list('address-scopes') + mortal_res = self._list( + 'address-scopes', + neutron_context=context.Context('', 'not-the-owner')) + self.assertEqual(1, len(admin_res['address_scopes'])) + self.assertEqual(0, len(mortal_res['address_scopes'])) From 197aa10487d6cf8081099f33aae1ec7efe4f9545 Mon Sep 17 00:00:00 2001 From: Kevin Benton Date: Thu, 2 Jul 2015 01:45:46 -0700 Subject: [PATCH 31/54] Downgrade log level for gone port on status update If a port is deleted immediately before a status update arrives from the L2 agent, the port will be missing from the DB. The current code was logging this at the warning level, but this occurs during normal operations so it should only be a debug event. 
Change-Id: I22af81e6807bfccb4c906ec0873fcbfca67b72df --- neutron/plugins/ml2/plugin.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/neutron/plugins/ml2/plugin.py b/neutron/plugins/ml2/plugin.py index a56039d4548..105d0f5d58c 100644 --- a/neutron/plugins/ml2/plugin.py +++ b/neutron/plugins/ml2/plugin.py @@ -1397,8 +1397,8 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2, session.begin(subtransactions=True): port = db.get_port(session, port_id) if not port: - LOG.warning(_LW("Port %(port)s updated up by agent not found"), - {'port': port_id}) + LOG.debug("Port %(port)s update to %(val)s by agent not found", + {'port': port_id, 'val': status}) return None if (port.status != status and port['device_owner'] != const.DEVICE_OWNER_DVR_INTERFACE): From 06d6012e3e379f774e190203f4f6f32c20704daa Mon Sep 17 00:00:00 2001 From: Pavel Bondar Date: Thu, 25 Jun 2015 16:32:22 +0300 Subject: [PATCH 32/54] Collapse create_subnet into single method Previously create_subnet called different methods for subnet allocation with subnetpool and without it. _create_subnet_from_implicit_pool and _create_subnet_from_pool were collapsed into single method _create_subnet. This is intermediate step for supporting pluggable ipam. 
Partially-Implements: blueprint neutron-ipam Change-Id: Ia6cfc2c15e29f983a623772f5473166c075a20e4 --- neutron/db/db_base_plugin_common.py | 2 +- neutron/db/db_base_plugin_v2.py | 73 +++++------------------- neutron/db/ipam_non_pluggable_backend.py | 29 ++++++++++ 3 files changed, 44 insertions(+), 60 deletions(-) diff --git a/neutron/db/db_base_plugin_common.py b/neutron/db/db_base_plugin_common.py index 1bbca99e10b..29816ca39ac 100644 --- a/neutron/db/db_base_plugin_common.py +++ b/neutron/db/db_base_plugin_common.py @@ -227,7 +227,7 @@ class DbBasePluginCommon(common_db_mixin.CommonDbMixin): attributes.NETWORKS, res, network) return self._fields(res, fields) - def _make_subnet_args(self, context, shared, detail, + def _make_subnet_args(self, shared, detail, subnet, subnetpool_id=None): args = {'tenant_id': detail.tenant_id, 'id': detail.subnet_id, diff --git a/neutron/db/db_base_plugin_v2.py b/neutron/db/db_base_plugin_v2.py index 52a09188ddf..a30cb7fb626 100644 --- a/neutron/db/db_base_plugin_v2.py +++ b/neutron/db/db_base_plugin_v2.py @@ -442,60 +442,23 @@ class NeutronDbPluginV2(ipam_non_pluggable_backend.IpamNonPluggableBackend, @oslo_db_api.wrap_db_retry(max_retries=db_api.MAX_RETRIES, retry_on_request=True, retry_on_deadlock=True) - def _create_subnet_from_pool(self, context, subnet, subnetpool_id): + def _create_subnet(self, context, subnet, subnetpool_id): s = subnet['subnet'] - self._validate_pools_with_subnetpool(s) with context.session.begin(subtransactions=True): - subnetpool = self._get_subnetpool(context, subnetpool_id) - self._validate_ip_version_with_subnetpool(s, subnetpool) - network = self._get_network(context, s["network_id"]) - allocator = subnet_alloc.SubnetAllocator(subnetpool, context) - req = ipam.SubnetRequestFactory.get_request(context, s, subnetpool) - - ipam_subnet = allocator.allocate_subnet(req) - detail = ipam_subnet.get_details() - subnet = self._save_subnet(context, - network, - self._make_subnet_args( - context, - 
network.shared, - detail, - s, - subnetpool_id=subnetpool['id']), - s['dns_nameservers'], - s['host_routes'], - s['allocation_pools']) - if hasattr(network, 'external') and network.external: - self._update_router_gw_ports(context, - network, - subnet) - return self._make_subnet_dict(subnet) - - def _create_subnet_from_implicit_pool(self, context, subnet): - s = subnet['subnet'] - self._validate_subnet(context, s) - id = s.get('id', uuidutils.generate_uuid()) - detail = ipam.SpecificSubnetRequest(s['tenant_id'], - id, - s['cidr']) - with context.session.begin(subtransactions=True): - network = self._get_network(context, s["network_id"]) - self._validate_subnet_cidr(context, network, s['cidr']) - subnet = self._save_subnet(context, - network, - self._make_subnet_args(context, - network.shared, - detail, - s), - s['dns_nameservers'], - s['host_routes'], - s['allocation_pools']) + subnet = self._allocate_subnet(context, + network, + s, + subnetpool_id) if hasattr(network, 'external') and network.external: self._update_router_gw_ports(context, network, subnet) + # If this subnet supports auto-addressing, then update any + # internal ports on the network with addresses for this subnet. 
+ if ipv6_utils.is_auto_address_subnet(subnet): + self._add_auto_addrs_on_network_ports(context, subnet) return self._make_subnet_dict(subnet) def _get_subnetpool_id(self, subnet): @@ -550,24 +513,16 @@ class NeutronDbPluginV2(ipam_non_pluggable_backend.IpamNonPluggableBackend, s['tenant_id'] = self._get_tenant_id_for_create(context, s) subnetpool_id = self._get_subnetpool_id(s) - if not subnetpool_id: + if subnetpool_id: + self._validate_pools_with_subnetpool(s) + else: if not has_cidr: msg = _('A cidr must be specified in the absence of a ' 'subnet pool') raise n_exc.BadRequest(resource='subnets', msg=msg) - # Create subnet from the implicit(AKA null) pool - created_subnet = self._create_subnet_from_implicit_pool(context, - subnet) - else: - created_subnet = self._create_subnet_from_pool(context, subnet, - subnetpool_id) + self._validate_subnet(context, s) - # If this subnet supports auto-addressing, then update any - # internal ports on the network with addresses for this subnet. - if ipv6_utils.is_auto_address_subnet(created_subnet): - self._add_auto_addrs_on_network_ports(context, created_subnet) - - return created_subnet + return self._create_subnet(context, subnet, subnetpool_id) def update_subnet(self, context, id, subnet): """Update the subnet with new info. 
diff --git a/neutron/db/ipam_non_pluggable_backend.py b/neutron/db/ipam_non_pluggable_backend.py index 4941776f0c0..f97d603867d 100644 --- a/neutron/db/ipam_non_pluggable_backend.py +++ b/neutron/db/ipam_non_pluggable_backend.py @@ -27,6 +27,8 @@ from neutron.common import exceptions as n_exc from neutron.common import ipv6_utils from neutron.db import ipam_backend_mixin from neutron.db import models_v2 +from neutron import ipam +from neutron.ipam import subnet_alloc from neutron.ipam import utils as ipam_utils LOG = logging.getLogger(__name__) @@ -465,3 +467,30 @@ class IpamNonPluggableBackend(ipam_backend_mixin.IpamBackendMixin): raise n_exc.IpAddressInUse(net_id=network_id, ip_address=ip_address) return ip_address + + def _allocate_subnet(self, context, network, subnet, subnetpool_id): + subnetpool = None + if subnetpool_id: + subnetpool = self._get_subnetpool(context, subnetpool_id) + self._validate_ip_version_with_subnetpool(subnet, subnetpool) + + subnet_request = ipam.SubnetRequestFactory.get_request(context, + subnet, + subnetpool) + + if subnetpool_id: + driver = subnet_alloc.SubnetAllocator(subnetpool, context) + ipam_subnet = driver.allocate_subnet(subnet_request) + subnet_request = ipam_subnet.get_details() + + subnet = self._save_subnet(context, + network, + self._make_subnet_args( + network.shared, + subnet_request, + subnet, + subnetpool_id), + subnet['dns_nameservers'], + subnet['host_routes'], + subnet['allocation_pools']) + return subnet From 55cb8e4026f025a351896909ba6fa05e3f882003 Mon Sep 17 00:00:00 2001 From: Kevin Benton Date: Thu, 2 Jul 2015 00:16:51 -0700 Subject: [PATCH 33/54] OVS native DBListcommand if_exists support Add support for the if_exists flag to the OVS native db list command. 
Closes-Bug: #1470742 Closes-Bug: #1470894 Change-Id: Ife48d99c145cfab7f0f5523f4cdfd33492085355 --- neutron/agent/ovsdb/native/commands.py | 38 ++++++++++++++----- .../tests/functional/agent/test_ovs_lib.py | 14 +++++++ 2 files changed, 42 insertions(+), 10 deletions(-) diff --git a/neutron/agent/ovsdb/native/commands.py b/neutron/agent/ovsdb/native/commands.py index b8bb1b117e2..973c4cac1f4 100644 --- a/neutron/agent/ovsdb/native/commands.py +++ b/neutron/agent/ovsdb/native/commands.py @@ -351,24 +351,42 @@ class PortToBridgeCommand(BaseCommand): class DbListCommand(BaseCommand): def __init__(self, api, table, records, columns, if_exists): super(DbListCommand, self).__init__(api) + self.requested_info = {'records': records, 'columns': columns, + 'table': table} self.table = self.api._tables[table] self.columns = columns or self.table.columns.keys() + ['_uuid'] self.if_exists = if_exists if records: - self.records = [ - idlutils.row_by_record(self.api.idl, table, record).uuid - for record in records] + self.records = [] + for record in records: + try: + self.records.append(idlutils.row_by_record( + self.api.idl, table, record).uuid) + except idlutils.RowNotFound: + if self.if_exists: + continue + raise else: self.records = self.table.rows.keys() def run_idl(self, txn): - self.result = [ - { - c: idlutils.get_column_value(self.table.rows[uuid], c) - for c in self.columns - } - for uuid in self.records - ] + try: + self.result = [ + { + c: idlutils.get_column_value(self.table.rows[uuid], c) + for c in self.columns + if not self.if_exists or uuid in self.table.rows + } + for uuid in self.records + ] + except KeyError: + # NOTE(kevinbenton): this is converted to a RuntimeError for compat + # with the vsctl version. It might make more sense to change this + # to a RowNotFoundError in the future. + raise RuntimeError(_LE( + "Row removed from DB during listing. Request info: " + "Table=%(table)s. Columns=%(columns)s. 
" + "Records=%(records)s.") % self.requested_info) class DbFindCommand(BaseCommand): diff --git a/neutron/tests/functional/agent/test_ovs_lib.py b/neutron/tests/functional/agent/test_ovs_lib.py index f430481899b..dc2e994cb8a 100644 --- a/neutron/tests/functional/agent/test_ovs_lib.py +++ b/neutron/tests/functional/agent/test_ovs_lib.py @@ -14,6 +14,7 @@ # under the License. import collections +import mock import uuid from neutron.agent.common import ovs_lib @@ -197,6 +198,19 @@ class OVSBridgeTestCase(OVSBridgeTestBase): expected = set([x.vif_id for x in vif_ports]) self.assertEqual(expected, ports) + def test_get_vif_port_set_with_missing_port(self): + self.create_ovs_port() + vif_ports = [self.create_ovs_vif_port()] + + # return an extra port to make sure the db list ignores it + orig = self.br.get_port_name_list + new_port_name_list = lambda: orig() + ['anotherport'] + mock.patch.object(self.br, 'get_port_name_list', + new=new_port_name_list).start() + ports = self.br.get_vif_port_set() + expected = set([vif_ports[0].vif_id]) + self.assertEqual(expected, ports) + def test_get_port_tag_dict(self): # Simple case tested in port test_set_get_clear_db_val pass From e173a31e3b04daf6385813539a163ccb73e24efd Mon Sep 17 00:00:00 2001 From: Oleg Bondarev Date: Thu, 2 Jul 2015 12:18:47 +0300 Subject: [PATCH 34/54] DVR: remove unused method Change-Id: I9d13993d899e2947c5f025100c98ee8934cc5c5d --- neutron/agent/l3/dvr.py | 6 ------ 1 file changed, 6 deletions(-) diff --git a/neutron/agent/l3/dvr.py b/neutron/agent/l3/dvr.py index 99735df526a..703364a4102 100644 --- a/neutron/agent/l3/dvr.py +++ b/neutron/agent/l3/dvr.py @@ -18,7 +18,6 @@ from oslo_log import log as logging from neutron.agent.l3 import dvr_fip_ns from neutron.agent.l3 import dvr_snat_ns -from neutron.agent.l3 import namespaces LOG = logging.getLogger(__name__) @@ -50,11 +49,6 @@ class AgentMixin(object): return fip_ns - def _destroy_fip_namespace(self, ns): - ex_net_id = namespaces.get_id_from_ns_name(ns) - 
fip_ns = self.get_fip_ns(ex_net_id) - fip_ns.delete() - def get_ports_by_subnet(self, subnet_id): return self.plugin_rpc.get_ports_by_subnet(self.context, subnet_id) From 1e5ef92f6af7b1a7c9d9221110a1e0accf2b4405 Mon Sep 17 00:00:00 2001 From: Cyril Roelandt Date: Wed, 1 Jul 2015 19:16:43 +0000 Subject: [PATCH 35/54] Python3: do not use urllib.urlencode It has been moved in Python3. Use six.moves to have code that works with both Python 2 and 3. Change-Id: I5f286b1f784b3b7bb37852b00169a6c1227eb74b Blueprint: neutron-python3 --- neutron/api/api_common.py | 6 +++--- neutron/plugins/ibm/sdnve_api.py | 4 ++-- .../tempest/services/identity/v3/json/identity_client.py | 7 ++++--- .../tempest/services/identity/v3/json/region_client.py | 5 +++-- .../tests/tempest/services/network/json/network_client.py | 6 +++--- neutron/tests/tempest/test.py | 4 ++-- tox.ini | 6 ++++++ 7 files changed, 23 insertions(+), 15 deletions(-) diff --git a/neutron/api/api_common.py b/neutron/api/api_common.py index 7c062cd6d65..778c40794da 100644 --- a/neutron/api/api_common.py +++ b/neutron/api/api_common.py @@ -14,11 +14,11 @@ # under the License. 
import functools -import urllib from oslo_config import cfg from oslo_log import log as logging import six +from six.moves.urllib import parse from webob import exc from neutron.common import constants @@ -60,7 +60,7 @@ def get_previous_link(request, items, id_key): marker = items[0][id_key] params['marker'] = marker params['page_reverse'] = True - return "%s?%s" % (request.path_url, urllib.urlencode(params)) + return "%s?%s" % (request.path_url, parse.urlencode(params)) def get_next_link(request, items, id_key): @@ -70,7 +70,7 @@ def get_next_link(request, items, id_key): marker = items[-1][id_key] params['marker'] = marker params.pop('page_reverse', None) - return "%s?%s" % (request.path_url, urllib.urlencode(params)) + return "%s?%s" % (request.path_url, parse.urlencode(params)) def get_limit_and_marker(request): diff --git a/neutron/plugins/ibm/sdnve_api.py b/neutron/plugins/ibm/sdnve_api.py index 5fe8af0665b..63546d30394 100644 --- a/neutron/plugins/ibm/sdnve_api.py +++ b/neutron/plugins/ibm/sdnve_api.py @@ -16,12 +16,12 @@ from six.moves import http_client as httplib -import urllib import httplib2 from keystoneclient.v2_0 import client as keyclient from oslo_config import cfg from oslo_log import log as logging +from six.moves.urllib import parse from neutron.api.v2 import attributes from neutron.common import utils @@ -158,7 +158,7 @@ class RequestHandler(object): serverurl = SDNVE_URL % (controller_ip, self.port, self.base_url) myurl = serverurl + url if params and isinstance(params, dict): - myurl += '?' + urllib.urlencode(params, doseq=1) + myurl += '?' + parse.urlencode(params, doseq=1) try: LOG.debug("Sending request to SDN-VE. 
url: " diff --git a/neutron/tests/tempest/services/identity/v3/json/identity_client.py b/neutron/tests/tempest/services/identity/v3/json/identity_client.py index f8dd4f75e03..a7db46a5785 100644 --- a/neutron/tests/tempest/services/identity/v3/json/identity_client.py +++ b/neutron/tests/tempest/services/identity/v3/json/identity_client.py @@ -14,7 +14,8 @@ # under the License. import json -import urllib + +from six.moves.urllib import parse from neutron.tests.tempest.common import service_client @@ -95,7 +96,7 @@ class IdentityV3ClientJSON(service_client.ServiceClient): """Get the list of users.""" url = 'users' if params: - url += '?%s' % urllib.urlencode(params) + url += '?%s' % parse.urlencode(params) resp, body = self.get(url) self.expected_success(200, resp.status) body = json.loads(body) @@ -134,7 +135,7 @@ class IdentityV3ClientJSON(service_client.ServiceClient): def list_projects(self, params=None): url = "projects" if params: - url += '?%s' % urllib.urlencode(params) + url += '?%s' % parse.urlencode(params) resp, body = self.get(url) self.expected_success(200, resp.status) body = json.loads(body) diff --git a/neutron/tests/tempest/services/identity/v3/json/region_client.py b/neutron/tests/tempest/services/identity/v3/json/region_client.py index e173aa51961..d2fa53b7561 100644 --- a/neutron/tests/tempest/services/identity/v3/json/region_client.py +++ b/neutron/tests/tempest/services/identity/v3/json/region_client.py @@ -14,7 +14,8 @@ # under the License. 
import json -import urllib + +from six.moves.urllib import parse from neutron.tests.tempest.common import service_client @@ -64,7 +65,7 @@ class RegionClientJSON(service_client.ServiceClient): """List regions.""" url = 'regions' if params: - url += '?%s' % urllib.urlencode(params) + url += '?%s' % parse.urlencode(params) resp, body = self.get(url) self.expected_success(200, resp.status) body = json.loads(body) diff --git a/neutron/tests/tempest/services/network/json/network_client.py b/neutron/tests/tempest/services/network/json/network_client.py index 82271f71aa9..54f264c82f1 100644 --- a/neutron/tests/tempest/services/network/json/network_client.py +++ b/neutron/tests/tempest/services/network/json/network_client.py @@ -12,8 +12,8 @@ import json import time -import urllib +from six.moves.urllib import parse from tempest_lib.common.utils import misc from tempest_lib import exceptions as lib_exc @@ -98,7 +98,7 @@ class NetworkClientJSON(service_client.ServiceClient): def _list(**filters): uri = self.get_uri(plural_name) if filters: - uri += '?' + urllib.urlencode(filters, doseq=1) + uri += '?' + parse.urlencode(filters, doseq=1) resp, body = self.get(uri) result = {plural_name: self.deserialize_list(body)} self.expected_success(200, resp.status) @@ -124,7 +124,7 @@ class NetworkClientJSON(service_client.ServiceClient): plural = self.pluralize(resource_name) uri = '%s/%s' % (self.get_uri(plural), resource_id) if fields: - uri += '?' + urllib.urlencode(fields, doseq=1) + uri += '?' 
+ parse.urlencode(fields, doseq=1) resp, body = self.get(uri) body = self.deserialize_single(body) self.expected_success(200, resp.status) diff --git a/neutron/tests/tempest/test.py b/neutron/tests/tempest/test.py index 4bb196038ac..d95174bd886 100644 --- a/neutron/tests/tempest/test.py +++ b/neutron/tests/tempest/test.py @@ -20,13 +20,13 @@ import os import re import sys import time -import urllib import uuid import fixtures from oslo_log import log as logging from oslo_utils import importutils import six +from six.moves.urllib import parse import testscenarios import testtools @@ -590,7 +590,7 @@ class NegativeAutoTest(BaseTestCase): if not json_dict: return url, None elif method in ["GET", "HEAD", "PUT", "DELETE"]: - return "%s?%s" % (url, urllib.urlencode(json_dict)), None + return "%s?%s" % (url, parse.urlencode(json_dict)), None else: return url, json.dumps(json_dict) diff --git a/tox.ini b/tox.ini index 12f2b91fbf3..e9716d7f049 100644 --- a/tox.ini +++ b/tox.ini @@ -117,6 +117,7 @@ commands = python -m testtools.run \ neutron.tests.unit.plugins.oneconvergence.test_nvsd_agent \ neutron.tests.unit.plugins.oneconvergence.test_plugin_helper \ neutron.tests.unit.plugins.oneconvergence.test_nvsdlib \ + neutron.tests.unit.plugins.ibm.test_sdnve_agent \ neutron.tests.unit.plugins.ibm.test_sdnve_api \ neutron.tests.unit.plugins.ml2.test_db \ neutron.tests.unit.plugins.ml2.test_driver_context \ @@ -131,6 +132,7 @@ commands = python -m testtools.run \ neutron.tests.unit.plugins.ml2.drivers.mech_fake_agent \ neutron.tests.unit.plugins.ml2.drivers.test_type_vxlan \ neutron.tests.unit.plugins.ml2.drivers.test_type_gre \ + neutron.tests.unit.plugins.ml2.drivers.test_helpers \ neutron.tests.unit.plugins.ml2.drivers.arista.test_mechanism_arista \ neutron.tests.unit.plugins.ml2.drivers.test_type_local \ neutron.tests.unit.plugins.ml2.drivers.mechanism_logger \ @@ -138,12 +140,14 @@ commands = python -m testtools.run \ neutron.tests.unit.plugins.ml2.drivers.cisco.apic.base \ 
neutron.tests.unit.plugins.ml2.drivers.cisco.apic.test_apic_topology \ neutron.tests.unit.plugins.ml2.drivers.test_type_flat \ + neutron.tests.unit.plugins.ml2.drivers.test_type_vlan \ neutron.tests.unit.plugins.ml2.drivers.mechanism_test \ neutron.tests.unit.plugins.ml2.drivers.l2pop.rpc_manager.l2population_rpc_base \ neutron.tests.unit.plugins.ml2.extensions.fake_extension \ neutron.tests.unit.plugins.ml2.drivers.l2pop.rpc_manager.test_l2population_rpc \ neutron.tests.unit.plugins.cisco.n1kv.fake_client \ neutron.tests.unit.plugins.cisco.test_network_db \ + neutron.tests.unit.scheduler.test_dhcp_agent_scheduler \ neutron.tests.unit.db.test_l3_dvr_db \ neutron.tests.unit.db.test_migration \ neutron.tests.unit.db.test_agents_db \ @@ -177,8 +181,10 @@ commands = python -m testtools.run \ neutron.tests.unit.agent.linux.test_ipset_manager \ neutron.tests.unit.agent.linux.test_iptables_firewall \ neutron.tests.unit.agent.linux.test_ebtables_manager \ + neutron.tests.unit.agent.linux.test_iptables_firewall \ neutron.tests.unit.agent.linux.test_ebtables_driver \ neutron.tests.unit.agent.linux.test_polling \ + neutron.tests.unit.agent.linux.test_ip_lib \ neutron.tests.unit.agent.linux.test_ip_monitor \ neutron.tests.unit.agent.linux.test_iptables_manager \ neutron.tests.unit.agent.linux.test_external_process \ From 23b5806932cf0c890a8ba665148abeb5dce53755 Mon Sep 17 00:00:00 2001 From: Ihar Hrachyshka Date: Thu, 2 Jul 2015 18:32:42 +0300 Subject: [PATCH 36/54] devref: document API status for neutron.openstack.common.* Make sure we document the fact that neutron.openstack.common.* contents are not meant to be used by external repositories (except, temporarily, *aas repos). If I could bootstrap the oslo-incubator subtree from scratch, I would put it under neutron._openstack, to indicate that it's for internal usage only. But we can't do it now, so instead I update devref. 
Change-Id: I42252a7b0a07759c57995b2fc1f8d20ecba7d33b
---
 doc/source/devref/index.rst       |  1 +
 doc/source/devref/neutron_api.rst | 20 ++++++++++++++++++++
 2 files changed, 21 insertions(+)
 create mode 100644 doc/source/devref/neutron_api.rst

diff --git a/doc/source/devref/index.rst b/doc/source/devref/index.rst
index d2b263baa5f..d54d442697d 100644
--- a/doc/source/devref/index.rst
+++ b/doc/source/devref/index.rst
@@ -32,6 +32,7 @@ Programming HowTos and Tutorials

    development.environment
    contribute
+   neutron_api
    sub_projects

diff --git a/doc/source/devref/neutron_api.rst b/doc/source/devref/neutron_api.rst
new file mode 100644
index 00000000000..92e116d250a
--- /dev/null
+++ b/doc/source/devref/neutron_api.rst
@@ -0,0 +1,20 @@
+Neutron public API
+==================
+
+Neutron main tree serves as a library for multiple subprojects that rely on
+different modules from neutron.* namespace to accommodate their needs.
+Specifically, advanced service repositories and open source or vendor
+plugin/driver repositories do it.
+
+Neutron modules differ in their API stability a lot, and there is no part of it
+that is explicitly marked to be consumed by other projects.
+
+That said, there are modules that other projects should definitely avoid relying on.
+
+Specifically, no external repository should use anything located under
+neutron.openstack.common.* import path. This code belongs to oslo-incubator
+modules and is not meant to work for consumers other than neutron main tree
+itself. (The only exception is made for advanced service repositories that are
+tightly controlled by neutron community.) Long story short, if your repository
+uses those modules, please switch to corresponding oslo libraries or use your
+own copy of oslo-incubator files.
From d06990b8a548a63df5e50e9e75b59a5bbe0ba5b0 Mon Sep 17 00:00:00 2001
From: Ihar Hrachyshka
Date: Thu, 2 Jul 2015 18:42:07 +0300
Subject: [PATCH 37/54] Start documenting potential API breakages in devref:neutron_api

Change-Id: I2ceb9e347ea0687e93b766d58601cd86561d1e2b
---
 doc/source/devref/neutron_api.rst | 15 +++++++++++++++
 1 file changed, 15 insertions(+)

diff --git a/doc/source/devref/neutron_api.rst b/doc/source/devref/neutron_api.rst
index 92e116d250a..6479b6d8b79 100644
--- a/doc/source/devref/neutron_api.rst
+++ b/doc/source/devref/neutron_api.rst
@@ -18,3 +18,18 @@ itself. (The only exception is made for advanced service repositories that are
 tightly controlled by neutron community.) Long story short, if your repository
 uses those modules, please switch to corresponding oslo libraries or use your
 own copy of oslo-incubator files.
+
+
+Breakages
+---------
+
+Neutron API is not very stable, and there are cases when a desired change in
+neutron tree is expected to trigger breakage for one or more external
+repositories under the neutron tent. Below you can find a list of known
+incompatible changes that could or are known to trigger those breakages.
+
+* change: oslo.service adopted.
+
+  - commit: 6e693fc91dd79cfbf181e3b015a1816d985ad02c
+  - solution: switch to using oslo_service.* namespace; stop using ANY neutron.openstack.* contents.
+  - severity: low (plugins must not rely on that subtree).
Callers of add / delete are updated to pass keyword arguments for table and priority since they are no longer required positional arguments. This looks better anyway. Change-Id: Ia93b086b787c34bd560961cb84e4a003cf359e7e Partially-Implements: blueprint address-scopes --- neutron/agent/l3/dvr_local_router.py | 17 ++++-- neutron/agent/linux/ip_lib.py | 61 +++++++++++++------ .../unit/agent/l3/test_dvr_local_router.py | 6 +- neutron/tests/unit/agent/linux/test_ip_lib.py | 27 ++++++-- 4 files changed, 81 insertions(+), 30 deletions(-) diff --git a/neutron/agent/l3/dvr_local_router.py b/neutron/agent/l3/dvr_local_router.py index ead84e4e7bb..e306c7c91f0 100755 --- a/neutron/agent/l3/dvr_local_router.py +++ b/neutron/agent/l3/dvr_local_router.py @@ -84,7 +84,9 @@ class DvrLocalRouter(router.RouterInfo): self.floating_ips_dict[floating_ip] = rule_pr fip_2_rtr_name = self.fip_ns.get_int_device_name(self.router_id) ip_rule = ip_lib.IPRule(namespace=self.ns_name) - ip_rule.rule.add(fixed_ip, dvr_fip_ns.FIP_RT_TBL, rule_pr) + ip_rule.rule.add(ip=fixed_ip, + table=dvr_fip_ns.FIP_RT_TBL, + priority=rule_pr) #Add routing rule in fip namespace fip_ns_name = self.fip_ns.get_name() rtr_2_fip, _ = self.rtr_fip_subnet.get_pair() @@ -114,7 +116,9 @@ class DvrLocalRouter(router.RouterInfo): if floating_ip in self.floating_ips_dict: rule_pr = self.floating_ips_dict[floating_ip] ip_rule = ip_lib.IPRule(namespace=self.ns_name) - ip_rule.rule.delete(floating_ip, dvr_fip_ns.FIP_RT_TBL, rule_pr) + ip_rule.rule.delete(ip=floating_ip, + table=dvr_fip_ns.FIP_RT_TBL, + priority=rule_pr) self.fip_ns.deallocate_rule_priority(rule_pr) #TODO(rajeev): Handle else case - exception/log? 
@@ -258,7 +262,9 @@ class DvrLocalRouter(router.RouterInfo): if is_add: ns_ipd.route.add_gateway(gw_ip_addr, table=snat_idx) - ns_ipr.rule.add(sn_port_cidr, snat_idx, snat_idx) + ns_ipr.rule.add(ip=sn_port_cidr, + table=snat_idx, + priority=snat_idx) ns_ipwrapr.netns.execute( ['sysctl', '-w', 'net.ipv4.conf.%s.send_redirects=0' % sn_int]) @@ -266,8 +272,9 @@ class DvrLocalRouter(router.RouterInfo): self._delete_gateway_device_if_exists(ns_ipd, gw_ip_addr, snat_idx) - ns_ipr.rule.delete(sn_port_cidr, snat_idx, - snat_idx) + ns_ipr.rule.delete(ip=sn_port_cidr, + table=snat_idx, + priority=snat_idx) break except Exception: if is_add: diff --git a/neutron/agent/linux/ip_lib.py b/neutron/agent/linux/ip_lib.py index 890444a01f9..edb35d5a7d7 100644 --- a/neutron/agent/linux/ip_lib.py +++ b/neutron/agent/linux/ip_lib.py @@ -22,6 +22,7 @@ from oslo_utils import excutils import re from neutron.agent.common import utils +from neutron.common import constants from neutron.common import exceptions from neutron.i18n import _LE @@ -281,30 +282,56 @@ class IPRule(SubProcessBase): class IpRuleCommand(IpCommandBase): COMMAND = 'rule' - def _exists(self, ip, ip_version, table, rule_pr): - # Typical rule from 'ip rule show': + ALL = {4: constants.IPv4_ANY, 6: constants.IPv6_ANY} + + def _parse_line(self, ip_version, line): + # Typical rules from 'ip rule show': # 4030201: from 1.2.3.4/24 lookup 10203040 + # 1024: from all iif qg-c43b1928-48 lookup noscope - rule_pr = str(rule_pr) + ":" - for line in self._as_root([ip_version], ['show']).splitlines(): - parts = line.split() - if parts and (parts[0] == rule_pr and - parts[2] == str(ip) and - parts[-1] == str(table)): - return True + parts = line.split() + if not parts: + return {} - return False + # Format of line is: "priority: ..." 
+ settings = {k: v for k, v in zip(parts[1::2], parts[2::2])} + settings['priority'] = parts[0][:-1] - def add(self, ip, table, rule_pr): + # Canonicalize some arguments + if settings.get('from') == "all": + settings['from'] = self.ALL[ip_version] + if 'lookup' in settings: + settings['table'] = settings.pop('lookup') + + return settings + + def _exists(self, ip_version, **kwargs): + kwargs_strings = {k: str(v) for k, v in kwargs.items()} + lines = self._as_root([ip_version], ['show']).splitlines() + return kwargs_strings in (self._parse_line(ip_version, line) + for line in lines) + + def _make__flat_args_tuple(self, *args, **kwargs): + for kwargs_item in sorted(kwargs.items(), key=lambda i: i[0]): + args += kwargs_item + return tuple(args) + + def add(self, ip, **kwargs): ip_version = get_ip_version(ip) - if not self._exists(ip, ip_version, table, rule_pr): - args = ['add', 'from', ip, 'table', table, 'priority', rule_pr] - self._as_root([ip_version], tuple(args)) - def delete(self, ip, table, rule_pr): + kwargs.update({'from': ip}) + + if not self._exists(ip_version, **kwargs): + args_tuple = self._make__flat_args_tuple('add', **kwargs) + self._as_root([ip_version], args_tuple) + + def delete(self, ip, **kwargs): ip_version = get_ip_version(ip) - args = ['del', 'table', table, 'priority', rule_pr] - self._as_root([ip_version], tuple(args)) + + # TODO(Carl) ip ignored in delete, okay in general? 
+ + args_tuple = self._make__flat_args_tuple('del', **kwargs) + self._as_root([ip_version], args_tuple) class IpDeviceCommandBase(IpCommandBase): diff --git a/neutron/tests/unit/agent/l3/test_dvr_local_router.py b/neutron/tests/unit/agent/l3/test_dvr_local_router.py index 51e89802f95..bec9168afbe 100644 --- a/neutron/tests/unit/agent/l3/test_dvr_local_router.py +++ b/neutron/tests/unit/agent/l3/test_dvr_local_router.py @@ -199,7 +199,9 @@ class TestDvrRouterOperations(base.BaseTestCase): ri.dist_fip_count = 0 ip_cidr = common_utils.ip_to_cidr(fip['floating_ip_address']) ri.floating_ip_added_dist(fip, ip_cidr) - mIPRule().rule.add.assert_called_with('192.168.0.1', 16, FIP_PRI) + mIPRule().rule.add.assert_called_with(ip='192.168.0.1', + table=16, + priority=FIP_PRI) self.assertEqual(1, ri.dist_fip_count) # TODO(mrsmith): add more asserts @@ -233,7 +235,7 @@ class TestDvrRouterOperations(base.BaseTestCase): ri.rtr_fip_subnet = s ri.floating_ip_removed_dist(fip_cidr) mIPRule().rule.delete.assert_called_with( - str(netaddr.IPNetwork(fip_cidr).ip), 16, FIP_PRI) + ip=str(netaddr.IPNetwork(fip_cidr).ip), table=16, priority=FIP_PRI) mIPDevice().route.delete_route.assert_called_with(fip_cidr, str(s.ip)) self.assertFalse(ri.fip_ns.unsubscribe.called) diff --git a/neutron/tests/unit/agent/linux/test_ip_lib.py b/neutron/tests/unit/agent/linux/test_ip_lib.py index 42c3befa3c9..f28232cdf4c 100644 --- a/neutron/tests/unit/agent/linux/test_ip_lib.py +++ b/neutron/tests/unit/agent/linux/test_ip_lib.py @@ -551,23 +551,38 @@ class TestIpRuleCommand(TestIPCmdBase): def _test_add_rule(self, ip, table, priority): ip_version = netaddr.IPNetwork(ip).version - self.rule_cmd.add(ip, table, priority) + self.rule_cmd.add(ip, table=table, priority=priority) self._assert_sudo([ip_version], (['show'])) self._assert_sudo([ip_version], ('add', 'from', ip, - 'table', table, 'priority', priority)) + 'priority', priority, 'table', table)) def _test_add_rule_exists(self, ip, table, priority, output): 
self.parent._as_root.return_value = output ip_version = netaddr.IPNetwork(ip).version - self.rule_cmd.add(ip, table, priority) + self.rule_cmd.add(ip, table=table, priority=priority) self._assert_sudo([ip_version], (['show'])) def _test_delete_rule(self, ip, table, priority): ip_version = netaddr.IPNetwork(ip).version - self.rule_cmd.delete(ip, table, priority) + self.rule_cmd.delete(ip, table=table, priority=priority) self._assert_sudo([ip_version], - ('del', 'table', table, - 'priority', priority)) + ('del', 'priority', priority, + 'table', table)) + + def test__parse_line(self): + def test(ip_version, line, expected): + actual = self.rule_cmd._parse_line(ip_version, line) + self.assertEqual(expected, actual) + + test(4, "4030201:\tfrom 1.2.3.4/24 lookup 10203040", + {'from': '1.2.3.4/24', + 'table': '10203040', + 'priority': '4030201'}) + test(6, "1024: from all iif qg-c43b1928-48 lookup noscope", + {'priority': '1024', + 'from': '::/0', + 'iif': 'qg-c43b1928-48', + 'table': 'noscope'}) def test_add_rule_v4(self): self._test_add_rule('192.168.45.100', 2, 100) From 9aaa2befdece5036fb8a6c3bdee6290d3658745d Mon Sep 17 00:00:00 2001 From: armando-migliaccio Date: Wed, 1 Jul 2015 19:46:16 -0700 Subject: [PATCH 39/54] Fall back on empty path if prefix is missing A missing entry causes a KeyError that leads the server to blow up during startup. We can fallback on an empty path (like some services do), in case the prefix is not specified. Furthermore, we can be declarative with this property, the same way we are with properties like aliases, bulk support, etc. 
Change-Id: I58a9b90a39d434f4808264aeb6f9ee5aceff7fbd --- neutron/api/v2/resource_helper.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/neutron/api/v2/resource_helper.py b/neutron/api/v2/resource_helper.py index 9d952901b27..61090638b91 100644 --- a/neutron/api/v2/resource_helper.py +++ b/neutron/api/v2/resource_helper.py @@ -82,10 +82,12 @@ def build_resource_info(plural_mappings, resource_map, which_service, allow_bulk=allow_bulk, allow_pagination=cfg.CONF.allow_pagination, allow_sorting=cfg.CONF.allow_sorting) + path_prefix = getattr(plugin, "path_prefix", + constants.COMMON_PREFIXES.get(which_service, "")) resource = extensions.ResourceExtension( collection_name, controller, - path_prefix=constants.COMMON_PREFIXES[which_service], + path_prefix=path_prefix, member_actions=member_actions, attr_map=params) resources.append(resource) From 59ae35ba8fa6f4b79a1370c32faaa1ae4fce3f37 Mon Sep 17 00:00:00 2001 From: armando-migliaccio Date: Thu, 2 Jul 2015 12:06:05 -0700 Subject: [PATCH 40/54] COMMON_PREFIXES cleanup - patch 1/5 This dictionary does not belong to the plugins directory as it captures API business, but practically speaking it does not even deserve to exist and can be removed altogether. This is patch one in a series that aims at addressing this monkey business. 
Change-Id: I95cd71dfc35e266f6f3cc5715ab8a0deb10058e7 --- neutron/plugins/common/constants.py | 4 ---- neutron/services/metering/metering_plugin.py | 1 + neutron/tests/unit/db/metering/test_metering_db.py | 2 +- neutron/tests/unit/dummy_plugin.py | 1 + .../tests/unit/services/metering/test_metering_plugin.py | 6 +++--- 5 files changed, 6 insertions(+), 8 deletions(-) diff --git a/neutron/plugins/common/constants.py b/neutron/plugins/common/constants.py index 809a1399e85..401b01dd093 100644 --- a/neutron/plugins/common/constants.py +++ b/neutron/plugins/common/constants.py @@ -35,14 +35,10 @@ EXT_TO_SERVICE_MAPPING = { } COMMON_PREFIXES = { - CORE: "", - DUMMY: "/dummy_svc", LOADBALANCER: "/lb", LOADBALANCERV2: "/lbaas", FIREWALL: "/fw", VPN: "/vpn", - METERING: "/metering", - L3_ROUTER_NAT: "", } # Service operation status constants diff --git a/neutron/services/metering/metering_plugin.py b/neutron/services/metering/metering_plugin.py index 5af10361559..57789b78475 100644 --- a/neutron/services/metering/metering_plugin.py +++ b/neutron/services/metering/metering_plugin.py @@ -22,6 +22,7 @@ from neutron.db.metering import metering_rpc class MeteringPlugin(metering_db.MeteringDbMixin): """Implementation of the Neutron Metering Service Plugin.""" supported_extension_aliases = ["metering"] + path_prefix = "/metering" def __init__(self): super(MeteringPlugin, self).__init__() diff --git a/neutron/tests/unit/db/metering/test_metering_db.py b/neutron/tests/unit/db/metering/test_metering_db.py index c9e185163bc..f2db17f6e1b 100644 --- a/neutron/tests/unit/db/metering/test_metering_db.py +++ b/neutron/tests/unit/db/metering/test_metering_db.py @@ -114,7 +114,7 @@ class MeteringPluginDbTestCase( fmt = 'json' resource_prefix_map = dict( - (k.replace('_', '-'), constants.COMMON_PREFIXES[constants.METERING]) + (k.replace('_', '-'), "/metering") for k in metering.RESOURCE_ATTRIBUTE_MAP.keys() ) diff --git a/neutron/tests/unit/dummy_plugin.py b/neutron/tests/unit/dummy_plugin.py 
index ed0b739440c..41a37231935 100644 --- a/neutron/tests/unit/dummy_plugin.py +++ b/neutron/tests/unit/dummy_plugin.py @@ -88,6 +88,7 @@ class DummyServicePlugin(service_base.ServicePluginBase): """ supported_extension_aliases = ['dummy', servicetype.EXT_ALIAS] + path_prefix = "/dummy_svc" agent_notifiers = {'dummy': 'dummy_agent_notifier'} def __init__(self): diff --git a/neutron/tests/unit/services/metering/test_metering_plugin.py b/neutron/tests/unit/services/metering/test_metering_plugin.py index 508117395c8..b2443bf3857 100644 --- a/neutron/tests/unit/services/metering/test_metering_plugin.py +++ b/neutron/tests/unit/services/metering/test_metering_plugin.py @@ -62,7 +62,7 @@ class TestMeteringPlugin(test_db_base_plugin_v2.NeutronDbPluginV2TestCase, test_metering_db.MeteringPluginDbTestCaseMixin): resource_prefix_map = dict( - (k.replace('_', '-'), constants.COMMON_PREFIXES[constants.METERING]) + (k.replace('_', '-'), "/metering") for k in ext_metering.RESOURCE_ATTRIBUTE_MAP.keys() ) @@ -281,7 +281,7 @@ class TestMeteringPluginL3AgentScheduler( test_metering_db.MeteringPluginDbTestCaseMixin): resource_prefix_map = dict( - (k.replace('_', '-'), constants.COMMON_PREFIXES[constants.METERING]) + (k.replace('_', '-'), "/metering") for k in ext_metering.RESOURCE_ATTRIBUTE_MAP.keys() ) @@ -404,7 +404,7 @@ class TestMeteringPluginRpcFromL3Agent( test_metering_db.MeteringPluginDbTestCaseMixin): resource_prefix_map = dict( - (k.replace('_', '-'), constants.COMMON_PREFIXES[constants.METERING]) + (k.replace('_', '-'), "/metering") for k in ext_metering.RESOURCE_ATTRIBUTE_MAP ) From 26f50761efaa5bc362e35a41f0adc458e0224296 Mon Sep 17 00:00:00 2001 From: Kevin Benton Date: Fri, 26 Jun 2015 10:00:42 -0700 Subject: [PATCH 41/54] Update DVR agent to use get_vifs_by_id The new get_vifs_by_id function retrieves all of the VIFs for a port iteration at once to eliminate unnecessary multiple calls to OVSDB. 
Change-Id: If18557faead836121bfa3b4e6efccd0318ce72d3 Related-Bug: #1460233 --- .../openvswitch/agent/ovs_dvr_neutron_agent.py | 4 +++- .../openvswitch/agent/test_ovs_neutron_agent.py | 17 ++++++++++------- 2 files changed, 13 insertions(+), 8 deletions(-) diff --git a/neutron/plugins/ml2/drivers/openvswitch/agent/ovs_dvr_neutron_agent.py b/neutron/plugins/ml2/drivers/openvswitch/agent/ovs_dvr_neutron_agent.py index 94d0e8b2082..905c8a8e9e8 100644 --- a/neutron/plugins/ml2/drivers/openvswitch/agent/ovs_dvr_neutron_agent.py +++ b/neutron/plugins/ml2/drivers/openvswitch/agent/ovs_dvr_neutron_agent.py @@ -400,8 +400,10 @@ class OVSDVRNeutronAgent(object): LOG.debug("DVR: List of ports received from " "get_ports_on_host_by_subnet %s", local_compute_ports) + vif_by_id = self.int_br.get_vifs_by_ids( + [prt['id'] for prt in local_compute_ports]) for prt in local_compute_ports: - vif = self.int_br.get_vif_port_by_id(prt['id']) + vif = vif_by_id.get(prt['id']) if not vif: continue ldm.add_compute_ofport(vif.vif_id, vif.ofport) diff --git a/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/test_ovs_neutron_agent.py b/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/test_ovs_neutron_agent.py index 9b4cf011038..3d42adea658 100644 --- a/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/test_ovs_neutron_agent.py +++ b/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/test_ovs_neutron_agent.py @@ -1327,12 +1327,15 @@ class TestOvsDvrNeutronAgent(object): 'ip_address': '1.1.1.3'}] @staticmethod - def _expected_port_bound(port, lvid): - return [ + def _expected_port_bound(port, lvid, is_dvr=True): + resp = [ mock.call.db_get_val('Port', port.port_name, 'other_config'), mock.call.set_db_attribute('Port', port.port_name, 'other_config', mock.ANY), ] + if is_dvr: + resp = [mock.call.get_vifs_by_ids([])] + resp + return resp def _expected_install_dvr_process(self, lvid, port, ip_version, gateway_ip, gateway_mac): @@ -1441,7 +1444,7 @@ class 
TestOvsDvrNeutronAgent(object): dst_port=self._compute_port.ofport, vlan_tag=segmentation_id, ), - ] + self._expected_port_bound(self._compute_port, lvid) + ] + self._expected_port_bound(self._compute_port, lvid, False) self.assertEqual(expected_on_int_br, int_br.mock_calls) self.assertFalse([], tun_br.mock_calls) self.assertFalse([], phys_br.mock_calls) @@ -1523,7 +1526,7 @@ class TestOvsDvrNeutronAgent(object): dst_port=self._compute_port.ofport, vlan_tag=lvid, ), - ] + self._expected_port_bound(self._compute_port, lvid) + ] + self._expected_port_bound(self._compute_port, lvid, False) self.assertEqual(expected_on_int_br, int_br.mock_calls) self.assertEqual([], tun_br.mock_calls) self.assertEqual([], phys_br.mock_calls) @@ -1594,7 +1597,7 @@ class TestOvsDvrNeutronAgent(object): dst_port=self._port.ofport, vlan_tag=lvid, ), - ] + self._expected_port_bound(self._port, lvid) + ] + self._expected_port_bound(self._port, lvid, is_dvr=False) self.assertEqual(expected_on_int_br, int_br.mock_calls) expected_on_tun_br = [ mock.call.provision_local_vlan( @@ -1756,7 +1759,7 @@ class TestOvsDvrNeutronAgent(object): dst_port=self._compute_port.ofport, vlan_tag=lvid, ), - ] + self._expected_port_bound(self._compute_port, lvid), + ] + self._expected_port_bound(self._compute_port, lvid, False), int_br.mock_calls) self.assertEqual([], tun_br.mock_calls) @@ -1835,7 +1838,7 @@ class TestOvsDvrNeutronAgent(object): dst_port=self._port.ofport, vlan_tag=lvid, ), - ] + self._expected_port_bound(self._port, lvid) + ] + self._expected_port_bound(self._port, lvid, is_dvr=False) self.assertEqual(expected_on_int_br, int_br.mock_calls) expected_on_tun_br = [ mock.call.provision_local_vlan( From a76090161fba69329389d4b8e3389f4797293ba9 Mon Sep 17 00:00:00 2001 From: Cyril Roelandt Date: Wed, 1 Jul 2015 22:29:12 +0000 Subject: [PATCH 42/54] Python 3: do not index dict_keys objects This cannot be done in Python 3, where dict.keys() returns an iterator. 
We need to cast the result of dict.keys() to a list first. Change-Id: I28986aefb720b4513e3eee9ba0909f79d1dc9695 Blueprint: neutron-python3 --- neutron/api/rpc/agentnotifiers/dhcp_rpc_agent_api.py | 2 +- .../ml2/drivers/openvswitch/agent/test_ovs_neutron_agent.py | 3 ++- tox.ini | 5 +++++ 3 files changed, 8 insertions(+), 2 deletions(-) diff --git a/neutron/api/rpc/agentnotifiers/dhcp_rpc_agent_api.py b/neutron/api/rpc/agentnotifiers/dhcp_rpc_agent_api.py index fb741baffaf..16e95140ead 100644 --- a/neutron/api/rpc/agentnotifiers/dhcp_rpc_agent_api.py +++ b/neutron/api/rpc/agentnotifiers/dhcp_rpc_agent_api.py @@ -168,7 +168,7 @@ class DhcpAgentNotifyAPI(object): # data is {'key' : 'value'} with only one key if method_name not in self.VALID_METHOD_NAMES: return - obj_type = data.keys()[0] + obj_type = list(data.keys())[0] if obj_type not in self.VALID_RESOURCES: return obj_value = data[obj_type] diff --git a/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/test_ovs_neutron_agent.py b/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/test_ovs_neutron_agent.py index 9b4cf011038..28bf4de693f 100644 --- a/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/test_ovs_neutron_agent.py +++ b/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/test_ovs_neutron_agent.py @@ -1885,7 +1885,8 @@ class TestOvsDvrNeutronAgent(object): 'mac_address': '11:22:33:44:55:66'}]): self.agent.dvr_agent.setup_dvr_flows_on_integ_br() self.assertTrue(self.agent.dvr_agent.in_distributed_mode()) - physical_networks = self.agent.dvr_agent.bridge_mappings.keys() + physical_networks = list( + self.agent.dvr_agent.bridge_mappings.keys()) ioport = self.agent.dvr_agent.int_ofports[physical_networks[0]] expected_on_int_br = [ # setup_dvr_flows_on_integ_br diff --git a/tox.ini b/tox.ini index e9716d7f049..19dbef45776 100644 --- a/tox.ini +++ b/tox.ini @@ -145,6 +145,7 @@ commands = python -m testtools.run \ 
neutron.tests.unit.plugins.ml2.drivers.l2pop.rpc_manager.l2population_rpc_base \ neutron.tests.unit.plugins.ml2.extensions.fake_extension \ neutron.tests.unit.plugins.ml2.drivers.l2pop.rpc_manager.test_l2population_rpc \ + neutron.tests.unit.plugins.cisco.n1kv.test_n1kv_db \ neutron.tests.unit.plugins.cisco.n1kv.fake_client \ neutron.tests.unit.plugins.cisco.test_network_db \ neutron.tests.unit.scheduler.test_dhcp_agent_scheduler \ @@ -166,6 +167,7 @@ commands = python -m testtools.run \ neutron.tests.unit.api.rpc.agentnotifiers.test_dhcp_rpc_agent_api \ neutron.tests.unit.agent.metadata.test_driver \ neutron.tests.unit.agent.test_rpc \ + neutron.tests.unit.agent.test_securitygroups_rpc \ neutron.tests.unit.agent.l3.test_link_local_allocator \ neutron.tests.unit.agent.l3.test_ha_router \ neutron.tests.unit.agent.l3.test_legacy_router \ @@ -194,10 +196,13 @@ commands = python -m testtools.run \ neutron.tests.unit.agent.linux.test_interface \ neutron.tests.unit.test_auth \ neutron.tests.unit.extensions.v2attributes \ + neutron.tests.unit.extensions.test_l3_ext_gw_mode \ + neutron.tests.unit.extensions.test_extra_dhcp_opt \ neutron.tests.unit.extensions.extendedattribute \ neutron.tests.unit.extensions.base \ neutron.tests.unit.extensions.foxinsocks \ neutron.tests.unit.extensions.extensionattribute \ + neutron.tests.unit.extensions.test_portsecurity \ neutron.tests.unit.callbacks.test_manager \ neutron.tests.unit.hacking.test_checks \ neutron.tests.unit.common.test_config \ From efa1f16706c9d44c654be411e9bf0c1c8f670801 Mon Sep 17 00:00:00 2001 From: YAMAMOTO Takashi Date: Thu, 2 Jul 2015 17:33:24 +0900 Subject: [PATCH 43/54] portsecurity_db_common: Access db columns in a consistent way While db columns and api attribute happen to have same name here, it's still better to distinguish them. 
Change-Id: I6d6e649925a41d89fd74ca5e64290737c9baed9a --- neutron/db/portsecurity_db_common.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/neutron/db/portsecurity_db_common.py b/neutron/db/portsecurity_db_common.py index 3fad11152b5..e348f81b19c 100644 --- a/neutron/db/portsecurity_db_common.py +++ b/neutron/db/portsecurity_db_common.py @@ -83,7 +83,7 @@ class PortSecurityDbCommon(object): NetworkSecurityBinding.network_id == network_id).one() except exc.NoResultFound: raise psec.PortSecurityBindingNotFound() - return binding[psec.PORTSECURITY] + return binding.port_security_enabled def _get_port_security_binding(self, context, port_id): try: @@ -92,7 +92,7 @@ class PortSecurityDbCommon(object): PortSecurityBinding.port_id == port_id).one() except exc.NoResultFound: raise psec.PortSecurityBindingNotFound() - return binding[psec.PORTSECURITY] + return binding.port_security_enabled def _process_port_port_security_update( self, context, port_req, port_res): @@ -130,10 +130,10 @@ class PortSecurityDbCommon(object): def _make_network_port_security_dict(self, port_security, fields=None): res = {'network_id': port_security['network_id'], - psec.PORTSECURITY: port_security[psec.PORTSECURITY]} + psec.PORTSECURITY: port_security.port_security_enabled} return self._fields(res, fields) def _make_port_security_dict(self, port, fields=None): res = {'port_id': port['port_id'], - psec.PORTSECURITY: port[psec.PORTSECURITY]} + psec.PORTSECURITY: port.port_security_enabled} return self._fields(res, fields) From d269657089e93e304a33dcbc35b7c4abc6e9900d Mon Sep 17 00:00:00 2001 From: Cyril Roelandt Date: Fri, 3 Jul 2015 15:58:03 +0000 Subject: [PATCH 44/54] Python3: do not add dict_values objects In Python 3, dict.values returns a dict_values object instead of a list. 
Change-Id: I83bc7718ac9bbb64187fefae57ce835fbe225829 Blueprint: neutron-python3 --- neutron/tests/unit/db/test_db_base_plugin_v2.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/neutron/tests/unit/db/test_db_base_plugin_v2.py b/neutron/tests/unit/db/test_db_base_plugin_v2.py index adcfcb7ebf2..6fef2cbbd8f 100644 --- a/neutron/tests/unit/db/test_db_base_plugin_v2.py +++ b/neutron/tests/unit/db/test_db_base_plugin_v2.py @@ -4110,8 +4110,10 @@ class TestSubnetsV2(NeutronDbPluginV2TestCase): self.fmt) res = self.deserialize(self.fmt, req.get_response(self.api)) self.assertEqual(len(res['subnet']['allocation_pools']), 2) - res_vals = res['subnet']['allocation_pools'][0].values() +\ - res['subnet']['allocation_pools'][1].values() + res_vals = ( + list(res['subnet']['allocation_pools'][0].values()) + + list(res['subnet']['allocation_pools'][1].values()) + ) for pool_val in ['10', '20', '30', '40']: self.assertTrue('192.168.0.%s' % (pool_val) in res_vals) if with_gateway_ip: From b510dd5c2e4eb6c33be1e047e00991ce51d6aec0 Mon Sep 17 00:00:00 2001 From: Henry Gessau Date: Mon, 1 Jun 2015 13:52:18 -0400 Subject: [PATCH 45/54] Devref for out-of-tree plugin/driver contribution Change-Id: I6198acce97409e0e87520a31f2749b62d607e9c1 --- doc/source/devref/contribute.rst | 784 ++++++++++++++++++------------- 1 file changed, 467 insertions(+), 317 deletions(-) diff --git a/doc/source/devref/contribute.rst b/doc/source/devref/contribute.rst index b24366685f6..a39d011a30a 100644 --- a/doc/source/devref/contribute.rst +++ b/doc/source/devref/contribute.rst @@ -1,6 +1,19 @@ Contributing new extensions to Neutron ====================================== +**NOTE!** +--------- + +**Third-party plugins/drivers which do not start decomposition in Liberty will +be marked as deprecated, and they will be removed before the Mxxx-3 +milestone.** + +Read on for details ... 
+ + +Introduction +------------ + Neutron has a pluggable architecture, with a number of extension points. This documentation covers aspects relevant to contributing new Neutron v2 core (aka monolithic) plugins, ML2 mechanism drivers, and L3 service @@ -16,22 +29,44 @@ within the OpenStack Networking project. If you are a developer who wants to provide a Neutron-based solution without interacting with the Neutron community, you are free to do so, but you can stop reading now, as this guide is not for you. -In fact, from the Kilo release onwards, the Neutron core team propose that -additions to the codebase adopt a structure where the *monolithic plugins*, -*ML2 MechanismDrivers*, and *L3 service plugins* are integration-only -(called "vendor integration" hereinafter) to code that lives outside the -tree (called "vendor library" hereinafter); the same applies for any -vendor-specific agents. The only part that is to stay in the tree is the -agent 'main' (a small python file that imports agent code from the vendor -library and starts it). 'Outside the tree' can be anything that is publicly -available: it may be a stackforge repo for instance, a tarball, a pypi package, -etc. A plugin/drivers maintainer team self-governs in order to promote sharing, -reuse, innovation, and release of the 'out-of-tree' deliverable. It should not -be required for any member of the core team to be involved with this process, + +Plugins and drivers for non-reference implementations are known as +"third-party" code. This includes code for supporting vendor products, as well +as code for supporting open-source networking implementations. + +Before the Kilo release these plugins and drivers were included in the Neutron +tree. During the Kilo cycle the third-party plugins and drivers underwent the +first phase of a process called decomposition. 
During this phase, each plugin +and driver moved the bulk of its logic to a separate git repository, while +leaving a thin "shim" in the neutron tree together with the DB models and +migrations (and perhaps some config examples). + +During the Liberty cycle the decomposition concept was taken to its conclusion +by allowing third-party code to exist entirely out of tree. Further extension +mechanisms have been provided to better support external plugins and drivers +that alter the API and/or the data model. + +In the Mxxx cycle we will **require** all third-party code to be moved out of +the neutron tree completely. + +'Outside the tree' can be anything that is publicly available: it may be a repo +on git.openstack.org for instance, a tarball, a pypi package, etc. A +plugin/drivers maintainer team self-governs in order to promote sharing, reuse, +innovation, and release of the 'out-of-tree' deliverable. It should not be +required for any member of the core team to be involved with this process, although core members of the Neutron team can participate in whichever capacity is deemed necessary to facilitate out-of-tree development. -Below, the following strategies will be documented: +This guide is aimed at you as the maintainer of code that integrates with +Neutron but resides in a separate repository. + + +Contribution Process +-------------------- + +If you want to extend OpenStack Networking with your technology, and you want +to do it within the visibility of the OpenStack project, follow the guidelines +and examples below. We'll describe best practices for: * Design and Development; * Testing and Continuous Integration; @@ -40,105 +75,61 @@ Below, the following strategies will be documented: * DevStack Integration; * Documentation; -This document will then provide a working example on how to contribute -new additions to Neutron. +Once you have everything in place you may want to add your project to the list +of Neutron sub-projects. 
Submit a patch via a gerrit review to neutron to add +your project to ``doc/source/devref/sub_projects.rst``. -Blueprint Spec Submission Strategy + +Design and Development +---------------------- + +Assuming you have a working repository, any development to your own repo does +not need any blueprint, specification or bugs against Neutron. However, if your +project is a part of the Neutron Stadium effort, you are expected to +participate in the principles of the Four Opens, meaning your design should be +done in the open. Thus, it is encouraged to file documentation for changes in +your own repository. + +If your code is hosted on git.openstack.org then the gerrit review system is +automatically provided. Contributors should follow the review guidelines +similar to those of Neutron. However, you as the maintainer have the +flexibility to choose who can approve/merge changes in your own repo. + +It is recommended (but not required, see `policies +`_) +that you set up a third-party CI system. This will provide a vehicle for +checking the third-party code against Neutron changes. See `Testing and +Continuous Integration`_ below for more detailed recommendations. + +Design documents can still be supplied in form of Restructured Text (RST) +documents, within the same third-party library repo. If changes to the common +Neutron code are required, an `RFE +`_ +may need to be filed. However every case is different and you are invited to +seek guidance from Neutron core reviewers about what steps to follow. + + +Testing and Continuous Integration ---------------------------------- -Provided contributors adhere to the abovementioned development footprint -they should not be required to follow the spec process for changes that -only affect their vendor integration and library. New contributions can -simply be submitted for code review, with the proviso that adequate -documentation and 3rd CI party is supplied at the time of the code -submission. 
For tracking purposes, the review itself can be tagged -with a Launchpad bug report. The bug should be marked as wishlist to -avoid complicating tracking of Neutron's primary deliverables. Design -documents can still be supplied in form of RST documents, within the same -vendor library repo. If substantial change to the common Neutron code are -required, a spec that targets common Neutron code will be required, however -every case is different and a contributor is invited to seek guidance from -the Neutron core team as to what steps to follow, and whether a spec or -a bug report is more suited for what a contributor needs to deliver. +The following strategies are recommendations only, since third-party CI testing +is not an enforced requirement. However, these strategies are employed by the +majority of the plugin/driver contributors that actively participate in the +Neutron development community, since they have learned from experience how +quickly their code can fall out of sync with the rapidly changing Neutron core +code base. -Once again, for submitting the integration module to the Neutron codebase, -no spec is required. +* You should run unit tests in your own external library (e.g. on + git.openstack.org where Jenkins setup is for free). -Development Strategy --------------------- +* Your third-party CI should validate third-party integration with Neutron via + functional testing. The third-party CI is a communication mechanism. 
The + objective of this mechanism is as follows: -* The following elements are suggested to be contributed in the tree - for plugins and drivers (called vendor integration hereinafter): - - * Data models - * Extension definitions - * Configuration files - * Requirements file targeting vendor code - -* Things that do not remain in the tree (called vendor library hereinafter): - - * Vendor specific logic - * Associated unit tests - -The idea here would be to provide in-tree the plugin/driver code that -implements an API, but have it delegate to out-of-tree code for -backend-specific interactions. The vendor integration will then typically -involve minor passthrough/parsing of parameters, minor handling of DB objects -as well as handling of responses, whereas the vendor library will do the -heavylifting and implement the vendor-specific logic. The boundary between -the in-tree layer and the out-of-tree one should be defined by the contributor -while asking these types of questions: - - * If something changes in my backend, do I need to alter the integration - layer drastically? Clearly, the less impact there is, the better the - separation being achieved. - * If I expose vendor details (e.g. protocols, auth, etc.), can I easily swap - and replace the targeted backend (e.g. hardware with a newer version - being supplied) without affecting the integration too much? Clearly, the - more reusable the integration the better the separation. - -As mentioned above, the vendor code *must* be available publicly, and a git -repository makes the most sense. By doing so, the module itself can be made -accessible using a pip requirements file. This file should not be confused -with the Neutron requirements file that lists all common dependencies. Instead -it should be a file 'requirements.txt' that is located in neutron/plugins/pluginXXX/, -whose content is something along the lines of 'my_plugin_xxx_library>=X.Y.Z'. 
-Vendors are responsible for ensuring that their library does not depend on -libraries conflicting with global requirements, but it could depend on -libraries not included in the global requirements. Just as in Neutron's -main requirements.txt, it will be possible to pin the version of the vendor -library. - -For instance, a vendor integration module can become as simple as one that -performs only the following: - -* Registering config options -* Registering the plugin class -* Registering the models -* Registering the extensions - -Testing Strategy ----------------- - -The testing process will be as follow: - -* No unit tests for the vendor integration of plugins and drivers are deemed - necessary. The expectation is that contributors would run unit test in their - own external library (e.g. in stackforge where Jenkins setup is for free). - For unit tests that validate the vendor library, it is the responsibility of - the vendor to choose what CI system they see fit to run them. There is no - need or requirement to use OpenStack CI resources if they do not want to. - Having said that, it may be useful to provide coverage for the shim layer in - the form of basic validation as done in `ODL `_ and `LBaaS A10 driver `_. - -* 3rd Party CI will continue to validate vendor integration with Neutron via - functional testing. 3rd Party CI is a communication mechanism. This objective - of this mechanism is as follows: - - * it communicates to plugin/driver contributors when someone has contributed - a change that is potentially breaking. It is then up to a given - contributor maintaining the affected plugin to determine whether the - failure is transient or real, and resolve the problem if it is. + * it communicates to you when someone has contributed a change that + potentially breaks your code. It is then up to you maintaining the affected + plugin/driver to determine whether the failure is transient or real, and + resolve the problem if it is. 
* it communicates to a patch author that they may be breaking a plugin/driver. If they have the time/energy/relationship with the maintainer of the plugin/driver in question, then they can (at their discretion) work to @@ -146,69 +137,104 @@ The testing process will be as follow: * it communicates to the community at large whether a given plugin/driver is being actively maintained. * A maintainer that is perceived to be responsive to failures in their - 3rd party CI jobs is likely to generate community goodwill. + third-party CI jobs is likely to generate community goodwill. - It is worth noting that if the vendor library is hosted on StackForge, due to - current openstack-infra limitations, it is not possible to have 3rd party CI systems - participating in the gate pipeline for the StackForge repo. This means that the only - validation provided during the merge process to the StackForge repo is through unit - tests. Post-merge hooks can still be exploited to provide 3rd party CI feedback, and - alert the contributor/reviewer of potential issues. As mentioned above, 3rd party CI - systems will continue to validate Neutron core commits. This will allow them to - detect when incompatible changes occur, whether they are in Neutron or in the vendor - library repo. + It is worth noting that if the plugin/driver repository is hosted on + git.openstack.org, due to current openstack-infra limitations, it is not + possible to have third-party CI systems participating in the gate pipeline + for the repo. This means that the only validation provided during the merge + process to the repo is through unit tests. Post-merge hooks can still be + exploited to provide third-party CI feedback, and alert you of potential + issues. As mentioned above, third-party CI systems will continue to validate + Neutron core commits. This will allow them to detect when incompatible + changes occur, whether they are in Neutron or in the third-party repo. 
-Review and Defect Management Strategies ---------------------------------------- -The usual process applies to the code that is part of OpenStack Neutron. More -precisely: +Defect Management +----------------- + +Bugs affecting third-party code should *not* be filed in the Neutron project on +launchpad. Bug tracking can be done in any system you choose, but by creating a +third-party project in launchpad, bugs that affect both Neutron and your code +can be more easily tracked using launchpad's "also affects project" feature. + +Security Issues +~~~~~~~~~~~~~~~ + +Here are some answers to how to handle security issues in your repo, taken +from `this openstack-dev mailing list message +`_: + +- How should security your issues be managed? + +The OpenStack Vulnerability Management Team (VMT) follows a `documented process +`_ which can basically be +reused by any project-team when needed. + +- Should the OpenStack security team be involved? + +The OpenStack VMT directly oversees vulnerability reporting and disclosure for +a `subset of OpenStack source code repositories +`_. However they +are still quite happy to answer any questions you might have about +vulnerability management for your own projects even if they're not part of that +set. Feel free to reach out to the VMT in public or in private. + +Also, the VMT is an autonomous subgroup of the much larger `OpenStack Security +project-team +`_. They're a +knowledgeable bunch and quite responsive if you want to get their opinions or +help with security-related issues (vulnerabilities or otherwise). + +- Does a CVE need to be filed? + +It can vary widely. If a commercial distribution such as Red Hat is +redistributing a vulnerable version of your software then they may assign one +anyway even if you don't request one yourself. Or the reporter may request one; +the reporter may even be affiliated with an organization who has already +assigned/obtained a CVE before they initiate contact with you. 
+ +- Do the maintainers need to publish OSSN or equivalent documents? + +OpenStack Security Advisories (OSSA) are official publications of the OpenStack +VMT and only cover VMT-supported software. OpenStack Security Notes (OSSN) are +published by editors within the OpenStack Security project-team on more general +security topics and may even cover issues in non-OpenStack software commonly +used in conjunction with OpenStack, so it's at their discretion as to whether +they would be able to accommodate a particular issue with an OSSN. + +However, these are all fairly arbitrary labels, and what really matters in the +grand scheme of things is that vulnerabilities are handled seriously, fixed +with due urgency and care, and announced widely -- not just on relevant +OpenStack mailing lists but also preferably somewhere with broader distribution +like the `Open Source Security mailing list +`_. The goal +is to get information on your vulnerabilities, mitigating measures and fixes +into the hands of the people using your software in a timely manner. + +- Anything else to consider here? + +The OpenStack VMT is in the process of trying to reinvent itself so that it can +better scale within the context of the "Big Tent." This includes making sure +the policy/process documentation is more consumable and reusable even by +project-teams working on software outside the scope of our charter. It's a work +in progress, and any input is welcome on how we can make this function well for +everyone. -* Bugs that affect vendor code can be filed against the Neutron integration, - if the integration code is at fault. Otherwise, the code maintainer may - decide to fix a bug without oversight, and update their requirements file - to target a new version of their vendor library. It makes sense to - require 3rd party CI for a given plugin/driver to pass when changing their - dependency before merging to any branch (i.e. both master and stable branches). 
-* Vendor specific code should follow the same review guidelines as any other - code in the tree. However, the maintainer has flexibility to choose who - can approve/merge changes in this repo. Backport Management Strategies ------------------------------ -As outlined in the `Spec proposal `_ -all new plugins and drivers will have to follow the contribution model -described here. As for existing plugins and drivers, no in-tree features can -be merged until some progress has been done to make the solution adhere to -this model. That said, there is the question of critical fixes and/or backports -to `stable branches `_. The possible -scenarios are: +This section applies only to third-party maintainers who had code in the +Neutron tree during the Kilo and earlier releases. It will be obsolete once the +Kilo release is no longer supported. + +If a change made to out-of-tree third-party code needs to be back-ported to +in-tree code in a stable branch, you may submit a review without a +corresponding master branch change. The change will be evaluated by core +reviewers for stable branches to ensure that the backport is justified and that +it does not affect Neutron core code stability. -* The decomposition just completed, we are in the cycle (X) where the decomposition - initiated: in this case, the Neutron master branch no longer have the vendor - library code, but the stable branch still does. Backports via straight - cherry-picks may not be possible, or as easy, therefore a custom backport to - stable could be deemed acceptable to Neutron's stable branches (e.g. stable/X-1 - and/or stable/X-2), as required. -* The decomposition is complete, we are in the next cycle where the - decomposition work completed (X+1): backports will be done to the stable branch - available of the vendor library (stable/X), and Neutron's stable branch - (stable/X-1), as outlined in the previous step. 
-* The decomposition is complete, we are in two or more cycles after the - decomposition work completed (X+2, or later). Backports will be done to the - stable branch(s) available of the vendor library (stable/X, stable/X+1). -* The decomposition is in progress: as long as the vendor code is still in - master, patches will need to go to master before a backport to stable. - Acceptance will be determined on the scope of changes (based on both the - amount of work and severity of the issue). In this case, the plugin or - driver maintainer will need to ensure that the fix gets applied to the - external repo, if necessary (to avoid missing it during the migration process). -* The decomposition has not started: in this case, depending on the issue, - review attention from core members is best effort, and although there is no - explicit rule to prevent them from merging to master, it is in the best interest - of the maintainer to avoid introducing or modifying existing code that will - ultimately be deprecated. DevStack Integration Strategies ------------------------------- @@ -221,79 +247,34 @@ make sense depending on whether you are contributing a new or existing plugin or driver. If you are contributing a new plugin, the approach to choose should be based on -`Extras.d Hooks' externally hosted plugins `_. -With the extra.d hooks, the DevStack integration is colocated with the vendor integration -library, and it leads to the greatest level of flexibility when dealing with DevStack based -dev/test deployments. +`Extras.d Hooks' externally hosted plugins +`_. +With the extra.d hooks, the DevStack integration is co-located with the +third-party integration library, and it leads to the greatest level of +flexibility when dealing with DevStack based dev/test deployments. -Having said that, most Neutron plugins developed in the past likely already have -integration with DevStack in the form of `neutron_plugins `_. 
-If the plugin is being decomposed in vendor integration plus vendor library, it would -be necessary to adjust the instructions provided in the neutron_plugin file to pull the -vendor library code as a new dependency. For instance, the instructions below: +One final consideration is worth making for third-party CI setups: if `Devstack +Gate `_ is used, +it does provide hook functions that can be executed at specific times of the +devstack-gate-wrap script run. For example, the `Neutron Functional job +`_ +uses them. For more details see `devstack-vm-gate-wrap.sh +`_. - :: - INSTALL_FROM_REQUIREMENTS=$(trueorfalse True INSTALL_FROM_REQUIREMENTS) +Project Initial Setup +--------------------- - if [[ "$INSTALL_FROM_REQUIREMENTS" == "False" ]]; then - git_clone $NEUTRON_LIB_REPO $NEUTRON_LIB_DIR $NEUTRON_LIB_BRANCH - setup_package $NEUTRON_LIB_DIR - else - # Retrieve the package from the vendor library's requirements.txt - plugin_package=$(cat $NEUTRON_LIB_REQUIREMENTS_FILE) - pip_install "$plugin_package" - fi - -could be placed in 'neutron_plugin_configure_service', ahead of the service -configuration. An alternative could be under the `third_party section -`_, -if available. This solution can be similarly exploited for both monolithic -plugins or ML2 mechanism drivers. The configuration of the plugin or driver itself can be -done by leveraging the extensibility mechanisms provided by `local.conf `_. In fact, since the .ini file for the vendor plugin or driver lives -in the Neutron tree, it is possible to do add the section below to local.conf: - - :: - - [[post-config|$THE_FILE_YOU_NEED_TO_CUSTOMIZE]] - - # Override your section config as you see fit - [DEFAULT] - verbose=True - -Which in turn it is going to edit the file with the options outlined in the post-config -section. 
- -The above mentioned approach, albeit valid, has the shortcoming of depending on DevStack's -explicit support for the plugin installation and configuration, and the plugin maintainer -is strongly encouraged to revise the existing DevStack integration, in order to evolve it -in an extras.d hooks based approach. - -One final consideration is worth making for 3rd party CI setups: if `Devstack Gate -`_ is used, it does provide hook -functions that can be executed at specific times of the devstack-gate-wrap script run. -For example, the `Neutron Functional job `_ uses them. For more details see `devstack-vm-gate-wrap.sh `_. - -Documentation Strategies ------------------------- - -It is the duty of the new contributor to provide working links that can be -referenced from the OpenStack upstream documentation. -#TODO(armax): provide more info, when available. - -How-to ------- - -The how-to below assumes that the vendor library will be hosted on StackForge. -Stackforge lets you tap in the entire OpenStack CI infrastructure and can be -a great place to start from to contribute your new or existing driver/plugin. -The list of steps below are somewhat the tl;dr; version of what you can find -on http://docs.openstack.org/infra/manual/creators.html. They are meant to +The how-to below assumes that the third-party library will be hosted on +git.openstack.org. This lets you tap in the entire OpenStack CI infrastructure +and can be a great place to start from to contribute your new or existing +driver/plugin. The list of steps below are summarized version of what you can +find on http://docs.openstack.org/infra/manual/creators.html. They are meant to be the bare minimum you have to complete in order to get you off the ground. * Create a public repository: this can be a personal git.openstack.org repo or any publicly available git repo, e.g. ``https://github.com/john-doe/foo.git``. This - would be a temporary buffer to be used to feed the StackForge one. 
+ would be a temporary buffer to be used to feed the one on git.openstack.org. * Initialize the repository: if you are starting afresh, you may *optionally* want to use cookiecutter to get a skeleton project. You can learn how to use cookiecutter on https://git.openstack.org/cgit/openstack-dev/cookiecutter. @@ -301,104 +282,273 @@ be the bare minimum you have to complete in order to get you off the ground. want to skip this step now, build the history first (next step), and come back here to initialize the remainder of the repository with other files being generated by the cookiecutter (like tox.ini, setup.cfg, setup.py, etc.). -* Building the history: if you are contributing an existing driver/plugin, - you may want to preserve the existing history. If not, you can go to the - next step. To import the history from an existing project this is what - you need to do: - - * Clone a copy of the neutron repository to be manipulated. - * Go into the Neutron repo to be changed. - * Execute file split.sh, available in ./tools, and follow instructions. - - :: - - git clone https://git.openstack.org/openstack/neutron.git - cd neutron - ./tools/split.sh - # Sit and wait for a while, or grab a cup of your favorite drink - - At this point you will have the project pruned of everything else but - the files you want to export, with their history. The next steps are: - - * Check out stable branches for the project: even though stable branches - are not strictly necessary during the creation of the StackForge repository - (as outlined in the next step below), they do not hurt, and it is - recommended to keep them during the import process. - * Add a remote that points to the repository created before. - * (Optional) If the repository has already being initialized with - cookiecutter, you need to pull first; if not, you can either push - the existing commits/tags or apply and commit further changes to fix - up the structure of repo the way you see fit. 
- * Finally, push commits and tags to the public repository. If you followed - theses instructions step-by-step, you will have a source repository - that contains both a master and stable branches, as well as tags. Some - of these steps are outlined below: - - :: - - git remote add https://github.com/john-doe/foo.git - git pull foo master # OPTIONAL, if foo is non-empty - git push --all foo && git push --tags foo - -* Create a StackForge repository: for this you need the help of the OpenStack - infra team. It is worth noting that you only get one shot at creating the - StackForge repository. This is the time you get to choose whether you want - to start from a clean slate, or you want to import the repo created during - the previous step. In the latter case, you can do so by specifying the - upstream section for your project in project-config/gerrit/project.yaml. - Steps are documented on the - `Repository Creator's Guide `_. +* Create a repository on git.openstack.org (see `Official Sub-Projects + `_). For + this you need the help of the OpenStack infra team. It is worth noting that + you only get one shot at creating the repository on git.openstack.org. This + is the time you get to choose whether you want to start from a clean slate, + or you want to import the repo created during the previous step. In the + latter case, you can do so by specifying the upstream section for your + project in project-config/gerrit/project.yaml. Steps are documented on the + `Repository Creator's Guide + `_. * Ask for a Launchpad user to be assigned to the core team created. Steps are - documented in - `this section `_. -* Fix, fix, fix: at this point you have an external base to work on. You - can develop against the new stackforge project, the same way you work - with any other OpenStack project: you have pep8, docs, and python27 CI - jobs that validate your patches when posted to Gerrit. 
For instance, one - thing you would need to do is to define an entry point for your plugin - or driver in your own setup.cfg similarly as to how it is done - `here `_. + documented in `this section + `_. +* Fix, fix, fix: at this point you have an external base to work on. You can + develop against the new git.openstack.org project, the same way you work with + any other OpenStack project: you have pep8, docs, and python27 CI jobs that + validate your patches when posted to Gerrit. For instance, one thing you + would need to do is to define an entry point for your plugin or driver in + your own setup.cfg similarly as to how it is done in the `setup.cfg for ODL + `_. * Define an entry point for your plugin or driver in setup.cfg -* Create 3rd Party CI account: if you do not already have one, follow - instructions for - `3rd Party CI `_ to get one. -* TODO(armax): ... +* Create third-party CI account: if you do not already have one, follow + instructions for `third-party CI + `_ to get + one. -Decomposition progress chart -============================ +Integrating with the Neutron system +----------------------------------- -The chart below captures the progress of the core-vendor-decomposition effort -for existing plugins and drivers at the time the decomp effort started. New -drivers and plugins are not required to be listed here. This chart is short -lived: once the effort is complete, this chart no longer needs to exist and -will be removed. The following aspects are captured: +(This section currently describes the goals and progress of the completion of +the decomposition work during the Liberty development cycle. The content here +will be updated as the work progresses. In its final form this section will be +merged with the previous section. When all existing plugins/drivers are fully +decomposed, this document will be a recipe for how to add a new Neutron plugin +or driver completely out-of-tree.) 
-* Name: the name of the project that implements a Neutron plugin or driver. The - name is an internal target for links that point to source code, etc. -* Plugins/Drivers: whether the source code contains a core (aka monolithic) - plugin, a set of ML2 drivers, and/or (service) plugins (or extensions) for - firewall, vpn, and load balancers. -* Launchpad: whether the project is managed through Launchpad. -* PyPI: whether the project deliverables are available through PyPI. -* State: a code to represent the current state of the decomposition. Possible - values are: +For the Liberty cycle we aim to move all the existing third-party code out of +the Neutron tree. Each category of code and its removal plan is described +below. - * [A] External repo available, no code decomposition - * [B] External repo available, partial code decomposition - * [C] External repo available, code decomposition is complete - * [D] Not deemed required. Driver is already bare-bone and decomposition - effort is not considered justified. Assessment may change in the - future. - Absence of an entry for an existing plugin or driver means no active effort - has been observed or potentially not required. -* Completed in: the release in which the effort is considered completed. Code - completion can be deemed as such, if there is no overlap/duplication between - what exists in the Neutron tree, and what it exists in the vendor repo. 
+Existing Shims +~~~~~~~~~~~~~~ -+-------------------------------+-----------------------+-----------+------------------+---------+--------------+ -| Name | Plugins/Drivers | Launchpad | PyPI | State | Completed in | -+===============================+=======================+===========+==================+=========+==============+ -| freescale-nscs | ml2,fw | no | no | [D] | | -+-------------------------------+-----------------------+-----------+------------------+---------+--------------+ +Liberty Steps ++++++++++++++ + +The existing shims shall now be moved out of tree, together with any test +code. The entry points shall be moved as described below in `Entry Points`_. + + +Configuration Files +~~~~~~~~~~~~~~~~~~~ + +The ``data_files`` in the ``[files]`` section of ``setup.cfg`` of Neutron shall +not contain any third-party references. These shall be located in the same +section of the third-party repo's own ``setup.cfg`` file. + +* Note: Care should be taken when naming sections in configuration files. When + the Neutron service or an agent starts, oslo.config loads sections from all + specified config files. This means that if a section [foo] exists in multiple + config files, duplicate settings will collide. It is therefore recommended to + prefix section names with a third-party string, e.g. [vendor_foo]. + +Liberty Steps ++++++++++++++ + +Third-party configuration files still in the neutron tree have no dependencies +and can simply be moved. The maintainers should add their configuration file(s) +to their repo and then remove them from neutron. + +**ToDo: Inclusion in OpenStack documentation?** + Is there a recommended way to have third-party config options listed in the + configuration guide in docs.openstack.org? + + +Database Models and Migrations +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +A third-party repo may contain database models for its own tables. 
Although +these tables are in the Neutron database, they are independently managed +entirely within the third-party code. Third-party code shall **never** modify +neutron core tables in any way. + +Each repo has its own alembic migration branch that adds, removes and modifies +its own tables in the neutron database schema. + +* Note: Care should be taken when adding new tables. To prevent collision of + table names it is recommended to prefix them with a vendor/plugin string. + +* Note: A third-party maintainer may opt to use a separate database for their + tables. This may complicate cases where there are foreign key constraints + across schemas for DBMS that do not support this well. Third-party maintainer + discretion advised. + +The database tables owned by a third-party repo can have references to fields +in neutron core tables. However, the alembic branch for a plugin/driver repo +shall never update any part of a table that it does not own. + +**Note: What happens when a referenced item changes?** + +* **Q:** If a driver's table has a reference (for example a foreign key) to a + neutron core table, and the referenced item is changed in neutron, what + should you do? + +* **A:** Fortunately, this should be an extremely rare occurrence. Neutron core + reviewers will not allow such a change unless there is a very carefully + thought-out design decision behind it. That design will include how to + address any third-party code affected. (This is another good reason why you + should stay actively involved with the Neutron developer community.) + +The ``neutron-db-manage`` alembic wrapper script for neutron detects alembic +branches for installed third-party repos, and the upgrade command automatically +applies to all of them. A third-party repo must register its alembic migrations +at installation time. 
This is done by providing an entrypoint in setup.cfg as +follows: + +For a third-party repo named ``networking-foo``, add the alembic_migrations +directory as an entrypoint in the ``neutron.db.alembic_migrations`` group:: + + [entry_points] + neutron.db.alembic_migrations = + networking-foo = networking_foo.db.migration:alembic_migrations + +Liberty Steps ++++++++++++++ + +Each decomposed plugin/driver that has its own tables in the neutron database +should take these steps to move the models for the tables out of tree. + +#. Add the models to the external repo. +#. Create a start migration for the repo's alembic branch. Note: it is + recommended to keep the migration file(s) in the same location in the + third-party repo as is done in the neutron repo, + i.e. ``networking_foo/db/migration/alembic_migrations/versions/*.py`` +#. Remove the models from the neutron repo. +#. Add the names of the removed tables to ``DRIVER_TABLES`` in + ``neutron/db/migration/alembic_migrations/external.py`` (this is used for + testing, see below). + +**ToDo: neutron-db-manage autogenerate** + The alembic autogenerate command needs to support branches in external + repos. Bug #1471333 has been filed for this. + + +DB Model/Migration Testing +~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Here is a `template functional test +`_ (TODO:Ann) third-party +maintainers can use to develop tests for model-vs-migration sync in their +repos. It is recommended that each third-party CI sets up such a test, and runs +it regularly against Neutron master. + +Liberty Steps ++++++++++++++ + +The model_sync test will be updated to ignore the models that have been moved +out of tree. A ``DRIVER_TABLES`` list will be maintained in +``neutron/db/migration/alembic_migrations/external.py``. + + +Entry Points +~~~~~~~~~~~~ + +The `Python setuptools `_ installs all +entry points for packages in one global namespace for an environment. 
Thus each +third-party repo can define its package's own ``[entry_points]`` in its own +``setup.cfg`` file. + +For example, for the ``networking-foo`` repo:: + + [entry_points] + console_scripts = + neutron-foo-agent = networking_foo.cmd.eventlet.agents.foo:main + neutron.core_plugins = + foo_monolithic = networking_foo.plugins.monolithic.plugin:FooPluginV2 + neutron.service_plugins = + foo_l3 = networking_foo.services.l3_router.l3_foo:FooL3ServicePlugin + neutron.ml2.type_drivers = + foo_type = networking_foo.plugins.ml2.drivers.foo:FooType + neutron.ml2.mechanism_drivers = + foo_ml2 = networking_foo.plugins.ml2.drivers.foo:FooDriver + neutron.ml2.extension_drivers = + foo_ext = networking_foo.plugins.ml2.drivers.foo:FooExtensionDriver + +* Note: It is advisable to include ``foo`` in the names of these entry points to + avoid conflicts with other third-party packages that may get installed in the + same environment. + + +API Extensions +~~~~~~~~~~~~~~ + +Extensions can be loaded in two ways: + +#. Use the ``append_api_extensions_path()`` library API. This method is defined + in ``neutron/api/extensions.py`` in the neutron tree. +#. Leverage the ``api_extensions_path`` config variable when deploying. See the + example config file ``etc/neutron.conf`` in the neutron tree where this + variable is commented. + + +Interface Drivers +~~~~~~~~~~~~~~~~~ + +Interface (VIF) drivers for the reference implementations are defined in +``neutron/agent/linux/interface.py``. Third-party interface drivers shall be +defined in a similar location within their own repo. + +The entry point for the interface driver is a Neutron config option. It is up to +the installer to configure this item in the ``[default]`` section. For example:: + + [default] + interface_driver = networking_foo.agent.linux.interface.FooInterfaceDriver + +**ToDo: Interface Driver port bindings.** + These are currently defined by the ``VIF_TYPES`` in + ``neutron/extensions/portbindings.py``. 
We could make this config-driven + for agents. For Nova, selecting the VIF driver can be done outside of + Neutron (using the new `os-vif python library + `_?). Armando and Akihiro to discuss. + + +Rootwrap Filters +~~~~~~~~~~~~~~~~ + +If a third-party repo needs a rootwrap filter for a command that is not used by +Neutron core, then the filter shall be defined in the third-party repo. + +For example, to add a rootwrap filters for commands in repo ``networking-foo``: + +* In the repo, create the file: + ``etc/neutron/rootwrap.d/foo.filters`` + +* In the repo's ``setup.cfg`` add the filters to data_files:: + + [files] + data_files = + etc/neutron/rootwrap.d = + etc/neutron/rootwrap.d/foo.filters + + +Extending python-neutronclient +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The maintainer of a third-party component may wish to add extensions to the +Neutron CLI client. Thanks to https://review.openstack.org/148318 this can now +be accomplished. See `Client Command Extensions +`_. + + +Other repo-split items +~~~~~~~~~~~~~~~~~~~~~~ + +(These are still TBD.) + +* Splitting policy.json? **ToDo** Armando will investigate. + +* Generic instructions (or a template) for installing an out-of-tree plugin or + driver for Neutron. Possibly something for the networking guide, and/or a + template that plugin/driver maintainers can modify and include with their + package. + + +Decomposition Phase II Progress Chart +===================================== + +TBD. From f9e9de9f810f2752d295a379459b9a93aa01ee4d Mon Sep 17 00:00:00 2001 From: Carl Baldwin Date: Tue, 30 Jun 2015 20:22:46 +0000 Subject: [PATCH 46/54] Refactor init_l3 to separate router port use case Future work will extend init_l3 with more code specific to router ports. It makes sense to separate these out in to one basic method with basic L3 and another for router port specific logic. 
Change-Id: Iec9a46cd0490c4f48bb306083711ff0c5e70ba87 Partially-Implements: blueprint address-scopes --- neutron/agent/l3/router_info.py | 20 ++++++----- neutron/agent/linux/interface.py | 36 ++++++++++++++++--- neutron/tests/unit/agent/l3/test_agent.py | 19 +++++----- .../tests/unit/agent/linux/test_interface.py | 36 ++++++++++--------- 4 files changed, 71 insertions(+), 40 deletions(-) diff --git a/neutron/agent/l3/router_info.py b/neutron/agent/l3/router_info.py index f698a94d61c..978f2f8c8a3 100644 --- a/neutron/agent/l3/router_info.py +++ b/neutron/agent/l3/router_info.py @@ -291,7 +291,8 @@ class RouterInfo(object): prefix=prefix) ip_cidrs = common_utils.fixed_ip_cidrs(fixed_ips) - self.driver.init_l3(interface_name, ip_cidrs, namespace=ns_name) + self.driver.init_router_port( + interface_name, ip_cidrs, namespace=ns_name) for fixed_ip in fixed_ips: ip_lib.send_ip_addr_adv_notif(ns_name, interface_name, @@ -456,14 +457,15 @@ class RouterInfo(object): ip_cidrs = common_utils.fixed_ip_cidrs(ex_gw_port['fixed_ips']) gateway_ips, enable_ra_on_gw = self._get_external_gw_ips(ex_gw_port) - self.driver.init_l3(interface_name, - ip_cidrs, - namespace=ns_name, - gateway_ips=gateway_ips, - extra_subnets=ex_gw_port.get('extra_subnets', []), - preserve_ips=preserve_ips, - enable_ra_on_gw=enable_ra_on_gw, - clean_connections=True) + self.driver.init_router_port( + interface_name, + ip_cidrs, + namespace=ns_name, + gateway_ips=gateway_ips, + extra_subnets=ex_gw_port.get('extra_subnets', []), + preserve_ips=preserve_ips, + enable_ra_on_gw=enable_ra_on_gw, + clean_connections=True) for fixed_ip in ex_gw_port['fixed_ips']: ip_lib.send_ip_addr_adv_notif(ns_name, interface_name, diff --git a/neutron/agent/linux/interface.py b/neutron/agent/linux/interface.py index 470e8f34f25..cd7f9c6903d 100644 --- a/neutron/agent/linux/interface.py +++ b/neutron/agent/linux/interface.py @@ -78,14 +78,13 @@ class LinuxInterfaceDriver(object): self.conf = conf def init_l3(self, device_name, 
ip_cidrs, namespace=None, - preserve_ips=[], gateway_ips=None, extra_subnets=[], - enable_ra_on_gw=False, clean_connections=False): + preserve_ips=[], gateway_ips=None, + clean_connections=False): """Set the L3 settings for the interface using data from the port. ip_cidrs: list of 'X.X.X.X/YY' strings preserve_ips: list of ip cidrs that should not be removed from device gateway_ips: For gateway ports, list of external gateway ip addresses - enable_ra_on_gw: Boolean to indicate configuring acceptance of IPv6 RA clean_connections: Boolean to indicate if we should cleanup connections associated to removed ips """ @@ -123,10 +122,39 @@ class LinuxInterfaceDriver(object): for gateway_ip in gateway_ips or []: device.route.add_gateway(gateway_ip) + def init_router_port(self, + device_name, + ip_cidrs, + namespace, + preserve_ips=None, + gateway_ips=None, + extra_subnets=None, + enable_ra_on_gw=False, + clean_connections=False): + """Set the L3 settings for a router interface using data from the port. 
+ + ip_cidrs: list of 'X.X.X.X/YY' strings + preserve_ips: list of ip cidrs that should not be removed from device + gateway_ips: For gateway ports, list of external gateway ip addresses + enable_ra_on_gw: Boolean to indicate configuring acceptance of IPv6 RA + clean_connections: Boolean to indicate if we should cleanup connections + associated to removed ips + extra_subnets: An iterable of cidrs to add as routes without address + """ + self.init_l3(device_name=device_name, + ip_cidrs=ip_cidrs, + namespace=namespace, + preserve_ips=preserve_ips or [], + gateway_ips=gateway_ips, + clean_connections=clean_connections) + if enable_ra_on_gw: self.configure_ipv6_ra(namespace, device_name) - new_onlink_routes = set(s['cidr'] for s in extra_subnets) + device = ip_lib.IPDevice(device_name, namespace=namespace) + + # Manage on-link routes (routes without an associated address) + new_onlink_routes = set(s['cidr'] for s in extra_subnets or []) existing_onlink_routes = set( device.route.list_onlink_routes(n_const.IP_VERSION_4) + device.route.list_onlink_routes(n_const.IP_VERSION_6)) diff --git a/neutron/tests/unit/agent/l3/test_agent.py b/neutron/tests/unit/agent/l3/test_agent.py index 234e91cbe64..b683727fdb5 100644 --- a/neutron/tests/unit/agent/l3/test_agent.py +++ b/neutron/tests/unit/agent/l3/test_agent.py @@ -283,7 +283,7 @@ class TestBasicRouterOperations(BasicRouterOperationsFramework): self.device_exists.return_value = False ri.internal_network_added(port) self.assertEqual(self.mock_driver.plug.call_count, 1) - self.assertEqual(self.mock_driver.init_l3.call_count, 1) + self.assertEqual(self.mock_driver.init_router_port.call_count, 1) self.send_adv_notif.assert_called_once_with(ri.ns_name, interface_name, '99.0.1.9', mock.ANY) @@ -395,7 +395,7 @@ class TestBasicRouterOperations(BasicRouterOperationsFramework): ri.external_gateway_added(ex_gw_port, interface_name) if not router.get('distributed'): self.assertEqual(self.mock_driver.plug.call_count, 1) - 
self.assertEqual(self.mock_driver.init_l3.call_count, 1) + self.assertEqual(self.mock_driver.init_router_port.call_count, 1) if no_subnet and not dual_stack: self.assertEqual(self.send_adv_notif.call_count, 0) ip_cidrs = [] @@ -430,9 +430,8 @@ class TestBasicRouterOperations(BasicRouterOperationsFramework): 'extra_subnets': [{'cidr': '172.16.0.0/24'}], 'enable_ra_on_gw': enable_ra_on_gw, 'clean_connections': True} - self.mock_driver.init_l3.assert_called_with(interface_name, - ip_cidrs, - **kwargs) + self.mock_driver.init_router_port.assert_called_with( + interface_name, ip_cidrs, **kwargs) else: ri._create_dvr_gateway.assert_called_once_with( ex_gw_port, interface_name, @@ -551,7 +550,7 @@ class TestBasicRouterOperations(BasicRouterOperationsFramework): router[l3_constants.FLOATINGIP_KEY] = fake_fip['floatingips'] ri.external_gateway_updated(ex_gw_port, interface_name) self.assertEqual(1, self.mock_driver.plug.call_count) - self.assertEqual(self.mock_driver.init_l3.call_count, 1) + self.assertEqual(self.mock_driver.init_router_port.call_count, 1) exp_arp_calls = [mock.call(ri.ns_name, interface_name, '20.0.0.30', mock.ANY)] if dual_stack: @@ -570,9 +569,9 @@ class TestBasicRouterOperations(BasicRouterOperationsFramework): 'extra_subnets': [{'cidr': '172.16.0.0/24'}], 'enable_ra_on_gw': False, 'clean_connections': True} - self.mock_driver.init_l3.assert_called_with(interface_name, - ip_cidrs, - **kwargs) + self.mock_driver.init_router_port.assert_called_with(interface_name, + ip_cidrs, + **kwargs) def test_external_gateway_updated(self): self._test_external_gateway_updated() @@ -1967,7 +1966,7 @@ class TestBasicRouterOperations(BasicRouterOperationsFramework): # check 2 internal ports are plugged # check 1 ext-gw-port is plugged self.assertEqual(self.mock_driver.plug.call_count, 3) - self.assertEqual(self.mock_driver.init_l3.call_count, 3) + self.assertEqual(self.mock_driver.init_router_port.call_count, 3) def test_get_service_plugin_list(self): service_plugins = 
[p_const.L3_ROUTER_NAT] diff --git a/neutron/tests/unit/agent/linux/test_interface.py b/neutron/tests/unit/agent/linux/test_interface.py index 2d6eb286825..0fdf3d744f0 100644 --- a/neutron/tests/unit/agent/linux/test_interface.py +++ b/neutron/tests/unit/agent/linux/test_interface.py @@ -80,7 +80,7 @@ class TestABCDriver(TestBase): device_name = bc.get_device_name(FakePort()) self.assertEqual('tapabcdef01-12', device_name) - def test_l3_init(self): + def test_init_router_port(self): addresses = [dict(scope='global', dynamic=False, cidr='172.16.77.240/24')] self.ip_dev().addr.list = mock.Mock(return_value=addresses) @@ -88,18 +88,19 @@ class TestABCDriver(TestBase): bc = BaseChild(self.conf) ns = '12345678-1234-5678-90ab-ba0987654321' - bc.init_l3('tap0', ['192.168.1.2/24'], namespace=ns, - extra_subnets=[{'cidr': '172.20.0.0/24'}]) + bc.init_router_port('tap0', ['192.168.1.2/24'], namespace=ns, + extra_subnets=[{'cidr': '172.20.0.0/24'}]) self.ip_dev.assert_has_calls( [mock.call('tap0', namespace=ns), mock.call().addr.list(filters=['permanent']), mock.call().addr.add('192.168.1.2/24'), mock.call().addr.delete('172.16.77.240/24'), + mock.call('tap0', namespace=ns), mock.call().route.list_onlink_routes(constants.IP_VERSION_4), mock.call().route.list_onlink_routes(constants.IP_VERSION_6), mock.call().route.add_onlink_route('172.20.0.0/24')]) - def test_l3_init_delete_onlink_routes(self): + def test_init_router_port_delete_onlink_routes(self): addresses = [dict(scope='global', dynamic=False, cidr='172.16.77.240/24')] self.ip_dev().addr.list = mock.Mock(return_value=addresses) @@ -107,7 +108,7 @@ class TestABCDriver(TestBase): bc = BaseChild(self.conf) ns = '12345678-1234-5678-90ab-ba0987654321' - bc.init_l3('tap0', ['192.168.1.2/24'], namespace=ns) + bc.init_router_port('tap0', ['192.168.1.2/24'], namespace=ns) self.ip_dev.assert_has_calls( [mock.call().route.list_onlink_routes(constants.IP_VERSION_4), mock.call().route.list_onlink_routes(constants.IP_VERSION_6), @@ 
-152,7 +153,7 @@ class TestABCDriver(TestBase): def test_l3_init_without_clean_connections(self): self._test_l3_init_clean_connections(False) - def _test_l3_init_with_ipv6(self, include_gw_ip): + def _test_init_router_port_with_ipv6(self, include_gw_ip): addresses = [dict(scope='global', dynamic=False, cidr='2001:db8:a::123/64')] @@ -166,7 +167,7 @@ class TestABCDriver(TestBase): 'extra_subnets': [{'cidr': '2001:db8:b::/64'}]} if include_gw_ip: kwargs['gateway_ips'] = ['2001:db8:a::1'] - bc.init_l3('tap0', [new_cidr], **kwargs) + bc.init_router_port('tap0', [new_cidr], **kwargs) expected_calls = ( [mock.call('tap0', namespace=ns), mock.call().addr.list(filters=['permanent']), @@ -176,18 +177,19 @@ class TestABCDriver(TestBase): expected_calls += ( [mock.call().route.add_gateway('2001:db8:a::1')]) expected_calls += ( - [mock.call().route.list_onlink_routes(constants.IP_VERSION_4), + [mock.call('tap0', namespace=ns), + mock.call().route.list_onlink_routes(constants.IP_VERSION_4), mock.call().route.list_onlink_routes(constants.IP_VERSION_6), mock.call().route.add_onlink_route('2001:db8:b::/64')]) self.ip_dev.assert_has_calls(expected_calls) - def test_l3_init_ipv6_with_gw_ip(self): - self._test_l3_init_with_ipv6(include_gw_ip=True) + def test_init_router_port_ipv6_with_gw_ip(self): + self._test_init_router_port_with_ipv6(include_gw_ip=True) - def test_l3_init_ipv6_without_gw_ip(self): - self._test_l3_init_with_ipv6(include_gw_ip=False) + def test_init_router_port_ipv6_without_gw_ip(self): + self._test_init_router_port_with_ipv6(include_gw_ip=False) - def test_l3_init_ext_gw_with_dual_stack(self): + def test_init_router_port_ext_gw_with_dual_stack(self): old_addrs = [dict(ip_version=4, scope='global', dynamic=False, cidr='172.16.77.240/24'), dict(ip_version=6, scope='global', @@ -197,8 +199,8 @@ class TestABCDriver(TestBase): bc = BaseChild(self.conf) ns = '12345678-1234-5678-90ab-ba0987654321' new_cidrs = ['192.168.1.2/24', '2001:db8:a::124/64'] - bc.init_l3('tap0', 
new_cidrs, namespace=ns, - extra_subnets=[{'cidr': '172.20.0.0/24'}]) + bc.init_router_port('tap0', new_cidrs, namespace=ns, + extra_subnets=[{'cidr': '172.20.0.0/24'}]) self.ip_dev.assert_has_calls( [mock.call('tap0', namespace=ns), mock.call().addr.list(filters=['permanent']), @@ -211,7 +213,7 @@ class TestABCDriver(TestBase): mock.call().route.add_onlink_route('172.20.0.0/24')], any_order=True) - def test_l3_init_with_ipv6_delete_onlink_routes(self): + def test_init_router_port_with_ipv6_delete_onlink_routes(self): addresses = [dict(scope='global', dynamic=False, cidr='2001:db8:a::123/64')] route = '2001:db8:a::/64' @@ -220,7 +222,7 @@ class TestABCDriver(TestBase): bc = BaseChild(self.conf) ns = '12345678-1234-5678-90ab-ba0987654321' - bc.init_l3('tap0', ['2001:db8:a::124/64'], namespace=ns) + bc.init_router_port('tap0', ['2001:db8:a::124/64'], namespace=ns) self.ip_dev.assert_has_calls( [mock.call().route.list_onlink_routes(constants.IP_VERSION_4), mock.call().route.list_onlink_routes(constants.IP_VERSION_6), From 18bc67d56faef30a0f73429a5ee580e052858cb5 Mon Sep 17 00:00:00 2001 From: armando-migliaccio Date: Thu, 2 Jul 2015 12:56:24 -0700 Subject: [PATCH 47/54] COMMON_PREFIXES cleanup - patch 5/5 Get rid of COMMON_PREFIXES, as now the prefix is a service's declaritive property. Change-Id: I3d306131df94188f75e69edb13d262721d10bee5 Depends-on: I0450d0b2bf409d470a3a87bfd96518939759a84e Depends-on: Ia34695967cbbec0a1cf0884dad82e096de8539b8 Depends-on: Ib9517b772fe426eaf0809c439aa3ba0448c7abaa --- neutron/api/v2/resource_helper.py | 8 ++++++-- neutron/plugins/common/constants.py | 7 ------- 2 files changed, 6 insertions(+), 9 deletions(-) diff --git a/neutron/api/v2/resource_helper.py b/neutron/api/v2/resource_helper.py index 61090638b91..05e403d030d 100644 --- a/neutron/api/v2/resource_helper.py +++ b/neutron/api/v2/resource_helper.py @@ -14,6 +14,7 @@ # under the License. 
from oslo_config import cfg +from oslo_log import log as logging from neutron.api import extensions from neutron.api.v2 import base @@ -21,6 +22,8 @@ from neutron import manager from neutron.plugins.common import constants from neutron import quota +LOG = logging.getLogger(__name__) + def build_plural_mappings(special_mappings, resource_map): """Create plural to singular mapping for all resources. @@ -68,6 +71,9 @@ def build_resource_info(plural_mappings, resource_map, which_service, plugin = manager.NeutronManager.get_service_plugins()[which_service] else: plugin = manager.NeutronManager.get_plugin() + path_prefix = getattr(plugin, "path_prefix", "") + LOG.debug('Service %(service)s assigned prefix: %(prefix)s' + % {'service': which_service, 'prefix': path_prefix}) for collection_name in resource_map: resource_name = plural_mappings[collection_name] params = resource_map.get(collection_name, {}) @@ -82,8 +88,6 @@ def build_resource_info(plural_mappings, resource_map, which_service, allow_bulk=allow_bulk, allow_pagination=cfg.CONF.allow_pagination, allow_sorting=cfg.CONF.allow_sorting) - path_prefix = getattr(plugin, "path_prefix", - constants.COMMON_PREFIXES.get(which_service, "")) resource = extensions.ResourceExtension( collection_name, controller, diff --git a/neutron/plugins/common/constants.py b/neutron/plugins/common/constants.py index 401b01dd093..63947ae6fd1 100644 --- a/neutron/plugins/common/constants.py +++ b/neutron/plugins/common/constants.py @@ -34,13 +34,6 @@ EXT_TO_SERVICE_MAPPING = { 'router': L3_ROUTER_NAT } -COMMON_PREFIXES = { - LOADBALANCER: "/lb", - LOADBALANCERV2: "/lbaas", - FIREWALL: "/fw", - VPN: "/vpn", -} - # Service operation status constants ACTIVE = "ACTIVE" DOWN = "DOWN" From 04197bc4bbf2bc611371060db839028c2686f87a Mon Sep 17 00:00:00 2001 From: Kevin Benton Date: Mon, 29 Jun 2015 21:05:08 -0700 Subject: [PATCH 48/54] Add ARP spoofing protection for LinuxBridge agent This patch adds ARP spoofing protection for the Linux Bridge 
agent based on ebtables. This code was written to be minimally invasive with the intent of back-porting to Kilo. The protection is enabled and disabled with the same 'prevent_arp_spoofing' agent config flag added for the OVS agent in I7c079b779245a0af6bc793564fa8a560e4226afe. The protection works by setting up an ebtables chain for each port and jumping all ARP traffic to that chain. The port-specific chains have a default DROP policy and then have allow rules installed that only allow ARP traffic with a source CIDR that matches one of the port's fixed IPs or an allowed address pair. Closes-Bug: #1274034 Change-Id: I0b0e3b1272472385dff060897ecbd25e93fd78e7 --- .../drivers/linuxbridge/agent/arp_protect.py | 128 ++++++++++++++++++ .../linuxbridge/agent/common/config.py | 16 +++ .../agent/linuxbridge_neutron_agent.py | 13 ++ .../openvswitch/agent/common/config.py | 5 +- neutron/tests/common/machine_fixtures.py | 6 +- .../linux/test_linuxbridge_arp_protect.py | 100 ++++++++++++++ .../agent/test_linuxbridge_neutron_agent.py | 1 + 7 files changed, 264 insertions(+), 5 deletions(-) create mode 100644 neutron/plugins/ml2/drivers/linuxbridge/agent/arp_protect.py create mode 100644 neutron/tests/functional/agent/linux/test_linuxbridge_arp_protect.py diff --git a/neutron/plugins/ml2/drivers/linuxbridge/agent/arp_protect.py b/neutron/plugins/ml2/drivers/linuxbridge/agent/arp_protect.py new file mode 100644 index 00000000000..10fcae52a63 --- /dev/null +++ b/neutron/plugins/ml2/drivers/linuxbridge/agent/arp_protect.py @@ -0,0 +1,128 @@ +# Copyright (c) 2015 Mirantis, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import netaddr +from oslo_concurrency import lockutils +from oslo_log import log as logging + +from neutron.agent.linux import ip_lib +from neutron.i18n import _LI + +LOG = logging.getLogger(__name__) +SPOOF_CHAIN_PREFIX = 'neutronARP-' + + +def setup_arp_spoofing_protection(vif, port_details): + current_rules = ebtables(['-L']).splitlines() + if not port_details.get('port_security_enabled', True): + # clear any previous entries related to this port + delete_arp_spoofing_protection([vif], current_rules) + LOG.info(_LI("Skipping ARP spoofing rules for port '%s' because " + "it has port security disabled"), vif) + return + # collect all of the addresses and cidrs that belong to the port + addresses = {f['ip_address'] for f in port_details['fixed_ips']} + if port_details.get('allowed_address_pairs'): + addresses |= {p['ip_address'] + for p in port_details['allowed_address_pairs']} + + addresses = {ip for ip in addresses + if netaddr.IPNetwork(ip).version == 4} + if any(netaddr.IPNetwork(ip).prefixlen == 0 for ip in addresses): + # don't try to install protection because a /0 prefix allows any + # address anyway and the ARP_SPA can only match on /1 or more. 
+        return
+
+    install_arp_spoofing_protection(vif, addresses, current_rules)
+
+
+def chain_name(vif):
+    # start each chain with a common identifier for cleanup to find
+    return '%s%s' % (SPOOF_CHAIN_PREFIX, vif)
+
+
+@lockutils.synchronized('ebtables')
+def delete_arp_spoofing_protection(vifs, current_rules=None):
+    if not current_rules:
+        current_rules = ebtables(['-L']).splitlines()
+    # delete the jump rule and then delete the whole chain
+    jumps = [vif for vif in vifs if vif_jump_present(vif, current_rules)]
+    for vif in jumps:
+        ebtables(['-D', 'FORWARD', '-i', vif, '-j',
+                  chain_name(vif), '-p', 'ARP'])
+    for vif in vifs:
+        if chain_exists(chain_name(vif), current_rules):
+            ebtables(['-X', chain_name(vif)])
+
+
+def delete_unreferenced_arp_protection(current_vifs):
+    # deletes all jump rules and chains that aren't in current_vifs but match
+    # the spoof prefix
+    output = ebtables(['-L']).splitlines()
+    to_delete = []
+    for line in output:
+        # we're looking to find and turn the following:
+        # Bridge chain: SPOOF_CHAIN_PREFIXtap199, entries: 0, policy: DROP
+        # into 'tap199'
+        if line.startswith('Bridge chain: %s' % SPOOF_CHAIN_PREFIX):
+            devname = line.split(SPOOF_CHAIN_PREFIX, 1)[1].split(',')[0]
+            if devname not in current_vifs:
+                to_delete.append(devname)
+    LOG.info(_LI("Clearing orphaned ARP spoofing entries for devices %s"),
+             to_delete)
+    delete_arp_spoofing_protection(to_delete, output)
+
+
+@lockutils.synchronized('ebtables')
+def install_arp_spoofing_protection(vif, addresses, current_rules):
+    # make a VIF-specific ARP chain so we don't conflict with other rules
+    vif_chain = chain_name(vif)
+    if not chain_exists(vif_chain, current_rules):
+        ebtables(['-N', vif_chain, '-P', 'DROP'])
+    # flush the chain to clear previous accepts. this will cause dropped ARP
+    # packets until the allows are installed, but that's better than leaked
+    # spoofed packets and ARP can handle losses.
+ ebtables(['-F', vif_chain]) + for addr in addresses: + ebtables(['-A', vif_chain, '-p', 'ARP', '--arp-ip-src', addr, + '-j', 'ACCEPT']) + # check if jump rule already exists, if not, install it + if not vif_jump_present(vif, current_rules): + ebtables(['-A', 'FORWARD', '-i', vif, '-j', + vif_chain, '-p', 'ARP']) + + +def chain_exists(chain, current_rules): + for rule in current_rules: + if rule.startswith('Bridge chain: %s' % chain): + return True + return False + + +def vif_jump_present(vif, current_rules): + searches = (('-i %s' % vif), ('-j %s' % chain_name(vif)), ('-p ARP')) + for line in current_rules: + if all(s in line for s in searches): + return True + return False + + +# Used to scope ebtables commands in testing +NAMESPACE = None + + +def ebtables(comm): + execute = ip_lib.IPWrapper(NAMESPACE).netns.execute + return execute(['ebtables'] + comm, run_as_root=True) diff --git a/neutron/plugins/ml2/drivers/linuxbridge/agent/common/config.py b/neutron/plugins/ml2/drivers/linuxbridge/agent/common/config.py index fa1487c6b49..c31e51736da 100644 --- a/neutron/plugins/ml2/drivers/linuxbridge/agent/common/config.py +++ b/neutron/plugins/ml2/drivers/linuxbridge/agent/common/config.py @@ -66,6 +66,22 @@ agent_opts = [ help=_("Set new timeout in seconds for new rpc calls after " "agent receives SIGTERM. If value is set to 0, rpc " "timeout won't be changed")), + # TODO(kevinbenton): The following opt is duplicated between the OVS agent + # and the Linuxbridge agent to make it easy to back-port. These shared opts + # should be moved into a common agent config options location as part of + # the deduplication work. + cfg.BoolOpt('prevent_arp_spoofing', default=True, + help=_("Enable suppression of ARP responses that don't match " + "an IP address that belongs to the port from which " + "they originate. Note: This prevents the VMs attached " + "to this agent from spoofing, it doesn't protect them " + "from other devices which have the capability to spoof " + "(e.g. 
bare metal or VMs attached to agents without " + "this flag set to True). Spoofing rules will not be " + "added to any ports that have port security disabled. " + "For LinuxBridge, this requires ebtables. For OVS, it " + "requires a version that supports matching ARP " + "headers.")) ] diff --git a/neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py b/neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py index b359dac62ec..d97e21bc659 100644 --- a/neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py +++ b/neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py @@ -47,6 +47,7 @@ from neutron.i18n import _LE, _LI, _LW from neutron.plugins.common import constants as p_const from neutron.plugins.ml2.drivers.l2pop.rpc_manager \ import l2population_rpc as l2pop_rpc +from neutron.plugins.ml2.drivers.linuxbridge.agent import arp_protect from neutron.plugins.ml2.drivers.linuxbridge.agent.common import config # noqa from neutron.plugins.ml2.drivers.linuxbridge.agent.common \ import constants as lconst @@ -768,6 +769,7 @@ class LinuxBridgeNeutronAgentRPC(service.Service): self.quitting_rpc_timeout = quitting_rpc_timeout def start(self): + self.prevent_arp_spoofing = cfg.CONF.AGENT.prevent_arp_spoofing self.setup_linux_bridge(self.interface_mappings) configurations = {'interface_mappings': self.interface_mappings} if self.br_mgr.vxlan_mode != lconst.VXLAN_NONE: @@ -895,6 +897,11 @@ class LinuxBridgeNeutronAgentRPC(service.Service): if 'port_id' in device_details: LOG.info(_LI("Port %(device)s updated. 
Details: %(details)s"), {'device': device, 'details': device_details}) + if self.prevent_arp_spoofing: + port = self.br_mgr.get_tap_device_name( + device_details['port_id']) + arp_protect.setup_arp_spoofing_protection(port, + device_details) if device_details['admin_state_up']: # create the networking for the port network_type = device_details.get('network_type') @@ -948,6 +955,8 @@ class LinuxBridgeNeutronAgentRPC(service.Service): LOG.info(_LI("Port %s updated."), device) else: LOG.debug("Device %s not defined on plugin", device) + if self.prevent_arp_spoofing: + arp_protect.delete_arp_spoofing_protection(devices) return resync def scan_devices(self, previous, sync): @@ -968,6 +977,10 @@ class LinuxBridgeNeutronAgentRPC(service.Service): 'current': set(), 'updated': set(), 'removed': set()} + # clear any orphaned ARP spoofing rules (e.g. interface was + # manually deleted) + if self.prevent_arp_spoofing: + arp_protect.delete_unreferenced_arp_protection(current_devices) if sync: # This is the first iteration, or the previous one had a problem. diff --git a/neutron/plugins/ml2/drivers/openvswitch/agent/common/config.py b/neutron/plugins/ml2/drivers/openvswitch/agent/common/config.py index e7f512a00af..98b6210f937 100644 --- a/neutron/plugins/ml2/drivers/openvswitch/agent/common/config.py +++ b/neutron/plugins/ml2/drivers/openvswitch/agent/common/config.py @@ -86,8 +86,9 @@ agent_opts = [ "(e.g. bare metal or VMs attached to agents without " "this flag set to True). Spoofing rules will not be " "added to any ports that have port security disabled. " - "This requires a version of OVS that supports matching " - "ARP headers.")), + "For LinuxBridge, this requires ebtables. 
For OVS, it " + "requires a version that supports matching ARP " + "headers.")), cfg.BoolOpt('dont_fragment', default=True, help=_("Set or un-set the don't fragment (DF) bit on " "outgoing IP packet carrying GRE/VXLAN tunnel.")), diff --git a/neutron/tests/common/machine_fixtures.py b/neutron/tests/common/machine_fixtures.py index 6e46c879b79..c6ff0f78f8a 100644 --- a/neutron/tests/common/machine_fixtures.py +++ b/neutron/tests/common/machine_fixtures.py @@ -76,19 +76,19 @@ class PeerMachines(fixtures.Fixture): :type machines: FakeMachine list """ - AMOUNT = 2 CIDR = '192.168.0.1/24' - def __init__(self, bridge, ip_cidr=None, gateway_ip=None): + def __init__(self, bridge, ip_cidr=None, gateway_ip=None, amount=2): super(PeerMachines, self).__init__() self.bridge = bridge self.ip_cidr = ip_cidr or self.CIDR self.gateway_ip = gateway_ip + self.amount = amount def _setUp(self): self.machines = [] - for index in range(self.AMOUNT): + for index in range(self.amount): ip_cidr = net_helpers.increment_ip_cidr(self.ip_cidr, index) self.machines.append( self.useFixture( diff --git a/neutron/tests/functional/agent/linux/test_linuxbridge_arp_protect.py b/neutron/tests/functional/agent/linux/test_linuxbridge_arp_protect.py new file mode 100644 index 00000000000..8ccd7159dc2 --- /dev/null +++ b/neutron/tests/functional/agent/linux/test_linuxbridge_arp_protect.py @@ -0,0 +1,100 @@ +# Copyright (c) 2015 Mirantis, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +from neutron.plugins.ml2.drivers.linuxbridge.agent import arp_protect + +from neutron.tests.common import machine_fixtures +from neutron.tests.common import net_helpers +from neutron.tests.functional import base as functional_base + +no_arping = net_helpers.assert_no_arping +arping = net_helpers.assert_arping + + +class LinuxBridgeARPSpoofTestCase(functional_base.BaseSudoTestCase): + + def setUp(self): + super(LinuxBridgeARPSpoofTestCase, self).setUp() + + lbfixture = self.useFixture(net_helpers.LinuxBridgeFixture()) + self.addCleanup(setattr, arp_protect, 'NAMESPACE', None) + arp_protect.NAMESPACE = lbfixture.namespace + bridge = lbfixture.bridge + self.source, self.destination, self.observer = self.useFixture( + machine_fixtures.PeerMachines(bridge, amount=3)).machines + + def _add_arp_protection(self, machine, addresses, extra_port_dict=None): + port_dict = {'fixed_ips': [{'ip_address': a} for a in addresses]} + if extra_port_dict: + port_dict.update(extra_port_dict) + name = net_helpers.VethFixture.get_peer_name(machine.port.name) + arp_protect.setup_arp_spoofing_protection(name, port_dict) + self.addCleanup(arp_protect.delete_arp_spoofing_protection, + [name]) + + def test_arp_no_protection(self): + arping(self.source.namespace, self.destination.ip) + arping(self.destination.namespace, self.source.ip) + + def test_arp_correct_protection(self): + self._add_arp_protection(self.source, [self.source.ip]) + self._add_arp_protection(self.destination, [self.destination.ip]) + arping(self.source.namespace, self.destination.ip) + arping(self.destination.namespace, self.source.ip) + + def test_arp_fails_incorrect_protection(self): + self._add_arp_protection(self.source, ['1.1.1.1']) + self._add_arp_protection(self.destination, ['2.2.2.2']) + no_arping(self.source.namespace, self.destination.ip) + no_arping(self.destination.namespace, self.source.ip) + + def 
test_arp_protection_removal(self): + self._add_arp_protection(self.source, ['1.1.1.1']) + self._add_arp_protection(self.destination, ['2.2.2.2']) + no_arping(self.observer.namespace, self.destination.ip) + no_arping(self.observer.namespace, self.source.ip) + name = net_helpers.VethFixture.get_peer_name(self.source.port.name) + arp_protect.delete_arp_spoofing_protection([name]) + # spoofing should have been removed from source, but not dest + arping(self.observer.namespace, self.source.ip) + no_arping(self.observer.namespace, self.destination.ip) + + def test_arp_protection_update(self): + self._add_arp_protection(self.source, ['1.1.1.1']) + self._add_arp_protection(self.destination, ['2.2.2.2']) + no_arping(self.observer.namespace, self.destination.ip) + no_arping(self.observer.namespace, self.source.ip) + self._add_arp_protection(self.source, ['192.0.0.0/1']) + # spoofing should have been updated on source, but not dest + arping(self.observer.namespace, self.source.ip) + no_arping(self.observer.namespace, self.destination.ip) + + def test_arp_protection_port_security_disabled(self): + self._add_arp_protection(self.source, ['1.1.1.1']) + no_arping(self.observer.namespace, self.source.ip) + self._add_arp_protection(self.source, ['1.1.1.1'], + {'port_security_enabled': False}) + arping(self.observer.namespace, self.source.ip) + + def test_arp_protection_dead_reference_removal(self): + self._add_arp_protection(self.source, ['1.1.1.1']) + self._add_arp_protection(self.destination, ['2.2.2.2']) + no_arping(self.observer.namespace, self.destination.ip) + no_arping(self.observer.namespace, self.source.ip) + name = net_helpers.VethFixture.get_peer_name(self.source.port.name) + # This should remove all arp protect rules that aren't source port + arp_protect.delete_unreferenced_arp_protection([name]) + no_arping(self.observer.namespace, self.source.ip) + arping(self.observer.namespace, self.destination.ip) diff --git 
a/neutron/tests/unit/plugins/ml2/drivers/linuxbridge/agent/test_linuxbridge_neutron_agent.py b/neutron/tests/unit/plugins/ml2/drivers/linuxbridge/agent/test_linuxbridge_neutron_agent.py index 8651a14d8ff..c0324f4e8d4 100644 --- a/neutron/tests/unit/plugins/ml2/drivers/linuxbridge/agent/test_linuxbridge_neutron_agent.py +++ b/neutron/tests/unit/plugins/ml2/drivers/linuxbridge/agent/test_linuxbridge_neutron_agent.py @@ -89,6 +89,7 @@ class TestLinuxBridgeAgent(base.BaseTestCase): super(TestLinuxBridgeAgent, self).setUp() # disable setting up periodic state reporting cfg.CONF.set_override('report_interval', 0, 'AGENT') + cfg.CONF.set_override('prevent_arp_spoofing', False, 'AGENT') cfg.CONF.set_default('firewall_driver', 'neutron.agent.firewall.NoopFirewallDriver', group='SECURITYGROUP') From 8b13609edac2c136e1a0acbc05ad93059bb59fc1 Mon Sep 17 00:00:00 2001 From: Pavel Bondar Date: Thu, 2 Jul 2015 11:35:18 +0300 Subject: [PATCH 49/54] Track allocation_pools in SubnetRequest To keep pluggable and non-pluggable ipam implementation consistent non-pluggable one has to be switched to track allocation_pools and gateway_ip using SubnetRequests. SubnetRequest requires allocation_pools to be list of IPRanges. Previously allocation_pools were tracked as list of dicts. So allocation_pools generating and validating was moved before SubnetRequest is created. 
Partially-Implements: blueprint neutron-ipam Change-Id: I8d2fec3013b302db202121f946b53a0610ae8321 --- neutron/db/db_base_plugin_common.py | 8 +--- neutron/db/ipam_backend_mixin.py | 40 ++++++++----------- neutron/db/ipam_non_pluggable_backend.py | 22 +++++++--- neutron/ipam/__init__.py | 4 +- .../unit/agent/test_securitygroups_rpc.py | 2 +- .../tests/unit/db/test_db_base_plugin_v2.py | 6 ++- 6 files changed, 45 insertions(+), 37 deletions(-) diff --git a/neutron/db/db_base_plugin_common.py b/neutron/db/db_base_plugin_common.py index 29816ca39ac..1b795c0752c 100644 --- a/neutron/db/db_base_plugin_common.py +++ b/neutron/db/db_base_plugin_common.py @@ -229,6 +229,7 @@ class DbBasePluginCommon(common_db_mixin.CommonDbMixin): def _make_subnet_args(self, shared, detail, subnet, subnetpool_id=None): + gateway_ip = str(detail.gateway_ip) if detail.gateway_ip else None args = {'tenant_id': detail.tenant_id, 'id': detail.subnet_id, 'name': subnet['name'], @@ -237,7 +238,7 @@ class DbBasePluginCommon(common_db_mixin.CommonDbMixin): 'cidr': str(detail.subnet_cidr), 'subnetpool_id': subnetpool_id, 'enable_dhcp': subnet['enable_dhcp'], - 'gateway_ip': self._gateway_ip_str(subnet, detail.subnet_cidr), + 'gateway_ip': gateway_ip, 'shared': shared} if subnet['ip_version'] == 6 and subnet['enable_dhcp']: if attributes.is_attr_set(subnet['ipv6_ra_mode']): @@ -251,8 +252,3 @@ class DbBasePluginCommon(common_db_mixin.CommonDbMixin): return [{'subnet_id': ip["subnet_id"], 'ip_address': ip["ip_address"]} for ip in ips] - - def _gateway_ip_str(self, subnet, cidr_net): - if subnet.get('gateway_ip') is attributes.ATTR_NOT_SPECIFIED: - return str(cidr_net.network + 1) - return subnet.get('gateway_ip') diff --git a/neutron/db/ipam_backend_mixin.py b/neutron/db/ipam_backend_mixin.py index cb82b4ede6b..f50d160a3a4 100644 --- a/neutron/db/ipam_backend_mixin.py +++ b/neutron/db/ipam_backend_mixin.py @@ -46,6 +46,12 @@ class IpamBackendMixin(db_base_plugin_common.DbBasePluginCommon): """ pass + 
@staticmethod + def _gateway_ip_str(subnet, cidr_net): + if subnet.get('gateway_ip') is attributes.ATTR_NOT_SPECIFIED: + return str(netaddr.IPNetwork(cidr_net).network + 1) + return subnet.get('gateway_ip') + def _validate_pools_with_subnetpool(self, subnet): """Verifies that allocation pools are set correctly @@ -169,18 +175,6 @@ class IpamBackendMixin(db_base_plugin_common.DbBasePluginCommon): subnet.update(s) return subnet, changes - def _allocate_pools_for_subnet(self, context, subnet): - """Create IP allocation pools for a given subnet - - Pools are defined by the 'allocation_pools' attribute, - a list of dict objects with 'start' and 'end' keys for - defining the pool range. - """ - pools = ipam_utils.generate_pools(subnet['cidr'], subnet['gateway_ip']) - return [{'start': str(netaddr.IPAddress(pool.first)), - 'end': str(netaddr.IPAddress(pool.last))} - for pool in pools] - def _validate_subnet_cidr(self, context, network, new_subnet_cidr): """Validate the CIDR for a subnet. @@ -297,15 +291,17 @@ class IpamBackendMixin(db_base_plugin_common.DbBasePluginCommon): pool_2=r_range, subnet_cidr=subnet_cidr) - def _prepare_allocation_pools(self, context, allocation_pools, subnet): + def _prepare_allocation_pools(self, allocation_pools, cidr, gateway_ip): + """Returns allocation pools represented as list of IPRanges""" if not attributes.is_attr_set(allocation_pools): - return self._allocate_pools_for_subnet(context, subnet) + return ipam_utils.generate_pools(cidr, gateway_ip) - self._validate_allocation_pools(allocation_pools, subnet['cidr']) - if subnet['gateway_ip']: - self._validate_gw_out_of_pools(subnet['gateway_ip'], + self._validate_allocation_pools(allocation_pools, cidr) + if gateway_ip: + self._validate_gw_out_of_pools(gateway_ip, allocation_pools) - return allocation_pools + return [netaddr.IPRange(p['start'], p['end']) + for p in allocation_pools] def _validate_gw_out_of_pools(self, gateway_ip, pools): for allocation_pool in pools: @@ -385,10 +381,7 @@ 
class IpamBackendMixin(db_base_plugin_common.DbBasePluginCommon): subnet_args, dns_nameservers, host_routes, - allocation_pools): - allocation_pools = self._prepare_allocation_pools(context, - allocation_pools, - subnet_args) + subnet_request): self._validate_subnet_cidr(context, network, subnet_args['cidr']) self._validate_network_subnetpools(network, subnet_args['subnetpool_id'], @@ -410,6 +403,7 @@ class IpamBackendMixin(db_base_plugin_common.DbBasePluginCommon): nexthop=rt['nexthop']) context.session.add(route) - self._save_allocation_pools(context, subnet, allocation_pools) + self._save_allocation_pools(context, subnet, + subnet_request.allocation_pools) return subnet diff --git a/neutron/db/ipam_non_pluggable_backend.py b/neutron/db/ipam_non_pluggable_backend.py index f97d603867d..45ba102f686 100644 --- a/neutron/db/ipam_non_pluggable_backend.py +++ b/neutron/db/ipam_non_pluggable_backend.py @@ -188,14 +188,16 @@ class IpamNonPluggableBackend(ipam_backend_mixin.IpamBackendMixin): def _save_allocation_pools(self, context, subnet, allocation_pools): for pool in allocation_pools: + first_ip = str(netaddr.IPAddress(pool.first, pool.version)) + last_ip = str(netaddr.IPAddress(pool.last, pool.version)) ip_pool = models_v2.IPAllocationPool(subnet=subnet, - first_ip=pool['start'], - last_ip=pool['end']) + first_ip=first_ip, + last_ip=last_ip) context.session.add(ip_pool) ip_range = models_v2.IPAvailabilityRange( ipallocationpool=ip_pool, - first_ip=pool['start'], - last_ip=pool['end']) + first_ip=first_ip, + last_ip=last_ip) context.session.add(ip_range) def _allocate_ips_for_port_and_store(self, context, port, port_id): @@ -474,6 +476,16 @@ class IpamNonPluggableBackend(ipam_backend_mixin.IpamBackendMixin): subnetpool = self._get_subnetpool(context, subnetpool_id) self._validate_ip_version_with_subnetpool(subnet, subnetpool) + # gateway_ip and allocation pools should be validated or generated + # only for specific request + if subnet['cidr'] is not 
attributes.ATTR_NOT_SPECIFIED: + subnet['gateway_ip'] = self._gateway_ip_str(subnet, + subnet['cidr']) + # allocation_pools are converted to list of IPRanges + subnet['allocation_pools'] = self._prepare_allocation_pools( + subnet['allocation_pools'], + subnet['cidr'], + subnet['gateway_ip']) subnet_request = ipam.SubnetRequestFactory.get_request(context, subnet, subnetpool) @@ -492,5 +504,5 @@ class IpamNonPluggableBackend(ipam_backend_mixin.IpamBackendMixin): subnetpool_id), subnet['dns_nameservers'], subnet['host_routes'], - subnet['allocation_pools']) + subnet_request) return subnet diff --git a/neutron/ipam/__init__.py b/neutron/ipam/__init__.py index 16c8151e358..7d45e235776 100644 --- a/neutron/ipam/__init__.py +++ b/neutron/ipam/__init__.py @@ -284,4 +284,6 @@ class SubnetRequestFactory(object): else: return SpecificSubnetRequest(subnet['tenant_id'], subnet_id, - cidr) + cidr, + subnet.get('gateway_ip'), + subnet.get('allocation_pools')) diff --git a/neutron/tests/unit/agent/test_securitygroups_rpc.py b/neutron/tests/unit/agent/test_securitygroups_rpc.py index 161560b403f..0ff86ceb212 100644 --- a/neutron/tests/unit/agent/test_securitygroups_rpc.py +++ b/neutron/tests/unit/agent/test_securitygroups_rpc.py @@ -43,7 +43,7 @@ FAKE_PREFIX = {const.IPv4: '10.0.0.0/24', const.IPv6: '2001:db8::/64'} FAKE_IP = {const.IPv4: '10.0.0.1', const.IPv6: 'fe80::1', - 'IPv6_GLOBAL': '2001:0db8::1', + 'IPv6_GLOBAL': '2001:db8::1', 'IPv6_LLA': 'fe80::123', 'IPv6_DHCP': '2001:db8::3'} diff --git a/neutron/tests/unit/db/test_db_base_plugin_v2.py b/neutron/tests/unit/db/test_db_base_plugin_v2.py index 6fef2cbbd8f..1ffc269e79f 100644 --- a/neutron/tests/unit/db/test_db_base_plugin_v2.py +++ b/neutron/tests/unit/db/test_db_base_plugin_v2.py @@ -3178,6 +3178,9 @@ class TestSubnetsV2(NeutronDbPluginV2TestCase): set_context=False) def test_create_subnet_nonzero_cidr(self): + # Pass None as gateway_ip to prevent ip auto allocation for gw + # Previously gateway ip was allocated after 
validations, + # so no errors were raised if gw ip was out of range. with self.subnet(cidr='10.129.122.5/8') as v1,\ self.subnet(cidr='11.129.122.5/15') as v2,\ self.subnet(cidr='12.129.122.5/16') as v3,\ @@ -3185,7 +3188,8 @@ class TestSubnetsV2(NeutronDbPluginV2TestCase): self.subnet(cidr='14.129.122.5/22') as v5,\ self.subnet(cidr='15.129.122.5/24') as v6,\ self.subnet(cidr='16.129.122.5/28') as v7,\ - self.subnet(cidr='17.129.122.5/32', enable_dhcp=False) as v8: + self.subnet(cidr='17.129.122.5/32', gateway_ip=None, + enable_dhcp=False) as v8: subs = (v1, v2, v3, v4, v5, v6, v7, v8) # the API should accept and correct these for users self.assertEqual(subs[0]['subnet']['cidr'], '10.0.0.0/8') From a863342caf7da9a1c0430549c1ea1e53408b34af Mon Sep 17 00:00:00 2001 From: Cyril Roelandt Date: Tue, 7 Jul 2015 14:25:06 +0000 Subject: [PATCH 50/54] Python3: cast the result of zip() to list The result of get_sorts was a 'zip object' in Python 3, and it was later used as a list, which fails. Just cast the result to a list to fix this issue. 
Change-Id: I12017f79cad92b1da4fe5f9939b38436db7219eb Blueprint: neutron-python3 --- neutron/api/api_common.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/neutron/api/api_common.py b/neutron/api/api_common.py index 778c40794da..595c592bd72 100644 --- a/neutron/api/api_common.py +++ b/neutron/api/api_common.py @@ -147,8 +147,8 @@ def get_sorts(request, attr_info): 'asc': constants.SORT_DIRECTION_ASC, 'desc': constants.SORT_DIRECTION_DESC}) raise exc.HTTPBadRequest(explanation=msg) - return zip(sort_keys, - [x == constants.SORT_DIRECTION_ASC for x in sort_dirs]) + return list(zip(sort_keys, + [x == constants.SORT_DIRECTION_ASC for x in sort_dirs])) def get_page_reverse(request): From 13b0f6f8e2fd1e84ff3580cd75bb879e18064da6 Mon Sep 17 00:00:00 2001 From: Carl Baldwin Date: Tue, 7 Jul 2015 16:41:03 +0000 Subject: [PATCH 51/54] Add IP_ANY dict to ease choosing between IPv4 and IPv6 "any" address I'm working on a new patch that will add one more case where we need to choose between 0.0.0.0/0 and ::/0 based on the ip version. I thought I'd add a new constant and simplify a couple of existing uses. Change-Id: I376d60c7de4bafcaf2387685ddcc1d98978ce446 --- neutron/agent/l3/ha_router.py | 4 +--- neutron/agent/linux/ip_lib.py | 4 +--- neutron/common/constants.py | 1 + 3 files changed, 3 insertions(+), 6 deletions(-) diff --git a/neutron/agent/l3/ha_router.py b/neutron/agent/l3/ha_router.py index b430f44e5a3..31a2c18dc58 100644 --- a/neutron/agent/l3/ha_router.py +++ b/neutron/agent/l3/ha_router.py @@ -191,9 +191,7 @@ class HaRouter(router.RouterInfo): for gw_ip in gateway_ips: # TODO(Carl) This is repeated everywhere. A method would # be nice. 
- default_gw = (n_consts.IPv4_ANY if - netaddr.IPAddress(gw_ip).version == 4 else - n_consts.IPv6_ANY) + default_gw = n_consts.IP_ANY[netaddr.IPAddress(gw_ip).version] instance = self._get_keepalived_instance() default_gw_rts.append(keepalived.KeepalivedVirtualRoute( default_gw, gw_ip, interface_name)) diff --git a/neutron/agent/linux/ip_lib.py b/neutron/agent/linux/ip_lib.py index edb35d5a7d7..36d2b09523b 100644 --- a/neutron/agent/linux/ip_lib.py +++ b/neutron/agent/linux/ip_lib.py @@ -282,8 +282,6 @@ class IPRule(SubProcessBase): class IpRuleCommand(IpCommandBase): COMMAND = 'rule' - ALL = {4: constants.IPv4_ANY, 6: constants.IPv6_ANY} - def _parse_line(self, ip_version, line): # Typical rules from 'ip rule show': # 4030201: from 1.2.3.4/24 lookup 10203040 @@ -299,7 +297,7 @@ class IpRuleCommand(IpCommandBase): # Canonicalize some arguments if settings.get('from') == "all": - settings['from'] = self.ALL[ip_version] + settings['from'] = constants.IP_ANY[ip_version] if 'lookup' in settings: settings['table'] = settings.pop('lookup') diff --git a/neutron/common/constants.py b/neutron/common/constants.py index d935273e527..fc9c4b24633 100644 --- a/neutron/common/constants.py +++ b/neutron/common/constants.py @@ -74,6 +74,7 @@ IPv6_BITS = 128 IPv4_ANY = '0.0.0.0/0' IPv6_ANY = '::/0' +IP_ANY = {IP_VERSION_4: IPv4_ANY, IP_VERSION_6: IPv6_ANY} DHCP_RESPONSE_PORT = 68 From 5b6ca5ce898a2e9a810ec49a1712337a41822788 Mon Sep 17 00:00:00 2001 From: armando-migliaccio Date: Tue, 7 Jul 2015 11:13:41 -0700 Subject: [PATCH 52/54] Make sure path_prefix is set during unit tests Change 18bc67d5 broke *-aas unit tests. 
This change ensures that mocking is done correctly, the same way it is done for the other plugin attributes Change-Id: I4167f18560e3a3aad652aae1ea9d3c6bc34dc796 Closes-bug: #1472361 --- neutron/tests/unit/extensions/base.py | 1 + 1 file changed, 1 insertion(+) diff --git a/neutron/tests/unit/extensions/base.py b/neutron/tests/unit/extensions/base.py index a4d0206918c..75ba95a8378 100644 --- a/neutron/tests/unit/extensions/base.py +++ b/neutron/tests/unit/extensions/base.py @@ -84,6 +84,7 @@ class ExtensionTestCase(testlib_api.WebTestCase): quota.QUOTAS._driver = None cfg.CONF.set_override('quota_driver', 'neutron.quota.ConfDriver', group='QUOTAS') + setattr(instance, 'path_prefix', resource_prefix) class ExtensionTestExtensionManager(object): def get_resources(self): From c28b6b0ef8606abea00eeea4fde96a4f646da952 Mon Sep 17 00:00:00 2001 From: Brian Haley Date: Tue, 7 Jul 2015 17:03:04 -0400 Subject: [PATCH 53/54] Remove lingering traces of q_ The rename from Quantum to Neutron left a few q_ strings around, let's go ahead and clean them up. 
Change-Id: I06e6bdbd0c2f3a25bb90b5fa291009b9ec2d471d --- neutron/common/utils.py | 14 ++++++------ neutron/db/dvr_mac_db.py | 4 ++-- neutron/db/l3_dvrscheduler_db.py | 12 +++++----- neutron/db/securitygroups_rpc_base.py | 22 +++++++++---------- neutron/plugins/brocade/NeutronPlugin.py | 10 ++++----- .../plugins/cisco/n1kv/n1kv_neutron_plugin.py | 4 ++-- .../agent/linuxbridge_neutron_agent.py | 4 ++-- .../mech_sriov/agent/sriov_nic_agent.py | 10 ++++----- .../openvswitch/agent/ovs_neutron_agent.py | 20 ++++++++--------- neutron/plugins/ml2/rpc.py | 10 ++++----- neutron/plugins/oneconvergence/plugin.py | 6 ++--- neutron/services/l3_router/l3_arista.py | 8 +++---- .../services/l3_router/l3_router_plugin.py | 6 ++--- neutron/services/l3_router/l3_sdnve.py | 4 ++-- neutron/tests/retargetable/client_fixtures.py | 4 ++-- .../tests/unit/db/test_db_base_plugin_v2.py | 8 +++---- neutron/tests/unit/dummy_plugin.py | 4 ++-- .../mech_sriov/agent/common/test_config.py | 8 +++---- .../openvswitch/agent/test_ovs_tunnel.py | 4 ++-- .../unit/scheduler/test_l3_agent_scheduler.py | 16 +++++++------- 20 files changed, 89 insertions(+), 89 deletions(-) diff --git a/neutron/common/utils.py b/neutron/common/utils.py index f4b286015a4..bd2dccdb0d2 100644 --- a/neutron/common/utils.py +++ b/neutron/common/utils.py @@ -37,7 +37,7 @@ from oslo_log import log as logging from oslo_utils import excutils import six -from neutron.common import constants as q_const +from neutron.common import constants as n_const TIME_FORMAT = "%Y-%m-%dT%H:%M:%SZ" LOG = logging.getLogger(__name__) @@ -343,8 +343,8 @@ def is_dvr_serviced(device_owner): if they are required for DVR or any service directly or indirectly associated with DVR. 
""" - dvr_serviced_device_owners = (q_const.DEVICE_OWNER_LOADBALANCER, - q_const.DEVICE_OWNER_DHCP) + dvr_serviced_device_owners = (n_const.DEVICE_OWNER_LOADBALANCER, + n_const.DEVICE_OWNER_DHCP) return (device_owner.startswith('compute:') or device_owner in dvr_serviced_device_owners) @@ -396,15 +396,15 @@ def is_cidr_host(cidr): raise ValueError("cidr doesn't contain a '/'") net = netaddr.IPNetwork(cidr) if net.version == 4: - return net.prefixlen == q_const.IPv4_BITS - return net.prefixlen == q_const.IPv6_BITS + return net.prefixlen == n_const.IPv4_BITS + return net.prefixlen == n_const.IPv6_BITS def ip_version_from_int(ip_version_int): if ip_version_int == 4: - return q_const.IPv4 + return n_const.IPv4 if ip_version_int == 6: - return q_const.IPv6 + return n_const.IPv6 raise ValueError(_('Illegal IP version number')) diff --git a/neutron/db/dvr_mac_db.py b/neutron/db/dvr_mac_db.py index 951c45a9991..c0f0d656aa7 100644 --- a/neutron/db/dvr_mac_db.py +++ b/neutron/db/dvr_mac_db.py @@ -20,7 +20,7 @@ from oslo_log import log as logging import sqlalchemy as sa from sqlalchemy.orm import exc -from neutron.common import exceptions as q_exc +from neutron.common import exceptions as n_exc from neutron.common import utils from neutron.db import model_base from neutron.extensions import dvr as ext_dvr @@ -158,7 +158,7 @@ class DVRDbMixin(ext_dvr.DVRMacAddressPluginBase): def get_subnet_for_dvr(self, context, subnet): try: subnet_info = self.plugin.get_subnet(context, subnet) - except q_exc.SubnetNotFound: + except n_exc.SubnetNotFound: return {} else: # retrieve the gateway port on this subnet diff --git a/neutron/db/l3_dvrscheduler_db.py b/neutron/db/l3_dvrscheduler_db.py index eeaf3220593..b170cfbf09a 100644 --- a/neutron/db/l3_dvrscheduler_db.py +++ b/neutron/db/l3_dvrscheduler_db.py @@ -25,7 +25,7 @@ from sqlalchemy.orm import joinedload from neutron.callbacks import events from neutron.callbacks import registry from neutron.callbacks import resources -from 
neutron.common import constants as q_const +from neutron.common import constants as n_const from neutron.common import utils as n_utils from neutron.db import agents_db from neutron.db import l3_agentschedulers_db as l3agent_sch_db @@ -104,7 +104,7 @@ class L3_DVRsch_db_mixin(l3agent_sch_db.L3AgentSchedulerDbMixin): subnet = ip['subnet_id'] filter_sub = {'fixed_ips': {'subnet_id': [subnet]}, 'device_owner': - [q_const.DEVICE_OWNER_DVR_INTERFACE]} + [n_const.DEVICE_OWNER_DVR_INTERFACE]} router_id = None ports = self._core_plugin.get_ports(context, filters=filter_sub) for port in ports: @@ -126,7 +126,7 @@ class L3_DVRsch_db_mixin(l3agent_sch_db.L3AgentSchedulerDbMixin): vm_subnet = fixedip['subnet_id'] filter_sub = {'fixed_ips': {'subnet_id': [vm_subnet]}, 'device_owner': - [q_const.DEVICE_OWNER_DVR_INTERFACE]} + [n_const.DEVICE_OWNER_DVR_INTERFACE]} subnet_ports = self._core_plugin.get_ports( context, filters=filter_sub) for subnet_port in subnet_ports: @@ -188,7 +188,7 @@ class L3_DVRsch_db_mixin(l3agent_sch_db.L3AgentSchedulerDbMixin): continue filter_rtr = {'device_id': [router_id], 'device_owner': - [q_const.DEVICE_OWNER_DVR_INTERFACE]} + [n_const.DEVICE_OWNER_DVR_INTERFACE]} int_ports = self._core_plugin.get_ports( admin_context, filters=filter_rtr) for prt in int_ports: @@ -201,7 +201,7 @@ class L3_DVRsch_db_mixin(l3agent_sch_db.L3AgentSchedulerDbMixin): dvr_binding['router_id'] = None dvr_binding.update(dvr_binding) agent = self._get_agent_by_type_and_host(context, - q_const.AGENT_TYPE_L3, + n_const.AGENT_TYPE_L3, port_host) info = {'router_id': router_id, 'host': port_host, 'agent_id': str(agent.id)} @@ -320,7 +320,7 @@ class L3_DVRsch_db_mixin(l3agent_sch_db.L3AgentSchedulerDbMixin): def _get_active_l3_agent_routers_sync_data(self, context, host, agent, router_ids): - if n_utils.is_extension_supported(self, q_const.L3_HA_MODE_EXT_ALIAS): + if n_utils.is_extension_supported(self, n_const.L3_HA_MODE_EXT_ALIAS): return self.get_ha_sync_data_for_host(context, 
host, router_ids=router_ids, active=True) diff --git a/neutron/db/securitygroups_rpc_base.py b/neutron/db/securitygroups_rpc_base.py index 3e90c124b42..3be75c37865 100644 --- a/neutron/db/securitygroups_rpc_base.py +++ b/neutron/db/securitygroups_rpc_base.py @@ -17,7 +17,7 @@ import netaddr from oslo_log import log as logging from sqlalchemy.orm import exc -from neutron.common import constants as q_const +from neutron.common import constants as n_const from neutron.common import ipv6_utils as ipv6 from neutron.common import utils from neutron.db import allowedaddresspairs_db as addr_pair @@ -32,7 +32,7 @@ LOG = logging.getLogger(__name__) DIRECTION_IP_PREFIX = {'ingress': 'source_ip_prefix', 'egress': 'dest_ip_prefix'} -DHCP_RULE_PORT = {4: (67, 68, q_const.IPv4), 6: (547, 546, q_const.IPv6)} +DHCP_RULE_PORT = {4: (67, 68, n_const.IPv4), 6: (547, 546, n_const.IPv6)} class SecurityGroupServerRpcMixin(sg_db.SecurityGroupDbMixin): @@ -161,12 +161,12 @@ class SecurityGroupServerRpcMixin(sg_db.SecurityGroupDbMixin): sg_provider_updated_networks = set() sec_groups = set() for port in ports: - if port['device_owner'] == q_const.DEVICE_OWNER_DHCP: + if port['device_owner'] == n_const.DEVICE_OWNER_DHCP: sg_provider_updated_networks.add( port['network_id']) # For IPv6, provider rule need to be updated in case router # interface is created or updated after VM port is created. 
- elif port['device_owner'] == q_const.DEVICE_OWNER_ROUTER_INTF: + elif port['device_owner'] == n_const.DEVICE_OWNER_ROUTER_INTF: if any(netaddr.IPAddress(fixed_ip['ip_address']).version == 6 for fixed_ip in port['fixed_ips']): sg_provider_updated_networks.add( @@ -319,7 +319,7 @@ class SecurityGroupServerRpcMixin(sg_db.SecurityGroupDbMixin): models_v2.IPAllocation.ip_address) query = query.join(models_v2.IPAllocation) query = query.filter(models_v2.Port.network_id.in_(network_ids)) - owner = q_const.DEVICE_OWNER_DHCP + owner = n_const.DEVICE_OWNER_DHCP query = query.filter(models_v2.Port.device_owner == owner) ips = {} @@ -329,7 +329,7 @@ class SecurityGroupServerRpcMixin(sg_db.SecurityGroupDbMixin): for mac_address, network_id, ip in query: if (netaddr.IPAddress(ip).version == 6 and not netaddr.IPAddress(ip).is_link_local()): - ip = str(ipv6.get_ipv6_addr_by_EUI64(q_const.IPV6_LLA_PREFIX, + ip = str(ipv6.get_ipv6_addr_by_EUI64(n_const.IPV6_LLA_PREFIX, mac_address)) if ip not in ips[network_id]: ips[network_id].append(ip) @@ -382,7 +382,7 @@ class SecurityGroupServerRpcMixin(sg_db.SecurityGroupDbMixin): query = query.filter( models_v2.IPAllocation.ip_address == subnet['gateway_ip']) query = query.filter( - models_v2.Port.device_owner.in_(q_const.ROUTER_INTERFACE_OWNERS)) + models_v2.Port.device_owner.in_(n_const.ROUTER_INTERFACE_OWNERS)) try: mac_address = query.one()[0] except (exc.NoResultFound, exc.MultipleResultsFound): @@ -390,7 +390,7 @@ class SecurityGroupServerRpcMixin(sg_db.SecurityGroupDbMixin): 'found for IPv6 RA'), subnet['id']) return lla_ip = str(ipv6.get_ipv6_addr_by_EUI64( - q_const.IPV6_LLA_PREFIX, + n_const.IPV6_LLA_PREFIX, mac_address)) return lla_ip @@ -442,10 +442,10 @@ class SecurityGroupServerRpcMixin(sg_db.SecurityGroupDbMixin): ra_ips = ips.get(port['network_id']) for ra_ip in ra_ips: ra_rule = {'direction': 'ingress', - 'ethertype': q_const.IPv6, - 'protocol': q_const.PROTO_NAME_ICMP_V6, + 'ethertype': n_const.IPv6, + 'protocol': 
n_const.PROTO_NAME_ICMP_V6, 'source_ip_prefix': ra_ip, - 'source_port_range_min': q_const.ICMPV6_TYPE_RA} + 'source_port_range_min': n_const.ICMPV6_TYPE_RA} port['security_group_rules'].append(ra_rule) def _apply_provider_rule(self, context, ports): diff --git a/neutron/plugins/brocade/NeutronPlugin.py b/neutron/plugins/brocade/NeutronPlugin.py index 8108d29e025..ea4a18ff388 100644 --- a/neutron/plugins/brocade/NeutronPlugin.py +++ b/neutron/plugins/brocade/NeutronPlugin.py @@ -31,7 +31,7 @@ from neutron.api.rpc.handlers import dhcp_rpc from neutron.api.rpc.handlers import l3_rpc from neutron.api.rpc.handlers import metadata_rpc from neutron.api.rpc.handlers import securitygroups_rpc -from neutron.common import constants as q_const +from neutron.common import constants as n_const from neutron.common import rpc as n_rpc from neutron.common import topics from neutron.common import utils @@ -94,7 +94,7 @@ class BridgeRpcCallbacks(object): LOG.debug("Device %(device)s details requested from %(agent_id)s", {'device': device, 'agent_id': agent_id}) port = brocade_db.get_port(rpc_context, - device[len(q_const.TAP_DEVICE_PREFIX):]) + device[len(n_const.TAP_DEVICE_PREFIX):]) if port: entry = {'device': device, 'vlan_id': port.vlan_id, @@ -154,7 +154,7 @@ class SecurityGroupServerRpcMixin(sg_db_rpc.SecurityGroupServerRpcMixin): # Doing what other plugins are doing session = db.get_session() port = brocade_db.get_port_from_device( - session, device[len(q_const.TAP_DEVICE_PREFIX):]) + session, device[len(n_const.TAP_DEVICE_PREFIX):]) # TODO(shiv): need to extend the db model to include device owners # make it appears that the device owner is of type network @@ -267,10 +267,10 @@ class BrocadePluginV2(db_base_plugin_v2.NeutronDbPluginV2, # Consume from all consumers in threads self.conn.consume_in_threads() self.notifier = AgentNotifierApi(topics.AGENT) - self.agent_notifiers[q_const.AGENT_TYPE_DHCP] = ( + self.agent_notifiers[n_const.AGENT_TYPE_DHCP] = ( 
dhcp_rpc_agent_api.DhcpAgentNotifyAPI() ) - self.agent_notifiers[q_const.AGENT_TYPE_L3] = ( + self.agent_notifiers[n_const.AGENT_TYPE_L3] = ( l3_rpc_agent_api.L3AgentNotifyAPI() ) diff --git a/neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py b/neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py index 9d4e5bd683c..63d37b79c85 100644 --- a/neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py +++ b/neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py @@ -13,7 +13,7 @@ # under the License. import eventlet -from oslo_config import cfg as q_conf +from oslo_config import cfg as o_conf from oslo_log import log as logging from oslo_utils import excutils from oslo_utils import importutils @@ -99,7 +99,7 @@ class N1kvNeutronPluginV2(db_base_plugin_v2.NeutronDbPluginV2, self._setup_vsm() self._setup_rpc() self.network_scheduler = importutils.import_object( - q_conf.CONF.network_scheduler_driver + o_conf.CONF.network_scheduler_driver ) self.start_periodic_dhcp_agent_status_check() diff --git a/neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py b/neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py index b359dac62ec..8bf873c36c0 100644 --- a/neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py +++ b/neutron/plugins/ml2/drivers/linuxbridge/agent/linuxbridge_neutron_agent.py @@ -41,7 +41,7 @@ from neutron.common import config as common_config from neutron.common import constants from neutron.common import exceptions from neutron.common import topics -from neutron.common import utils as q_utils +from neutron.common import utils as n_utils from neutron import context from neutron.i18n import _LE, _LI, _LW from neutron.plugins.common import constants as p_const @@ -1040,7 +1040,7 @@ def main(): common_config.setup_logging() try: - interface_mappings = q_utils.parse_mappings( + interface_mappings = n_utils.parse_mappings( cfg.CONF.LINUX_BRIDGE.physical_interface_mappings) except ValueError as e: LOG.error(_LE("Parsing 
physical_interface_mappings failed: %s. " diff --git a/neutron/plugins/ml2/drivers/mech_sriov/agent/sriov_nic_agent.py b/neutron/plugins/ml2/drivers/mech_sriov/agent/sriov_nic_agent.py index 45124fd42c5..54ff293e8d4 100644 --- a/neutron/plugins/ml2/drivers/mech_sriov/agent/sriov_nic_agent.py +++ b/neutron/plugins/ml2/drivers/mech_sriov/agent/sriov_nic_agent.py @@ -29,9 +29,9 @@ from oslo_service import loopingcall from neutron.agent import rpc as agent_rpc from neutron.agent import securitygroups_rpc as sg_rpc from neutron.common import config as common_config -from neutron.common import constants as q_constants +from neutron.common import constants as n_constants from neutron.common import topics -from neutron.common import utils as q_utils +from neutron.common import utils as n_utils from neutron import context from neutron.i18n import _LE, _LI from neutron.plugins.ml2.drivers.mech_sriov.agent.common import config # noqa @@ -78,9 +78,9 @@ class SriovNicSwitchAgent(object): self.agent_state = { 'binary': 'neutron-sriov-nic-agent', 'host': cfg.CONF.host, - 'topic': q_constants.L2_AGENT_TOPIC, + 'topic': n_constants.L2_AGENT_TOPIC, 'configurations': configurations, - 'agent_type': q_constants.AGENT_TYPE_NIC_SWITCH, + 'agent_type': n_constants.AGENT_TYPE_NIC_SWITCH, 'start_flag': True} # Stores port update notifications for processing in the main loop @@ -297,7 +297,7 @@ class SriovNicAgentConfigParser(object): Parse and validate the consistency in both mappings """ - self.device_mappings = q_utils.parse_mappings( + self.device_mappings = n_utils.parse_mappings( cfg.CONF.SRIOV_NIC.physical_device_mappings) self.exclude_devices = config.parse_exclude_devices( cfg.CONF.SRIOV_NIC.exclude_devices) diff --git a/neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py b/neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py index 1f4a4119c68..4ca3423605e 100644 --- a/neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py +++ 
b/neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py @@ -35,10 +35,10 @@ from neutron.agent import rpc as agent_rpc from neutron.agent import securitygroups_rpc as sg_rpc from neutron.api.rpc.handlers import dvr_rpc from neutron.common import config -from neutron.common import constants as q_const +from neutron.common import constants as n_const from neutron.common import exceptions from neutron.common import topics -from neutron.common import utils as q_utils +from neutron.common import utils as n_utils from neutron import context from neutron.i18n import _LE, _LI, _LW from neutron.plugins.common import constants as p_const @@ -192,7 +192,7 @@ class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin, self.agent_state = { 'binary': 'neutron-openvswitch-agent', 'host': self.conf.host, - 'topic': q_const.L2_AGENT_TOPIC, + 'topic': n_const.L2_AGENT_TOPIC, 'configurations': {'bridge_mappings': bridge_mappings, 'tunnel_types': self.tunnel_types, 'tunneling_ip': local_ip, @@ -203,7 +203,7 @@ class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin, self.enable_distributed_routing, 'log_agent_heartbeats': self.conf.AGENT.log_agent_heartbeats}, - 'agent_type': q_const.AGENT_TYPE_OVS, + 'agent_type': n_const.AGENT_TYPE_OVS, 'start_flag': True} if tunnel_types: @@ -472,7 +472,7 @@ class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin, agent_ports, self._tunnel_port_lookup) def add_fdb_flow(self, br, port_info, remote_ip, lvm, ofport): - if port_info == q_const.FLOODING_ENTRY: + if port_info == n_const.FLOODING_ENTRY: lvm.tun_ofports.add(ofport) br.install_flood_to_tun(lvm.vlan, lvm.segmentation_id, lvm.tun_ofports) @@ -486,7 +486,7 @@ class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin, port_info.mac_address) def del_fdb_flow(self, br, port_info, remote_ip, lvm, ofport): - if port_info == q_const.FLOODING_ENTRY: + if port_info == n_const.FLOODING_ENTRY: if ofport not in lvm.tun_ofports: LOG.debug("attempt to remove a 
non-existent port %s", ofport) return @@ -922,20 +922,20 @@ class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin, The peer name can not exceed the maximum length allowed for a linux device. Longer names are hashed to help ensure uniqueness. """ - if len(prefix + name) <= q_const.DEVICE_NAME_MAX_LEN: + if len(prefix + name) <= n_const.DEVICE_NAME_MAX_LEN: return prefix + name # We can't just truncate because bridges may be distinguished # by an ident at the end. A hash over the name should be unique. # Leave part of the bridge name on for easier identification hashlen = 6 - namelen = q_const.DEVICE_NAME_MAX_LEN - len(prefix) - hashlen + namelen = n_const.DEVICE_NAME_MAX_LEN - len(prefix) - hashlen new_name = ('%(prefix)s%(truncated)s%(hash)s' % {'prefix': prefix, 'truncated': name[0:namelen], 'hash': hashlib.sha1(name).hexdigest()[0:hashlen]}) LOG.warning(_LW("Creating an interface named %(name)s exceeds the " "%(limit)d character limitation. It was shortened to " "%(new_name)s to fit."), - {'name': name, 'limit': q_const.DEVICE_NAME_MAX_LEN, + {'name': name, 'limit': n_const.DEVICE_NAME_MAX_LEN, 'new_name': new_name}) return new_name @@ -1646,7 +1646,7 @@ def create_agent_config_map(config): :returns: a map of agent configuration parameters """ try: - bridge_mappings = q_utils.parse_mappings(config.OVS.bridge_mappings) + bridge_mappings = n_utils.parse_mappings(config.OVS.bridge_mappings) except ValueError as e: raise ValueError(_("Parsing bridge_mappings failed: %s.") % e) diff --git a/neutron/plugins/ml2/rpc.py b/neutron/plugins/ml2/rpc.py index eeccde6a0e9..4187da6864e 100644 --- a/neutron/plugins/ml2/rpc.py +++ b/neutron/plugins/ml2/rpc.py @@ -22,7 +22,7 @@ from neutron.api.rpc.handlers import securitygroups_rpc as sg_rpc from neutron.callbacks import events from neutron.callbacks import registry from neutron.callbacks import resources -from neutron.common import constants as q_const +from neutron.common import constants as n_const from 
neutron.common import exceptions from neutron.common import rpc as n_rpc from neutron.common import topics @@ -97,8 +97,8 @@ class RpcCallbacks(type_tunnel.TunnelRpcCallbackMixin): return {'device': device} if (not host or host == port_context.host): - new_status = (q_const.PORT_STATUS_BUILD if port['admin_state_up'] - else q_const.PORT_STATUS_DOWN) + new_status = (n_const.PORT_STATUS_BUILD if port['admin_state_up'] + else n_const.PORT_STATUS_DOWN) if port['status'] != new_status: plugin.update_port_status(rpc_context, port_id, @@ -157,7 +157,7 @@ class RpcCallbacks(type_tunnel.TunnelRpcCallbackMixin): try: port_exists = bool(plugin.update_port_status( - rpc_context, port_id, q_const.PORT_STATUS_DOWN, host)) + rpc_context, port_id, n_const.PORT_STATUS_DOWN, host)) except exc.StaleDataError: port_exists = False LOG.debug("delete_port and update_device_down are being executed " @@ -183,7 +183,7 @@ class RpcCallbacks(type_tunnel.TunnelRpcCallbackMixin): return port_id = plugin.update_port_status(rpc_context, port_id, - q_const.PORT_STATUS_ACTIVE, + n_const.PORT_STATUS_ACTIVE, host) try: # NOTE(armax): it's best to remove all objects from the diff --git a/neutron/plugins/oneconvergence/plugin.py b/neutron/plugins/oneconvergence/plugin.py index 50b425c848a..f0295cb7701 100644 --- a/neutron/plugins/oneconvergence/plugin.py +++ b/neutron/plugins/oneconvergence/plugin.py @@ -27,7 +27,7 @@ from neutron.api.rpc.handlers import dhcp_rpc from neutron.api.rpc.handlers import l3_rpc from neutron.api.rpc.handlers import metadata_rpc from neutron.api.rpc.handlers import securitygroups_rpc -from neutron.common import constants as q_const +from neutron.common import constants as n_const from neutron.common import exceptions as nexception from neutron.common import rpc as n_rpc from neutron.common import topics @@ -150,10 +150,10 @@ class OneConvergencePluginV2(db_base_plugin_v2.NeutronDbPluginV2, svc_constants.L3_ROUTER_NAT: topics.L3PLUGIN} self.conn = 
n_rpc.create_connection(new=True) self.notifier = NVSDPluginV2AgentNotifierApi(topics.AGENT) - self.agent_notifiers[q_const.AGENT_TYPE_DHCP] = ( + self.agent_notifiers[n_const.AGENT_TYPE_DHCP] = ( dhcp_rpc_agent_api.DhcpAgentNotifyAPI() ) - self.agent_notifiers[q_const.AGENT_TYPE_L3] = ( + self.agent_notifiers[n_const.AGENT_TYPE_L3] = ( l3_rpc_agent_api.L3AgentNotifyAPI() ) self.endpoints = [securitygroups_rpc.SecurityGroupServerRpcCallback(), diff --git a/neutron/services/l3_router/l3_arista.py b/neutron/services/l3_router/l3_arista.py index 68353ee3a2b..ac5a1d41c5a 100644 --- a/neutron/services/l3_router/l3_arista.py +++ b/neutron/services/l3_router/l3_arista.py @@ -24,8 +24,8 @@ from oslo_utils import excutils from neutron.api.rpc.agentnotifiers import l3_rpc_agent_api from neutron.api.rpc.handlers import l3_rpc -from neutron.common import constants as q_const -from neutron.common import rpc as q_rpc +from neutron.common import constants as n_const +from neutron.common import rpc as n_rpc from neutron.common import topics from neutron import context as nctx from neutron.db import db_base_plugin_v2 @@ -65,9 +65,9 @@ class AristaL3ServicePlugin(db_base_plugin_v2.NeutronDbPluginV2, def setup_rpc(self): # RPC support self.topic = topics.L3PLUGIN - self.conn = q_rpc.create_connection(new=True) + self.conn = n_rpc.create_connection(new=True) self.agent_notifiers.update( - {q_const.AGENT_TYPE_L3: l3_rpc_agent_api.L3AgentNotifyAPI()}) + {n_const.AGENT_TYPE_L3: l3_rpc_agent_api.L3AgentNotifyAPI()}) self.endpoints = [l3_rpc.L3RpcCallback()] self.conn.create_consumer(self.topic, self.endpoints, fanout=False) diff --git a/neutron/services/l3_router/l3_router_plugin.py b/neutron/services/l3_router/l3_router_plugin.py index 32c30bd4764..91f8ad9d03c 100644 --- a/neutron/services/l3_router/l3_router_plugin.py +++ b/neutron/services/l3_router/l3_router_plugin.py @@ -18,7 +18,7 @@ from oslo_utils import importutils from neutron.api.rpc.agentnotifiers import l3_rpc_agent_api from 
neutron.api.rpc.handlers import l3_rpc -from neutron.common import constants as q_const +from neutron.common import constants as n_const from neutron.common import log as neutron_log from neutron.common import rpc as n_rpc from neutron.common import topics @@ -68,7 +68,7 @@ class L3RouterPlugin(common_db_mixin.CommonDbMixin, self.topic = topics.L3PLUGIN self.conn = n_rpc.create_connection(new=True) self.agent_notifiers.update( - {q_const.AGENT_TYPE_L3: l3_rpc_agent_api.L3AgentNotifyAPI()}) + {n_const.AGENT_TYPE_L3: l3_rpc_agent_api.L3AgentNotifyAPI()}) self.endpoints = [l3_rpc.L3RpcCallback()] self.conn.create_consumer(self.topic, self.endpoints, fanout=False) @@ -96,4 +96,4 @@ class L3RouterPlugin(common_db_mixin.CommonDbMixin, """ return super(L3RouterPlugin, self).create_floatingip( context, floatingip, - initial_status=q_const.FLOATINGIP_STATUS_DOWN) + initial_status=n_const.FLOATINGIP_STATUS_DOWN) diff --git a/neutron/services/l3_router/l3_sdnve.py b/neutron/services/l3_router/l3_sdnve.py index 912644bf8c4..6db745ec40c 100644 --- a/neutron/services/l3_router/l3_sdnve.py +++ b/neutron/services/l3_router/l3_sdnve.py @@ -19,7 +19,7 @@ from networking_ibm.sdnve.l3plugin import sdnve_l3driver from oslo_log import log as logging from oslo_utils import excutils -from neutron.common import constants as q_const +from neutron.common import constants as n_const from neutron.common import exceptions as n_exc from neutron.db import db_base_plugin_v2 from neutron.db import extraroute_db @@ -141,7 +141,7 @@ class SdnveL3ServicePlugin(db_base_plugin_v2.NeutronDbPluginV2, subnet = super(SdnveL3ServicePlugin, self).\ get_subnet(context, subnet_id) device_filter = {'device_id': [router_id], - 'device_owner': [q_const.DEVICE_OWNER_ROUTER_INTF], + 'device_owner': [n_const.DEVICE_OWNER_ROUTER_INTF], 'network_id': [subnet['network_id']]} ports = super(SdnveL3ServicePlugin, self).get_ports(context, filters=device_filter) diff --git a/neutron/tests/retargetable/client_fixtures.py 
b/neutron/tests/retargetable/client_fixtures.py index 1161842331e..102338eceeb 100644 --- a/neutron/tests/retargetable/client_fixtures.py +++ b/neutron/tests/retargetable/client_fixtures.py @@ -20,7 +20,7 @@ import abc import fixtures import six -from neutron.common import exceptions as q_exc +from neutron.common import exceptions as n_exc from neutron import context from neutron import manager from neutron.tests import base @@ -89,7 +89,7 @@ class PluginClientFixture(AbstractClientFixture): @property def NotFound(self): - return q_exc.NetworkNotFound + return n_exc.NetworkNotFound def create_network(self, **kwargs): # Supply defaults that are expected to be set by the api diff --git a/neutron/tests/unit/db/test_db_base_plugin_v2.py b/neutron/tests/unit/db/test_db_base_plugin_v2.py index 6fef2cbbd8f..b09009cfeac 100644 --- a/neutron/tests/unit/db/test_db_base_plugin_v2.py +++ b/neutron/tests/unit/db/test_db_base_plugin_v2.py @@ -1074,13 +1074,13 @@ fixed_ips=ip_address%%3D%s&fixed_ips=ip_address%%3D%s&fixed_ips=subnet_id%%3D%s # Admin request - must return both ports self._test_list_resources('port', [port1, port2]) # Tenant_1 request - must return single port - q_context = context.Context('', 'tenant_1') + n_context = context.Context('', 'tenant_1') self._test_list_resources('port', [port1], - neutron_context=q_context) + neutron_context=n_context) # Tenant_2 request - must return single port - q_context = context.Context('', 'tenant_2') + n_context = context.Context('', 'tenant_2') self._test_list_resources('port', [port2], - neutron_context=q_context) + neutron_context=n_context) def test_list_ports_with_sort_native(self): if self._skip_native_sorting: diff --git a/neutron/tests/unit/dummy_plugin.py b/neutron/tests/unit/dummy_plugin.py index 41a37231935..c658683d15b 100644 --- a/neutron/tests/unit/dummy_plugin.py +++ b/neutron/tests/unit/dummy_plugin.py @@ -70,8 +70,8 @@ class Dummy(object): @classmethod def get_resources(cls): """Returns Extended Resource for 
dummy management.""" - q_mgr = manager.NeutronManager.get_instance() - dummy_inst = q_mgr.get_service_plugins()['DUMMY'] + n_mgr = manager.NeutronManager.get_instance() + dummy_inst = n_mgr.get_service_plugins()['DUMMY'] controller = base.create_resource( COLLECTION_NAME, RESOURCE_NAME, dummy_inst, RESOURCE_ATTRIBUTE_MAP[COLLECTION_NAME]) diff --git a/neutron/tests/unit/plugins/ml2/drivers/mech_sriov/agent/common/test_config.py b/neutron/tests/unit/plugins/ml2/drivers/mech_sriov/agent/common/test_config.py index 7b6d473f491..d321f79c1ed 100644 --- a/neutron/tests/unit/plugins/ml2/drivers/mech_sriov/agent/common/test_config.py +++ b/neutron/tests/unit/plugins/ml2/drivers/mech_sriov/agent/common/test_config.py @@ -16,7 +16,7 @@ from oslo_config import cfg -from neutron.common import utils as q_utils +from neutron.common import utils as n_utils from neutron.plugins.ml2.drivers.mech_sriov.agent.common import config from neutron.plugins.ml2.drivers.mech_sriov.agent \ import sriov_nic_agent as agent @@ -61,7 +61,7 @@ class TestSriovAgentConfig(base.BaseTestCase): cfg.CONF.set_override('physical_device_mappings', self.DEVICE_MAPPING_LIST, 'SRIOV_NIC') - device_mappings = q_utils.parse_mappings( + device_mappings = n_utils.parse_mappings( cfg.CONF.SRIOV_NIC.physical_device_mappings) self.assertEqual(device_mappings, self.DEVICE_MAPPING) @@ -69,14 +69,14 @@ class TestSriovAgentConfig(base.BaseTestCase): cfg.CONF.set_override('physical_device_mappings', self.DEVICE_MAPPING_WITH_ERROR_LIST, 'SRIOV_NIC') - self.assertRaises(ValueError, q_utils.parse_mappings, + self.assertRaises(ValueError, n_utils.parse_mappings, cfg.CONF.SRIOV_NIC.physical_device_mappings) def test_device_mappings_with_spaces(self): cfg.CONF.set_override('physical_device_mappings', self.DEVICE_MAPPING_WITH_SPACES_LIST, 'SRIOV_NIC') - device_mappings = q_utils.parse_mappings( + device_mappings = n_utils.parse_mappings( cfg.CONF.SRIOV_NIC.physical_device_mappings) self.assertEqual(device_mappings, 
self.DEVICE_MAPPING) diff --git a/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/test_ovs_tunnel.py b/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/test_ovs_tunnel.py index e0c8df7ef66..5daad999843 100644 --- a/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/test_ovs_tunnel.py +++ b/neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/test_ovs_tunnel.py @@ -517,13 +517,13 @@ class TunnelTest(object): process_network_ports.side_effect = [ False, Exception('Fake exception to get out of the loop')] - q_agent = self._build_agent() + n_agent = self._build_agent() # Hack to test loop # We start method and expect it will raise after 2nd loop # If something goes wrong, assert_has_calls below will catch it try: - q_agent.daemon_loop() + n_agent.daemon_loop() except Exception: pass diff --git a/neutron/tests/unit/scheduler/test_l3_agent_scheduler.py b/neutron/tests/unit/scheduler/test_l3_agent_scheduler.py index fc55120fd95..a0d22f7c826 100644 --- a/neutron/tests/unit/scheduler/test_l3_agent_scheduler.py +++ b/neutron/tests/unit/scheduler/test_l3_agent_scheduler.py @@ -27,7 +27,7 @@ from oslo_utils import timeutils from sqlalchemy.orm import query from neutron.common import constants -from neutron import context as q_context +from neutron import context as n_context from neutron.db import agents_db from neutron.db import common_db_mixin from neutron.db import db_base_plugin_v2 as db_v2 @@ -770,7 +770,7 @@ class L3SchedulerTestCaseMixin(l3_agentschedulers_db.L3AgentSchedulerDbMixin, super(L3SchedulerTestCaseMixin, self).setUp(plugin=plugin_str, ext_mgr=ext_mgr) - self.adminContext = q_context.get_admin_context() + self.adminContext = n_context.get_admin_context() self.plugin = manager.NeutronManager.get_plugin() self.plugin.router_scheduler = importutils.import_object( 'neutron.scheduler.l3_agent_scheduler.ChanceScheduler' @@ -899,7 +899,7 @@ class L3DvrSchedulerTestCase(testlib_api.SqlTestCase): plugin = 
'neutron.plugins.ml2.plugin.Ml2Plugin' self.setup_coreplugin(plugin) super(L3DvrSchedulerTestCase, self).setUp() - self.adminContext = q_context.get_admin_context() + self.adminContext = n_context.get_admin_context() self.dut = L3DvrScheduler() def test__notify_port_delete(self): @@ -1108,7 +1108,7 @@ class L3DvrSchedulerTestCase(testlib_api.SqlTestCase): def test_dvr_deletens_if_no_port_no_routers(self): # Delete a vm port, the port subnet has no router interface. vm_tenant_id = 'tenant-1' - my_context = q_context.Context('user-1', vm_tenant_id, is_admin=False) + my_context = n_context.Context('user-1', vm_tenant_id, is_admin=False) vm_port_host = 'compute-node-1' vm_port = self._create_port( @@ -1139,7 +1139,7 @@ class L3DvrSchedulerTestCase(testlib_api.SqlTestCase): # A VM port is deleted, but the router can't be unscheduled from the # compute node because there is another VM port present. vm_tenant_id = 'tenant-1' - my_context = q_context.Context('user-1', vm_tenant_id, is_admin=False) + my_context = n_context.Context('user-1', vm_tenant_id, is_admin=False) shared_subnet_id = '80947d4a-fbc8-484b-9f92-623a6bfcf3e0', vm_port_host = 'compute-node-1' @@ -1199,7 +1199,7 @@ class L3DvrSchedulerTestCase(testlib_api.SqlTestCase): self.host = host self.agent_type = agent_type - my_context = q_context.Context('user-1', vm_tenant, is_admin=False) + my_context = n_context.Context('user-1', vm_tenant, is_admin=False) shared_subnet_id = '80947d4a-fbc8-484b-9f92-623a6bfcf3e0', vm_port_host = 'compute-node-1' @@ -1417,7 +1417,7 @@ class L3HATestCaseMixin(testlib_api.SqlTestCase, def setUp(self): super(L3HATestCaseMixin, self).setUp() - self.adminContext = q_context.get_admin_context() + self.adminContext = n_context.get_admin_context() self.plugin = L3HAPlugin() self.setup_coreplugin('neutron.plugins.ml2.plugin.Ml2Plugin') @@ -1732,7 +1732,7 @@ class TestGetL3AgentsWithAgentModeFilter(testlib_api.SqlTestCase, super(TestGetL3AgentsWithAgentModeFilter, self).setUp() self.plugin 
= L3HAPlugin() self.setup_coreplugin('neutron.plugins.ml2.plugin.Ml2Plugin') - self.adminContext = q_context.get_admin_context() + self.adminContext = n_context.get_admin_context() hosts = ['host_1', 'host_2', 'host_3', 'host_4', 'host_5'] agent_modes = ['legacy', 'dvr_snat', 'dvr', 'fake_mode', 'legacy'] for host, agent_mode in zip(hosts, agent_modes): From a0a022373b90835059b8949a57b097030bcbc37e Mon Sep 17 00:00:00 2001 From: John Davidge Date: Tue, 7 Jul 2015 17:00:01 +0100 Subject: [PATCH 54/54] Fix issues with allocation pool generation for ::/64 cidr Passing a ::/64 cidr to certain netaddr functions without specifying the ip_version causes errors. Fix this by specifying ip_version. Change-Id: I31aaf9f5dabe4dd0845507f245387cd4186c410c Closes-Bug: 1472304 --- neutron/ipam/utils.py | 13 ++++++++----- neutron/tests/unit/ipam/test_utils.py | 7 +++++++ 2 files changed, 15 insertions(+), 5 deletions(-) diff --git a/neutron/ipam/utils.py b/neutron/ipam/utils.py index 1cb894fc808..6a4d96e8715 100644 --- a/neutron/ipam/utils.py +++ b/neutron/ipam/utils.py @@ -43,16 +43,19 @@ def generate_pools(cidr, gateway_ip): """ # Auto allocate the pool around gateway_ip net = netaddr.IPNetwork(cidr) - if net.first == net.last: + ip_version = net.version + first = netaddr.IPAddress(net.first, ip_version) + last = netaddr.IPAddress(net.last, ip_version) + if first == last: # handle single address subnet case - return [netaddr.IPRange(net.first, net.last)] - first_ip = net.first + 1 + return [netaddr.IPRange(first, last)] + first_ip = first + 1 # last address is broadcast in v4 - last_ip = net.last - (net.version == 4) + last_ip = last - (ip_version == 4) if first_ip >= last_ip: # /31 lands here return [] ipset = netaddr.IPSet(netaddr.IPRange(first_ip, last_ip)) if gateway_ip: - ipset.remove(netaddr.IPAddress(gateway_ip)) + ipset.remove(netaddr.IPAddress(gateway_ip, ip_version)) return list(ipset.iter_ipranges()) diff --git a/neutron/tests/unit/ipam/test_utils.py 
b/neutron/tests/unit/ipam/test_utils.py index 6cfb50fe8a3..db2ee9c5135 100644 --- a/neutron/tests/unit/ipam/test_utils.py +++ b/neutron/tests/unit/ipam/test_utils.py @@ -80,3 +80,10 @@ class TestIpamUtils(base.BaseTestCase): cidr = 'F111::0/64' expected = [netaddr.IPRange('F111::1', 'F111::FFFF:FFFF:FFFF:FFFF')] self.assertEqual(expected, utils.generate_pools(cidr, None)) + + def test_generate_pools_v6_empty(self): + # We want to be sure the range will begin and end with an IPv6 + # address, even if an ambiguous ::/64 cidr is given. + cidr = '::/64' + expected = [netaddr.IPRange('::1', '::FFFF:FFFF:FFFF:FFFF')] + self.assertEqual(expected, utils.generate_pools(cidr, None))