diff --git a/neutron/db/l3_dvr_db.py b/neutron/db/l3_dvr_db.py
index b8da57d3f36..d7b6a52e39d 100644
--- a/neutron/db/l3_dvr_db.py
+++ b/neutron/db/l3_dvr_db.py
@@ -744,32 +744,54 @@ class L3_NAT_with_dvr_db_mixin(l3_db.L3_NAT_db_mixin,
         return (n_utils.is_dvr_serviced(port_dict['device_owner']) and
                 port_dict['fixed_ips'])

+    def _get_allowed_address_pair_fixed_ips(self, port_dict):
+        """Return all fixed_ips associated with the allowed_address_pairs."""
+        aa_pair_fixed_ips = []
+        if port_dict.get('allowed_address_pairs'):
+            for address_pair in port_dict['allowed_address_pairs']:
+                aap_ip_cidr = address_pair['ip_address'].split("/")
+                if len(aap_ip_cidr) == 1 or int(aap_ip_cidr[1]) == 32:
+                    aa_pair_fixed_ips.append(aap_ip_cidr[0])
+        return aa_pair_fixed_ips
+
     def update_arp_entry_for_dvr_service_port(self, context, port_dict):
         """Notify L3 agents of ARP table entry for dvr service port.

         When a dvr service port goes up, look for the DVR router on
         the port's subnet, and send the ARP details to all
         L3 agents hosting the router to add it.
+        If there are any allowed_address_pairs associated with the port,
+        those fixed_ips should also be updated in the ARP table.
         """
         if not self._should_update_arp_entry_for_dvr_service_port(port_dict):
             return
-        changed_fixed_ips = port_dict['fixed_ips']
+        fixed_ips = port_dict['fixed_ips']
+        allowed_address_pair_fixed_ips = (
+            self._get_allowed_address_pair_fixed_ips(port_dict))
+        changed_fixed_ips = fixed_ips + allowed_address_pair_fixed_ips
         for fixed_ip in changed_fixed_ips:
             self._generate_arp_table_and_notify_agent(
                 context, fixed_ip, port_dict['mac_address'],
                 self.l3_rpc_notifier.add_arp_entry)

-    def delete_arp_entry_for_dvr_service_port(self, context, port_dict):
+    def delete_arp_entry_for_dvr_service_port(
+            self, context, port_dict, fixed_ips_to_delete=None):
         """Notify L3 agents of ARP table entry for dvr service port.

         When a dvr service port goes down, look for the DVR
         router on the port's subnet, and send the ARP details to all
         L3 agents hosting the router to delete it.
+        If there are any allowed_address_pairs associated with the
+        port, those fixed_ips should be removed from the ARP table.
         """
         if not self._should_update_arp_entry_for_dvr_service_port(port_dict):
             return
-        changed_fixed_ips = port_dict['fixed_ips']
-        for fixed_ip in changed_fixed_ips:
+        if not fixed_ips_to_delete:
+            fixed_ips = port_dict['fixed_ips']
+            allowed_address_pair_fixed_ips = (
+                self._get_allowed_address_pair_fixed_ips(port_dict))
+            fixed_ips_to_delete = fixed_ips + allowed_address_pair_fixed_ips
+        for fixed_ip in fixed_ips_to_delete:
             self._generate_arp_table_and_notify_agent(
                 context, fixed_ip, port_dict['mac_address'],
                 self.l3_rpc_notifier.del_arp_entry)
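
Note: the helper above only treats an allowed address pair as a host entry
when it is a bare address or an explicit /32; wider CIDRs are skipped, since
a whole prefix cannot be mapped to a single ARP entry. A minimal standalone
sketch of that filter (plain Python, invented names, no Neutron imports):

    def host_ips_from_pairs(allowed_address_pairs):
        """Keep only pairs that resolve to a single IPv4 host address."""
        host_ips = []
        for pair in allowed_address_pairs:
            ip_cidr = pair['ip_address'].split("/")
            # '10.1.0.201' and '10.1.0.201/32' qualify; '10.1.0.0/24' does not.
            if len(ip_cidr) == 1 or int(ip_cidr[1]) == 32:
                host_ips.append(ip_cidr[0])
        return host_ips

    pairs = [{'ip_address': '10.1.0.201', 'mac_address': 'aa:bb:cc:dd:ee:ff'},
             {'ip_address': '10.1.0.0/24', 'mac_address': 'aa:bb:cc:dd:ee:ff'}]
    assert host_ips_from_pairs(pairs) == ['10.1.0.201']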
""" if not self._should_update_arp_entry_for_dvr_service_port(port_dict): return - changed_fixed_ips = port_dict['fixed_ips'] - for fixed_ip in changed_fixed_ips: + if not fixed_ips_to_delete: + fixed_ips = port_dict['fixed_ips'] + allowed_address_pair_fixed_ips = ( + self._get_allowed_address_pair_fixed_ips(port_dict)) + fixed_ips_to_delete = fixed_ips + allowed_address_pair_fixed_ips + for fixed_ip in fixed_ips_to_delete: self._generate_arp_table_and_notify_agent( context, fixed_ip, port_dict['mac_address'], self.l3_rpc_notifier.del_arp_entry) @@ -853,6 +875,73 @@ class L3_NAT_with_dvr_db_mixin(l3_db.L3_NAT_db_mixin, floating_ip = self._delete_floatingip(context, id) self._notify_floating_ip_change(context, floating_ip) + def _get_address_pair_active_port_with_fip( + self, context, port_dict, port_addr_pair_ip): + port_valid_state = (port_dict['admin_state_up'] or + (port_dict['status'] == l3_const.PORT_STATUS_ACTIVE)) + if not port_valid_state: + return + query = context.session.query(l3_db.FloatingIP).filter( + l3_db.FloatingIP.fixed_ip_address == port_addr_pair_ip) + fip = query.first() + return self._core_plugin.get_port( + context, fip.fixed_port_id) if fip else None + + def update_unbound_allowed_address_pair_port_binding( + self, context, service_port_dict, port_address_pairs): + """Update allowed address pair port with host and device_owner + + This function sets the host and device_owner to the port + associated with the port_addr_pair_ip with the port_dict's + host and device_owner. + """ + port_addr_pair_ip = port_address_pairs['ip_address'] + address_pair_port = self._get_address_pair_active_port_with_fip( + context, service_port_dict, port_addr_pair_ip) + if address_pair_port: + host = service_port_dict[portbindings.HOST_ID] + dev_owner = service_port_dict['device_owner'] + address_pair_dev_owner = address_pair_port.get('device_owner') + # If the allowed_address_pair port already has an associated + # device owner, and if the device_owner is a dvr serviceable + # port, then don't update the device_owner. + port_profile = address_pair_port.get(portbindings.PROFILE, {}) + if n_utils.is_dvr_serviced(address_pair_dev_owner): + port_profile['original_owner'] = address_pair_dev_owner + port_data = {portbindings.HOST_ID: host, + portbindings.PROFILE: port_profile} + else: + port_data = {portbindings.HOST_ID: host, + 'device_owner': dev_owner} + update_port = self._core_plugin.update_port( + context, address_pair_port['id'], {'port': port_data}) + return update_port + + def remove_unbound_allowed_address_pair_port_binding( + self, context, service_port_dict, port_address_pairs): + """Remove allowed address pair port binding and device_owner + + This function clears the host and device_owner associated with + the port_addr_pair_ip. 
+ """ + port_addr_pair_ip = port_address_pairs['ip_address'] + address_pair_port = self._get_address_pair_active_port_with_fip( + context, service_port_dict, port_addr_pair_ip) + if address_pair_port: + # Before reverting the changes, fetch the original + # device owner saved in profile and update the port + port_profile = address_pair_port.get(portbindings.PROFILE) + orig_device_owner = "" + if port_profile: + orig_device_owner = port_profile.get('original_owner') + del port_profile['original_owner'] + port_data = {portbindings.HOST_ID: "", + 'device_owner': orig_device_owner, + portbindings.PROFILE: port_profile} + update_port = self._core_plugin.update_port( + context, address_pair_port['id'], {'port': port_data}) + return update_port + def is_distributed_router(router): """Return True if router to be handled is distributed.""" diff --git a/neutron/db/l3_dvrscheduler_db.py b/neutron/db/l3_dvrscheduler_db.py index 4772164bfa5..bda5b3f42df 100644 --- a/neutron/db/l3_dvrscheduler_db.py +++ b/neutron/db/l3_dvrscheduler_db.py @@ -339,6 +339,30 @@ class L3_DVRsch_db_mixin(l3agent_sch_db.L3AgentSchedulerDbMixin): return query.first() is not None +def _dvr_handle_unbound_allowed_addr_pair_add( + plugin, context, port, allowed_address_pair): + updated_port = plugin.update_unbound_allowed_address_pair_port_binding( + context, port, allowed_address_pair) + if updated_port: + LOG.debug("Allowed address pair port binding updated " + "based on service port binding: %s", updated_port) + plugin.dvr_handle_new_service_port(context, updated_port) + plugin.update_arp_entry_for_dvr_service_port(context, port) + + +def _dvr_handle_unbound_allowed_addr_pair_del( + plugin, context, port, allowed_address_pair): + updated_port = plugin.remove_unbound_allowed_address_pair_port_binding( + context, port, allowed_address_pair) + if updated_port: + LOG.debug("Allowed address pair port binding removed " + "from service port binding: %s", updated_port) + aa_fixed_ips = plugin._get_allowed_address_pair_fixed_ips(port) + if aa_fixed_ips: + plugin.delete_arp_entry_for_dvr_service_port( + context, port, fixed_ips_to_delete=aa_fixed_ips) + + def _notify_l3_agent_new_port(resource, event, trigger, **kwargs): LOG.debug('Received %(resource)s %(event)s', { 'resource': resource, @@ -360,6 +384,13 @@ def _notify_port_delete(event, resource, trigger, **kwargs): port = kwargs['port'] l3plugin = manager.NeutronManager.get_service_plugins().get( service_constants.L3_ROUTER_NAT) + if port: + port_host = port.get(portbindings.HOST_ID) + allowed_address_pairs_list = port.get('allowed_address_pairs') + if allowed_address_pairs_list and port_host: + for address_pair in allowed_address_pairs_list: + _dvr_handle_unbound_allowed_addr_pair_del( + l3plugin, context, port, address_pair) l3plugin.delete_arp_entry_for_dvr_service_port(context, port) removed_routers = l3plugin.get_dvr_routers_to_remove(context, port) for info in removed_routers: @@ -399,10 +430,6 @@ def _notify_l3_agent_port_update(resource, event, trigger, **kwargs): event, resource, trigger, **removed_router_args) if not is_new_device_dvr_serviced: return - is_fixed_ips_changed = ( - 'fixed_ips' in new_port and - 'fixed_ips' in original_port and - new_port['fixed_ips'] != original_port['fixed_ips']) is_new_port_binding_changed = ( new_port[portbindings.HOST_ID] and (original_port[portbindings.HOST_ID] != @@ -420,7 +447,38 @@ def _notify_l3_agent_port_update(resource, event, trigger, **kwargs): dest_host=dest_host) l3plugin.update_arp_entry_for_dvr_service_port( context, 
diff --git a/neutron/tests/functional/services/l3_router/test_l3_dvr_router_plugin.py b/neutron/tests/functional/services/l3_router/test_l3_dvr_router_plugin.py
index 170d38132c8..66a0371ce74 100644
--- a/neutron/tests/functional/services/l3_router/test_l3_dvr_router_plugin.py
+++ b/neutron/tests/functional/services/l3_router/test_l3_dvr_router_plugin.py
@@ -453,6 +453,144 @@ class L3DvrTestCase(ml2_test_base.ML2TestFramework):
         self.assertEqual(1, len(snat_router_intfs[router1['id']]))
         self.assertEqual(1, len(fixed_ips))

+    def test_update_service_port_with_allowed_address_pairs(self):
+        HOST1 = 'host1'
+        helpers.register_l3_agent(
+            host=HOST1, agent_mode=constants.L3_AGENT_MODE_DVR)
+        router = self._create_router()
+        private_net1 = self._make_network(self.fmt, 'net1', True)
+        test_allocation_pools = [{'start': '10.1.0.2',
+                                  'end': '10.1.0.20'}]
+        fixed_vrrp_ip = [{'ip_address': '10.1.0.201'}]
+        kwargs = {'arg_list': (external_net.EXTERNAL,),
+                  external_net.EXTERNAL: True}
+        ext_net = self._make_network(self.fmt, '', True, **kwargs)
+        self._make_subnet(
+            self.fmt, ext_net, '10.20.0.1', '10.20.0.0/24',
+            ip_version=4, enable_dhcp=True)
+        # Set gateway to router
+        self.l3_plugin._update_router_gw_info(
+            self.context, router['id'],
+            {'network_id': ext_net['network']['id']})
+        private_subnet1 = self._make_subnet(
+            self.fmt,
+            private_net1,
+            '10.1.0.1',
+            cidr='10.1.0.0/24',
+            ip_version=4,
+            allocation_pools=test_allocation_pools,
+            enable_dhcp=True)
+        vrrp_port = self._make_port(
+            self.fmt,
+            private_net1['network']['id'],
+            device_owner=constants.DEVICE_OWNER_LOADBALANCER,
+            fixed_ips=fixed_vrrp_ip)
+        allowed_address_pairs = [
+            {'ip_address': '10.1.0.201',
+             'mac_address': vrrp_port['port']['mac_address']}]
+        with self.port(
+                subnet=private_subnet1,
+                device_owner=DEVICE_OWNER_COMPUTE) as int_port:
+            self.l3_plugin.add_router_interface(
+                self.context, router['id'],
+                {'subnet_id': private_subnet1['subnet']['id']})
+            with mock.patch.object(self.l3_plugin,
+                                   '_l3_rpc_notifier') as l3_notifier:
+                self.core_plugin.update_port(
+                    self.context, int_port['port']['id'],
+                    {'port': {portbindings.HOST_ID: HOST1}})
+
+                l3_notifier.routers_updated_on_host.assert_called_once_with(
+                    self.context, {router['id']}, HOST1)
+
+                floating_ip = {'floating_network_id': ext_net['network']['id'],
+                               'router_id': router['id'],
+                               'port_id': vrrp_port['port']['id'],
+                               'tenant_id': vrrp_port['port']['tenant_id']}
+                floating_ip = self.l3_plugin.create_floatingip(
+                    self.context, {'floatingip': floating_ip})
+
+                vrrp_port_db = self.core_plugin.get_port(
+                    self.context, vrrp_port['port']['id'])
+                self.assertNotEqual(vrrp_port_db[portbindings.HOST_ID], HOST1)
+                # Now update the VM port with the allowed_address_pair.
+                cur_int_port = self.core_plugin.update_port(
+                    self.context, int_port['port']['id'],
+                    {'port': {
+                        'allowed_address_pairs': allowed_address_pairs}})
+                cur_vrrp_port_db = self.core_plugin.get_port(
+                    self.context, vrrp_port['port']['id'])
+                # Check to make sure that we are not changing the existing
+                # device_owner for the allowed_address_pair port.
+                self.assertEqual(
+                    cur_vrrp_port_db['device_owner'],
+                    constants.DEVICE_OWNER_LOADBALANCER)
+                self.assertEqual(cur_vrrp_port_db[portbindings.HOST_ID], HOST1)
+                port_profile = cur_vrrp_port_db.get(portbindings.PROFILE)
+                self.assertTrue(port_profile)
+                self.assertEqual(port_profile['original_owner'],
+                                 constants.DEVICE_OWNER_LOADBALANCER)
+                # Now change the compute port admin_state_up from True to
+                # False, and check that the vrrp port's device_owner and
+                # binding inheritance revert back to normal.
+                mod_int_port = self.core_plugin.update_port(
+                    self.context, cur_int_port['id'],
+                    {'port': {
+                        'admin_state_up': False}})
+                self.assertFalse(mod_int_port['admin_state_up'])
+                new_vrrp_port_db = self.core_plugin.get_port(
+                    self.context, cur_vrrp_port_db['id'])
+                new_port_profile = new_vrrp_port_db.get(portbindings.PROFILE)
+                self.assertEqual({}, new_port_profile)
+                self.assertNotEqual(
+                    new_vrrp_port_db[portbindings.HOST_ID], HOST1)
+                # Now change the compute port admin_state_up from False to
+                # True, and check that the vrrp port's device_owner and
+                # binding are inherited from the associated compute port.
+                new_mod_int_port = self.core_plugin.update_port(
+                    self.context, mod_int_port['id'],
+                    {'port': {
+                        'admin_state_up': True}})
+                self.assertTrue(new_mod_int_port['admin_state_up'])
+                cur_new_vrrp_port_db = self.core_plugin.get_port(
+                    self.context, new_vrrp_port_db['id'])
+                self.assertNotEqual(
+                    cur_new_vrrp_port_db['device_owner'], DEVICE_OWNER_COMPUTE)
+                self.assertEqual(
+                    cur_new_vrrp_port_db[portbindings.HOST_ID], HOST1)
+                # Now remove the vrrp_port device_owner and check how it
+                # inherits from the compute port.
+                updated_vrrp_port = self.core_plugin.update_port(
+                    self.context, cur_new_vrrp_port_db['id'],
+                    {'port': {'device_owner': "",
+                              portbindings.PROFILE: {'original_owner': ""}}})
+                updated_vm_port = self.core_plugin.update_port(
+                    self.context, new_mod_int_port['id'],
+                    {'port': {
+                        'admin_state_up': False}})
+                self.assertFalse(updated_vm_port['admin_state_up'])
+                # Taking the port's admin_state down should not disturb
+                # the existing vrrp port device_owner; it should only
+                # clear the port binding HOST_ID.
+                cur_new_vrrp_port_db = self.core_plugin.get_port(
+                    self.context, updated_vrrp_port['id'])
+                self.assertEqual(
+                    "", cur_new_vrrp_port_db['device_owner'])
+                self.assertEqual(
+                    "", cur_new_vrrp_port_db[portbindings.HOST_ID])
+                updated_vm_port = self.core_plugin.update_port(
+                    self.context, new_mod_int_port['id'],
+                    {'port': {
+                        'admin_state_up': True}})
+                self.assertTrue(updated_vm_port['admin_state_up'])
+                updated_vrrp_port_db = self.core_plugin.get_port(
+                    self.context, new_vrrp_port_db['id'])
+                self.assertEqual(
+                    updated_vrrp_port_db['device_owner'], DEVICE_OWNER_COMPUTE)
+                self.assertEqual(
+                    updated_vrrp_port_db[portbindings.HOST_ID], HOST1)
+
     def test_update_vm_port_host_router_update(self):
         # register l3 agents in dvr mode in addition to existing dvr_snat agent
         HOST1 = 'host1'
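
Taken together, the functional test above drives the whole lifecycle with two
kinds of port updates; schematically (assuming a core plugin handle and IDs
as in the test, not a public client API):

    # Attach the VIP to the VM port: fires the unbound-pair bind path.
    core_plugin.update_port(
        ctx, vm_port_id,
        {'port': {'allowed_address_pairs': [
            {'ip_address': '10.1.0.201', 'mac_address': vrrp_mac}]}})

    # Deactivate the VM port: fires the unbind/revert path.
    core_plugin.update_port(
        ctx, vm_port_id, {'port': {'admin_state_up': False}})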
diff --git a/neutron/tests/unit/scheduler/test_l3_agent_scheduler.py b/neutron/tests/unit/scheduler/test_l3_agent_scheduler.py
index b1824ebfc65..2c8206f565d 100644
--- a/neutron/tests/unit/scheduler/test_l3_agent_scheduler.py
+++ b/neutron/tests/unit/scheduler/test_l3_agent_scheduler.py
@@ -870,6 +870,103 @@ class L3DvrSchedulerTestCase(testlib_api.SqlTestCase):
         self.adminContext = n_context.get_admin_context()
         self.dut = L3DvrScheduler()

+    def test__notify_l3_agent_update_port_with_allowed_address_pairs_revert(
+            self):
+        port_id = str(uuid.uuid4())
+        kwargs = {
+            'context': self.adminContext,
+            'port': {
+                'id': port_id,
+                'admin_state_up': False,
+                portbindings.HOST_ID: 'vm-host',
+                'device_id': 'vm-id',
+                'allowed_address_pairs': [
+                    {'ip_address': '10.1.0.201',
+                     'mac_address': 'aa:bb:cc:dd:ee:ff'}],
+                'device_owner': DEVICE_OWNER_COMPUTE,
+            },
+            'original_port': {
+                'id': port_id,
+                'admin_state_up': True,
+                portbindings.HOST_ID: 'vm-host',
+                'device_id': 'vm-id',
+                'allowed_address_pairs': [
+                    {'ip_address': '10.1.0.201',
+                     'mac_address': 'aa:bb:cc:dd:ee:ff'}],
+                'device_owner': DEVICE_OWNER_COMPUTE,
+            },
+        }
+        port = kwargs.get('original_port')
+        port_addr_pairs = port['allowed_address_pairs']
+        l3plugin = mock.Mock()
+        # Stub the fixed-ip lookup before the notification fires so the
+        # handler sees the stubbed value.
+        l3plugin._get_allowed_address_pair_fixed_ips.return_value = (
+            ['10.1.0.21'])
+
+        with mock.patch.object(manager.NeutronManager,
+                               'get_service_plugins',
+                               return_value={'L3_ROUTER_NAT': l3plugin}):
+            l3_dvrscheduler_db._notify_l3_agent_port_update(
+                'port', 'after_update', mock.ANY, **kwargs)
+            l3plugin.remove_unbound_allowed_address_pair_port_binding.\
+                assert_called_once_with(
+                    self.adminContext,
+                    port,
+                    port_addr_pairs[0])
+            self.assertFalse(
+                l3plugin.update_arp_entry_for_dvr_service_port.called)
+            l3plugin.delete_arp_entry_for_dvr_service_port.\
+                assert_called_once_with(
+                    self.adminContext,
+                    port,
+                    fixed_ips_to_delete=mock.ANY)
+            self.assertFalse(l3plugin.dvr_handle_new_service_port.called)
+
+    def test__notify_l3_agent_update_port_with_allowed_address_pairs(self):
+        port_id = str(uuid.uuid4())
+        kwargs = {
+            'context': self.adminContext,
+            'port': {
+                'id': port_id,
+                portbindings.HOST_ID: 'vm-host',
+                'allowed_address_pairs': [
+                    {'ip_address': '10.1.0.201',
+                     'mac_address': 'aa:bb:cc:dd:ee:ff'}],
+                'device_id': 'vm-id',
+                'device_owner': DEVICE_OWNER_COMPUTE,
+                'admin_state_up': True,
+            },
+            'original_port': {
+                'id': port_id,
+                portbindings.HOST_ID: 'vm-host',
+                'device_id': 'vm-id',
+                'device_owner': DEVICE_OWNER_COMPUTE,
+                'admin_state_up': True,
+            },
+        }
+        port = kwargs.get('port')
+        port_addr_pairs = port['allowed_address_pairs']
+        l3plugin = mock.Mock()
+
+        with mock.patch.object(manager.NeutronManager,
+                               'get_service_plugins',
+                               return_value={'L3_ROUTER_NAT': l3plugin}):
+            l3_dvrscheduler_db._notify_l3_agent_port_update(
+                'port', 'after_update', mock.ANY, **kwargs)
+            l3plugin.update_unbound_allowed_address_pair_port_binding.\
+                assert_called_once_with(
+                    self.adminContext,
+                    port,
+                    port_addr_pairs[0])
+            self.assertTrue(
+                l3plugin.update_arp_entry_for_dvr_service_port.called)
+            self.assertTrue(l3plugin.dvr_handle_new_service_port.called)
+
     def test__notify_l3_agent_update_port_no_removing_routers(self):
         port_id = 'fake-port'
         kwargs = {
@@ -1106,12 +1203,22 @@ class L3DvrSchedulerTestCase(testlib_api.SqlTestCase):
             'router', constants.L3_AGENT_SCHEDULER_EXT_ALIAS,
             constants.L3_DISTRIBUTED_EXT_ALIAS
         ]
+        port = {
+            'id': str(uuid.uuid4()),
+            'device_id': 'abcd',
+            'device_owner': DEVICE_OWNER_COMPUTE_NOVA,
+            portbindings.HOST_ID: 'host1',
+        }
+
         with mock.patch.object(manager.NeutronManager,
                                'get_service_plugins',
                                return_value={'L3_ROUTER_NAT': l3plugin}):
             kwargs = {
                 'context': self.adminContext,
-                'port': mock.ANY,
+                'port': port,
+                'removed_routers': [
+                    {'agent_id': 'foo_agent', 'router_id': 'foo_id'},
+                ],
             }
             removed_routers = [{'agent_id': 'foo_agent', 'router_id': 'foo_id',