DVR: Server side patch to schedule an unbound port with Floating IP

Unbound ports that are associated with a Floating IP and connected to
DVR routers are not serviced by those routers unless they are bound
to a valid host.

This server-side patch allows the neutron server to schedule the
Floating IP of an unbound port on the network node (or whichever node
runs the dvr_snat agent), where the SNAT functionality resides.

The DNAT rules for the unbound ports will be configured in the SNAT
namespace on the network node.

Related-Bug: #1583694
Change-Id: I05d0bfb3fa275b1e4e479928000cf8494da858f6
Author: Swaminathan Vasudevan
Date:   2016-05-24 14:03:39 -07:00
Parent: 1a11bc9605
Commit: cced31c6b9
6 changed files with 195 additions and 364 deletions
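
At its core, the change tags a floating IP whose fixed port has no host
binding with the new DVR_SNAT_BOUND key, so that only the dvr_snat agent
on the network node serves it, while legacy and dvr mode agents skip it.
The snippet below is a minimal sketch of that decision under those
assumptions; the dict keys and constants mirror the diff, but the helpers
mark_unbound_fip and should_skip_fip_for_agent are illustrative stand-ins,
not the actual plugin methods changed below.

```python
# Minimal sketch of the server-side decision introduced by this patch.
# The dict keys ('host', 'dest_host', 'dvr_snat_bound') mirror the
# floating IP fields used in the diff; the helper names are illustrative
# stand-ins, not the actual _DVRAgentInterfaceMixin methods.

L3_AGENT_MODE_LEGACY = 'legacy'
L3_AGENT_MODE_DVR = 'dvr'
L3_AGENT_MODE_DVR_SNAT = 'dvr_snat'
DVR_SNAT_BOUND = 'dvr_snat_bound'


def mark_unbound_fip(fip):
    """Tag a floating IP whose fixed port has no host binding."""
    if not fip.get('host'):
        fip[DVR_SNAT_BOUND] = True
    return fip


def should_skip_fip_for_agent(fip, agent_mode):
    """Return True if a fully unbound, untagged FIP should be skipped.

    Legacy and dvr mode agents skip a floating IP that has neither a
    host nor a dest_host binding; the dvr_snat agent on the network
    node does not, so it can set up the DNAT rules in the SNAT
    namespace.
    """
    return (fip.get('host') is None and
            fip.get('dest_host') is None and
            agent_mode in (L3_AGENT_MODE_LEGACY, L3_AGENT_MODE_DVR) and
            not fip.get(DVR_SNAT_BOUND))


if __name__ == '__main__':
    fip = {'host': None, 'dest_host': None}
    # A compute-node (dvr) agent skips the unbound floating IP,
    # while the dvr_snat agent on the network node still serves it.
    assert should_skip_fip_for_agent(fip, L3_AGENT_MODE_DVR)
    assert not should_skip_fip_for_agent(fip, L3_AGENT_MODE_DVR_SNAT)
    # The server tags the unbound floating IP as dvr_snat_bound.
    mark_unbound_fip(fip)
    assert fip[DVR_SNAT_BOUND]
```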


@@ -28,6 +28,7 @@ HA_ROUTER_STATE_KEY = '_ha_state'
METERING_LABEL_KEY = '_metering_labels'
FLOATINGIP_AGENT_INTF_KEY = '_floatingip_agent_interfaces'
SNAT_ROUTER_INTF_KEY = '_snat_router_interfaces'
DVR_SNAT_BOUND = 'dvr_snat_bound'
HA_NETWORK_NAME = 'HA network tenant %s'
HA_SUBNET_NAME = 'HA subnet tenant %s'


@@ -350,10 +350,6 @@ class DVRResourceOperationHandler(object):
fixed_ip_address))
if not addr_pair_active_service_port_list:
return
if len(addr_pair_active_service_port_list) > 1:
LOG.warning(_LW("Multiple active ports associated "
"with the allowed_address_pairs."))
return
self._inherit_service_port_and_arp_update(
context, addr_pair_active_service_port_list[0],
port)
@@ -365,16 +361,8 @@ class DVRResourceOperationHandler(object):
service_port)
address_pair_list = service_port_dict.get('allowed_address_pairs')
for address_pair in address_pair_list:
updated_port = (
self.l3plugin.update_unbound_allowed_address_pair_port_binding(
context, service_port_dict,
address_pair,
address_pair_port=allowed_address_port))
if not updated_port:
LOG.warning(_LW("Allowed_address_pair port update failed: %s"),
updated_port)
self.l3plugin.update_arp_entry_for_dvr_service_port(
context, service_port_dict)
self.update_arp_entry_for_dvr_service_port(context,
service_port_dict)
@registry.receives(resources.ROUTER_INTERFACE, [events.BEFORE_CREATE])
@db_api.retry_if_session_inactive()
@@ -638,19 +626,57 @@ class _DVRAgentInterfaceMixin(object):
return routers_dict
def _process_floating_ips_dvr(self, context, routers_dict,
floating_ips, host):
floating_ips, host, agent):
LOG.debug("FIP Agent : %s ", agent.id)
for floating_ip in floating_ips:
router = routers_dict.get(floating_ip['router_id'])
if router:
router_floatingips = router.get(const.FLOATINGIP_KEY, [])
if router['distributed']:
if (floating_ip.get('host', None) != host and
floating_ip.get('dest_host') is None):
fip_host = floating_ip.get('host')
fip_dest_host = floating_ip.get('dest_host')
# Skip if floatingip need not be processed for the
# given agent.
if self._should_skip_floating_ip_processed_for_given_agent(
floating_ip, fip_host, fip_dest_host, agent):
continue
LOG.debug("Floating IP host: %s", floating_ip['host'])
# Also skip the floatingip if the fip port has a host defined
# and the host does not match.
if self._check_floating_ip_not_valid_for_given_host(
fip_host, fip_dest_host, host):
continue
LOG.debug("Floating IP host: %s", fip_host)
router_floatingips.append(floating_ip)
router[const.FLOATINGIP_KEY] = router_floatingips
def _check_floating_ip_not_valid_for_given_host(
self, fip_host, fip_dest_host, host):
"""Function to check if floatingip host match for the given agent.
Check if the given floatingip host matches with the requesting
host when floatingip dest_host is None.
If floatingip dest_host is not None it means that the floatingip
is migrating to a new compute host and the original host will not
match.
"""
host_mismatch = (
fip_host != host and fip_dest_host is None)
return (fip_host is not None and host_mismatch)
def _should_skip_floating_ip_processed_for_given_agent(
self, floating_ip, fip_host, fip_dest_host, agent):
"""Function to check if floatingip need to be processed or skipped.
Skip if host and dest_host is none and the agent
requesting is not dvr_snat agent, and the fip has
not already been assigned 'dvr_snat_bound' state.
"""
agent_mode = self._get_agent_mode(agent)
return (fip_host is None and (fip_dest_host is None) and
agent_mode in [const.L3_AGENT_MODE_LEGACY,
const.L3_AGENT_MODE_DVR] and
not floating_ip.get(l3_const.DVR_SNAT_BOUND))
def _get_fip_agent_gw_ports(self, context, fip_agent_id):
"""Return list of floating agent gateway ports for the agent."""
if not fip_agent_id:
@@ -684,7 +710,11 @@ class _DVRAgentInterfaceMixin(object):
port_in_migration = (
port_profile and
port_profile.get('migrating_to') == host)
if (port[portbindings.HOST_ID] == host or port_in_migration):
# All unbound ports with a floatingip, irrespective of
# the device owner, should be included as valid ports
# and updated.
if (port[portbindings.HOST_ID] == host or port_in_migration or
self._is_unbound_port(port)):
port_dict.update({port['id']: port})
# Add the port binding host to the floatingip dictionary
for fip in floating_ips:
@@ -695,9 +725,14 @@ class _DVRAgentInterfaceMixin(object):
fip['dest_host'] = (
self._get_dvr_migrating_service_port_hostid(
context, fip['port_id'], port=vm_port))
# Handle the case where there is no host binding
# for the private ports that are associated with
# a floating ip.
if not fip['host']:
fip[l3_const.DVR_SNAT_BOUND] = True
routers_dict = self._process_routers(context, routers, agent)
self._process_floating_ips_dvr(context, routers_dict,
floating_ips, host)
floating_ips, host, agent)
ports_to_populate = []
for router in routers_dict.values():
if router.get('gw_port'):
@@ -711,12 +746,14 @@ class _DVRAgentInterfaceMixin(object):
self._process_interfaces(routers_dict, interfaces)
return list(routers_dict.values())
def _is_unbound_port(self, port):
"""Check for port-bindings irrespective of device_owner."""
return not port[portbindings.HOST_ID]
def _get_dvr_service_port_hostid(self, context, port_id, port=None):
"""Returns the portbinding host_id for dvr service port."""
port_db = port or self._core_plugin.get_port(context, port_id)
device_owner = port_db['device_owner'] if port_db else ""
if n_utils.is_dvr_serviced(device_owner):
return port_db[portbindings.HOST_ID]
return port_db[portbindings.HOST_ID] or None
def _get_dvr_migrating_service_port_hostid(
self, context, port_id, port=None):
@@ -726,8 +763,6 @@ class _DVRAgentInterfaceMixin(object):
port_dest_host = None
if port_profile:
port_dest_host = port_profile.get('migrating_to')
device_owner = port_db['device_owner'] if port_db else ""
if n_utils.is_dvr_serviced(device_owner):
return port_dest_host
def _get_agent_gw_ports_exist_for_network(
@@ -814,11 +849,6 @@ class _DVRAgentInterfaceMixin(object):
'subnet_id': subnet}
notifier(context, router_id, arp_table)
def _should_update_arp_entry_for_dvr_service_port(self, port_dict):
# Check this is a valid VM or service port
return (n_utils.is_dvr_serviced(port_dict['device_owner']) and
port_dict['fixed_ips'])
def _get_subnet_id_for_given_fixed_ip(
self, context, fixed_ip, port_dict):
"""Returns the subnet_id that matches the fixedip on a network."""
@@ -855,9 +885,9 @@ class _DVRAgentInterfaceMixin(object):
If there are any allowed_address_pairs associated with the port
those fixed_ips should also be updated in the ARP table.
"""
if not self._should_update_arp_entry_for_dvr_service_port(port_dict):
return
fixed_ips = port_dict['fixed_ips']
if not fixed_ips:
return
allowed_address_pair_fixed_ips = (
self._get_allowed_address_pair_fixed_ips(context, port_dict))
changed_fixed_ips = fixed_ips + allowed_address_pair_fixed_ips
@@ -876,10 +906,10 @@ class _DVRAgentInterfaceMixin(object):
If there are any allowed_address_pairs associated with the
port, those fixed_ips should be removed from the ARP table.
"""
if not self._should_update_arp_entry_for_dvr_service_port(port_dict):
fixed_ips = port_dict['fixed_ips']
if not fixed_ips:
return
if not fixed_ips_to_delete:
fixed_ips = port_dict['fixed_ips']
allowed_address_pair_fixed_ips = (
self._get_allowed_address_pair_fixed_ips(context, port_dict))
fixed_ips_to_delete = fixed_ips + allowed_address_pair_fixed_ips
@@ -900,65 +930,6 @@ class _DVRAgentInterfaceMixin(object):
return self._core_plugin.get_port(
context, fip.fixed_port_id) if fip else None
def update_unbound_allowed_address_pair_port_binding(
self, context, service_port_dict,
port_address_pairs, address_pair_port=None):
"""Update allowed address pair port with host and device_owner
This function sets the host and device_owner to the port
associated with the port_addr_pair_ip with the port_dict's
host and device_owner.
"""
port_addr_pair_ip = port_address_pairs['ip_address']
if not address_pair_port:
address_pair_port = self._get_address_pair_active_port_with_fip(
context, service_port_dict, port_addr_pair_ip)
if address_pair_port:
host = service_port_dict[portbindings.HOST_ID]
dev_owner = service_port_dict['device_owner']
address_pair_dev_owner = address_pair_port.get('device_owner')
# If the allowed_address_pair port already has an associated
# device owner, and if the device_owner is a dvr serviceable
# port, then don't update the device_owner.
port_profile = address_pair_port.get(portbindings.PROFILE, {})
if n_utils.is_dvr_serviced(address_pair_dev_owner):
port_profile['original_owner'] = address_pair_dev_owner
port_data = {portbindings.HOST_ID: host,
portbindings.PROFILE: port_profile}
else:
port_data = {portbindings.HOST_ID: host,
'device_owner': dev_owner}
update_port = self._core_plugin.update_port(
context, address_pair_port['id'], {'port': port_data})
return update_port
def remove_unbound_allowed_address_pair_port_binding(
self, context, service_port_dict,
port_address_pairs, address_pair_port=None):
"""Remove allowed address pair port binding and device_owner
This function clears the host and device_owner associated with
the port_addr_pair_ip.
"""
port_addr_pair_ip = port_address_pairs['ip_address']
if not address_pair_port:
address_pair_port = self._get_address_pair_active_port_with_fip(
context, service_port_dict, port_addr_pair_ip)
if address_pair_port:
# Before reverting the changes, fetch the original
# device owner saved in profile and update the port
port_profile = address_pair_port.get(portbindings.PROFILE)
orig_device_owner = ""
if port_profile:
orig_device_owner = port_profile.get('original_owner')
del port_profile['original_owner']
port_data = {portbindings.HOST_ID: "",
'device_owner': orig_device_owner,
portbindings.PROFILE: port_profile}
update_port = self._core_plugin.update_port(
context, address_pair_port['id'], {'port': port_data})
return update_port
class L3_NAT_with_dvr_db_mixin(_DVRAgentInterfaceMixin,
DVRResourceOperationHandler,
@@ -1009,6 +980,7 @@ class L3_NAT_with_dvr_db_mixin(_DVRAgentInterfaceMixin,
host = self._get_dvr_service_port_hostid(context, fixed_port_id)
dest_host = self._get_dvr_migrating_service_port_hostid(
context, fixed_port_id)
if host is not None:
self.l3_rpc_notifier.routers_updated_on_host(
context, [router_id], host)
if dest_host and dest_host != host:
@@ -1016,6 +988,8 @@ class L3_NAT_with_dvr_db_mixin(_DVRAgentInterfaceMixin,
context, [router_id], dest_host)
else:
self.notify_router_updated(context, router_id)
else:
self.notify_router_updated(context, router_id)
@db_api.retry_if_session_inactive()
def update_floatingip(self, context, id, floatingip):


@@ -343,22 +343,11 @@ class L3_DVRsch_db_mixin(l3agent_sch_db.L3AgentSchedulerDbMixin):
def _dvr_handle_unbound_allowed_addr_pair_add(
plugin, context, port, allowed_address_pair):
updated_port = plugin.update_unbound_allowed_address_pair_port_binding(
context, port, allowed_address_pair)
if updated_port:
LOG.debug("Allowed address pair port binding updated "
"based on service port binding: %s", updated_port)
plugin.dvr_handle_new_service_port(context, updated_port)
plugin.update_arp_entry_for_dvr_service_port(context, port)
def _dvr_handle_unbound_allowed_addr_pair_del(
plugin, context, port, allowed_address_pair):
updated_port = plugin.remove_unbound_allowed_address_pair_port_binding(
context, port, allowed_address_pair)
if updated_port:
LOG.debug("Allowed address pair port binding removed "
"from service port binding: %s", updated_port)
aa_fixed_ips = plugin._get_allowed_address_pair_fixed_ips(context, port)
if aa_fixed_ips:
plugin.delete_arp_entry_for_dvr_service_port(


@@ -253,6 +253,13 @@ class L3DvrTestCase(L3DvrTestCaseBase):
self.subnet(cidr='20.0.0.0/24') as int_subnet,\
self.port(subnet=int_subnet,
device_owner=DEVICE_OWNER_COMPUTE) as int_port:
self.core_plugin.update_port(
self.context, int_port['port']['id'],
{'port': {portbindings.HOST_ID: 'host1'}})
# and create l3 agents on corresponding hosts
helpers.register_l3_agent(host='host1',
agent_mode=constants.L3_AGENT_MODE_DVR)
# make net external
ext_net_id = ext_subnet['subnet']['network_id']
self._update('networks', ext_net_id,
@@ -279,7 +286,7 @@ class L3DvrTestCase(L3DvrTestCaseBase):
if dvr:
l3_notif.routers_updated_on_host.assert_called_once_with(
self.context, [router['id']],
int_port['port'][portbindings.HOST_ID])
'host1')
self.assertFalse(l3_notif.routers_updated.called)
else:
l3_notif.routers_updated.assert_called_once_with(
@@ -378,6 +385,12 @@ class L3DvrTestCase(L3DvrTestCaseBase):
self.subnet(cidr='20.0.0.0/24') as int_subnet,\
self.port(subnet=int_subnet,
device_owner=DEVICE_OWNER_COMPUTE) as int_port:
self.core_plugin.update_port(
self.context, int_port['port']['id'],
{'port': {portbindings.HOST_ID: 'host1'}})
# and create l3 agents on corresponding hosts
helpers.register_l3_agent(host='host1',
agent_mode=constants.L3_AGENT_MODE_DVR)
# make net external
ext_net_id = ext_subnet['subnet']['network_id']
self._update('networks', ext_net_id,
@@ -406,7 +419,7 @@ class L3DvrTestCase(L3DvrTestCaseBase):
if dvr:
l3_notif.routers_updated_on_host.assert_called_once_with(
self.context, [router['id']],
int_port['port'][portbindings.HOST_ID])
'host1')
self.assertFalse(l3_notif.routers_updated.called)
else:
l3_notif.routers_updated.assert_called_once_with(
@@ -480,11 +493,14 @@ class L3DvrTestCase(L3DvrTestCaseBase):
self.assertEqual(1, len(snat_router_intfs[router1['id']]))
self.assertEqual(1, len(fixed_ips))
def test_allowed_addr_pairs_arp_update_for_port_with_original_owner(self):
def test_unbound_allowed_addr_pairs_fip_with_multiple_active_vms(self):
HOST1 = 'host1'
helpers.register_l3_agent(
host=HOST1, agent_mode=constants.L3_AGENT_MODE_DVR)
router = self._create_router()
HOST2 = 'host2'
helpers.register_l3_agent(
host=HOST2, agent_mode=constants.L3_AGENT_MODE_DVR)
router = self._create_router(ha=False)
private_net1 = self._make_network(self.fmt, 'net1', True)
test_allocation_pools = [{'start': '10.1.0.2',
'end': '10.1.0.20'}]
@@ -495,6 +511,10 @@ class L3DvrTestCase(L3DvrTestCaseBase):
self._make_subnet(
self.fmt, ext_net, '10.20.0.1', '10.20.0.0/24',
ip_version=4, enable_dhcp=True)
self.l3_plugin.schedule_router(self.context,
router['id'],
candidates=[self.l3_agent])
# Set gateway to router
self.l3_plugin._update_router_gw_info(
self.context, router['id'],
@@ -510,70 +530,80 @@ class L3DvrTestCase(L3DvrTestCaseBase):
vrrp_port = self._make_port(
self.fmt,
private_net1['network']['id'],
device_owner=constants.DEVICE_OWNER_LOADBALANCER,
device_owner='',
fixed_ips=fixed_vrrp_ip)
allowed_address_pairs = [
{'ip_address': '10.1.0.201',
'mac_address': vrrp_port['port']['mac_address']}]
with self.port(
subnet=private_subnet1,
device_owner=DEVICE_OWNER_COMPUTE) as int_port:
device_owner=DEVICE_OWNER_COMPUTE) as int_port1,\
self.port(
subnet=private_subnet1,
device_owner=DEVICE_OWNER_COMPUTE) as int_port2:
self.l3_plugin.add_router_interface(
self.context, router['id'],
{'subnet_id': private_subnet1['subnet']['id']})
router_handle = (
self.l3_plugin.list_active_sync_routers_on_active_l3_agent(
self.context, self.l3_agent['host'], [router['id']]))
self.assertEqual(self.l3_agent['host'],
router_handle[0]['gw_port_host'])
with mock.patch.object(self.l3_plugin,
'_l3_rpc_notifier') as l3_notifier:
vm_port = self.core_plugin.update_port(
self.context, int_port['port']['id'],
vm_port1 = self.core_plugin.update_port(
self.context, int_port1['port']['id'],
{'port': {portbindings.HOST_ID: HOST1}})
l3_notifier.routers_updated_on_host.assert_called_once_with(
self.context, {router['id']}, HOST1)
self.assertEqual(1, l3_notifier.add_arp_entry.call_count)
l3_notifier.reset_mock()
vm_port2 = self.core_plugin.update_port(
self.context, int_port2['port']['id'],
{'port': {portbindings.HOST_ID: HOST2}})
vrrp_port_db = self.core_plugin.get_port(
self.context, vrrp_port['port']['id'])
# Make sure that the VRRP port is not bound to any host
self.assertNotEqual(vrrp_port_db[portbindings.HOST_ID], HOST1)
self.assertNotEqual(vrrp_port_db[portbindings.HOST_ID], HOST2)
self.assertNotEqual(
vrrp_port_db[portbindings.HOST_ID], self.l3_agent['host'])
# Now update both the VM ports with the allowed_address_pair ip
self.core_plugin.update_port(
self.context, vm_port1['id'],
{'port': {
'allowed_address_pairs': allowed_address_pairs}})
updated_vm_port1 = self.core_plugin.get_port(
self.context, vm_port1['id'])
expected_allowed_address_pairs1 = updated_vm_port1.get(
'allowed_address_pairs')
self.assertEqual(expected_allowed_address_pairs1,
allowed_address_pairs)
self.core_plugin.update_port(
self.context, vm_port2['id'],
{'port': {
'allowed_address_pairs': allowed_address_pairs}})
updated_vm_port2 = self.core_plugin.get_port(
self.context, vm_port2['id'])
expected_allowed_address_pairs2 = updated_vm_port2.get(
'allowed_address_pairs')
self.assertEqual(expected_allowed_address_pairs2,
allowed_address_pairs)
# Now let us assign the floatingip to the vrrp port that is
# not bound to any host.
floating_ip = {'floating_network_id': ext_net['network']['id'],
'router_id': router['id'],
'port_id': vrrp_port['port']['id'],
'tenant_id': vrrp_port['port']['tenant_id']}
floating_ip = self.l3_plugin.create_floatingip(
self.context, {'floatingip': floating_ip})
vrrp_port_db = self.core_plugin.get_port(
self.context, vrrp_port['port']['id'])
self.assertNotEqual(vrrp_port_db[portbindings.HOST_ID], HOST1)
# Now update the VM port with the allowed_address_pair
l3_notifier.reset_mock()
self.core_plugin.update_port(
self.context, vm_port['id'],
{'port': {
'allowed_address_pairs': allowed_address_pairs}})
updated_vm_port = self.core_plugin.get_port(
self.context, vm_port['id'])
expected_allowed_address_pairs = updated_vm_port.get(
'allowed_address_pairs')
self.assertEqual(expected_allowed_address_pairs,
allowed_address_pairs)
cur_vrrp_port_db = self.core_plugin.get_port(
self.context, vrrp_port['port']['id'])
self.assertEqual(cur_vrrp_port_db[portbindings.HOST_ID], HOST1)
self.assertTrue(cur_vrrp_port_db.get(portbindings.PROFILE))
port_profile = cur_vrrp_port_db.get(portbindings.PROFILE)
self.assertTrue(port_profile)
self.assertEqual(port_profile['original_owner'],
constants.DEVICE_OWNER_LOADBALANCER)
l3_notifier.reset_mock()
port_profile['new_owner'] = 'test_owner'
self.core_plugin.update_port(
self.context, cur_vrrp_port_db['id'],
{'port': {portbindings.PROFILE: port_profile}})
# Now the vrrp port should have an 'original_owner'
# and gets updated with a new profile. In this case
# the update triggers a notification to the neutron
# server, but this should not trigger another arp
# update of this port or router_updated event to the
# agent, otherwise this will mess up with the arp
# table in the router namespace.
self.assertEqual(0, l3_notifier.add_arp_entry.call_count)
self.assertEqual(
0, l3_notifier.routers_updated_on_host.call_count)
expected_routers_updated_calls = [
mock.call(self.context, mock.ANY, HOST1),
mock.call(self.context, mock.ANY, HOST2)]
l3_notifier.routers_updated_on_host.assert_has_calls(
expected_routers_updated_calls)
self.assertTrue(l3_notifier.routers_updated.called)
router_info = (
self.l3_plugin.list_active_sync_routers_on_active_l3_agent(
self.context, self.l3_agent['host'], [router['id']]))
floatingips = router_info[0][constants.FLOATINGIP_KEY]
self.assertTrue(floatingips[0][n_const.DVR_SNAT_BOUND])
def test_allowed_addr_pairs_delayed_fip_and_update_arp_entry(self):
HOST1 = 'host1'
@@ -582,7 +612,7 @@ class L3DvrTestCase(L3DvrTestCaseBase):
HOST2 = 'host2'
helpers.register_l3_agent(
host=HOST2, agent_mode=constants.L3_AGENT_MODE_DVR)
router = self._create_router()
router = self._create_router(ha=False)
private_net1 = self._make_network(self.fmt, 'net1', True)
test_allocation_pools = [{'start': '10.1.0.2',
'end': '10.1.0.20'}]
@@ -593,6 +623,10 @@ class L3DvrTestCase(L3DvrTestCaseBase):
self._make_subnet(
self.fmt, ext_net, '10.20.0.1', '10.20.0.0/24',
ip_version=4, enable_dhcp=True)
self.l3_plugin.schedule_router(self.context,
router['id'],
candidates=[self.l3_agent])
# Set gateway to router
self.l3_plugin._update_router_gw_info(
self.context, router['id'],
@@ -620,6 +654,11 @@ class L3DvrTestCase(L3DvrTestCaseBase):
self.l3_plugin.add_router_interface(
self.context, router['id'],
{'subnet_id': private_subnet1['subnet']['id']})
router_handle = (
self.l3_plugin.list_active_sync_routers_on_active_l3_agent(
self.context, self.l3_agent['host'], [router['id']]))
self.assertEqual(self.l3_agent['host'],
router_handle[0]['gw_port_host'])
with mock.patch.object(self.l3_plugin,
'_l3_rpc_notifier') as l3_notifier:
vm_port = self.core_plugin.update_port(
@@ -635,7 +674,6 @@ class L3DvrTestCase(L3DvrTestCaseBase):
vm_port2 = self.core_plugin.update_port(
self.context, int_port2['port']['id'],
{'port': {portbindings.HOST_ID: HOST2}})
l3_notifier.reset_mock()
# Now update the VM port with the allowed_address_pair
self.core_plugin.update_port(
self.context, vm_port['id'],
@@ -646,12 +684,11 @@ class L3DvrTestCase(L3DvrTestCaseBase):
{'port': {
'allowed_address_pairs': allowed_address_pairs}})
self.assertEqual(
0, l3_notifier.routers_updated_on_host.call_count)
2, l3_notifier.routers_updated_on_host.call_count)
updated_vm_port1 = self.core_plugin.get_port(
self.context, vm_port['id'])
updated_vm_port2 = self.core_plugin.get_port(
self.context, vm_port2['id'])
self.assertEqual(4, l3_notifier.add_arp_entry.call_count)
expected_allowed_address_pairs = updated_vm_port1.get(
'allowed_address_pairs')
self.assertEqual(expected_allowed_address_pairs,
@@ -669,52 +706,42 @@ class L3DvrTestCase(L3DvrTestCaseBase):
cur_vrrp_port_db[portbindings.HOST_ID], HOST1)
self.assertNotEqual(
cur_vrrp_port_db[portbindings.HOST_ID], HOST2)
# Before we try to associate a floatingip make sure that
# only one of the Service port associated with the
# allowed_address_pair port is active and the other one
# is DOWN
mod_vm_port2 = self.core_plugin.update_port(
self.context, updated_vm_port2['id'],
{'port': {
'admin_state_up': False}})
self.assertFalse(mod_vm_port2['admin_state_up'])
# Next we can try to associate the floatingip to the
# VRRP port that is already attached to the VM port
l3_notifier.reset_mock()
floating_ip = {'floating_network_id': ext_net['network']['id'],
'router_id': router['id'],
'port_id': vrrp_port['port']['id'],
'tenant_id': vrrp_port['port']['tenant_id']}
floating_ip = self.l3_plugin.create_floatingip(
self.context, {'floatingip': floating_ip})
self.assertEqual(
2, l3_notifier.routers_updated_on_host.call_count)
self.assertEqual(3, l3_notifier.add_arp_entry.call_count)
post_update_vrrp_port_db = self.core_plugin.get_port(
self.context, vrrp_port['port']['id'])
vrrp_port_fixed_ips = post_update_vrrp_port_db['fixed_ips']
vrrp_port_subnet_id = vrrp_port_fixed_ips[0]['subnet_id']
vrrp_arp_table = {
vrrp_arp_table1 = {
'ip_address': vrrp_port_fixed_ips[0]['ip_address'],
'mac_address': vm_port_mac,
'subnet_id': vrrp_port_subnet_id}
vrrp_arp_table1 = {
'ip_address': vrrp_port_fixed_ips[0]['ip_address'],
'mac_address': vrrp_port['port']['mac_address'],
'subnet_id': vrrp_port_subnet_id}
self.assertEqual(
post_update_vrrp_port_db[portbindings.HOST_ID], HOST1)
expected_calls = [
mock.call(self.context,
router['id'], vrrp_arp_table1),
mock.call(self.context,
router['id'], vm_arp_table),
mock.call(self.context,
router['id'], vrrp_arp_table)]
router['id'], vrrp_arp_table1)]
l3_notifier.add_arp_entry.assert_has_calls(
expected_calls)
expected_routers_updated_calls = [
mock.call(self.context, mock.ANY, HOST1),
mock.call(self.context, mock.ANY, HOST2)]
l3_notifier.routers_updated_on_host.assert_has_calls(
expected_routers_updated_calls)
self.assertTrue(l3_notifier.routers_updated.called)
router_info = (
self.l3_plugin.list_active_sync_routers_on_active_l3_agent(
self.context, self.l3_agent['host'], [router['id']]))
floatingips = router_info[0][constants.FLOATINGIP_KEY]
self.assertTrue(floatingips[0][n_const.DVR_SNAT_BOUND])
def test_dvr_gateway_host_binding_is_set(self):
router = self._create_router(ha=False)
@@ -754,7 +781,7 @@ class L3DvrTestCase(L3DvrTestCaseBase):
HOST1 = 'host1'
helpers.register_l3_agent(
host=HOST1, agent_mode=constants.L3_AGENT_MODE_DVR)
router = self._create_router()
router = self._create_router(ha=False)
private_net1 = self._make_network(self.fmt, 'net1', True)
test_allocation_pools = [{'start': '10.1.0.2',
'end': '10.1.0.20'}]
@@ -765,6 +792,9 @@ class L3DvrTestCase(L3DvrTestCaseBase):
self._make_subnet(
self.fmt, ext_net, '10.20.0.1', '10.20.0.0/24',
ip_version=4, enable_dhcp=True)
self.l3_plugin.schedule_router(self.context,
router['id'],
candidates=[self.l3_agent])
# Set gateway to router
self.l3_plugin._update_router_gw_info(
self.context, router['id'],
@@ -790,6 +820,11 @@ class L3DvrTestCase(L3DvrTestCaseBase):
self.l3_plugin.add_router_interface(
self.context, router['id'],
{'subnet_id': private_subnet1['subnet']['id']})
router_handle = (
self.l3_plugin.list_active_sync_routers_on_active_l3_agent(
self.context, self.l3_agent['host'], [router['id']]))
self.assertEqual(self.l3_agent['host'],
router_handle[0]['gw_port_host'])
with mock.patch.object(self.l3_plugin,
'_l3_rpc_notifier') as l3_notifier:
vm_port = self.core_plugin.update_port(
@@ -802,12 +837,7 @@ class L3DvrTestCase(L3DvrTestCaseBase):
'ip_address': vm_port_fixed_ips[0]['ip_address'],
'mac_address': vm_port_mac,
'subnet_id': vm_port_subnet_id}
l3_notifier.routers_updated_on_host.assert_called_once_with(
self.context, {router['id']}, HOST1)
self.assertEqual(1, l3_notifier.add_arp_entry.call_count)
l3_notifier.reset_mock()
floating_ip = {'floating_network_id': ext_net['network']['id'],
'router_id': router['id'],
'port_id': vrrp_port['port']['id'],
@@ -818,16 +848,12 @@ class L3DvrTestCase(L3DvrTestCaseBase):
self.context, vrrp_port['port']['id'])
self.assertNotEqual(vrrp_port_db[portbindings.HOST_ID], HOST1)
# Now update the VM port with the allowed_address_pair
l3_notifier.reset_mock()
self.core_plugin.update_port(
self.context, vm_port['id'],
{'port': {
'allowed_address_pairs': allowed_address_pairs}})
self.assertEqual(
2, l3_notifier.routers_updated_on_host.call_count)
updated_vm_port = self.core_plugin.get_port(
self.context, vm_port['id'])
self.assertEqual(3, l3_notifier.add_arp_entry.call_count)
expected_allowed_address_pairs = updated_vm_port.get(
'allowed_address_pairs')
self.assertEqual(expected_allowed_address_pairs,
@@ -836,163 +862,23 @@ class L3DvrTestCase(L3DvrTestCaseBase):
self.context, vrrp_port['port']['id'])
vrrp_port_fixed_ips = cur_vrrp_port_db['fixed_ips']
vrrp_port_subnet_id = vrrp_port_fixed_ips[0]['subnet_id']
vrrp_arp_table = {
vrrp_arp_table1 = {
'ip_address': vrrp_port_fixed_ips[0]['ip_address'],
'mac_address': vm_port_mac,
'subnet_id': vrrp_port_subnet_id}
vrrp_arp_table1 = {
'ip_address': vrrp_port_fixed_ips[0]['ip_address'],
'mac_address': vrrp_port['port']['mac_address'],
'subnet_id': vrrp_port_subnet_id}
self.assertEqual(cur_vrrp_port_db[portbindings.HOST_ID], HOST1)
expected_calls = [
mock.call(self.context,
router['id'], vrrp_arp_table1),
mock.call(self.context,
router['id'], vm_arp_table),
mock.call(self.context,
router['id'], vrrp_arp_table)]
router['id'], vrrp_arp_table1)]
l3_notifier.add_arp_entry.assert_has_calls(
expected_calls)
def test_update_service_port_with_allowed_address_pairs(self):
HOST1 = 'host1'
helpers.register_l3_agent(
host=HOST1, agent_mode=constants.L3_AGENT_MODE_DVR)
router = self._create_router()
private_net1 = self._make_network(self.fmt, 'net1', True)
test_allocation_pools = [{'start': '10.1.0.2',
'end': '10.1.0.20'}]
fixed_vrrp_ip = [{'ip_address': '10.1.0.201'}]
kwargs = {'arg_list': (external_net.EXTERNAL,),
external_net.EXTERNAL: True}
ext_net = self._make_network(self.fmt, '', True, **kwargs)
self._make_subnet(
self.fmt, ext_net, '10.20.0.1', '10.20.0.0/24',
ip_version=4, enable_dhcp=True)
# Set gateway to router
self.l3_plugin._update_router_gw_info(
self.context, router['id'],
{'network_id': ext_net['network']['id']})
private_subnet1 = self._make_subnet(
self.fmt,
private_net1,
'10.1.0.1',
cidr='10.1.0.0/24',
ip_version=4,
allocation_pools=test_allocation_pools,
enable_dhcp=True)
vrrp_port = self._make_port(
self.fmt,
private_net1['network']['id'],
device_owner=constants.DEVICE_OWNER_LOADBALANCER,
fixed_ips=fixed_vrrp_ip)
allowed_address_pairs = [
{'ip_address': '10.1.0.201',
'mac_address': vrrp_port['port']['mac_address']}]
with self.port(
subnet=private_subnet1,
device_owner=DEVICE_OWNER_COMPUTE) as int_port:
self.l3_plugin.add_router_interface(
self.context, router['id'],
{'subnet_id': private_subnet1['subnet']['id']})
with mock.patch.object(self.l3_plugin,
'_l3_rpc_notifier') as l3_notifier:
self.core_plugin.update_port(
self.context, int_port['port']['id'],
{'port': {portbindings.HOST_ID: HOST1}})
l3_notifier.routers_updated_on_host.assert_called_once_with(
self.context, {router['id']}, HOST1)
floating_ip = {'floating_network_id': ext_net['network']['id'],
'router_id': router['id'],
'port_id': vrrp_port['port']['id'],
'tenant_id': vrrp_port['port']['tenant_id']}
floating_ip = self.l3_plugin.create_floatingip(
self.context, {'floatingip': floating_ip})
vrrp_port_db = self.core_plugin.get_port(
self.context, vrrp_port['port']['id'])
self.assertNotEqual(vrrp_port_db[portbindings.HOST_ID], HOST1)
# Now update the VM port with the allowed_address_pair
cur_int_port = self.core_plugin.update_port(
self.context, int_port['port']['id'],
{'port': {
'allowed_address_pairs': allowed_address_pairs}})
cur_vrrp_port_db = self.core_plugin.get_port(
self.context, vrrp_port['port']['id'])
# Check to make sure that we are not chaning the existing
# device_owner for the allowed_address_pair port.
self.assertEqual(
cur_vrrp_port_db['device_owner'],
constants.DEVICE_OWNER_LOADBALANCER)
self.assertEqual(cur_vrrp_port_db[portbindings.HOST_ID], HOST1)
self.assertTrue(cur_vrrp_port_db.get(portbindings.PROFILE))
port_profile = cur_vrrp_port_db.get(portbindings.PROFILE)
self.assertTrue(port_profile)
self.assertEqual(port_profile['original_owner'],
constants.DEVICE_OWNER_LOADBALANCER)
# Now change the compute port admin_state_up from True to
# False, and see if the vrrp ports device_owner and binding
# inheritance reverts back to normal
mod_int_port = self.core_plugin.update_port(
self.context, cur_int_port['id'],
{'port': {
'admin_state_up': False}})
self.assertFalse(mod_int_port['admin_state_up'])
new_vrrp_port_db = self.core_plugin.get_port(
self.context, cur_vrrp_port_db['id'])
new_port_profile = new_vrrp_port_db.get(portbindings.PROFILE)
self.assertEqual({}, new_port_profile)
self.assertNotEqual(
new_vrrp_port_db[portbindings.HOST_ID], HOST1)
# Now change the compute port admin_state_up from False to
# True, and see if the vrrp ports device_owner and binding
# inherits from the associated parent compute port.
new_mod_int_port = self.core_plugin.update_port(
self.context, mod_int_port['id'],
{'port': {
'admin_state_up': True}})
self.assertTrue(new_mod_int_port['admin_state_up'])
cur_new_vrrp_port_db = self.core_plugin.get_port(
self.context, new_vrrp_port_db['id'])
self.assertNotEqual(
cur_new_vrrp_port_db['device_owner'], DEVICE_OWNER_COMPUTE)
self.assertEqual(
cur_new_vrrp_port_db[portbindings.HOST_ID], HOST1)
# Now let us try to remove vrrp_port device_owner and see
# how it inherits from the compute port.
updated_vrrp_port = self.core_plugin.update_port(
self.context, cur_new_vrrp_port_db['id'],
{'port': {'device_owner': "",
portbindings.PROFILE: {'original_owner': ""}}})
updated_vm_port = self.core_plugin.update_port(
self.context, new_mod_int_port['id'],
{'port': {
'admin_state_up': False}})
self.assertFalse(updated_vm_port['admin_state_up'])
# This port admin_state down should not cause any issue
# with the existing vrrp port device_owner, but should
# only change the port_binding HOST_ID.
cur_new_vrrp_port_db = self.core_plugin.get_port(
self.context, updated_vrrp_port['id'])
self.assertEqual(
"", cur_new_vrrp_port_db['device_owner'])
self.assertEqual(
"", cur_new_vrrp_port_db[portbindings.HOST_ID])
updated_vm_port = self.core_plugin.update_port(
self.context, new_mod_int_port['id'],
{'port': {
'admin_state_up': True}})
self.assertTrue(updated_vm_port['admin_state_up'])
updated_vrrp_port_db = self.core_plugin.get_port(
self.context, new_vrrp_port_db['id'])
self.assertEqual(
updated_vrrp_port_db['device_owner'], DEVICE_OWNER_COMPUTE)
self.assertEqual(
updated_vrrp_port_db[portbindings.HOST_ID], HOST1)
expected_routers_updated_calls = [
mock.call(self.context, mock.ANY, HOST1)]
l3_notifier.routers_updated_on_host.assert_has_calls(
expected_routers_updated_calls)
self.assertTrue(l3_notifier.routers_updated.called)
def test_update_vm_port_host_router_update(self):
# register l3 agents in dvr mode in addition to existing dvr_snat agent


@@ -382,8 +382,10 @@ class L3DvrTestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
return_value=fipagent)
self.mixin._get_fip_agent_gw_ports = mock.Mock(
return_value='fip_interface')
agent = mock.Mock()
agent.id = fipagent['id']
self.mixin._process_floating_ips_dvr(self.ctx, routers, [floatingip],
hostid)
hostid, agent)
return (router, floatingip)
def test_floatingip_on_port_not_host(self):


@@ -827,21 +827,12 @@ class L3DvrSchedulerTestCase(testlib_api.SqlTestCase):
},
}
port = kwargs.get('original_port')
port_addr_pairs = port['allowed_address_pairs']
l3plugin = mock.Mock()
directory.add_plugin(plugin_constants.L3, l3plugin)
l3_dvrscheduler_db._notify_l3_agent_port_update(
'port', 'after_update', mock.ANY, **kwargs)
l3plugin._get_allowed_address_pair_fixed_ips.return_value = (
['10.1.0.21'])
self.assertTrue(
l3plugin.remove_unbound_allowed_address_pair_port_binding.
called)
l3plugin.remove_unbound_allowed_address_pair_port_binding.\
assert_called_once_with(
self.adminContext,
port,
port_addr_pairs[0])
self.assertFalse(
l3plugin.update_arp_entry_for_dvr_service_port.called)
l3plugin.delete_arp_entry_for_dvr_service_port.\
@@ -849,7 +840,6 @@ class L3DvrSchedulerTestCase(testlib_api.SqlTestCase):
self.adminContext,
port,
fixed_ips_to_delete=mock.ANY)
self.assertFalse(l3plugin.dvr_handle_new_service_port.called)
def test__notify_l3_agent_update_port_with_allowed_address_pairs(self):
port_id = uuidutils.generate_uuid()
@@ -873,23 +863,12 @@ class L3DvrSchedulerTestCase(testlib_api.SqlTestCase):
'admin_state_up': True,
},
}
port = kwargs.get('port')
port_addr_pairs = port['allowed_address_pairs']
l3plugin = mock.Mock()
directory.add_plugin(plugin_constants.L3, l3plugin)
l3_dvrscheduler_db._notify_l3_agent_port_update(
'port', 'after_update', mock.ANY, **kwargs)
self.assertTrue(
l3plugin.update_unbound_allowed_address_pair_port_binding.
called)
l3plugin.update_unbound_allowed_address_pair_port_binding.\
assert_called_once_with(
self.adminContext,
port,
port_addr_pairs[0])
self.assertTrue(
l3plugin.update_arp_entry_for_dvr_service_port.called)
self.assertTrue(l3plugin.dvr_handle_new_service_port.called)
def test__notify_l3_agent_update_port_no_removing_routers(self):
port_id = 'fake-port'