Add DVR support

1. What is the problem?
Based on the patch to combine bridge networks [1], we still need some
changes to bring DVR support to the Tricircle.

2. What is the solution to the problem?
The major changes are listed below:
(1) Extend central plugin to add DVR support
(2) The "distributed" parameter that xmanager sets when creating the
    local router is no longer hard-coded to False.
(3) The device owner filters used to query ports now include DVR
    ports.
(4) Two ports are created when attaching a network to a distributed
    router: one of type "router_interface_distributed" and the other
    of type "router_centralized_snat". We assign the IP of the
    pre-created interface port to the first one and the IP of the
    central subnet gateway to the second one, as illustrated below.
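
For illustration only, the two resulting port bodies look roughly like
this (a minimal sketch; subnet_id, interface_port_ip and
central_gateway_ip are placeholders, the real bodies are built by the
central plugin and xmanager):

    dvr_interface_port = {
        'device_owner': 'network:router_interface_distributed',
        # reuses the IP of the pre-created top interface port
        'fixed_ips': [{'subnet_id': subnet_id,
                       'ip_address': interface_port_ip}]}
    snat_port = {
        'device_owner': 'network:router_centralized_snat',
        # takes the gateway IP of the central (top) subnet
        'fixed_ips': [{'subnet_id': subnet_id,
                       'ip_address': central_gateway_ip}]}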

3. What features need to be implemented in the Tricircle
   to realize the solution?
Bring partial DVR support to cross-pod layer-3 networking; users can
now create a distributed router.
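
For example, a distributed router can be requested in the usual way
(a minimal sketch using python-neutronclient; the endpoint and
credentials are placeholders):

    from neutronclient.v2_0 import client

    neutron = client.Client(username='admin', password='password',
                            tenant_name='admin',
                            auth_url='http://127.0.0.1:5000/v2.0')
    # setting 'distributed' explicitly usually requires admin credentials
    neutron.create_router(
        {'router': {'name': 'router1', 'distributed': True}})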

[1] https://review.openstack.org/#/c/407956/

Change-Id: I0a9724e758bfa226520f536dd6055ca0c870fd89
zhiyuan_cai 2016-12-12 11:13:37 +08:00
parent ac26a377f1
commit a1667dd4dd
6 changed files with 61 additions and 10 deletions

View File

@@ -30,7 +30,11 @@ from neutron.db import extradhcpopt_db
# NOTE(zhiyuan) though not used, this import cannot be removed because Router
# relies on one table defined in l3_agentschedulers_db
from neutron.db import l3_agentschedulers_db # noqa
from neutron.db import l3_attrs_db
from neutron.db import l3_db
from neutron.db import l3_dvr_db
# import l3_hamode_db to load l3_ha option
from neutron.db import l3_hamode_db # noqa
from neutron.db import models_v2
from neutron.db import portbindings_db
from neutron.extensions import availability_zone as az_ext
@@ -105,7 +109,8 @@ class TricirclePlugin(db_base_plugin_v2.NeutronDbPluginV2,
external_net_db.External_net_db_mixin,
portbindings_db.PortBindingMixin,
extradhcpopt_db.ExtraDhcpOptMixin,
l3_db.L3_NAT_dbonly_mixin):
l3_db.L3_NAT_dbonly_mixin,
l3_attrs_db.ExtraAttributesMixin):
__native_bulk_support = True
__native_pagination_support = True
@@ -123,6 +128,7 @@ class TricirclePlugin(db_base_plugin_v2.NeutronDbPluginV2,
"availability_zone",
"provider",
"network_availability_zone",
"dvr",
"router"]
def __init__(self):
@@ -900,7 +906,15 @@ class TricirclePlugin(db_base_plugin_v2.NeutronDbPluginV2,
self)._fields(p, fields) for p in ret]
def create_router(self, context, router):
return super(TricirclePlugin, self).create_router(context, router)
with context.session.begin(subtransactions=True):
router_db = super(TricirclePlugin, self).create_router(
context, router)
router_db['extra_attributes'] = None
dist = l3_dvr_db.is_distributed_router(router['router'])
self.set_extra_attr_value(context, router_db, 'distributed', dist)
router_db['distributed'] = router_db[
'extra_attributes'].distributed
return router_db
def _delete_top_bridge_resource(self, t_ctx, q_ctx, resource_type,
resource_id, resource_name):

View File

@@ -54,6 +54,15 @@ class NetworkHelper(object):
self.clients[region_name] = client.Client(region_name)
return self.clients[region_name]
@staticmethod
def _merge_ip_range(ip_range, ip):
ip_set = netaddr.IPSet(ip_range)
ip_set.add(ip)
if ip_set.iscontiguous():
return ip_set.iprange(), True
else:
return ip_range, False
# operate top resource
def _prepare_top_element_by_call(self, t_ctx, q_ctx,
project_id, pod, ele, _type, body):
@@ -228,15 +237,21 @@ class NetworkHelper(object):
:return: request body to create bottom subnet
"""
pools = t_subnet['allocation_pools']
t_gateway_ip = t_subnet['gateway_ip']
new_pools = []
g_ip = netaddr.IPAddress(gateway_ip)
ip_found = False
ip_merged = False
for pool in pools:
if ip_found:
new_pools.append({'start': pool['start'],
'end': pool['end']})
continue
ip_range = netaddr.IPRange(pool['start'], pool['end'])
if not ip_merged:
ip_range, ip_merged = NetworkHelper._merge_ip_range(
ip_range, t_gateway_ip)
ip_num = len(ip_range)
for i, ip in enumerate(ip_range):
if g_ip == ip:
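
The _merge_ip_range helper above is used to fold the top gateway IP
into an adjacent bottom allocation pool (see the "we include the top
gateway ip in the bottom ip allocation pool" test comment further
down). An illustrative call, reusing the 10.0.1.0/24 values from the
tests below:

    import netaddr

    pool = netaddr.IPRange('10.0.1.2', '10.0.1.254')
    merged, ok = NetworkHelper._merge_ip_range(pool, '10.0.1.1')
    # ok is True and merged now covers 10.0.1.1-10.0.1.254; an IP that
    # is not adjacent to the pool leaves the original range unchanged.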

View File

@@ -358,7 +358,16 @@ class TricirclePlugin(plugin.Ml2Plugin):
return port.get('device_owner') in (
q_constants.DEVICE_OWNER_ROUTER_INTF,
q_constants.DEVICE_OWNER_FLOATINGIP,
q_constants.DEVICE_OWNER_ROUTER_GW)
q_constants.DEVICE_OWNER_ROUTER_GW,
q_constants.DEVICE_OWNER_ROUTER_SNAT,
q_constants.DEVICE_OWNER_DVR_INTERFACE)
def _handle_dvr_snat_port(self, t_ctx, port):
if port.get('device_owner') != q_constants.DEVICE_OWNER_ROUTER_SNAT:
return
subnet_id = port['fixed_ips'][0]['subnet_id']
t_subnet = self.neutron_handle.handle_get(t_ctx, 'subnet', subnet_id)
port['fixed_ips'][0]['ip_address'] = t_subnet['gateway_ip']
def create_port(self, context, port):
port_body = port['port']
@@ -385,6 +394,7 @@ class TricirclePlugin(plugin.Ml2Plugin):
ip_address=fixed_ip['ip_address'])
t_port = t_ports[0]
else:
self._handle_dvr_snat_port(t_ctx, port_body)
t_port = port_body
else:
self._adapt_port_body_for_client(port['port'])

View File

@@ -44,14 +44,15 @@ class HelperTest(unittest.TestCase):
body = self.helper.get_create_subnet_body(project_id, t_subnet,
b_net_id, '10.0.1.2')
six.assertCountEqual(self,
[{'start': '10.0.1.3', 'end': '10.0.1.254'}],
[{'start': '10.0.1.1', 'end': '10.0.1.1'},
{'start': '10.0.1.3', 'end': '10.0.1.254'}],
body['subnet']['allocation_pools'])
self.assertEqual('10.0.1.2', body['subnet']['gateway_ip'])
body = self.helper.get_create_subnet_body(project_id, t_subnet,
b_net_id, '10.0.1.254')
six.assertCountEqual(self,
[{'start': '10.0.1.2', 'end': '10.0.1.253'}],
[{'start': '10.0.1.1', 'end': '10.0.1.253'}],
body['subnet']['allocation_pools'])
self.assertEqual('10.0.1.254', body['subnet']['gateway_ip'])
@@ -60,7 +61,7 @@ class HelperTest(unittest.TestCase):
{'start': '10.0.1.20', 'end': '10.0.1.254'}]
body = self.helper.get_create_subnet_body(project_id, t_subnet,
b_net_id, '10.0.1.5')
six.assertCountEqual(self, [{'start': '10.0.1.2', 'end': '10.0.1.4'},
six.assertCountEqual(self, [{'start': '10.0.1.1', 'end': '10.0.1.4'},
{'start': '10.0.1.6', 'end': '10.0.1.10'},
{'start': '10.0.1.20', 'end': '10.0.1.254'}],
body['subnet']['allocation_pools'])

View File

@@ -237,6 +237,7 @@ class PluginTest(unittest.TestCase):
'name': 'subnet1',
'network_id': network_id,
'cidr': '10.0.1.0/24',
'gateway_ip': '10.0.1.1',
'ip_version': 4,
'allocation_pools': [{'start': '10.0.1.2',
'end': '10.0.1.254'}],
@@ -282,13 +283,15 @@ class PluginTest(unittest.TestCase):
b_subnet.pop('project_id')
pool = subnet.pop('allocation_pools')[0]
b_pools = b_subnet.pop('allocation_pools')
t_gateway_ip = subnet.pop('gateway_ip')
b_gateway_ip = b_subnet.pop('gateway_ip')
def ip_to_digit(ip):
return int(ip[ip.rindex('.') + 1:])
pool_range = list(range(ip_to_digit(pool['start']),
pool_range = list(range(ip_to_digit(t_gateway_ip),
ip_to_digit(pool['end']) + 1))
# we include the top gateway ip in the bottom ip allocation pool
b_pool_range1 = list(range(ip_to_digit(b_pools[0]['start']),
ip_to_digit(b_pools[0]['end']) + 1))
b_pool_range2 = list(range(ip_to_digit(b_pools[1]['start']),

View File

@@ -238,11 +238,14 @@ class XManager(PeriodicTasks):
@staticmethod
def _get_router_interfaces(cli, cxt, router_id, net_id):
return cli.list_ports(
interfaces = cli.list_ports(
cxt, filters=[{'key': 'network_id', 'comparator': 'eq',
'value': net_id},
{'key': 'device_id', 'comparator': 'eq',
'value': router_id}])
return [inf for inf in interfaces if inf['device_owner'] in (
q_constants.DEVICE_OWNER_ROUTER_INTF,
q_constants.DEVICE_OWNER_DVR_INTERFACE)]
@periodic_task.periodic_task
def redo_failed_job(self, ctx):
@@ -303,8 +306,9 @@ class XManager(PeriodicTasks):
# pod and we may need to use it later.
b_client = self._get_client(b_pod['region_name'])
is_distributed = t_router.get('distributed', False)
router_body = {'router': {'name': t_router['id'],
'distributed': False}}
'distributed': is_distributed}}
project_id = t_router['tenant_id']
# create bottom router in target bottom pod
@@ -561,11 +565,15 @@ class XManager(PeriodicTasks):
t_ext_net_id = None
non_vm_port_types = [q_constants.DEVICE_OWNER_ROUTER_INTF,
q_constants.DEVICE_OWNER_DVR_INTERFACE,
q_constants.DEVICE_OWNER_ROUTER_SNAT,
q_constants.DEVICE_OWNER_ROUTER_GW,
q_constants.DEVICE_OWNER_DHCP]
ew_attached_port_types = [q_constants.DEVICE_OWNER_ROUTER_INTF,
q_constants.DEVICE_OWNER_DVR_INTERFACE,
q_constants.DEVICE_OWNER_ROUTER_GW]
ns_attached_port_types = q_constants.DEVICE_OWNER_ROUTER_INTF
ns_attached_port_types = [q_constants.DEVICE_OWNER_ROUTER_INTF,
q_constants.DEVICE_OWNER_DVR_INTERFACE]
mappings = db_api.get_bottom_mappings_by_top_id(ctx, t_router_id,
constants.RT_ROUTER)