Shared VxLAN (Part 4: bridge network L3)

1. What is the problem?
VLAN networks have restrictions that VxLAN networks do not have.
For more flexible networking deployment, we consider supporting
cross-pod VxLAN networks.

We are going to use the shadow agent/port mechanism to synchronize
VTEP information and make cross-pod VxLAN networking available, as
discussed in the specification document [1].

With the previous parts [2][3][4], VxLAN networking already works for
tenant networks, but bridge networks still lack VxLAN support.

2. What is the solution to the problem?
We need to build VxLAN tunnels for bridge ports, so bridge port
creation should also trigger shadow agent and shadow port setup.
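
The mechanics, roughly: when a bridge gateway port is bound in one
pod, its host and tunnel IP are mirrored into the other pods as
shadow agents/ports, so the l2population driver there can program a
VxLAN tunnel toward that VTEP. A minimal, self-contained sketch of
the idea (all names and values are illustrative, not Tricircle code):

def sync_shadow_ports(bound_port, source_pod, pods):
    # compute the shadow ports to mirror into every peer pod
    vtep_ip = bound_port['binding:profile']['tunnel_ip']
    shadows = []
    for pod in pods:
        if pod == source_pod:
            continue
        shadows.append({
            'pod': pod,
            'mac_address': bound_port['mac_address'],
            'ip_address': bound_port['fixed_ips'][0]['ip_address'],
            # the peer pod's l2pop driver builds a tunnel to this VTEP
            'vtep_ip': vtep_ip})
    return shadows

port = {'mac_address': 'fa:16:3e:00:00:01',
        'fixed_ips': [{'ip_address': '100.0.0.2'}],
        'binding:profile': {'tunnel_ip': '192.168.1.101'}}
print(sync_shadow_ports(port, 'pod_1', ['pod_1', 'pod_2']))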

3. What features need to be implemented in the Tricircle
to realize the solution?
This is the fourth patch for cross-pod VxLAN networking support, which
introduces the following changes:

(1) Make bridge network gateway port creation also trigger shadow
agent and shadow port setup, so a VxLAN-type bridge network can be used
(2) Delete shadow bridge ports when clearing the bridge network/subnet

[1] https://review.openstack.org/#/c/429155/
[2] https://review.openstack.org/#/c/425128/
[3] https://review.openstack.org/#/c/425129/
[4] https://review.openstack.org/#/c/425130/

Change-Id: I3f3054c9300566ddbdd5b6d523f547485462447c
Author: zhiyuan_cai  2017-01-24 15:14:18 +08:00
Parent: 76171efb29
Commit: b14e0b4066
12 changed files with 376 additions and 145 deletions

View File

@@ -20,7 +20,6 @@ import six
from six.moves import xrange
import uuid
from keystoneclient.auth.identity import v3 as auth_identity
from keystoneclient.auth import token_endpoint
from keystoneclient import session
from keystoneclient.v3 import client as keystone_client
@@ -212,14 +211,7 @@ class Client(object):
@staticmethod
def _get_keystone_session():
auth = auth_identity.Password(
auth_url=cfg.CONF.client.identity_url,
username=cfg.CONF.client.admin_username,
password=cfg.CONF.client.admin_password,
project_name=cfg.CONF.client.admin_tenant,
user_domain_name=cfg.CONF.client.admin_user_domain_name,
project_domain_name=cfg.CONF.client.admin_tenant_domain_name)
return session.Session(auth=auth)
return resource_handle.ResourceHandle.get_keystone_session()
@staticmethod
def get_admin_token():
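
For context, the pattern being centralized here is a
password-authenticated Keystone v3 session from which tokens are
issued on demand. A condensed, hedged sketch using the same
keystoneclient calls shown in the diff (config values are
placeholders):

from keystoneclient.auth.identity import v3 as auth_identity
from keystoneclient import session

def make_keystone_session(auth_url, username, password, project_name,
                          user_domain_name, project_domain_name):
    # password auth against Keystone v3, mirroring the hunk above
    auth = auth_identity.Password(
        auth_url=auth_url, username=username, password=password,
        project_name=project_name, user_domain_name=user_domain_name,
        project_domain_name=project_domain_name)
    return session.Session(auth=auth)

# Session.get_token() authenticates lazily and returns a scoped token;
# this is what get_admin_token() exposes to the Neutron handle below.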

View File

@@ -81,6 +81,7 @@ SP_EXTRA_ID = '00000000-0000-0000-0000-000000000000'
TOP = 'top'
POD_NOT_SPECIFIED = 'not_specified_pod'
PROFILE_REGION = 'region'
PROFILE_DEVICE = 'device'
PROFILE_HOST = 'host'
PROFILE_AGENT_TYPE = 'type'
PROFILE_TUNNEL_IP = 'tunnel_ip'

View File

@@ -67,6 +67,7 @@ def get_context_from_neutron_context(context):
ctx.tenant_name = context.tenant_name
ctx.user_name = context.user_name
ctx.resource_uuid = context.resource_uuid
ctx.is_admin = context.is_admin
return ctx

View File

@@ -13,6 +13,8 @@
# License for the specific language governing permissions and limitations
# under the License.
from keystoneclient.auth.identity import v3 as auth_identity
from keystoneclient import session
from neutronclient.common import exceptions as q_exceptions
from neutronclient.neutron import client as q_client
@@ -59,6 +61,21 @@ class ResourceHandle(object):
def update_endpoint_url(self, url):
self.endpoint_url = url
@staticmethod
def get_keystone_session():
auth = auth_identity.Password(
auth_url=cfg.CONF.client.identity_url,
username=cfg.CONF.client.admin_username,
password=cfg.CONF.client.admin_password,
project_name=cfg.CONF.client.admin_tenant,
user_domain_name=cfg.CONF.client.admin_user_domain_name,
project_domain_name=cfg.CONF.client.admin_tenant_domain_name)
return session.Session(auth=auth)
@staticmethod
def get_admin_token():
return ResourceHandle.get_keystone_session().get_token()
class NeutronResourceHandle(ResourceHandle):
service_type = cons.ST_NEUTRON
@@ -72,8 +89,11 @@ class NeutronResourceHandle(ResourceHandle):
'floatingip': LIST | CREATE | UPDATE | DELETE}
def _get_client(self, cxt):
token = cxt.auth_token
if not token and cxt.is_admin:
token = self.get_admin_token()
return q_client.Client('2.0',
token=cxt.auth_token,
token=token,
auth_url=self.auth_url,
endpoint_url=self.endpoint_url,
timeout=cfg.CONF.client.neutron_timeout)
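
This fallback matters because service-originated requests (for
example, xjobs run with an admin context) may carry no user token. A
tiny sketch of the decision, with a hypothetical issuer callback
standing in for get_admin_token():

def pick_token(ctx, issue_admin_token):
    # prefer the caller's own token; an admin context without one
    # falls back to a freshly issued admin token
    token = ctx.auth_token
    if not token and ctx.is_admin:
        token = issue_admin_token()
    return token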

View File

@@ -242,12 +242,21 @@ def get_bottom_mappings_by_tenant_pod(context,
return routings
def delete_mappings_by_top_id(context, top_id):
def delete_mappings_by_top_id(context, top_id, pod_id=None):
"""Delete resource routing entry based on top resource ID
If pod ID is also provided, only entry in the specific pod will be deleted
:param context: context object
:param top_id: top resource ID
:param pod_id: optional pod ID
:return: None
"""
filters = [{'key': 'top_id', 'comparator': 'eq', 'value': top_id}]
if pod_id:
filters.append({'key': 'pod_id', 'comparator': 'eq', 'value': pod_id})
with context.session.begin():
core.delete_resources(
context, models.ResourceRouting,
filters=[{'key': 'top_id', 'comparator': 'eq',
'value': top_id}])
core.delete_resources(context, models.ResourceRouting, filters=filters)
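
A hedged usage sketch of the new parameter (IDs are placeholders):
scoping the delete lets shadow bridge port cleanup drop a single
pod's routing entry without touching entries other pods still need.

# remove only pod_1's routing entry for the top resource
delete_mappings_by_top_id(context, 'top-port-uuid', pod_id='pod-1-uuid')
# omitting pod_id keeps the previous behaviour: delete all entries
delete_mappings_by_top_id(context, 'top-port-uuid')
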
def delete_mappings_by_bottom_id(context, bottom_id):

View File

@@ -584,6 +584,46 @@ class TricirclePlugin(db_base_plugin_v2.NeutronDbPluginV2,
t_ctx, sg_id, sg_id, bottom_pod['pod_id'], t_ctx.tenant,
t_constants.RT_SG)
@staticmethod
def _create_mapping_for_vm_port(t_ctx, port_body, pod):
entries = [(ip['subnet_id'],
t_constants.RT_SUBNET) for ip in port_body['fixed_ips']]
entries.append((port_body['network_id'], t_constants.RT_NETWORK))
entries.append((port_body['id'], t_constants.RT_PORT))
if port_body['security_groups']:
for sg_id in port_body['security_groups']:
entries.append((sg_id, t_constants.RT_SG))
for resource_id, resource_type in entries:
if db_api.get_bottom_id_by_top_id_region_name(
t_ctx, resource_id, pod['region_name'], resource_type):
continue
db_api.create_resource_mapping(
t_ctx, resource_id, resource_id, pod['pod_id'],
port_body['tenant_id'], resource_type)
def _trigger_router_xjob_for_vm_port(self, context, port_body, pod):
interfaces = super(TricirclePlugin, self).get_ports(
context,
{'network_id': [port_body['network_id']],
'device_owner': [constants.DEVICE_OWNER_ROUTER_INTF]},
fields=['device_id'])
router_ids = [
inf['device_id'] for inf in interfaces if inf['device_id']]
if router_ids:
# the request may come from a service, so we use an admin context
# to run the xjob
LOG.debug('Update port: network %s has been attached to the '
'following routers: %s, xjob triggered',
port_body['network_id'], router_ids)
admin_context = t_context.get_admin_context()
self.xjob_handler.setup_bottom_router(
admin_context, port_body['network_id'],
router_ids[0], pod['pod_id'])
else:
LOG.debug('Update port: no interfaces found, xjob not '
'triggered')
def update_port(self, context, port_id, port):
t_ctx = t_context.get_context_from_neutron_context(context)
top_port = super(TricirclePlugin, self).get_port(context, port_id)
@@ -599,6 +639,7 @@ class TricirclePlugin(db_base_plugin_v2.NeutronDbPluginV2,
port)
profile_dict = port['port']['binding:profile']
region_name = profile_dict[t_constants.PROFILE_REGION]
device_name = profile_dict[t_constants.PROFILE_DEVICE]
t_ctx = t_context.get_context_from_neutron_context(context)
pod = db_api.get_pod_by_name(t_ctx, region_name)
@@ -610,41 +651,17 @@ class TricirclePlugin(db_base_plugin_v2.NeutronDbPluginV2,
# plugin will still send agent info, so we double check here
self.helper.create_shadow_agent_if_needed(t_ctx,
profile_dict, pod)
if device_name.startswith('compute:'):
# local plugin will also update region information for bridge
# gateway port, but we only need to create resource routing
# entries, trigger xjob and configure security group rules for
# instance port
self._create_mapping_for_vm_port(t_ctx, res, pod)
# only trigger setup_bottom_router job
self._trigger_router_xjob_for_vm_port(context, res, pod)
self.xjob_handler.configure_security_group_rules(
t_ctx, res['tenant_id'])
entries = [(ip['subnet_id'],
t_constants.RT_SUBNET) for ip in res['fixed_ips']]
entries.append((res['network_id'], t_constants.RT_NETWORK))
entries.append((res['id'], t_constants.RT_PORT))
if res['security_groups']:
for sg_id in res['security_groups']:
entries.append((sg_id, t_constants.RT_SG))
for resource_id, resource_type in entries:
if db_api.get_bottom_id_by_top_id_region_name(
t_ctx, resource_id, pod['region_name'], resource_type):
continue
db_api.create_resource_mapping(t_ctx, resource_id, resource_id,
pod['pod_id'], res['tenant_id'],
resource_type)
interfaces = super(TricirclePlugin, self).get_ports(
context,
{'network_id': [res['network_id']],
'device_owner': [constants.DEVICE_OWNER_ROUTER_INTF]})
interfaces = [inf for inf in interfaces if inf['device_id']]
if interfaces:
# request may be come from service, we use an admin context
# to run the xjob
admin_context = t_context.get_admin_context()
self.xjob_handler.setup_bottom_router(
admin_context, res['network_id'],
interfaces[0]['device_id'], pod['pod_id'])
else:
LOG.debug('Update port: no interfaces found, xjob not'
'triggered')
self.xjob_handler.configure_security_group_rules(t_ctx,
res['tenant_id'])
if is_vxlan_network and (
cfg.CONF.client.cross_pod_vxlan_mode in (
t_constants.NM_P2P, t_constants.NM_L2GW)):
@@ -1149,53 +1166,48 @@ class TricirclePlugin(db_base_plugin_v2.NeutronDbPluginV2,
self._delete_top_bridge_resource(t_ctx, q_ctx, t_constants.RT_PORT,
bridge_port_id, bridge_port_name)
def _delete_shadow_bridge_port(self, t_ctx, bridge_port_id):
for pod, b_port_id in db_api.get_bottom_mappings_by_top_id(
t_ctx, bridge_port_id, t_constants.RT_SD_PORT):
region_name = pod['region_name']
self._get_client(region_name).delete_ports(t_ctx, b_port_id)
db_api.delete_mappings_by_top_id(t_ctx, bridge_port_id,
pod_id=pod['pod_id'])
def delete_router(self, context, _id):
router = super(TricirclePlugin,
self)._ensure_router_not_in_use(context, _id)
project_id = router['tenant_id']
t_ctx = t_context.get_context_from_neutron_context(context)
mappings = db_api.get_bottom_mappings_by_top_id(t_ctx, _id,
t_constants.RT_ROUTER)
is_local_router = self.helper.is_local_router(t_ctx, router)
for pod, b_router_id in mappings:
mappings = [
(m[0], m[1], False) for m in db_api.get_bottom_mappings_by_top_id(
t_ctx, _id, t_constants.RT_ROUTER)]
mappings.extend(
[(m[0], m[1], True) for m in db_api.get_bottom_mappings_by_top_id(
t_ctx, _id, t_constants.RT_NS_ROUTER)])
for pod, b_router_id, is_ns in mappings:
b_client = self._get_client(pod['region_name'])
if not is_local_router:
bridge_port_name = t_constants.bridge_port_name % (project_id,
b_router_id)
bridge_ports = super(TricirclePlugin, self).get_ports(
context, {'name': [bridge_port_name]})
context, {'name': [bridge_port_name]}, limit=1)
if bridge_ports:
t_ns_port_id = bridge_ports[0]['id']
# we do not create bridge ports for local routers, so there is
# no need to check "is_local_router" again
t_bridge_port_id = bridge_ports[0]['id']
if not is_ns:
b_client.action_routers(t_ctx, 'remove_gateway',
b_router_id)
self._delete_top_bridge_port(t_ctx, context, t_ns_port_id,
bridge_port_name)
b_client.delete_routers(t_ctx, b_router_id)
db_api.delete_mappings_by_bottom_id(t_ctx, b_router_id)
if is_local_router:
super(TricirclePlugin, self).delete_router(context, _id)
return
mappings = db_api.get_bottom_mappings_by_top_id(
t_ctx, _id, t_constants.RT_NS_ROUTER)
for pod, b_ns_router_id in mappings:
b_client = self._get_client(pod['region_name'])
bridge_subnet_name = (t_constants.bridge_subnet_name % project_id)
bridge_subnets = super(TricirclePlugin,
self).get_subnets(
context, {'name': [bridge_subnet_name]})
if bridge_subnets:
t_bridge_subnet_id = bridge_subnets[0]['id']
b_bridge_subnet_id = \
db_api.get_bottom_id_by_top_id_region_name(
t_ctx, t_bridge_subnet_id, pod['region_name'],
t_constants.RT_SUBNET)
if b_bridge_subnet_id:
request_body = {'subnet_id': b_bridge_subnet_id}
else:
b_ns_port_id = t_bridge_port_id
request_body = {'port_id': b_ns_port_id}
try:
b_client.action_routers(t_ctx, 'remove_interface',
b_ns_router_id, request_body)
b_router_id, request_body)
except Exception as e:
if e.status_code == 404:
# 404 error means that the router interface has
@@ -1203,9 +1215,13 @@ class TricirclePlugin(db_base_plugin_v2.NeutronDbPluginV2,
pass
raise
b_client.delete_routers(t_ctx, b_ns_router_id)
db_api.delete_mappings_by_bottom_id(t_ctx, b_ns_router_id)
self._delete_shadow_bridge_port(t_ctx, t_bridge_port_id)
self._delete_top_bridge_port(t_ctx, context, t_bridge_port_id,
bridge_port_name)
b_client.delete_routers(t_ctx, b_router_id)
db_api.delete_mappings_by_bottom_id(t_ctx, b_router_id)
if not is_local_router:
routers = super(TricirclePlugin, self).get_routers(
context, {'tenant_id': [project_id]})
if len(routers) <= 1:
@@ -1287,9 +1303,9 @@ class TricirclePlugin(db_base_plugin_v2.NeutronDbPluginV2,
return net, subnet
def _get_bridge_interface(self, t_ctx, q_ctx, project_id, pod,
t_net_id, b_router_id):
port_id = self.helper.get_bridge_interface(t_ctx, q_ctx, project_id,
pod, t_net_id, b_router_id)
t_net_id, b_router_id, t_subnet=None):
port_id = self.helper.get_bridge_interface(
t_ctx, q_ctx, project_id, pod, t_net_id, b_router_id, t_subnet)
return super(TricirclePlugin, self).get_port(q_ctx, port_id)
def _get_bottom_bridge_elements(self, q_ctx, project_id,
@@ -1426,8 +1442,12 @@ class TricirclePlugin(db_base_plugin_v2.NeutronDbPluginV2,
is_attach = _is_bridge_network_attached()
if not is_attach:
# no need to explicitly create the top bridge port, the ip reserved
# for router interface will be used.
# though there is no need to explicitly create the top bridge port,
# since the IP reserved for the router interface will be used, we
# still create it for shadow port creation purposes
self._get_bridge_interface(
t_ctx, context, project_id, t_pod, t_bridge_net['id'],
b_router_id, t_bridge_subnet)
b_client.action_routers(t_ctx, 'add_interface', b_router_id,
{'subnet_id': b_bridge_subnet_id})
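
A condensed restatement of the teardown order the router-delete hunk
above implements for a VxLAN bridge port, with hypothetical helpers
standing in for the Tricircle ones:

def delete_bridge_port_everywhere(t_ctx, bridge_port_id, sd_mappings,
                                  get_client):
    # sd_mappings: (pod, bottom shadow port id) pairs for RT_SD_PORT
    for pod, b_port_id in sd_mappings:
        # 1) delete the pod-local shadow copy of the bridge port
        get_client(pod['region_name']).delete_ports(t_ctx, b_port_id)
        # 2) drop only that pod's routing entry (see the db/api change)
        delete_mappings_by_top_id(t_ctx, bridge_port_id,
                                  pod_id=pod['pod_id'])
    # 3) the caller then deletes the top bridge port itself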

View File

@@ -17,6 +17,7 @@ import copy
import netaddr
import six
from neutron_lib.api.definitions import provider_net
from neutron_lib import constants
import neutronclient.common.exceptions as q_cli_exceptions
from oslo_serialization import jsonutils
@@ -35,6 +36,7 @@ import tricircle.network.exceptions as t_network_exc
AZ_HINTS = 'availability_zone_hints'
EXTERNAL = 'router:external' # neutron.extensions.external_net.EXTERNAL
TYPE_VLAN = 'vlan' # neutron.plugins.common.constants.TYPE_VLAN
TYPE_VXLAN = 'vxlan' # neutron.plugins.common.constants.TYPE_VXLAN
VIF_TYPE_OVS = 'ovs' # neutron.extensions.portbindings.VIF_TYPE_OVS
OVS_AGENT_DATA_TEMPLATE = {
@@ -79,9 +81,22 @@ class NetworkHelper(object):
@staticmethod
def _transfer_network_type(network_type):
network_type_map = {t_constants.NT_VLAN: TYPE_VLAN}
network_type_map = {t_constants.NT_VLAN: TYPE_VLAN,
t_constants.NT_VxLAN: TYPE_VXLAN}
return network_type_map.get(network_type, network_type)
@staticmethod
def _get_provider_info(t_net):
ret = {
provider_net.NETWORK_TYPE: NetworkHelper._transfer_network_type(
t_net[provider_net.NETWORK_TYPE]),
provider_net.SEGMENTATION_ID: t_net[provider_net.SEGMENTATION_ID]
}
if t_net[provider_net.NETWORK_TYPE] == t_constants.NT_VLAN:
ret[provider_net.PHYSICAL_NETWORK] = t_net[
provider_net.PHYSICAL_NETWORK]
return ret
def _get_client(self, region_name=None):
if not region_name:
if t_constants.TOP not in self.clients:
@@ -164,7 +179,7 @@
t_ctx, q_ctx, project_id, pod, ele, _type, body)
def get_bridge_interface(self, t_ctx, q_ctx, project_id, pod,
t_net_id, b_router_id):
t_net_id, b_router_id, t_subnet=None):
"""Get or create top bridge interface
:param t_ctx: tricircle context
@@ -173,6 +188,7 @@
:param pod: dict of top pod
:param t_net_id: top bridge network id
:param b_router_id: bottom router id
:param t_subnet: optional top bridge subnet dict
:return: bridge interface id
"""
port_name = t_constants.bridge_port_name % (project_id,
@@ -192,6 +208,10 @@
port_body['port'].update(
{'mac_address': constants.ATTR_NOT_SPECIFIED,
'fixed_ips': constants.ATTR_NOT_SPECIFIED})
if t_subnet:
port_body['port'].update(
{'fixed_ips': [{'subnet_id': t_subnet['id'],
'ip_address': t_subnet['gateway_ip']}]})
_, port_id = self.prepare_top_element(
t_ctx, q_ctx, project_id, pod, port_ele, 'port', port_body)
return port_id
@@ -515,11 +535,8 @@
net_body = {'network': {
'tenant_id': project_id,
'name': t_net['id'],
'provider:network_type': self._transfer_network_type(
t_net['provider:network_type']),
'provider:physical_network': t_net['provider:physical_network'],
'provider:segmentation_id': t_net['provider:segmentation_id'],
'admin_state_up': True}}
net_body['network'].update(self._get_provider_info(t_net))
if is_external:
net_body['network'][EXTERNAL] = True
_, b_net_id = self.prepare_bottom_element(
@@ -714,6 +731,32 @@
def get_agent_type_by_vif(vif_type):
return VIF_AGENT_TYPE_MAP.get(vif_type)
@staticmethod
def is_need_top_sync_port(port, bridge_cidr):
"""Judge if the port needs to be synced with top port
While synced with top port, shadow agent/port process is triggered
:param port: port dict
:param bridge_cidr: bridge subnet CIDR
:return: True/False
"""
device_owner = port.get('device_owner', '')
if device_owner.startswith('compute:'):
# sync with top port for instance port
return True
if device_owner not in (constants.DEVICE_OWNER_ROUTER_GW,
constants.DEVICE_OWNER_ROUTER_INTF):
# no need to sync with the top port if the port is neither an
# instance port nor a router interface or router gateway. in the
# DVR case there are two more router port types,
# router_interface_distributed and router_centralized_snat, which
# don't need to be synced with the top port either
return False
ip = port['fixed_ips'][0]['ip_address']
# only sync with top port for bridge router port
return netaddr.IPAddress(ip) in netaddr.IPNetwork(bridge_cidr)
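
A hedged usage sketch for this predicate (CIDR and values are
illustrative): instance ports always sync, router gateway/interface
ports sync only when their address falls inside the bridge CIDR, and
DVR-specific router ports never do.

bridge_cidr = '100.0.0.0/9'  # placeholder bridge subnet CIDR
vm_port = {'device_owner': 'compute:nova',
           'fixed_ips': [{'ip_address': '10.0.0.5'}]}
gw_port = {'device_owner': 'network:router_gateway',
           'fixed_ips': [{'ip_address': '100.0.0.2'}]}
dvr_port = {'device_owner': 'network:router_interface_distributed',
            'fixed_ips': [{'ip_address': '10.0.0.1'}]}
assert NetworkHelper.is_need_top_sync_port(vm_port, bridge_cidr)
assert NetworkHelper.is_need_top_sync_port(gw_port, bridge_cidr)
assert not NetworkHelper.is_need_top_sync_port(dvr_port, bridge_cidr)
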
@staticmethod
def construct_agent_data(agent_type, host, tunnel_ip):
if agent_type not in AGENT_DATA_TEMPLATE_MAP:

View File

@@ -432,6 +432,15 @@ class TricirclePlugin(plugin.Ml2Plugin):
t_ctx = t_context.get_context_from_neutron_context(context)
raw_client = self.neutron_handle._get_client(t_ctx)
def get_top_port_by_ip(ip):
params = {'fixed_ips': 'ip_address=%s' % ip,
'network_id': network_id}
t_ports = raw_client.list_ports(**params)['ports']
if not t_ports:
raise q_exceptions.InvalidIpForNetwork(
ip_address=fixed_ip['ip_address'])
return t_ports[0]
if port_body['fixed_ips'] is not q_constants.ATTR_NOT_SPECIFIED:
if not self._is_special_port(port_body):
fixed_ip = port_body['fixed_ips'][0]
@@ -441,12 +450,14 @@ class TricirclePlugin(plugin.Ml2Plugin):
# specifying ip address, we just raise an exception to
# reject this request
raise q_exceptions.InvalidIpForNetwork(ip_address='None')
params = {'fixed_ips': 'ip_address=%s' % ip_address}
t_ports = raw_client.list_ports(**params)['ports']
if not t_ports:
raise q_exceptions.InvalidIpForNetwork(
ip_address=fixed_ip['ip_address'])
t_port = t_ports[0]
t_port = get_top_port_by_ip(ip_address)
elif helper.NetworkHelper.is_need_top_sync_port(
port_body, cfg.CONF.client.bridge_cidr):
# for ports that need to be synced with the top port, we keep
# the IDs the same
ip_address = port_body['fixed_ips'][0]['ip_address']
port_body['id'] = get_top_port_by_ip(ip_address)['id']
t_port = port_body
else:
self._handle_dvr_snat_port(t_ctx, port_body)
t_port = port_body
@@ -547,21 +558,30 @@
profile_dict,
tunnel_ip=l2gw_tunnel_ip)
@staticmethod
def _need_top_update(port, update_body):
if not update_body.get(portbindings.HOST_ID):
# no need to update top port if host is not updated
return False
# only ports that are synced with the top port need a top
# port update
return helper.NetworkHelper.is_need_top_sync_port(
port, cfg.CONF.client.bridge_cidr)
def update_port(self, context, _id, port):
# ovs agent will not call update_port, it updates port status via rpc
# and direct db operation
profile_dict = port['port'].get(portbindings.PROFILE, {})
if profile_dict.pop(t_constants.PROFILE_FORCE_UP, None):
port['port']['status'] = q_constants.PORT_STATUS_ACTIVE
port['port'][
portbindings.VNIC_TYPE] = q_constants.ATTR_NOT_SPECIFIED
b_port = self.core_plugin.update_port(context, _id, port)
if port['port'].get('device_owner', '').startswith('compute') and (
port['port'].get(portbindings.HOST_ID)):
# we check both "device_owner" and "binding:host_id" to ensure the
# request comes from nova. and ovs agent will not call update_port.
# it updates port status via rpc and direct db operation
if self._need_top_update(b_port, port['port']):
region_name = cfg.CONF.nova.region_name
update_dict = {portbindings.PROFILE: {
t_constants.PROFILE_REGION: region_name}}
t_constants.PROFILE_REGION: region_name,
t_constants.PROFILE_DEVICE: b_port['device_owner']}}
self._fill_agent_info_in_profile(
context, _id, port['port'][portbindings.HOST_ID],
update_dict[portbindings.PROFILE])
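
What the update path now reports upward, as a hedged example body
(values illustrative): carrying the device owner in the binding
profile is what lets the central plugin distinguish instance ports
from bridge gateway ports.

update_dict = {'binding:profile': {
    'region': 'Pod1',                    # PROFILE_REGION
    'device': 'network:router_gateway',  # PROFILE_DEVICE, new here
    'host': 'host1',                     # agent fields filled in after
    'type': 'Open vSwitch agent',        # the agent lookup succeeds
    'tunnel_ip': '192.168.1.101'}}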

View File

@@ -336,11 +336,19 @@ class APITest(unittest.TestCase):
self._create_pod(2, 'test_az_uuid_2')
self._create_resource_mappings()
top_id = 'top_uuid'
api.delete_mappings_by_top_id(self.context, top_id)
api.delete_mappings_by_top_id(self.context, top_id,
pod_id='test_pod_uuid_0')
mappings = api.get_bottom_mappings_by_top_id(
self.context, top_id, 'network')
self.assertEqual(len(mappings), 0)
# entry in pod_uuid_0 is deleted, entry in pod_uuid_1 is left
self.assertEqual(1, len(mappings))
self.assertEqual('test_pod_uuid_1', mappings[0][0]['pod_id'])
api.delete_mappings_by_top_id(self.context, top_id)
mappings = api.get_bottom_mappings_by_top_id(
self.context, top_id, 'network')
self.assertEqual(0, len(mappings))
def test_update_pod(self):
self._create_pod(0, 'test_az_uuid_0')

View File

@@ -68,6 +68,7 @@ from tricircle.db import models
import tricircle.network.central_plugin as plugin
from tricircle.network.drivers import type_local
from tricircle.network.drivers import type_vlan
from tricircle.network.drivers import type_vxlan
from tricircle.network import helper
from tricircle.network import managers
from tricircle.tests.unit.network import test_security_groups
@@ -82,6 +83,7 @@ TOP_SUBNETPOOLS = []
TOP_SUBNETPOOLPREFIXES = []
TOP_IPALLOCATIONS = []
TOP_VLANALLOCATIONS = []
TOP_VXLANALLOCATIONS = []
TOP_SEGMENTS = []
TOP_EXTNETS = []
TOP_FLOATINGIPS = []
@@ -104,9 +106,9 @@ BOTTOM2_SGS = []
BOTTOM2_FIPS = []
RES_LIST = [TOP_NETS, TOP_SUBNETS, TOP_PORTS, TOP_ROUTERS, TOP_ROUTERPORT,
TOP_SUBNETPOOLS, TOP_SUBNETPOOLPREFIXES, TOP_IPALLOCATIONS,
TOP_VLANALLOCATIONS, TOP_SEGMENTS, TOP_EXTNETS, TOP_FLOATINGIPS,
TOP_SGS, TOP_SG_RULES, TOP_NETWORK_RBAC, TOP_SUBNETROUTES,
TOP_DNSNAMESERVERS,
TOP_VLANALLOCATIONS, TOP_VXLANALLOCATIONS, TOP_SEGMENTS,
TOP_EXTNETS, TOP_FLOATINGIPS, TOP_SGS, TOP_SG_RULES,
TOP_NETWORK_RBAC, TOP_SUBNETROUTES, TOP_DNSNAMESERVERS,
BOTTOM1_NETS, BOTTOM1_SUBNETS, BOTTOM1_PORTS, BOTTOM1_ROUTERS,
BOTTOM1_SGS, BOTTOM1_FIPS,
BOTTOM2_NETS, BOTTOM2_SUBNETS, BOTTOM2_PORTS, BOTTOM2_ROUTERS,
@@ -120,6 +122,7 @@ RES_MAP = {'networks': TOP_NETS,
'subnetpools': TOP_SUBNETPOOLS,
'subnetpoolprefixes': TOP_SUBNETPOOLPREFIXES,
'ml2_vlan_allocations': TOP_VLANALLOCATIONS,
'ml2_vxlan_allocations': TOP_VXLANALLOCATIONS,
'networksegments': TOP_SEGMENTS,
'externalnetworks': TOP_EXTNETS,
'floatingips': TOP_FLOATINGIPS,
@@ -513,7 +516,28 @@ class FakeClient(object):
del TOP_IPALLOCATIONS[index]
def add_gateway_routers(self, ctx, *args, **kwargs):
# only for mock purpose
router_id, body = args
try:
t_name = constants.bridge_port_name % (TEST_TENANT_ID, router_id)
t_client = FakeClient()
t_ports = t_client.list_ports(
ctx, [{'key': 'name', 'comparator': 'eq', 'value': t_name}])
b_id = t_ports[0]['id'] if t_ports else uuidutils.generate_uuid()
host_id = 'host1' if self.region_name == 'pod_1' else 'host_2'
self.create_ports(ctx, {'port': {
'admin_state_up': True,
'id': b_id,
'name': '',
'network_id': body['network_id'],
'fixed_ips': body['external_fixed_ips'],
'mac_address': '',
'device_id': router_id,
'device_owner': 'network:router_gateway',
'binding:vif_type': helper.VIF_TYPE_OVS,
'binding:host_id': host_id
}})
except q_exceptions.IpAddressInUseClient:
# just skip if the gateway port is already there
pass
def add_interface_routers(self, ctx, *args, **kwargs):
@@ -1071,6 +1095,8 @@ class FakeTypeManager(managers.TricircleTypeManager):
self.drivers[constants.NT_LOCAL] = FakeExtension(local_driver)
vlan_driver = type_vlan.VLANTypeDriver()
self.drivers[constants.NT_VLAN] = FakeExtension(vlan_driver)
vxlan_driver = type_vxlan.VxLANTypeDriver()
self.drivers[constants.NT_VxLAN] = FakeExtension(vxlan_driver)
def extend_network_dict_provider(self, cxt, net):
target_net = None
@@ -1214,8 +1240,8 @@ class PluginTest(unittest.TestCase,
xmanager.IN_TEST = True
phynet = 'bridge'
vlan_min = 2000
vlan_max = 2001
vlan_min, vlan_max = 2000, 2001
vxlan_min, vxlan_max = 20001, 20002
cfg.CONF.set_override('type_drivers', ['local', 'vlan'],
group='tricircle')
cfg.CONF.set_override('tenant_network_types', ['local', 'vlan'],
@@ -1231,6 +1257,9 @@ class PluginTest(unittest.TestCase,
TOP_VLANALLOCATIONS.append(
DotDict({'physical_network': phynet,
'vlan_id': vlan, 'allocated': False}))
for vxlan in (vxlan_min, vxlan_max):
TOP_VXLANALLOCATIONS.append(
DotDict({'vxlan_vni': vxlan, 'allocated': False}))
def fake_get_plugin(alias=q_constants.CORE):
return FakePlugin()
@@ -1955,10 +1984,12 @@ class PluginTest(unittest.TestCase,
def _prepare_router_test(self, tenant_id, ctx, region_name, index,
router_az_hints=None, net_az_hints=None,
create_new_router=False):
create_new_router=False,
network_type=constants.NT_LOCAL):
(t_net_id, t_subnet_id, b_net_id,
b_subnet_id) = self._prepare_network_subnet(
tenant_id, ctx, region_name, index, az_hints=net_az_hints)
tenant_id, ctx, region_name, index, az_hints=net_az_hints,
network_type=network_type)
t_router_id = uuidutils.generate_uuid()
t_router = {
'id': t_router_id,
@@ -2309,7 +2340,8 @@ class PluginTest(unittest.TestCase,
'region': 'pod_1',
'host': 'fake_host',
'type': 'Open vSwitch agent',
'tunnel_ip': '192.168.1.101'
'tunnel_ip': '192.168.1.101',
'device': 'compute: None'
}}
}
fake_plugin.update_port(
@@ -2701,8 +2733,8 @@ class PluginTest(unittest.TestCase,
# test that we succeed when the bottom pod comes back
fake_plugin.add_router_interface(
q_ctx, t_router_id, {'subnet_id': t_subnet_id})
# bottom dhcp port and bottom interface
self.assertEqual(2, len(BOTTOM1_PORTS))
# bottom dhcp port, bottom interface and bridge gateway port
self.assertEqual(3, len(BOTTOM1_PORTS))
@patch.object(directory, 'get_plugin', new=fake_get_plugin)
@patch.object(driver.Pool, 'get_instance', new=fake_get_instance)
@@ -3483,6 +3515,7 @@ class PluginTest(unittest.TestCase,
update_body = {'port': {
'binding:profile': {
constants.PROFILE_REGION: 'pod_1',
constants.PROFILE_DEVICE: 'compute:None',
constants.PROFILE_HOST: 'host1',
constants.PROFILE_AGENT_TYPE: q_constants.AGENT_TYPE_OVS,
constants.PROFILE_TUNNEL_IP: '192.168.1.101'}}}
@@ -3495,6 +3528,7 @@ class PluginTest(unittest.TestCase,
update_body = {'port': {
'binding:profile': {
constants.PROFILE_REGION: 'pod_2',
constants.PROFILE_DEVICE: 'compute:None',
constants.PROFILE_HOST: 'host2',
constants.PROFILE_AGENT_TYPE: q_constants.AGENT_TYPE_OVS,
constants.PROFILE_TUNNEL_IP: '192.168.1.102'}}}
@@ -3510,6 +3544,83 @@ class PluginTest(unittest.TestCase,
# asynchronous job in pod_1 is registered
mock_setup.assert_called_once_with(t_ctx, 'pod_id_1', t_net_id)
@patch.object(directory, 'get_plugin', new=fake_get_plugin)
@patch.object(driver.Pool, 'get_instance', new=fake_get_instance)
@patch.object(ipam_pluggable_backend.IpamPluggableBackend,
'_allocate_ips_for_port', new=fake_allocate_ips_for_port)
@patch.object(db_base_plugin_common.DbBasePluginCommon,
'_make_subnet_dict', new=fake_make_subnet_dict)
@patch.object(FakeBaseRPCAPI, 'configure_extra_routes', new=mock.Mock)
@patch.object(FakeBaseRPCAPI, 'setup_shadow_ports')
@patch.object(context, 'get_context_from_neutron_context')
def test_add_interface_trigger_l2pop(self, mock_context, mock_setup):
cfg.CONF.set_override('bridge_network_type', 'vxlan',
group='tricircle')
cfg.CONF.set_override('tenant_network_types', ['local', 'vxlan'],
group='tricircle')
self._basic_pod_route_setup()
fake_plugin = FakePlugin()
q_ctx = FakeNeutronContext()
t_ctx = context.get_db_context()
mock_context.return_value = t_ctx
tenant_id = TEST_TENANT_ID
(t_net_id, t_subnet_id,
t_router_id, b_net_id, b_subnet_id) = self._prepare_router_test(
tenant_id, t_ctx, 'pod_1', 1, network_type=constants.NT_VxLAN)
fake_plugin.add_router_interface(
q_ctx, t_router_id, {'subnet_id': t_subnet_id})['port_id']
(t_net_id, t_subnet_id, t_router_id,
b_another_net_id, b_another_subnet_id) = self._prepare_router_test(
tenant_id, t_ctx, 'pod_2', 2, network_type=constants.NT_VxLAN)
fake_plugin.add_router_interface(
q_ctx, t_router_id, {'subnet_id': t_subnet_id})['port_id']
b_router_id1 = db_api.get_bottom_id_by_top_id_region_name(
t_ctx, t_router_id, 'pod_1', constants.RT_ROUTER)
b_router_id2 = db_api.get_bottom_id_by_top_id_region_name(
t_ctx, t_router_id, 'pod_2', constants.RT_ROUTER)
t_bridge_port_name1 = constants.bridge_port_name % (TEST_TENANT_ID,
b_router_id1)
t_bridge_port_name2 = constants.bridge_port_name % (TEST_TENANT_ID,
b_router_id2)
t_bridge_port_id1 = db_api.get_bottom_mappings_by_top_id(
t_ctx, t_bridge_port_name1, constants.RT_PORT)[0][1]
t_bridge_port_id2 = db_api.get_bottom_mappings_by_top_id(
t_ctx, t_bridge_port_name2, constants.RT_PORT)[0][1]
update_body = {'port': {
'binding:profile': {
constants.PROFILE_REGION: 'pod_1',
constants.PROFILE_DEVICE: 'network:router_gateway',
constants.PROFILE_HOST: 'host1',
constants.PROFILE_AGENT_TYPE: q_constants.AGENT_TYPE_OVS,
constants.PROFILE_TUNNEL_IP: '192.168.1.101'}}}
fake_plugin.update_port(q_ctx, t_bridge_port_id1, update_body)
update_body = {'port': {
'binding:profile': {
constants.PROFILE_REGION: 'pod_2',
constants.PROFILE_DEVICE: 'network:router_gateway',
constants.PROFILE_HOST: 'host2',
constants.PROFILE_AGENT_TYPE: q_constants.AGENT_TYPE_OVS,
constants.PROFILE_TUNNEL_IP: '192.168.1.102'}}}
fake_plugin.update_port(q_ctx, t_bridge_port_id2, update_body)
client = FakeClient('pod_2')
shadow_ports = client.list_ports(
t_ctx, [{'key': 'name', 'comparator': 'eq',
'value': constants.shadow_port_name % t_bridge_port_id1}])
# shadow port for bridge port is created
self.assertEqual(len(shadow_ports), 1)
# shadow port for bridge port is updated to active
self.assertIn(constants.PROFILE_FORCE_UP,
shadow_ports[0]['binding:profile'])
# asynchronous jobs are registered
calls = [mock.call(t_ctx, 'pod_id_2', shadow_ports[0]['network_id']),
mock.call(t_ctx, 'pod_id_1', shadow_ports[0]['network_id'])]
mock_setup.assert_has_calls(calls)
def tearDown(self):
core.ModelBase.metadata.drop_all(core.get_engine())
for res in RES_LIST:

View File

@@ -119,7 +119,7 @@ class FakeCorePlugin(object):
return port['port']
def update_port(self, context, _id, port):
pass
return update_resource('port', False, _id, port['port'])
def get_port(self, context, _id, fields=None):
return get_resource('port', False, _id)
@@ -477,10 +477,9 @@ class PluginTest(unittest.TestCase):
b_port.pop('project_id')
self.assertDictEqual(t_ports[i], b_port)
@patch.object(FakeCorePlugin, 'update_port')
@patch.object(t_context, 'get_context_from_neutron_context')
@patch.object(FakeNeutronHandle, 'handle_update')
def test_update_port(self, mock_update, mock_context, mock_core_update):
def test_update_port(self, mock_update, mock_context):
t_net, t_subnet, _, _ = self._prepare_resource()
b_net = self.plugin.get_network(self.context, t_net['id'])
cfg.CONF.set_override('region_name', 'Pod1', 'nova')
@@ -505,7 +504,8 @@
# network is not vxlan type
mock_update.assert_called_with(
self.context, 'port', port_id,
{'port': {'binding:profile': {'region': 'Pod1'}}})
{'port': {'binding:profile': {'region': 'Pod1',
'device': 'compute:None'}}})
# update network type from vlan to vxlan
update_resource('network', False, b_net['id'],
@@ -515,7 +515,8 @@
# port vif type is not recognized
mock_update.assert_called_with(
self.context, 'port', port_id,
{'port': {'binding:profile': {'region': 'Pod1'}}})
{'port': {'binding:profile': {'region': 'Pod1',
'device': 'compute:None'}}})
# update network type from fake_vif_type to ovs
update_resource('port', False, port_id,
@@ -527,7 +528,8 @@
# agent in the specific host is not found
mock_update.assert_called_with(
self.context, 'port', port_id,
{'port': {'binding:profile': {'region': 'Pod1'}}})
{'port': {'binding:profile': {'region': 'Pod1',
'device': 'compute:None'}}})
self.plugin.update_port(self.context, port_id, update_body)
# default p2p mode, update with agent host tunnel ip
@@ -536,7 +538,8 @@
{'port': {'binding:profile': {'region': 'Pod1',
'tunnel_ip': '192.168.1.101',
'type': 'Open vSwitch agent',
'host': host_id}}})
'host': host_id,
'device': 'compute:None'}}})
cfg.CONF.set_override('cross_pod_vxlan_mode', 'l2gw', 'client')
cfg.CONF.set_override('l2gw_tunnel_ip', '192.168.1.105', 'tricircle')
@@ -549,7 +552,8 @@
{'port': {'binding:profile': {'region': 'Pod1',
'tunnel_ip': '192.168.1.105',
'type': 'Open vSwitch agent',
'host': host_id}}})
'host': 'fake_host',
'device': 'compute:None'}}})
cfg.CONF.set_override('l2gw_tunnel_ip', '', 'tricircle')
cfg.CONF.set_override('cross_pod_vxlan_mode', 'l2gw', 'client')
@@ -557,31 +561,32 @@
# l2gw mode, but l2 gateway tunnel ip is not configured
mock_update.assert_called_with(
self.context, 'port', port_id,
{'port': {'binding:profile': {'region': 'Pod1'}}})
{'port': {'binding:profile': {'region': 'Pod1',
'device': 'compute:None'}}})
cfg.CONF.set_override('cross_pod_vxlan_mode', 'noop', 'client')
self.plugin.update_port(self.context, port_id, update_body)
# noop mode
mock_update.assert_called_with(
self.context, 'port', port_id,
{'port': {'binding:profile': {'region': 'Pod1'}}})
{'port': {'binding:profile': {'region': 'Pod1',
'device': 'compute:None'}}})
FakeCorePlugin.supported_extension_aliases = []
self.plugin.update_port(self.context, port_id, update_body)
# core plugin doesn't support "agent" extension
mock_update.assert_called_with(
self.context, 'port', port_id,
{'port': {'binding:profile': {'region': 'Pod1'}}})
{'port': {'binding:profile': {'region': 'Pod1',
'device': 'compute:None'}}})
FakeCorePlugin.supported_extension_aliases = ['agent']
self.plugin.update_port(self.context, port_id,
{'port': {portbindings.PROFILE: {
constants.PROFILE_FORCE_UP: True}}})
mock_core_update.assert_called_with(
self.context, port_id,
{'port': {'status': q_constants.PORT_STATUS_ACTIVE,
portbindings.PROFILE: {},
portbindings.VNIC_TYPE: q_constants.ATTR_NOT_SPECIFIED}})
b_port = get_resource('port', False, port_id)
# port status is updated to active
self.assertEqual(q_constants.PORT_STATUS_ACTIVE, b_port['status'])
@patch.object(t_context, 'get_context_from_neutron_context')
def test_update_subnet(self, mock_context):

View File

@@ -943,7 +943,8 @@ class XManager(PeriodicTasks):
LOG.debug('Ports %s in pod %s %s',
b_ports, target_pod_id, run_label)
for b_port in b_ports:
if not b_port['device_owner'].startswith('compute:'):
if not self.helper.is_need_top_sync_port(
b_port, cfg.CONF.client.bridge_cidr):
continue
if b_port['device_owner'] == constants.DEVICE_OWNER_SHADOW:
continue
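
In effect the shadow-port sync filter above becomes (paraphrase, not
a drop-in replacement):

candidates = [
    b_port for b_port in b_ports
    if self.helper.is_need_top_sync_port(b_port,
                                         cfg.CONF.client.bridge_cidr)
    and b_port['device_owner'] != constants.DEVICE_OWNER_SHADOW]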