Cross Pod L3 Networking - Part1
Implement cross pod l3 networking functionality. In this first patch, we finish the logic to prepare related resources when user issues router-interface-add request, including bottom router, bottom network, bridge network, etc. In the first step, we only consider the scenario that networks do not cross pods. Each tenant has a cross-pods VLAN network (the bridge network mentioned above) to connect different tenant networks in different pods and we use extra routes to route packets between pods. We utilize the AZ hint attribute of network to figure out which pod the network belongs to, so we do not need to wait for the creation of the first VM to know which pod to create the bottom network in. The configuration of extra routes will be implemented in the second patch. Change-Id: Ia455812732bc825c9367df0e2c4e23c512d8f401
This commit is contained in:
parent
5e886aebda
commit
b502502f2f
|
@ -27,6 +27,8 @@ Q_FLOATING_ALLOCATION_POOL=start=10.100.100.160,end=10.100.100.192
|
|||
|
||||
PUBLIC_NETWORK_GATEWAY=10.100.100.3
|
||||
|
||||
TENANT_VLAN_RANGE=2001:3000
|
||||
PHYSICAL_NETWORK=bridge
|
||||
|
||||
Q_ENABLE_TRICIRCLE=True
|
||||
enable_plugin tricircle https://github.com/openstack/tricircle/ experiment
|
||||
|
|
|
@ -232,7 +232,6 @@ function start_new_neutron_server {
|
|||
iniset $NEUTRON_CONF.$server_index database connection `database_connection_url $Q_DB_NAME$server_index`
|
||||
iniset $NEUTRON_CONF.$server_index nova region_name $region_name
|
||||
iniset $NEUTRON_CONF.$server_index DEFAULT bind_port $q_port
|
||||
iniset $NEUTRON_CONF.$server_index DEFAULT service_plugins ""
|
||||
|
||||
recreate_database $Q_DB_NAME$server_index
|
||||
$NEUTRON_BIN_DIR/neutron-db-manage --config-file $NEUTRON_CONF.$server_index --config-file /$Q_PLUGIN_CONF_FILE upgrade head
|
||||
|
@ -276,6 +275,9 @@ if [[ "$Q_ENABLE_TRICIRCLE" == "True" ]]; then
|
|||
iniset $NEUTRON_CONF client admin_tenant demo
|
||||
iniset $NEUTRON_CONF client auto_refresh_endpoint True
|
||||
iniset $NEUTRON_CONF client top_pod_name $REGION_NAME
|
||||
|
||||
iniset $NEUTRON_CONF tricircle bridge_segmentation_id `echo $TENANT_VLAN_RANGE | awk -F: '{print $2}'`
|
||||
iniset $NEUTRON_CONF tricircle bridge_physical_network $PHYSICAL_NETWORK
|
||||
fi
|
||||
|
||||
elif [[ "$1" == "stack" && "$2" == "extra" ]]; then
|
||||
|
|
|
@ -54,7 +54,7 @@ class VolumeController(rest.RestController):
|
|||
pecan.abort(400, _('Availability zone not set in request'))
|
||||
return
|
||||
|
||||
pod = az_ag.get_pod_by_az_tenant(
|
||||
pod, pod_az = az_ag.get_pod_by_az_tenant(
|
||||
context,
|
||||
az_name=kw['volume']['availability_zone'],
|
||||
tenant_id=self.tenant_id)
|
||||
|
|
|
@ -116,19 +116,18 @@ def get_pod_by_az_tenant(context, az_name, tenant_id):
|
|||
'comparator': 'eq',
|
||||
'value': tenant_id}],
|
||||
[])
|
||||
if pod_bindings:
|
||||
for pod_b in pod_bindings:
|
||||
pod = core.get_resource(context,
|
||||
models.Pod,
|
||||
pod_b['pod_id'])
|
||||
if pod['az_name'] == az_name:
|
||||
return pod
|
||||
for pod_b in pod_bindings:
|
||||
pod = core.get_resource(context,
|
||||
models.Pod,
|
||||
pod_b['pod_id'])
|
||||
if pod['az_name'] == az_name:
|
||||
return pod, pod['pod_az_name']
|
||||
|
||||
# TODO(joehuang): schedule one dynamicly in the future
|
||||
# TODO(joehuang): schedule one dynamically in the future
|
||||
filters = [{'key': 'az_name', 'comparator': 'eq', 'value': az_name}]
|
||||
pods = db_api.list_pods(context, filters=filters)
|
||||
for pod in pods:
|
||||
if pod['pod_name'] != '' and az_name != '':
|
||||
if pod['pod_name'] != '':
|
||||
try:
|
||||
with context.session.begin():
|
||||
core.create_resource(
|
||||
|
@ -136,13 +135,13 @@ def get_pod_by_az_tenant(context, az_name, tenant_id):
|
|||
{'id': uuidutils.generate_uuid(),
|
||||
'tenant_id': tenant_id,
|
||||
'pod_id': pod['pod_id']})
|
||||
return pod
|
||||
return pod, pod['pod_az_name']
|
||||
except Exception as e:
|
||||
LOG.error(_LE('Fail to create pod binding: %(exception)s'),
|
||||
{'exception': e})
|
||||
return None
|
||||
return None, None
|
||||
|
||||
return None
|
||||
return None, None
|
||||
|
||||
|
||||
def list_pods_by_tenant(context, tenant_id):
|
||||
|
|
|
@ -419,6 +419,7 @@ class Client(object):
|
|||
--------------------------
|
||||
aggregate -> add_host -> aggregate, host -> none
|
||||
volume -> set_bootable -> volume, flag -> none
|
||||
router -> add_interface -> router, body -> none
|
||||
--------------------------
|
||||
:return: None
|
||||
:raises: EndpointNotAvailable
|
||||
|
|
|
@ -28,6 +28,7 @@ RT_SNAPSHOT = 'snapshot'
|
|||
RT_NETWORK = 'network'
|
||||
RT_SUBNET = 'subnet'
|
||||
RT_PORT = 'port'
|
||||
RT_ROUTER = 'router'
|
||||
|
||||
# version list
|
||||
NOVA_VERSION_V21 = 'v2.1'
|
||||
|
@ -37,3 +38,9 @@ NEUTRON_VERSION_V2 = 'v2'
|
|||
# supported release
|
||||
R_LIBERTY = 'liberty'
|
||||
R_MITAKA = 'mitaka'
|
||||
|
||||
# l3 bridge networking elements
|
||||
bridge_subnet_pool_name = 'bridge_subnet_pool'
|
||||
bridge_net_name = 'bridge_net_%s'
|
||||
bridge_subnet_name = 'bridge_subnet_%s'
|
||||
bridge_port_name = 'bridge_port_%s_%s'
|
||||
|
|
|
@ -90,10 +90,20 @@ class ContextBase(oslo_ctx.RequestContext):
|
|||
ctx_dict = super(ContextBase, self).to_dict()
|
||||
ctx_dict.update({
|
||||
'user_name': self.user_name,
|
||||
'tenant_name': self.tenant_name
|
||||
'tenant_name': self.tenant_name,
|
||||
'tenant_id': self.tenant_id,
|
||||
'project_id': self.project_id
|
||||
})
|
||||
return ctx_dict
|
||||
|
||||
@property
|
||||
def project_id(self):
|
||||
return self.tenant
|
||||
|
||||
@property
|
||||
def tenant_id(self):
|
||||
return self.tenant
|
||||
|
||||
@classmethod
|
||||
def from_dict(cls, ctx):
|
||||
return cls(**ctx)
|
||||
|
|
|
@ -0,0 +1,124 @@
|
|||
# Copyright 2015 Huawei Technologies Co., Ltd.
|
||||
# All Rights Reserved
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import datetime
|
||||
import eventlet
|
||||
|
||||
import oslo_db.exception as db_exc
|
||||
|
||||
from tricircle.db import core
|
||||
from tricircle.db import models
|
||||
|
||||
|
||||
def get_or_create_route(t_ctx, q_ctx,
                        project_id, pod, _id, _type, list_ele_method):
    """Get or create the routing entry mapping top resource to a pod.

    Returns a ``(route, is_new)`` tuple. ``is_new`` is True when this
    worker created the routing entry (and is therefore responsible for
    creating the bottom resource); False when a usable entry already
    exists. ``(None, False)`` means another worker holds a conflicting
    entry and the caller should retry.
    """
    # use configuration option later
    route_expire_threshold = 30

    with t_ctx.session.begin():
        routes = core.query_resource(
            t_ctx, models.ResourceRouting,
            [{'key': 'top_id', 'comparator': 'eq', 'value': _id},
             {'key': 'pod_id', 'comparator': 'eq',
              'value': pod['pod_id']}], [])
        if routes:
            route = routes[0]
            if route['bottom_id']:
                # bottom resource already created, reuse the entry
                return route, False
            else:
                route_time = route['updated_at'] or route['created_at']
                current_time = datetime.datetime.utcnow()
                delta = current_time - route_time
                # BUG FIX: use total_seconds() instead of the ``seconds``
                # attribute -- ``seconds`` is only the remainder after whole
                # days (0..86399), so an entry more than a day old could be
                # wrongly considered fresh and never expire.
                if delta.total_seconds() > route_expire_threshold:
                    # NOTE(zhiyuan) cannot directly remove the route, we have
                    # a race here that other worker is updating this route, we
                    # need to check if the corresponding element has been
                    # created by other worker
                    eles = list_ele_method(t_ctx, q_ctx, pod, _id, _type)
                    if eles:
                        route['bottom_id'] = eles[0]['id']
                        core.update_resource(t_ctx,
                                             models.ResourceRouting,
                                             route['id'], route)
                        return route, False
                    try:
                        core.delete_resource(t_ctx,
                                             models.ResourceRouting,
                                             route['id'])
                    except db_exc.ResourceNotFound:
                        # already removed by a concurrent worker
                        pass
    try:
        # NOTE(zhiyuan) try/except block inside a with block will cause
        # problem, so move them out of the block and manually handle the
        # session context
        t_ctx.session.begin()
        route = core.create_resource(t_ctx, models.ResourceRouting,
                                     {'top_id': _id,
                                      'pod_id': pod['pod_id'],
                                      'project_id': project_id,
                                      'resource_type': _type})
        t_ctx.session.commit()
        return route, True
    except db_exc.DBDuplicateEntry:
        # another worker inserted the entry first; caller should retry
        t_ctx.session.rollback()
        return None, False
    finally:
        t_ctx.session.close()
|
||||
|
||||
|
||||
def get_or_create_element(t_ctx, q_ctx,
                          project_id, pod, ele, _type, body,
                          list_ele_method, create_ele_method):
    """Ensure a bottom element mapped to top element ``ele`` exists.

    Loops up to ``max_tries`` times: each pass asks get_or_create_route
    for the routing entry; the worker that created a new entry creates
    the bottom element and records its id in ``bottom_id``. Other
    workers yield (eventlet.sleep(0)) and retry until the entry is
    filled in. Returns ``(is_new, bottom_id)``.
    """
    # use configuration option later
    max_tries = 5
    for _ in xrange(max_tries):
        route, is_new = get_or_create_route(
            t_ctx, q_ctx, project_id, pod, ele['id'], _type, list_ele_method)
        if not route:
            # conflicting entry held by another worker; yield and retry
            eventlet.sleep(0)
            continue
        if not is_new and not route['bottom_id']:
            # entry exists but the bottom element is still being created
            eventlet.sleep(0)
            continue
        if not is_new and route['bottom_id']:
            # bottom element already exists, nothing to create
            break
        if is_new:
            try:
                ele = create_ele_method(t_ctx, q_ctx, pod, body, _type)
            except Exception:
                # creation failed: roll back our routing entry so another
                # worker may try again, then re-raise the original error
                with t_ctx.session.begin():
                    try:
                        core.delete_resource(t_ctx,
                                             models.ResourceRouting,
                                             route['id'])
                    except db_exc.ResourceNotFound:
                        # NOTE(zhiyuan) this is a rare case that other worker
                        # considers the route expires and delete it though it
                        # was just created, maybe caused by out-of-sync time
                        pass
                raise
            with t_ctx.session.begin():
                # NOTE(zhiyuan) it's safe to update route, the bottom network
                # has been successfully created, so other worker will not
                # delete this route
                route['bottom_id'] = ele['id']
                core.update_resource(t_ctx, models.ResourceRouting,
                                     route['id'], route)
            break
    if not route:
        raise Exception('Fail to create %s routing entry' % _type)
    if not route['bottom_id']:
        raise Exception('Fail to bind top and bottom %s' % _type)
    return is_new, route['bottom_id']
|
|
@ -119,7 +119,7 @@ class NeutronResourceHandle(ResourceHandle):
|
|||
support_resource = {'network': LIST | CREATE | DELETE | GET,
|
||||
'subnet': LIST | CREATE | DELETE | GET,
|
||||
'port': LIST | CREATE | DELETE | GET,
|
||||
'router': LIST,
|
||||
'router': LIST | CREATE | ACTION,
|
||||
'security_group': LIST,
|
||||
'security_group_rule': LIST}
|
||||
|
||||
|
@ -176,6 +176,16 @@ class NeutronResourceHandle(ResourceHandle):
|
|||
LOG.debug("Delete %(resource)s %(resource_id)s which not found",
|
||||
{'resource': resource, 'resource_id': resource_id})
|
||||
|
||||
def handle_action(self, cxt, resource, action, *args, **kwargs):
    """Invoke ``<action>_<resource>`` on the neutron client, e.g.
    ``add_interface_router`` for (resource='router', action='add_interface').

    On connection failure the cached endpoint is invalidated and
    EndpointNotAvailable is raised so the caller can refresh endpoints.
    """
    try:
        client = self._get_client(cxt)
        return getattr(client, '%s_%s' % (action, resource))(*args,
                                                             **kwargs)
    except q_exceptions.ConnectionFailed:
        self.endpoint_url = None
        # NOTE(review): if _get_client itself raised ConnectionFailed,
        # ``client`` is unbound here and this line would raise NameError
        # instead -- confirm _get_client cannot raise that exception.
        raise exceptions.EndpointNotAvailable(
            'neutron', client.httpclient.endpoint_url)
|
||||
|
||||
|
||||
class NovaResourceHandle(ResourceHandle):
|
||||
service_type = cons.ST_NOVA
|
||||
|
|
|
@ -155,7 +155,7 @@ def upgrade(migrate_engine):
|
|||
cascaded_pods_resource_routing = sql.Table(
|
||||
'cascaded_pods_resource_routing', meta,
|
||||
sql.Column('id', sql.Integer, primary_key=True),
|
||||
sql.Column('top_id', sql.String(length=36), nullable=False),
|
||||
sql.Column('top_id', sql.String(length=127), nullable=False),
|
||||
sql.Column('bottom_id', sql.String(length=36)),
|
||||
sql.Column('pod_id', sql.String(length=64), nullable=False),
|
||||
sql.Column('project_id', sql.String(length=36)),
|
||||
|
|
|
@ -283,7 +283,7 @@ class ResourceRouting(core.ModelBase, core.DictBase, models.TimestampMixin):
|
|||
'resource_type', 'created_at', 'updated_at']
|
||||
|
||||
id = sql.Column('id', sql.Integer, primary_key=True)
|
||||
top_id = sql.Column('top_id', sql.String(length=36), nullable=False)
|
||||
top_id = sql.Column('top_id', sql.String(length=127), nullable=False)
|
||||
bottom_id = sql.Column('bottom_id', sql.String(length=36))
|
||||
pod_id = sql.Column('pod_id', sql.String(length=64),
|
||||
sql.ForeignKey('cascaded_pods.pod_id'),
|
||||
|
|
|
@ -13,13 +13,20 @@
|
|||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
|
||||
from oslo_config import cfg
|
||||
import oslo_log.helpers as log_helpers
|
||||
from oslo_log import log
|
||||
|
||||
from neutron.api.v2 import attributes
|
||||
from neutron.common import exceptions
|
||||
from neutron.db import common_db_mixin
|
||||
from neutron.db import db_base_plugin_v2
|
||||
from neutron.db import external_net_db
|
||||
from neutron.db import extradhcpopt_db
|
||||
# NOTE(zhiyuan) though not used, this import cannot be removed because Router
|
||||
# relies on one table defined in l3_agentschedulers_db
|
||||
from neutron.db import l3_agentschedulers_db # noqa
|
||||
from neutron.db import l3_db
|
||||
from neutron.db import models_v2
|
||||
from neutron.db import portbindings_db
|
||||
from neutron.db import securitygroups_db
|
||||
|
@ -28,14 +35,33 @@ from neutron.extensions import availability_zone as az_ext
|
|||
|
||||
from sqlalchemy import sql
|
||||
|
||||
from tricircle.common import az_ag
|
||||
import tricircle.common.client as t_client
|
||||
import tricircle.common.constants as t_constants
|
||||
import tricircle.common.context as t_context
|
||||
from tricircle.common.i18n import _
|
||||
from tricircle.common.i18n import _LI
|
||||
import tricircle.common.lock_handle as t_lock
|
||||
import tricircle.db.api as db_api
|
||||
from tricircle.db import core
|
||||
from tricircle.db import models
|
||||
|
||||
|
||||
tricircle_opts = [
|
||||
# TODO(zhiyuan) change to segmentation range
|
||||
# currently all tenants share one VLAN id for bridge networks, should
|
||||
# allocate one isolated segmentation id for each tenant later
|
||||
cfg.IntOpt('bridge_segmentation_id',
|
||||
default=0,
|
||||
help='vlan id of l3 bridge network'),
|
||||
cfg.StrOpt('bridge_physical_network',
|
||||
default='',
|
||||
help='name of l3 bridge physical network')
|
||||
]
|
||||
tricircle_opt_group = cfg.OptGroup('tricircle')
|
||||
cfg.CONF.register_group(tricircle_opt_group)
|
||||
cfg.CONF.register_opts(tricircle_opts, group=tricircle_opt_group)
|
||||
|
||||
LOG = log.getLogger(__name__)
|
||||
|
||||
|
||||
|
@ -43,18 +69,25 @@ class TricirclePlugin(db_base_plugin_v2.NeutronDbPluginV2,
|
|||
securitygroups_db.SecurityGroupDbMixin,
|
||||
external_net_db.External_net_db_mixin,
|
||||
portbindings_db.PortBindingMixin,
|
||||
extradhcpopt_db.ExtraDhcpOptMixin):
|
||||
extradhcpopt_db.ExtraDhcpOptMixin,
|
||||
l3_db.L3_NAT_dbonly_mixin):
|
||||
|
||||
__native_bulk_support = True
|
||||
__native_pagination_support = True
|
||||
__native_sorting_support = True
|
||||
|
||||
supported_extension_aliases = ["quotas",
|
||||
# NOTE(zhiyuan) we don't support "agent" and "availability_zone" extensions
|
||||
# and also it's no need for us to support, but "network_availability_zone"
|
||||
# depends on these two extensions so we need to register them
|
||||
supported_extension_aliases = ["agent",
|
||||
"quotas",
|
||||
"extra_dhcp_opt",
|
||||
"binding",
|
||||
"security-group",
|
||||
"external-net",
|
||||
"network_availability_zone"]
|
||||
"availability_zone",
|
||||
"network_availability_zone",
|
||||
"router"]
|
||||
|
||||
def __init__(self):
|
||||
super(TricirclePlugin, self).__init__()
|
||||
|
@ -85,36 +118,63 @@ class TricirclePlugin(db_base_plugin_v2.NeutronDbPluginV2,
|
|||
return
|
||||
t_ctx = t_context.get_context_from_neutron_context(context)
|
||||
with context.session.begin():
|
||||
pods = core.query_resource(t_ctx, models.PodMap, [], [])
|
||||
pods = core.query_resource(t_ctx, models.Pod, [], [])
|
||||
az_set = set(az_list)
|
||||
known_az_set = set([pod['pod_name'] for pod in pods])
|
||||
known_az_set = set([pod['az_name'] for pod in pods])
|
||||
diff = az_set - known_az_set
|
||||
if diff:
|
||||
raise az_ext.AvailabilityZoneNotFound(
|
||||
availability_zone=diff.pop())
|
||||
|
||||
@staticmethod
|
||||
def _extend_availability_zone(net_res, net_db):
|
||||
net_res[az_ext.AZ_HINTS] = az_ext.convert_az_string_to_list(
|
||||
net_db[az_ext.AZ_HINTS])
|
||||
|
||||
common_db_mixin.CommonDbMixin.register_dict_extend_funcs(
|
||||
attributes.NETWORKS, ['_extend_availability_zone'])
|
||||
|
||||
@property
|
||||
def _core_plugin(self):
|
||||
return self
|
||||
|
||||
def create_network(self, context, network):
|
||||
net_data = network['network']
|
||||
res = super(TricirclePlugin, self).create_network(context, network)
|
||||
if az_ext.AZ_HINTS in net_data:
|
||||
self._validate_availability_zones(context,
|
||||
net_data[az_ext.AZ_HINTS])
|
||||
az_hints = az_ext.convert_az_list_to_string(
|
||||
net_data[az_ext.AZ_HINTS])
|
||||
net_data[az_ext.AZ_HINTS] = az_hints
|
||||
return super(TricirclePlugin, self).create_network(context, network)
|
||||
update_res = super(TricirclePlugin, self).update_network(
|
||||
context, res['id'], {'network': {az_ext.AZ_HINTS: az_hints}})
|
||||
res[az_ext.AZ_HINTS] = update_res[az_ext.AZ_HINTS]
|
||||
return res
|
||||
|
||||
def delete_network(self, context, network_id):
|
||||
t_ctx = t_context.get_context_from_neutron_context(context)
|
||||
try:
|
||||
mappings = db_api.get_bottom_mappings_by_top_id(
|
||||
t_ctx, network_id, 'network')
|
||||
t_ctx, network_id, t_constants.RT_NETWORK)
|
||||
for mapping in mappings:
|
||||
pod_name = mapping[0]['pod_name']
|
||||
bottom_network_id = mapping[1]
|
||||
self._get_client(pod_name).delete_networks(
|
||||
t_ctx, bottom_network_id)
|
||||
with t_ctx.session.begin():
|
||||
core.delete_resources(
|
||||
t_ctx, models.ResourceRouting,
|
||||
filters=[{'key': 'top_id', 'comparator': 'eq',
|
||||
'value': network_id},
|
||||
{'key': 'pod_id', 'comparator': 'eq',
|
||||
'value': mapping[0]['pod_id']}])
|
||||
except Exception:
|
||||
raise
|
||||
with t_ctx.session.begin():
|
||||
core.delete_resources(t_ctx, models.ResourceRouting,
|
||||
filters=[{'key': 'top_id',
|
||||
'comparator': 'eq',
|
||||
'value': network_id}])
|
||||
super(TricirclePlugin, self).delete_network(context, network_id)
|
||||
|
||||
def update_network(self, context, network_id, network):
|
||||
|
@ -128,12 +188,19 @@ class TricirclePlugin(db_base_plugin_v2.NeutronDbPluginV2,
|
|||
t_ctx = t_context.get_context_from_neutron_context(context)
|
||||
try:
|
||||
mappings = db_api.get_bottom_mappings_by_top_id(
|
||||
t_ctx, subnet_id, 'network')
|
||||
t_ctx, subnet_id, t_constants.RT_SUBNET)
|
||||
for mapping in mappings:
|
||||
pod_name = mapping[0]['pod_name']
|
||||
bottom_subnet_id = mapping[1]
|
||||
self._get_client(pod_name).delete_subnets(
|
||||
t_ctx, bottom_subnet_id)
|
||||
with t_ctx.session.begin():
|
||||
core.delete_resources(
|
||||
t_ctx, models.ResourceRouting,
|
||||
filters=[{'key': 'top_id', 'comparator': 'eq',
|
||||
'value': subnet_id},
|
||||
{'key': 'pod_id', 'comparator': 'eq',
|
||||
'value': mapping[0]['pod_id']}])
|
||||
except Exception:
|
||||
raise
|
||||
super(TricirclePlugin, self).delete_subnet(context, subnet_id)
|
||||
|
@ -145,11 +212,19 @@ class TricirclePlugin(db_base_plugin_v2.NeutronDbPluginV2,
|
|||
def create_port(self, context, port):
|
||||
return super(TricirclePlugin, self).create_port(context, port)
|
||||
|
||||
def delete_port(self, context, port_id):
|
||||
def update_port(self, context, port_id, port):
|
||||
# TODO(zhiyuan) handle bottom port update
|
||||
# be careful that l3_db will call update_port to update device_id of
|
||||
# router interface, we cannot directly update bottom port in this case,
|
||||
# otherwise we will fail when attaching bottom port to bottom router
|
||||
# because its device_id is not empty
|
||||
return super(TricirclePlugin, self).update_port(context, port_id, port)
|
||||
|
||||
def delete_port(self, context, port_id, l3_port_check=True):
|
||||
t_ctx = t_context.get_context_from_neutron_context(context)
|
||||
try:
|
||||
mappings = db_api.get_bottom_mappings_by_top_id(t_ctx,
|
||||
port_id, 'port')
|
||||
mappings = db_api.get_bottom_mappings_by_top_id(
|
||||
t_ctx, port_id, t_constants.RT_PORT)
|
||||
if mappings:
|
||||
pod_name = mappings[0][0]['pod_name']
|
||||
bottom_port_id = mappings[0][1]
|
||||
|
@ -157,16 +232,17 @@ class TricirclePlugin(db_base_plugin_v2.NeutronDbPluginV2,
|
|||
t_ctx, bottom_port_id)
|
||||
except Exception:
|
||||
raise
|
||||
with t_ctx.session.begin():
|
||||
core.delete_resources(t_ctx, models.ResourceRouting,
|
||||
filters=[{'key': 'top_id',
|
||||
'comparator': 'eq',
|
||||
'value': port_id}])
|
||||
super(TricirclePlugin, self).delete_port(context, port_id)
|
||||
|
||||
def update_port(self, context, port_id, port):
|
||||
return super(TricirclePlugin, self).update_port(
|
||||
context, port_id, port)
|
||||
|
||||
def get_port(self, context, port_id, fields=None):
|
||||
t_ctx = t_context.get_context_from_neutron_context(context)
|
||||
mappings = db_api.get_bottom_mappings_by_top_id(t_ctx,
|
||||
port_id, 'port')
|
||||
mappings = db_api.get_bottom_mappings_by_top_id(
|
||||
t_ctx, port_id, t_constants.RT_PORT)
|
||||
if mappings:
|
||||
pod_name = mappings[0][0]['pod_name']
|
||||
bottom_port_id = mappings[0][1]
|
||||
|
@ -181,7 +257,8 @@ class TricirclePlugin(db_base_plugin_v2.NeutronDbPluginV2,
|
|||
|
||||
bottom_top_map = {}
|
||||
with t_ctx.session.begin():
|
||||
for resource in ('subnet', 'network'):
|
||||
for resource in (t_constants.RT_SUBNET, t_constants.RT_NETWORK,
|
||||
t_constants.RT_ROUTER):
|
||||
route_filters = [{'key': 'resource_type',
|
||||
'comparator': 'eq',
|
||||
'value': resource}]
|
||||
|
@ -191,13 +268,7 @@ class TricirclePlugin(db_base_plugin_v2.NeutronDbPluginV2,
|
|||
if route['bottom_id']:
|
||||
bottom_top_map[
|
||||
route['bottom_id']] = route['top_id']
|
||||
if 'network_id' in port and port['network_id'] in bottom_top_map:
|
||||
port['network_id'] = bottom_top_map[port['network_id']]
|
||||
if 'fixed_ips' in port:
|
||||
for ip in port['fixed_ips']:
|
||||
if ip['subnet_id'] in bottom_top_map:
|
||||
ip['subnet_id'] = bottom_top_map[ip['subnet_id']]
|
||||
|
||||
self._map_port_from_bottom_to_top(port, bottom_top_map)
|
||||
return port
|
||||
else:
|
||||
return super(TricirclePlugin, self).get_port(context,
|
||||
|
@ -265,22 +336,38 @@ class TricirclePlugin(db_base_plugin_v2.NeutronDbPluginV2,
|
|||
return ret
|
||||
|
||||
@staticmethod
|
||||
def _map_ports_from_bottom_to_top(res, bottom_top_map):
|
||||
def _map_port_from_bottom_to_top(port, bottom_top_map):
|
||||
if 'network_id' in port and port['network_id'] in bottom_top_map:
|
||||
port['network_id'] = bottom_top_map[port['network_id']]
|
||||
if 'fixed_ips' in port:
|
||||
for ip in port['fixed_ips']:
|
||||
if ip['subnet_id'] in bottom_top_map:
|
||||
ip['subnet_id'] = bottom_top_map[ip['subnet_id']]
|
||||
if 'device_id' in port and port['device_id'] in bottom_top_map:
|
||||
port['device_id'] = bottom_top_map[port['device_id']]
|
||||
|
||||
@staticmethod
|
||||
def _map_ports_from_bottom_to_top(ports, bottom_top_map):
|
||||
# TODO(zhiyuan) judge if it's fine to remove unmapped port
|
||||
port_list = []
|
||||
for port in res['ports']:
|
||||
for port in ports:
|
||||
if port['id'] not in bottom_top_map:
|
||||
continue
|
||||
port['id'] = bottom_top_map[port['id']]
|
||||
if 'network_id' in port and port['network_id'] in bottom_top_map:
|
||||
port['network_id'] = bottom_top_map[port['network_id']]
|
||||
if 'fixed_ips' in port:
|
||||
for ip in port['fixed_ips']:
|
||||
if ip['subnet_id'] in bottom_top_map:
|
||||
ip['subnet_id'] = bottom_top_map[ip['subnet_id']]
|
||||
TricirclePlugin._map_port_from_bottom_to_top(port, bottom_top_map)
|
||||
port_list.append(port)
|
||||
del res['ports']
|
||||
res['ports'] = port_list
|
||||
return port_list
|
||||
|
||||
@staticmethod
def _get_map_filter_ids(key, value, top_bottom_map):
    """Translate a list of top-pod ids in a filter value to bottom ids.

    Only applies to id-like filter keys; ids without a mapping are kept
    unchanged. Returns None (implicitly) for any other key, which callers
    use to mean "no translation applies".
    """
    if key in ('id', 'network_id', 'device_id'):
        id_list = []
        for _id in value:
            if _id in top_bottom_map:
                id_list.append(top_bottom_map[_id])
            else:
                # no bottom mapping known; pass the id through untouched
                id_list.append(_id)
        return id_list
|
||||
|
||||
def _get_ports_from_pod_with_number(self, context,
|
||||
current_pod, number, last_port_id,
|
||||
|
@ -297,21 +384,19 @@ class TricirclePlugin(db_base_plugin_v2.NeutronDbPluginV2,
|
|||
if filters:
|
||||
_filters = dict(filters)
|
||||
for key, value in _filters:
|
||||
if key == 'id' or key == 'network_id':
|
||||
id_list = []
|
||||
for _id in value:
|
||||
if _id in top_bottom_map:
|
||||
id_list.append(top_bottom_map[_id])
|
||||
else:
|
||||
id_list.append(_id)
|
||||
_filters['id'] = id_list
|
||||
id_list = self._get_map_filter_ids(key, value, top_bottom_map)
|
||||
if id_list:
|
||||
_filters[key] = id_list
|
||||
params.update(_filters)
|
||||
if last_port_id:
|
||||
# map top id to bottom id in request
|
||||
params['marker'] = top_bottom_map[last_port_id]
|
||||
res = q_client.get(q_client.ports_path, params=params)
|
||||
# map bottom id to top id in client response
|
||||
self._map_ports_from_bottom_to_top(res, bottom_top_map)
|
||||
mapped_port_list = self._map_ports_from_bottom_to_top(res['ports'],
|
||||
bottom_top_map)
|
||||
del res['ports']
|
||||
res['ports'] = mapped_port_list
|
||||
|
||||
if len(res['ports']) == number:
|
||||
return res
|
||||
|
@ -340,7 +425,8 @@ class TricirclePlugin(db_base_plugin_v2.NeutronDbPluginV2,
|
|||
with t_ctx.session.begin():
|
||||
bottom_top_map = {}
|
||||
top_bottom_map = {}
|
||||
for resource in ('port', 'subnet', 'network'):
|
||||
for resource in (t_constants.RT_PORT, t_constants.RT_SUBNET,
|
||||
t_constants.RT_NETWORK, t_constants.RT_ROUTER):
|
||||
route_filters = [{'key': 'resource_type',
|
||||
'comparator': 'eq',
|
||||
'value': resource}]
|
||||
|
@ -354,8 +440,8 @@ class TricirclePlugin(db_base_plugin_v2.NeutronDbPluginV2,
|
|||
|
||||
if limit:
|
||||
if marker:
|
||||
mappings = db_api.get_bottom_mappings_by_top_id(t_ctx,
|
||||
marker, 'port')
|
||||
mappings = db_api.get_bottom_mappings_by_top_id(
|
||||
t_ctx, marker, t_constants.RT_PORT)
|
||||
# NOTE(zhiyuan) if mapping exists, we retrieve port information
|
||||
# from bottom, otherwise from top
|
||||
if mappings:
|
||||
|
@ -395,13 +481,9 @@ class TricirclePlugin(db_base_plugin_v2.NeutronDbPluginV2,
|
|||
_filters = []
|
||||
if filters:
|
||||
for key, value in filters.iteritems():
|
||||
if key == 'id' or key == 'network_id':
|
||||
id_list = []
|
||||
for _id in value:
|
||||
if _id in top_bottom_map:
|
||||
id_list.append(top_bottom_map[_id])
|
||||
else:
|
||||
id_list.append(_id)
|
||||
id_list = self._get_map_filter_ids(key, value,
|
||||
top_bottom_map)
|
||||
if id_list:
|
||||
_filters.append({'key': key,
|
||||
'comparator': 'eq',
|
||||
'value': id_list})
|
||||
|
@ -411,7 +493,358 @@ class TricirclePlugin(db_base_plugin_v2.NeutronDbPluginV2,
|
|||
'value': value})
|
||||
client = self._get_client(pod['pod_name'])
|
||||
ret.extend(client.list_ports(t_ctx, filters=_filters))
|
||||
self._map_ports_from_bottom_to_top({'ports': ret}, bottom_top_map)
|
||||
ret = self._map_ports_from_bottom_to_top(ret, bottom_top_map)
|
||||
ret.extend(self._get_ports_from_top(context, top_bottom_map,
|
||||
filters))
|
||||
return ret
|
||||
|
||||
def create_router(self, context, router):
    """Create a top-level router; delegates entirely to the L3 DB mixin."""
    return super(TricirclePlugin, self).create_router(context, router)
|
||||
|
||||
def delete_router(self, context, _id):
    """Delete a top-level router; delegates entirely to the L3 DB mixin."""
    super(TricirclePlugin, self).delete_router(context, _id)
|
||||
|
||||
def _judge_network_across_pods(self, context, interface, add_by_port):
    """Resolve the network of a router-interface request and check that it
    is pinned to exactly one pod via its availability-zone hints.

    :param interface: router-interface-add body containing either
        'port_id' or 'subnet_id' (selected by ``add_by_port``)
    :returns: (az_hint, network) where az_hint names the single pod
    :raises Exception: when the network has zero or multiple AZ hints,
        since cross-pod L3 networking is not yet supported
    """
    if add_by_port:
        port = self.get_port(context, interface['port_id'])
        net_id = port['network_id']
    else:
        subnet = self.get_subnet(context, interface['subnet_id'])
        net_id = subnet['network_id']
    network = self.get_network(context, net_id)
    if len(network.get(az_ext.AZ_HINTS, [])) != 1:
        # Currently not support cross pods l3 networking so
        # raise an exception here
        raise Exception('Cross pods L3 networking not support')
    return network[az_ext.AZ_HINTS][0], network
|
||||
|
||||
def _prepare_top_element(self, t_ctx, q_ctx,
                         project_id, pod, ele, _type, body):
    """Get or create a resource in the top pod via the lock handle.

    Wraps t_lock.get_or_create_element with list/create callbacks that
    call this plugin's own get_<type>s / create_<type> methods, so the
    resource is created locally (top pod) rather than via a client.
    Returns (is_new, top_resource_id).
    """
    def list_resources(t_ctx_, q_ctx_, pod_, _id_, _type_):
        # resources are looked up by name, which carries the top id
        return getattr(self, 'get_%ss' % _type_)(
            q_ctx_, filters={'name': _id_})

    def create_resources(t_ctx_, q_ctx_, pod_, body_, _type_):
        return getattr(self, 'create_%s' % _type_)(q_ctx_, body_)

    return t_lock.get_or_create_element(
        t_ctx, q_ctx,
        project_id, pod, ele, _type, body,
        list_resources, create_resources)
|
||||
|
||||
def _prepare_bottom_element(self, t_ctx,
                            project_id, pod, ele, _type, body):
    """Get or create a resource in a bottom pod via the lock handle.

    Like _prepare_top_element, but the list/create callbacks use the
    per-pod client so the resource is created in the bottom pod named
    by ``pod['pod_name']``. Returns (is_new, bottom_resource_id).
    """
    def list_resources(t_ctx_, q_ctx, pod_, _id_, _type_):
        client = self._get_client(pod_['pod_name'])
        # bottom resources are named after the top id for lookup
        return client.list_resources(_type_, t_ctx_, [{'key': 'name',
                                                       'comparator': 'eq',
                                                       'value': _id_}])

    def create_resources(t_ctx_, q_ctx, pod_, body_, _type_):
        client = self._get_client(pod_['pod_name'])
        return client.create_resources(_type_, t_ctx_, body_)

    return t_lock.get_or_create_element(
        t_ctx, None,  # we don't need neutron context, so pass None
        project_id, pod, ele, _type, body,
        list_resources, create_resources)
|
||||
|
||||
def _get_bridge_subnet_pool_id(self, t_ctx, q_ctx, project_id, pod):
    """Get or create the shared subnet pool used to carve per-tenant
    bridge subnets, returning its id.

    The pool covers 100.0.0.0/8 and is shared across tenants; creation
    requires admin, so the context is temporarily elevated.
    """
    pool_name = t_constants.bridge_subnet_pool_name
    pool_cidr = '100.0.0.0/8'
    pool_ele = {'id': pool_name}
    body = {'subnetpool': {'tenant_id': project_id,
                           'name': pool_name,
                           'shared': True,
                           'is_default': False,
                           'prefixes': [pool_cidr]}}

    # NOTE(review): is_admin is not restored if _prepare_top_element
    # raises -- consider a try/finally here.
    is_admin = q_ctx.is_admin
    q_ctx.is_admin = True
    _, pool_id = self._prepare_top_element(t_ctx, q_ctx, project_id, pod,
                                           pool_ele, 'subnetpool', body)
    q_ctx.is_admin = is_admin

    return pool_id
|
||||
|
||||
def _get_bridge_network_subnet(self, t_ctx, q_ctx,
                               project_id, pod, pool_id):
    """Get or create the per-tenant bridge network and its /24 subnet
    (allocated from the bridge subnet pool), returning (net, subnet).

    Names embed the project id (see t_constants.bridge_net_name /
    bridge_subnet_name) so lookups by name find the tenant's bridge
    resources. DHCP is disabled on the subnet since it only carries
    router-to-router traffic.
    """
    bridge_net_name = t_constants.bridge_net_name % project_id
    bridge_net_ele = {'id': bridge_net_name}
    bridge_subnet_name = t_constants.bridge_subnet_name % project_id
    bridge_subnet_ele = {'id': bridge_subnet_name}

    # NOTE(review): is_admin is not restored if either
    # _prepare_top_element call raises -- consider a try/finally.
    is_admin = q_ctx.is_admin
    q_ctx.is_admin = True

    net_body = {'network': {'tenant_id': project_id,
                            'name': bridge_net_name,
                            'shared': False,
                            'admin_state_up': True}}
    _, net_id = self._prepare_top_element(
        t_ctx, q_ctx, project_id, pod, bridge_net_ele, 'network', net_body)
    subnet_body = {
        'subnet': {
            'network_id': net_id,
            'name': bridge_subnet_name,
            # cidr left unspecified: a /24 is allocated from the pool
            'prefixlen': 24,
            'ip_version': 4,
            'allocation_pools': attributes.ATTR_NOT_SPECIFIED,
            'dns_nameservers': attributes.ATTR_NOT_SPECIFIED,
            'host_routes': attributes.ATTR_NOT_SPECIFIED,
            'cidr': attributes.ATTR_NOT_SPECIFIED,
            'subnetpool_id': pool_id,
            'enable_dhcp': False,
            'tenant_id': project_id
        }
    }
    _, subnet_id = self._prepare_top_element(
        t_ctx, q_ctx,
        project_id, pod, bridge_subnet_ele, 'subnet', subnet_body)

    q_ctx.is_admin = is_admin

    net = self.get_network(q_ctx, net_id)
    subnet = self.get_subnet(q_ctx, subnet_id)

    return net, subnet
|
||||
|
||||
def _get_bottom_elements(self, t_ctx, project_id, pod,
|
||||
t_net, t_subnet, t_port):
|
||||
net_body = {
|
||||
'network': {
|
||||
'tenant_id': project_id,
|
||||
'name': t_net['id'],
|
||||
'admin_state_up': True
|
||||
}
|
||||
}
|
||||
_, net_id = self._prepare_bottom_element(
|
||||
t_ctx, project_id, pod, t_net, 'network', net_body)
|
||||
subnet_body = {
|
||||
'subnet': {
|
||||
'network_id': net_id,
|
||||
'name': t_subnet['id'],
|
||||
'ip_version': t_subnet['ip_version'],
|
||||
'cidr': t_subnet['cidr'],
|
||||
'gateway_ip': t_subnet['gateway_ip'],
|
||||
'allocation_pools': t_subnet['allocation_pools'],
|
||||
'enable_dhcp': t_subnet['enable_dhcp'],
|
||||
'tenant_id': project_id
|
||||
}
|
||||
}
|
||||
_, subnet_id = self._prepare_bottom_element(
|
||||
t_ctx, project_id, pod, t_subnet, 'subnet', subnet_body)
|
||||
port_body = {
|
||||
'port': {
|
||||
'network_id': net_id,
|
||||
'name': t_port['id'],
|
||||
'admin_state_up': True,
|
||||
'fixed_ips': [
|
||||
{'subnet_id': subnet_id,
|
||||
'ip_address': t_port['fixed_ips'][0]['ip_address']}],
|
||||
'mac_address': t_port['mac_address']
|
||||
}
|
||||
}
|
||||
_, port_id = self._prepare_bottom_element(
|
||||
t_ctx, project_id, pod, t_port, 'port', port_body)
|
||||
return port_id
|
||||
|
||||
def _get_bridge_interface(self, t_ctx, q_ctx, project_id, pod,
|
||||
t_net_id, b_router_id):
|
||||
bridge_port_name = t_constants.bridge_port_name % (project_id,
|
||||
b_router_id)
|
||||
bridge_port_ele = {'id': bridge_port_name}
|
||||
port_body = {
|
||||
'port': {
|
||||
'tenant_id': project_id,
|
||||
'admin_state_up': True,
|
||||
'name': bridge_port_name,
|
||||
'network_id': t_net_id,
|
||||
'device_id': '',
|
||||
'device_owner': '',
|
||||
'mac_address': attributes.ATTR_NOT_SPECIFIED,
|
||||
'fixed_ips': attributes.ATTR_NOT_SPECIFIED
|
||||
}
|
||||
}
|
||||
_, port_id = self._prepare_top_element(
|
||||
t_ctx, q_ctx, project_id, pod, bridge_port_ele, 'port', port_body)
|
||||
return self.get_port(q_ctx, port_id)
|
||||
|
||||
def _get_bottom_bridge_elements(self, q_ctx, project_id,
|
||||
pod, t_net, t_subnet, t_port):
|
||||
t_ctx = t_context.get_context_from_neutron_context(q_ctx)
|
||||
|
||||
phy_net = cfg.CONF.tricircle.bridge_physical_network
|
||||
vlan = cfg.CONF.tricircle.bridge_segmentation_id
|
||||
net_body = {'network': {'tenant_id': project_id,
|
||||
'name': t_net['id'],
|
||||
'provider:network_type': 'vlan',
|
||||
'provider:physical_network': phy_net,
|
||||
'provider:segmentation_id': vlan,
|
||||
'admin_state_up': True}}
|
||||
_, b_net_id = self._prepare_bottom_element(
|
||||
t_ctx, project_id, pod, t_net, 'network', net_body)
|
||||
|
||||
subnet_body = {'subnet': {'network_id': b_net_id,
|
||||
'name': t_subnet['id'],
|
||||
'ip_version': 4,
|
||||
'cidr': t_subnet['cidr'],
|
||||
'enable_dhcp': False,
|
||||
'tenant_id': project_id}}
|
||||
_, b_subnet_id = self._prepare_bottom_element(
|
||||
t_ctx, project_id, pod, t_subnet, 'subnet', subnet_body)
|
||||
|
||||
port_body = {
|
||||
'port': {
|
||||
'tenant_id': project_id,
|
||||
'admin_state_up': True,
|
||||
'name': t_port['id'],
|
||||
'network_id': b_net_id,
|
||||
'fixed_ips': [
|
||||
{'subnet_id': b_subnet_id,
|
||||
'ip_address': t_port['fixed_ips'][0]['ip_address']}]
|
||||
}
|
||||
}
|
||||
is_new, b_port_id = self._prepare_bottom_element(
|
||||
t_ctx, project_id, pod, t_port, 'port', port_body)
|
||||
|
||||
return is_new, b_port_id
|
||||
|
||||
# NOTE(zhiyuan) the origin implementation in l3_db uses port returned from
|
||||
# get_port in core plugin to check, change it to base plugin, since only
|
||||
# top port information should be checked.
|
||||
def _check_router_port(self, context, port_id, device_id):
|
||||
port = super(TricirclePlugin, self).get_port(context, port_id)
|
||||
if port['device_id'] != device_id:
|
||||
raise exceptions.PortInUse(net_id=port['network_id'],
|
||||
port_id=port['id'],
|
||||
device_id=port['device_id'])
|
||||
if not port['fixed_ips']:
|
||||
msg = _('Router port must have at least one fixed IP')
|
||||
raise exceptions.BadRequest(resource='router', msg=msg)
|
||||
return port
|
||||
|
||||
def _unbound_top_interface(self, context, router_id, port_id):
|
||||
super(TricirclePlugin, self).update_port(
|
||||
context, port_id, {'port': {'device_id': '',
|
||||
'device_owner': ''}})
|
||||
with context.session.begin():
|
||||
query = context.session.query(l3_db.RouterPort)
|
||||
query.filter_by(port_id=port_id, router_id=router_id).delete()
|
||||
|
||||
def add_router_interface(self, context, router_id, interface_info):
|
||||
t_ctx = t_context.get_context_from_neutron_context(context)
|
||||
|
||||
router = self._get_router(context, router_id)
|
||||
project_id = router['tenant_id']
|
||||
admin_project_id = 'admin_project_id'
|
||||
add_by_port, _ = self._validate_interface_info(interface_info)
|
||||
# make sure network not crosses pods
|
||||
# TODO(zhiyuan) support cross-pod tenant network
|
||||
az, t_net = self._judge_network_across_pods(
|
||||
context, interface_info, add_by_port)
|
||||
b_pod, b_az = az_ag.get_pod_by_az_tenant(t_ctx, az, project_id)
|
||||
t_pod = None
|
||||
for pod in db_api.list_pods(t_ctx):
|
||||
if not pod['az_name']:
|
||||
t_pod = pod
|
||||
assert t_pod
|
||||
|
||||
router_body = {'router': {'name': router_id,
|
||||
'distributed': False}}
|
||||
_, b_router_id = self._prepare_bottom_element(
|
||||
t_ctx, project_id, b_pod, router, 'router', router_body)
|
||||
|
||||
pool_id = self._get_bridge_subnet_pool_id(
|
||||
t_ctx, context, admin_project_id, t_pod)
|
||||
t_bridge_net, t_bridge_subnet = self._get_bridge_network_subnet(
|
||||
t_ctx, context, project_id, t_pod, pool_id)
|
||||
t_bridge_port = self._get_bridge_interface(
|
||||
t_ctx, context, project_id, t_pod, t_bridge_net['id'],
|
||||
b_router_id)
|
||||
|
||||
is_new, b_bridge_port_id = self._get_bottom_bridge_elements(
|
||||
context, project_id, b_pod, t_bridge_net, t_bridge_subnet,
|
||||
t_bridge_port)
|
||||
|
||||
# NOTE(zhiyuan) subnet pool, network, subnet are reusable resource,
|
||||
# we decide not to remove them when operation fails, so before adding
|
||||
# router interface, no clearing is needed.
|
||||
is_success = False
|
||||
for _ in xrange(2):
|
||||
try:
|
||||
return_info = super(TricirclePlugin,
|
||||
self).add_router_interface(
|
||||
context, router_id, interface_info)
|
||||
is_success = True
|
||||
except exceptions.PortInUse:
|
||||
# NOTE(zhiyuan) so top interface is already bound to top
|
||||
# router, we need to check if bottom interface is bound.
|
||||
|
||||
# safe to get port_id since only adding interface by port will
|
||||
# get PortInUse exception
|
||||
t_port_id = interface_info['port_id']
|
||||
mappings = db_api.get_bottom_mappings_by_top_id(
|
||||
t_ctx, t_port_id, t_constants.RT_PORT)
|
||||
if not mappings:
|
||||
# bottom interface does not exists, ignore this exception
|
||||
# and continue to create bottom interface
|
||||
self._unbound_top_interface(context, router_id, t_port_id)
|
||||
else:
|
||||
pod, b_port_id = mappings[0]
|
||||
b_port = self._get_client(pod['pod_name']).get_ports(
|
||||
t_ctx, b_port_id)
|
||||
if not b_port['device_id']:
|
||||
# bottom interface exists but is not bound, ignore this
|
||||
# exception and continue to bind bottom interface
|
||||
self._unbound_top_interface(context, router_id,
|
||||
t_port_id)
|
||||
else:
|
||||
# bottom interface already bound, re-raise exception
|
||||
raise
|
||||
if is_success:
|
||||
break
|
||||
|
||||
if not is_success:
|
||||
raise Exception()
|
||||
|
||||
t_port_id = return_info['port_id']
|
||||
t_port = self.get_port(context, t_port_id)
|
||||
t_subnet = self.get_subnet(context,
|
||||
t_port['fixed_ips'][0]['subnet_id'])
|
||||
|
||||
try:
|
||||
b_port_id = self._get_bottom_elements(
|
||||
t_ctx, project_id, b_pod, t_net, t_subnet, t_port)
|
||||
except Exception:
|
||||
# NOTE(zhiyuan) remove_router_interface will delete top interface.
|
||||
# if mapping is already built between top and bottom interface,
|
||||
# bottom interface and resource routing entry will also be deleted.
|
||||
#
|
||||
# but remove_router_interface may fail when deleting bottom
|
||||
# interface, in this case, top and bottom interfaces are both left,
|
||||
# user needs to manually delete top interface.
|
||||
super(TricirclePlugin, self).remove_router_interface(
|
||||
context, router_id, interface_info)
|
||||
raise
|
||||
|
||||
client = self._get_client(b_pod['pod_name'])
|
||||
try:
|
||||
if is_new:
|
||||
# only attach bridge port the first time
|
||||
client.action_routers(t_ctx, 'add_interface', b_router_id,
|
||||
{'port_id': b_bridge_port_id})
|
||||
else:
|
||||
# still need to check if the bridge port is bound
|
||||
port = client.get_ports(t_ctx, b_bridge_port_id)
|
||||
if not port.get('device_id'):
|
||||
client.action_routers(t_ctx, 'add_interface', b_router_id,
|
||||
{'port_id': b_bridge_port_id})
|
||||
client.action_routers(t_ctx, 'add_interface', b_router_id,
|
||||
{'port_id': b_port_id})
|
||||
except Exception:
|
||||
super(TricirclePlugin, self).remove_router_interface(
|
||||
context, router_id, interface_info)
|
||||
raise
|
||||
|
||||
return return_info
|
||||
|
|
|
@ -13,18 +13,15 @@
|
|||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import datetime
|
||||
import eventlet
|
||||
|
||||
import pecan
|
||||
from pecan import expose
|
||||
from pecan import rest
|
||||
|
||||
import oslo_db.exception as db_exc
|
||||
from oslo_utils import uuidutils
|
||||
|
||||
from tricircle.common import az_ag
|
||||
import tricircle.common.client as t_client
|
||||
from tricircle.common import constants
|
||||
import tricircle.common.context as t_context
|
||||
import tricircle.common.lock_handle as t_lock
|
||||
import tricircle.db.api as db_api
|
||||
from tricircle.db import core
|
||||
from tricircle.db import models
|
||||
|
@ -42,62 +39,15 @@ class ServerController(rest.RestController):
|
|||
return self.clients[pod_name]
|
||||
|
||||
def _get_or_create_route(self, context, pod, _id, _type):
|
||||
# use configuration option later
|
||||
route_expire_threshold = 30
|
||||
def list_resources(t_ctx, q_ctx, pod_, _id_, _type_):
|
||||
client = self._get_client(pod_['pod_name'])
|
||||
return client.list_resources(_type_, t_ctx, [{'key': 'name',
|
||||
'comparator': 'eq',
|
||||
'value': _id_}])
|
||||
|
||||
with context.session.begin():
|
||||
routes = core.query_resource(
|
||||
context, models.ResourceRouting,
|
||||
[{'key': 'top_id', 'comparator': 'eq', 'value': _id},
|
||||
{'key': 'pod_id', 'comparator': 'eq',
|
||||
'value': pod['pod_id']}], [])
|
||||
if routes:
|
||||
route = routes[0]
|
||||
if route['bottom_id']:
|
||||
return route, False
|
||||
else:
|
||||
route_time = route['updated_at'] or route['created_at']
|
||||
current_time = datetime.datetime.utcnow()
|
||||
delta = current_time - route_time
|
||||
if delta.seconds > route_expire_threshold:
|
||||
# NOTE(zhiyuan) cannot directly remove the route, we
|
||||
# have a race that other worker is updating this route
|
||||
# with bottom id, we need to check if the bottom
|
||||
# element has been created by other worker
|
||||
client = self._get_client(pod['pod_name'])
|
||||
bottom_eles = client.list_resources(
|
||||
_type, context, [{'key': 'name',
|
||||
'comparator': 'eq',
|
||||
'value': _id}])
|
||||
if bottom_eles:
|
||||
route['bottom_id'] = bottom_eles[0]['id']
|
||||
core.update_resource(context,
|
||||
models.ResourceRouting,
|
||||
route['id'], route)
|
||||
return route, False
|
||||
try:
|
||||
core.delete_resource(context,
|
||||
models.ResourceRouting,
|
||||
route['id'])
|
||||
except db_exc.ResourceNotFound:
|
||||
pass
|
||||
try:
|
||||
# NOTE(zhiyuan) try/except block inside a with block will cause
|
||||
# problem, so move them out of the block and manually handle the
|
||||
# session context
|
||||
context.session.begin()
|
||||
route = core.create_resource(context, models.ResourceRouting,
|
||||
{'top_id': _id,
|
||||
'pod_id': pod['pod_id'],
|
||||
'project_id': self.project_id,
|
||||
'resource_type': _type})
|
||||
context.session.commit()
|
||||
return route, True
|
||||
except db_exc.DBDuplicateEntry:
|
||||
context.session.rollback()
|
||||
return None, False
|
||||
finally:
|
||||
context.session.close()
|
||||
return t_lock.get_or_create_route(context, None,
|
||||
self.project_id, pod, _id, _type,
|
||||
list_resources)
|
||||
|
||||
def _get_create_network_body(self, network):
|
||||
body = {
|
||||
|
@ -163,47 +113,21 @@ class ServerController(rest.RestController):
|
|||
return body
|
||||
|
||||
def _prepare_neutron_element(self, context, pod, ele, _type, body):
|
||||
client = self._get_client(pod['pod_name'])
|
||||
# use configuration option later
|
||||
max_tries = 5
|
||||
for _ in xrange(max_tries):
|
||||
route, is_new = self._get_or_create_route(context,
|
||||
pod, ele['id'], _type)
|
||||
if not route:
|
||||
eventlet.sleep(0)
|
||||
continue
|
||||
if not is_new and not route['bottom_id']:
|
||||
eventlet.sleep(0)
|
||||
continue
|
||||
if is_new:
|
||||
try:
|
||||
bottom_ele = client.create_resources(_type, context, body)
|
||||
except Exception:
|
||||
with context.session.begin():
|
||||
try:
|
||||
core.delete_resource(context,
|
||||
models.ResourceRouting,
|
||||
route['id'])
|
||||
except db_exc.ResourceNotFound:
|
||||
# NOTE(zhiyuan) this is a rare case that other
|
||||
# worker considers the route expires and delete it
|
||||
# though it was just created, maybe caused by
|
||||
# out-of-sync time
|
||||
pass
|
||||
raise
|
||||
with context.session.begin():
|
||||
# NOTE(zhiyuan) it's safe to update route, the bottom
|
||||
# network has been successfully created, so other worker
|
||||
# will not delete this route
|
||||
route['bottom_id'] = bottom_ele['id']
|
||||
core.update_resource(context, models.ResourceRouting,
|
||||
route['id'], route)
|
||||
break
|
||||
if not route:
|
||||
raise Exception('Fail to create %s routing entry' % _type)
|
||||
if not route['bottom_id']:
|
||||
raise Exception('Fail to bind top and bottom %s' % _type)
|
||||
return route['bottom_id']
|
||||
def list_resources(t_ctx, q_ctx, pod_, _id_, _type_):
|
||||
client = self._get_client(pod_['pod_name'])
|
||||
return client.list_resources(_type_, t_ctx, [{'key': 'name',
|
||||
'comparator': 'eq',
|
||||
'value': _id_}])
|
||||
|
||||
def create_resources(t_ctx, q_ctx, pod_, body_, _type_):
|
||||
client = self._get_client(pod_['pod_name'])
|
||||
return client.create_resources(_type_, t_ctx, body_)
|
||||
|
||||
_, ele_id = t_lock.get_or_create_element(
|
||||
context, None, # we don't need neutron context, so pass None
|
||||
self.project_id, pod, ele, _type, body,
|
||||
list_resources, create_resources)
|
||||
return ele_id
|
||||
|
||||
def _handle_network(self, context, pod, net, subnets, port=None):
|
||||
# network
|
||||
|
@ -259,7 +183,7 @@ class ServerController(rest.RestController):
|
|||
t_dhcp_port = top_client.create_ports(context,
|
||||
top_dhcp_port_body)
|
||||
mappings = db_api.get_bottom_mappings_by_top_id(
|
||||
context, t_dhcp_port['id'], 'port')
|
||||
context, t_dhcp_port['id'], constants.RT_PORT)
|
||||
pod_list = [mapping[0]['pod_id'] for mapping in mappings]
|
||||
if pod['pod_id'] in pod_list:
|
||||
# mapping exists, skip this subnet
|
||||
|
@ -289,7 +213,7 @@ class ServerController(rest.RestController):
|
|||
'bottom_id': dhcp_port['id'],
|
||||
'pod_id': pod['pod_id'],
|
||||
'project_id': self.project_id,
|
||||
'resource_type': 'port'})
|
||||
'resource_type': constants.RT_PORT})
|
||||
dhcp_port_match = True
|
||||
break
|
||||
if not dhcp_port_match:
|
||||
|
@ -303,7 +227,7 @@ class ServerController(rest.RestController):
|
|||
'bottom_id': b_dhcp_port['id'],
|
||||
'pod_id': pod['pod_id'],
|
||||
'project_id': self.project_id,
|
||||
'resource_type': 'port'})
|
||||
'resource_type': constants.RT_PORT})
|
||||
# there is still one thing to do, there may be other dhcp ports
|
||||
# created by bottom pod, we need to delete them
|
||||
b_dhcp_ports = client.list_ports(context,
|
||||
|
@ -329,8 +253,8 @@ class ServerController(rest.RestController):
|
|||
return bottom_port_id
|
||||
|
||||
def _handle_port(self, context, pod, port):
|
||||
mappings = db_api.get_bottom_mappings_by_top_id(context,
|
||||
port['id'], 'port')
|
||||
mappings = db_api.get_bottom_mappings_by_top_id(context, port['id'],
|
||||
constants.RT_PORT)
|
||||
if mappings:
|
||||
# TODO(zhiyuan) judge return or raise exception
|
||||
# NOTE(zhiyuan) user provides a port that already has mapped
|
||||
|
@ -369,47 +293,6 @@ class ServerController(rest.RestController):
|
|||
ret.extend(client.list_servers(context))
|
||||
return ret
|
||||
|
||||
def _schedule_pod(self, context, az):
|
||||
with context.session.begin():
|
||||
pod_bindings = core.query_resource(
|
||||
context, models.PodBinding,
|
||||
[{'key': 'tenant_id',
|
||||
'comparator': 'eq',
|
||||
'value': self.project_id}], [])
|
||||
for pod_binding in pod_bindings:
|
||||
pod = core.get_resource(context, models.Pod,
|
||||
pod_binding['pod_id'])
|
||||
if pod['az_name'] == az:
|
||||
pods = core.query_resource(
|
||||
context, models.Pod,
|
||||
[{'key': 'pod_name',
|
||||
'comparator': 'eq',
|
||||
'value': pod['pod_name']}], [])
|
||||
return pods[0], pod['pod_az_name']
|
||||
# no proper pod found, try to schedule one
|
||||
pods = core.query_resource(
|
||||
context, models.Pod,
|
||||
[{'key': 'az_name',
|
||||
'comparator': 'eq',
|
||||
'value': az}], [])
|
||||
if pods:
|
||||
# dump schedule, just select the first map
|
||||
select_pod = pods[0]
|
||||
|
||||
pods = core.query_resource(
|
||||
context, models.Pod,
|
||||
[{'key': 'pod_name',
|
||||
'comparator': 'eq',
|
||||
'value': select_pod['pod_name']}], [])
|
||||
core.create_resource(
|
||||
context, models.PodBinding,
|
||||
{'id': uuidutils.generate_uuid(),
|
||||
'tenant_id': self.project_id,
|
||||
'pod_id': select_pod['pod_id']})
|
||||
return pods[0], select_pod['pod_az_name']
|
||||
else:
|
||||
return None, None
|
||||
|
||||
@expose(generic=True, template='json')
|
||||
def get_one(self, _id):
|
||||
context = t_context.extract_context_from_environ()
|
||||
|
@ -417,7 +300,8 @@ class ServerController(rest.RestController):
|
|||
if _id == 'detail':
|
||||
return {'servers': self._get_all(context)}
|
||||
|
||||
mappings = db_api.get_bottom_mappings_by_top_id(context, _id, 'server')
|
||||
mappings = db_api.get_bottom_mappings_by_top_id(
|
||||
context, _id, constants.RT_SERVER)
|
||||
if not mappings:
|
||||
pecan.abort(404, 'Server not found')
|
||||
return
|
||||
|
@ -447,8 +331,8 @@ class ServerController(rest.RestController):
|
|||
pecan.abort(400, 'Availability zone not set')
|
||||
return
|
||||
|
||||
pod, b_az = self._schedule_pod(context,
|
||||
kw['server']['availability_zone'])
|
||||
pod, b_az = az_ag.get_pod_by_az_tenant(
|
||||
context, kw['server']['availability_zone'], self.project_id)
|
||||
if not pod:
|
||||
pecan.abort(400, 'No pod bound to availability zone')
|
||||
return
|
||||
|
@ -496,5 +380,5 @@ class ServerController(rest.RestController):
|
|||
'bottom_id': server['id'],
|
||||
'pod_id': pod['pod_id'],
|
||||
'project_id': self.project_id,
|
||||
'resource_type': 'server'})
|
||||
'resource_type': constants.RT_SERVER})
|
||||
return {'server': server}
|
||||
|
|
|
@ -110,17 +110,17 @@ class AZAGTest(unittest.TestCase):
|
|||
|
||||
def test_get_pod_by_az_tenant(self):
|
||||
|
||||
pod1 = az_ag.get_pod_by_az_tenant(self.context,
|
||||
FAKE_AZ + FAKE_AZ,
|
||||
FAKE_TENANT_ID)
|
||||
pod1, _ = az_ag.get_pod_by_az_tenant(self.context,
|
||||
FAKE_AZ + FAKE_AZ,
|
||||
FAKE_TENANT_ID)
|
||||
self.assertEqual(pod1, None)
|
||||
pods = az_ag.list_pods_by_tenant(self.context, FAKE_TENANT_ID)
|
||||
self.assertEqual(len(pods), 0)
|
||||
|
||||
# schedule one
|
||||
pod2 = az_ag.get_pod_by_az_tenant(self.context,
|
||||
FAKE_AZ,
|
||||
FAKE_TENANT_ID)
|
||||
pod2, _ = az_ag.get_pod_by_az_tenant(self.context,
|
||||
FAKE_AZ,
|
||||
FAKE_TENANT_ID)
|
||||
|
||||
pod_bindings = core.query_resource(self.context,
|
||||
models.PodBinding,
|
||||
|
@ -139,9 +139,9 @@ class AZAGTest(unittest.TestCase):
|
|||
self.assertEqual(pod2['az_name'], FAKE_AZ)
|
||||
|
||||
# scheduled one should always be bound
|
||||
pod3 = az_ag.get_pod_by_az_tenant(self.context,
|
||||
FAKE_AZ,
|
||||
FAKE_TENANT_ID)
|
||||
pod3, _ = az_ag.get_pod_by_az_tenant(self.context,
|
||||
FAKE_AZ,
|
||||
FAKE_TENANT_ID)
|
||||
|
||||
self.assertEqual(pod2['pod_name'], pod3['pod_name'])
|
||||
self.assertEqual(pod2['pod_id'], pod3['pod_id'])
|
||||
|
@ -149,9 +149,9 @@ class AZAGTest(unittest.TestCase):
|
|||
|
||||
def test_list_pods_by_tenant(self):
|
||||
|
||||
pod1 = az_ag.get_pod_by_az_tenant(self.context,
|
||||
FAKE_AZ + FAKE_AZ,
|
||||
FAKE_TENANT_ID)
|
||||
pod1, _ = az_ag.get_pod_by_az_tenant(self.context,
|
||||
FAKE_AZ + FAKE_AZ,
|
||||
FAKE_TENANT_ID)
|
||||
pods = az_ag.list_pods_by_tenant(self.context, FAKE_TENANT_ID)
|
||||
self.assertEqual(pod1, None)
|
||||
self.assertEqual(len(pods), 0)
|
||||
|
@ -159,9 +159,9 @@ class AZAGTest(unittest.TestCase):
|
|||
# TODO(joehuang): tenant bound to multiple pods in one AZ
|
||||
|
||||
# schedule one
|
||||
pod2 = az_ag.get_pod_by_az_tenant(self.context,
|
||||
FAKE_AZ,
|
||||
FAKE_TENANT_ID)
|
||||
pod2, _ = az_ag.get_pod_by_az_tenant(self.context,
|
||||
FAKE_AZ,
|
||||
FAKE_TENANT_ID)
|
||||
pods = az_ag.list_pods_by_tenant(self.context, FAKE_TENANT_ID)
|
||||
self.assertDictEqual(pods[0], pod2)
|
||||
|
||||
|
|
|
@ -14,13 +14,23 @@
|
|||
# under the License.
|
||||
|
||||
|
||||
import copy
|
||||
import mock
|
||||
from mock import patch
|
||||
import unittest
|
||||
|
||||
from neutron.db import db_base_plugin_v2
|
||||
from sqlalchemy.orm import exc
|
||||
|
||||
import tricircle.common.client as t_client
|
||||
from neutron.db import db_base_plugin_common
|
||||
from neutron.db import db_base_plugin_v2
|
||||
from neutron.db import ipam_non_pluggable_backend
|
||||
from neutron.extensions import availability_zone as az_ext
|
||||
from neutron.ipam import subnet_alloc
|
||||
import neutronclient.common.exceptions as q_exceptions
|
||||
|
||||
from oslo_utils import uuidutils
|
||||
|
||||
from tricircle.common import constants
|
||||
from tricircle.common import context
|
||||
import tricircle.db.api as db_api
|
||||
from tricircle.db import core
|
||||
|
@ -28,34 +38,101 @@ from tricircle.db import models
|
|||
from tricircle.network import plugin
|
||||
|
||||
|
||||
TOP_NETS = []
|
||||
TOP_SUBNETS = []
|
||||
TOP_PORTS = []
|
||||
TOP_ROUTERS = []
|
||||
TOP_ROUTERPORT = []
|
||||
TOP_SUBNETPOOLS = []
|
||||
TOP_SUBNETPOOLPREFIXES = []
|
||||
TOP_IPALLOCATIONS = []
|
||||
BOTTOM1_NETS = []
|
||||
BOTTOM1_SUBNETS = []
|
||||
BOTTOM1_PORTS = []
|
||||
BOTTOM1_ROUTERS = []
|
||||
BOTTOM2_NETS = []
|
||||
BOTTOM2_SUBNETS = []
|
||||
BOTTOM2_PORTS = []
|
||||
BOTTOM2_ROUTERS = []
|
||||
RES_LIST = [TOP_NETS, TOP_SUBNETS, TOP_PORTS, TOP_ROUTERS, TOP_ROUTERPORT,
|
||||
TOP_SUBNETPOOLS, TOP_SUBNETPOOLPREFIXES, TOP_IPALLOCATIONS,
|
||||
BOTTOM1_NETS, BOTTOM1_SUBNETS, BOTTOM1_PORTS, BOTTOM1_ROUTERS,
|
||||
BOTTOM2_NETS, BOTTOM2_SUBNETS, BOTTOM2_PORTS, BOTTOM2_ROUTERS]
|
||||
RES_MAP = {'networks': TOP_NETS,
|
||||
'subnets': TOP_SUBNETS,
|
||||
'ports': TOP_PORTS,
|
||||
'routers': TOP_ROUTERS,
|
||||
'routerports': TOP_ROUTERPORT,
|
||||
'ipallocations': TOP_IPALLOCATIONS,
|
||||
'subnetpools': TOP_SUBNETPOOLS,
|
||||
'subnetpoolprefixes': TOP_SUBNETPOOLPREFIXES}
|
||||
|
||||
|
||||
class DotDict(dict):
|
||||
def __init__(self, normal_dict=None):
|
||||
if normal_dict:
|
||||
for key, value in normal_dict.iteritems():
|
||||
self[key] = value
|
||||
|
||||
def __getattr__(self, item):
|
||||
return self.get(item)
|
||||
|
||||
|
||||
class FakeNeutronClient(object):
|
||||
|
||||
_res_map = {'pod_1': {'network': BOTTOM1_NETS,
|
||||
'subnet': BOTTOM1_SUBNETS,
|
||||
'port': BOTTOM1_PORTS,
|
||||
'router': BOTTOM1_ROUTERS},
|
||||
'pod_2': {'network': BOTTOM2_NETS,
|
||||
'subnet': BOTTOM2_SUBNETS,
|
||||
'port': BOTTOM2_PORTS,
|
||||
'router': BOTTOM2_ROUTERS}}
|
||||
|
||||
def __init__(self, pod_name):
|
||||
self.pod_name = pod_name
|
||||
self.ports_path = ''
|
||||
|
||||
def _get(self, params=None):
|
||||
pod_index = self.pod_name.split('_')[1]
|
||||
bottom_id = 'bottom_id_%s' % pod_index
|
||||
port_list = self._res_map[self.pod_name]['port']
|
||||
|
||||
if not params:
|
||||
return {'ports': [{'id': bottom_id, 'name': 'bottom'}]}
|
||||
if params.get('marker') == bottom_id:
|
||||
return {'ports': []}
|
||||
if 'filters' in params and params['filters'].get('id', []):
|
||||
if bottom_id in params['filters']['id']:
|
||||
return {'ports': [{'id': bottom_id, 'name': 'bottom'}]}
|
||||
else:
|
||||
return {'ports': []}
|
||||
return {'ports': [{'id': bottom_id, 'name': 'bottom'}]}
|
||||
return {'ports': port_list}
|
||||
if 'marker' in params:
|
||||
sorted_list = sorted(port_list, key=lambda x: x['id'])
|
||||
for i, port in enumerate(sorted_list):
|
||||
if port['id'] == params['marker']:
|
||||
return {'ports': sorted_list[i + 1:]}
|
||||
if 'filters' in params and params['filters'].get('id'):
|
||||
return_list = []
|
||||
for port in port_list:
|
||||
if port['id'] in params['filters']['id']:
|
||||
return_list.append(port)
|
||||
return {'ports': return_list}
|
||||
return {'ports': port_list}
|
||||
|
||||
def get(self, path, params=None):
|
||||
if self.pod_name == 'pod_1' or self.pod_name == 'pod_2':
|
||||
return self._get(params)
|
||||
res_list = self._get(params)['ports']
|
||||
return_list = []
|
||||
for res in res_list:
|
||||
return_list.append(copy.copy(res))
|
||||
return {'ports': return_list}
|
||||
else:
|
||||
raise Exception()
|
||||
|
||||
|
||||
class FakeClient(object):
|
||||
|
||||
_res_map = {'pod_1': {'network': BOTTOM1_NETS,
|
||||
'subnet': BOTTOM1_SUBNETS,
|
||||
'port': BOTTOM1_PORTS,
|
||||
'router': BOTTOM1_ROUTERS},
|
||||
'pod_2': {'network': BOTTOM2_NETS,
|
||||
'subnet': BOTTOM2_SUBNETS,
|
||||
'port': BOTTOM2_PORTS,
|
||||
'router': BOTTOM2_ROUTERS}}
|
||||
|
||||
def __init__(self, pod_name):
|
||||
self.pod_name = pod_name
|
||||
self.client = FakeNeutronClient(self.pod_name)
|
||||
|
@ -63,6 +140,31 @@ class FakeClient(object):
|
|||
def get_native_client(self, resource, ctx):
|
||||
return self.client
|
||||
|
||||
def create_resources(self, _type, ctx, body):
|
||||
if _type == 'port':
|
||||
res_list = self._res_map[self.pod_name][_type]
|
||||
subnet_ips_map = {}
|
||||
for res in res_list:
|
||||
fixed_ips = res.get('fixed_ips', [])
|
||||
for fixed_ip in fixed_ips:
|
||||
if fixed_ip['subnet_id'] not in subnet_ips_map:
|
||||
subnet_ips_map[fixed_ip['subnet_id']] = set()
|
||||
subnet_ips_map[fixed_ip['subnet_id']].add(
|
||||
fixed_ip['ip_address'])
|
||||
fixed_ips = body[_type].get('fixed_ips', [])
|
||||
for fixed_ip in fixed_ips:
|
||||
if fixed_ip['ip_address'] in subnet_ips_map.get(
|
||||
fixed_ip['subnet_id'], set()):
|
||||
raise q_exceptions.IpAddressInUseClient()
|
||||
if 'device_id' not in body[_type]:
|
||||
body[_type]['device_id'] = ''
|
||||
if 'id' not in body[_type]:
|
||||
body[_type]['id'] = uuidutils.generate_uuid()
|
||||
res_list = self._res_map[self.pod_name][_type]
|
||||
res = dict(body[_type])
|
||||
res_list.append(res)
|
||||
return res
|
||||
|
||||
def list_ports(self, ctx, filters=None):
|
||||
filter_dict = {}
|
||||
filters = filters or []
|
||||
|
@ -73,12 +175,28 @@ class FakeClient(object):
|
|||
return self.client.get('', {'filters': filter_dict})['ports']
|
||||
|
||||
def get_ports(self, ctx, port_id):
|
||||
return self.client.get('')['ports'][0]
|
||||
return self.client.get(
|
||||
'', params={'filters': {'id': [port_id]}})['ports'][0]
|
||||
|
||||
def delete_ports(self, ctx, port_id):
|
||||
index = -1
|
||||
for i, port in enumerate(self._res_map[self.pod_name]['port']):
|
||||
if port['id'] == port_id:
|
||||
index = i
|
||||
if index != -1:
|
||||
del self._res_map[self.pod_name]['port'][index]
|
||||
|
||||
def action_routers(self, ctx, action, *args, **kwargs):
|
||||
# only for mock purpose
|
||||
pass
|
||||
|
||||
|
||||
class FakeNeutronContext(object):
|
||||
def __init__(self):
|
||||
self._session = None
|
||||
self.is_admin = True
|
||||
self.is_advsvc = False
|
||||
self.tenant_id = ''
|
||||
|
||||
@property
|
||||
def session(self):
|
||||
|
@ -86,41 +204,128 @@ class FakeNeutronContext(object):
|
|||
self._session = FakeSession()
|
||||
return self._session
|
||||
|
||||
def elevated(self):
|
||||
return self
|
||||
|
||||
|
||||
def delete_model(res_list, model_obj, key=None):
|
||||
if not res_list:
|
||||
return
|
||||
if not key:
|
||||
key = 'id'
|
||||
if key not in res_list[0]:
|
||||
return
|
||||
index = -1
|
||||
for i, res in enumerate(res_list):
|
||||
if res[key] == model_obj[key]:
|
||||
index = i
|
||||
break
|
||||
if index != -1:
|
||||
del res_list[index]
|
||||
return
|
||||
|
||||
|
||||
def link_models(model_obj, model_dict, foreign_table, foreign_key, table, key,
|
||||
link_prop):
|
||||
if model_obj.__tablename__ == foreign_table:
|
||||
for instance in RES_MAP[table]:
|
||||
if instance[key] == model_dict[foreign_key]:
|
||||
if link_prop not in instance:
|
||||
instance[link_prop] = []
|
||||
instance[link_prop].append(model_dict)
|
||||
|
||||
|
||||
def unlink_models(res_list, model_dict, foreign_key, key, link_prop,
|
||||
link_ele_foreign_key, link_ele_key):
|
||||
if foreign_key not in model_dict:
|
||||
return
|
||||
for instance in res_list:
|
||||
if instance[key] == model_dict[foreign_key]:
|
||||
if link_prop not in instance:
|
||||
return
|
||||
index = -1
|
||||
for i, res in enumerate(instance[link_prop]):
|
||||
if res[link_ele_foreign_key] == model_dict[link_ele_key]:
|
||||
index = i
|
||||
break
|
||||
if index != -1:
|
||||
del instance[link_prop][index]
|
||||
return
|
||||
|
||||
|
||||
class FakeQuery(object):
|
||||
def __init__(self, records):
|
||||
def __init__(self, records, table):
|
||||
self.records = records
|
||||
self.table = table
|
||||
self.index = 0
|
||||
|
||||
def _handle_pagination_by_id(self, record_id):
|
||||
for i, record in enumerate(self.records):
|
||||
if record['id'] == record_id:
|
||||
if i + 1 < len(self.records):
|
||||
return FakeQuery(self.records[i + 1:])
|
||||
return FakeQuery(self.records[i + 1:], self.table)
|
||||
else:
|
||||
return FakeQuery([])
|
||||
return FakeQuery([])
|
||||
return FakeQuery([], self.table)
|
||||
return FakeQuery([], self.table)
|
||||
|
||||
def _handle_filter_by_id(self, record_id):
|
||||
for i, record in enumerate(self.records):
|
||||
if record['id'] == record_id:
|
||||
return FakeQuery(self.records[i:i + 1])
|
||||
return FakeQuery([])
|
||||
def _handle_filter(self, keys, values):
|
||||
filtered_list = []
|
||||
for record in self.records:
|
||||
selected = True
|
||||
for i, key in enumerate(keys):
|
||||
if key not in record or record[key] != values[i]:
|
||||
selected = False
|
||||
break
|
||||
if selected:
|
||||
filtered_list.append(record)
|
||||
return FakeQuery(filtered_list, self.table)
|
||||
|
||||
def filter(self, criteria):
|
||||
if hasattr(criteria.right, 'value'):
|
||||
record_id = criteria.right.value
|
||||
return self._handle_pagination_by_id(record_id)
|
||||
def filter(self, *criteria):
|
||||
if hasattr(criteria[0].right, 'value'):
|
||||
keys = [e.left.name for e in criteria]
|
||||
values = [e.right.value for e in criteria]
|
||||
else:
|
||||
record_id = criteria.expression.right.element.clauses[0].value
|
||||
return self._handle_filter_by_id(record_id)
|
||||
keys = [e.expression.left.name for e in criteria]
|
||||
values = [
|
||||
e.expression.right.element.clauses[0].value for e in criteria]
|
||||
if criteria[0].expression.operator.__name__ == 'lt':
|
||||
return self._handle_pagination_by_id(values[0])
|
||||
else:
|
||||
return self._handle_filter(keys, values)
|
||||
|
||||
def filter_by(self, **kwargs):
|
||||
filtered_list = []
|
||||
for record in self.records:
|
||||
selected = True
|
||||
for key, value in kwargs.iteritems():
|
||||
if key not in record or record[key] != value:
|
||||
selected = False
|
||||
break
|
||||
if selected:
|
||||
filtered_list.append(record)
|
||||
return FakeQuery(filtered_list, self.table)
|
||||
|
||||
def delete(self):
|
||||
for model_obj in self.records:
|
||||
unlink_models(RES_MAP['routers'], model_obj, 'router_id',
|
||||
'id', 'attached_ports', 'port_id', 'port_id')
|
||||
delete_model(RES_MAP[self.table], model_obj, key='port_id')
|
||||
|
||||
def outerjoin(self, *props, **kwargs):
|
||||
return FakeQuery(self.records, self.table)
|
||||
|
||||
def join(self, *props, **kwargs):
|
||||
return FakeQuery(self.records, self.table)
|
||||
|
||||
def order_by(self, func):
|
||||
self.records.sort(key=lambda x: x['id'])
|
||||
return FakeQuery(self.records)
|
||||
return FakeQuery(self.records, self.table)
|
||||
|
||||
def enable_eagerloads(self, value):
|
||||
return FakeQuery(self.records, self.table)
|
||||
|
||||
def limit(self, limit):
|
||||
return FakeQuery(self.records[:limit])
|
||||
return FakeQuery(self.records[:limit], self.table)
|
||||
|
||||
def next(self):
|
||||
if self.index >= len(self.records):
|
||||
|
@ -128,6 +333,17 @@ class FakeQuery(object):
|
|||
self.index += 1
|
||||
return self.records[self.index - 1]
|
||||
|
||||
def one(self):
|
||||
if len(self.records) == 0:
|
||||
raise exc.NoResultFound()
|
||||
return self.records[0]
|
||||
|
||||
def first(self):
|
||||
return self.one()
|
||||
|
||||
def all(self):
|
||||
return self.records
|
||||
|
||||
def __iter__(self):
|
||||
return self
|
||||
|
||||
|
@ -140,20 +356,85 @@ class FakeSession(object):
|
|||
def __exit__(self, type, value, traceback):
|
||||
pass
|
||||
|
||||
def begin(self):
|
||||
def begin(self, subtransactions=False):
|
||||
return FakeSession.WithWrapper()
|
||||
|
||||
def begin_nested(self):
|
||||
return FakeSession.WithWrapper()
|
||||
|
||||
def query(self, model):
|
||||
return FakeQuery([{'id': 'top_id_0', 'name': 'top'},
|
||||
{'id': 'top_id_1', 'name': 'top'},
|
||||
{'id': 'top_id_2', 'name': 'top'},
|
||||
{'id': 'top_id_3', 'name': 'top'}])
|
||||
if model.__tablename__ not in RES_MAP:
|
||||
return FakeQuery([], model.__tablename__)
|
||||
return FakeQuery(RES_MAP[model.__tablename__],
|
||||
model.__tablename__)
|
||||
|
||||
def add(self, model_obj):
|
||||
if model_obj.__tablename__ not in RES_MAP:
|
||||
return
|
||||
model_dict = DotDict(model_obj._as_dict())
|
||||
|
||||
if model_obj.__tablename__ == 'networks':
|
||||
model_dict['subnets'] = []
|
||||
if model_obj.__tablename__ == 'ports':
|
||||
model_dict['dhcp_opts'] = []
|
||||
model_dict['security_groups'] = []
|
||||
|
||||
link_models(model_obj, model_dict,
|
||||
'subnetpoolprefixes', 'subnetpool_id',
|
||||
'subnetpools', 'id', 'prefixes')
|
||||
link_models(model_obj, model_dict,
|
||||
'ipallocations', 'port_id',
|
||||
'ports', 'id', 'fixed_ips')
|
||||
link_models(model_obj, model_dict,
|
||||
'subnets', 'network_id', 'networks', 'id', 'subnets')
|
||||
|
||||
if model_obj.__tablename__ == 'routerports':
|
||||
for port in TOP_PORTS:
|
||||
if port['id'] == model_dict['port_id']:
|
||||
model_dict['port'] = port
|
||||
port.update(model_dict)
|
||||
break
|
||||
link_models(model_obj, model_dict,
|
||||
'routerports', 'router_id',
|
||||
'routers', 'id', 'attached_ports')
|
||||
|
||||
RES_MAP[model_obj.__tablename__].append(model_dict)
|
||||
|
||||
def _cascade_delete(self, model_dict, foreign_key, table, key):
|
||||
if foreign_key not in model_dict:
|
||||
return
|
||||
index = -1
|
||||
for i, instance in enumerate(RES_MAP[table]):
|
||||
if instance[foreign_key] == model_dict[key]:
|
||||
index = i
|
||||
break
|
||||
if index != -1:
|
||||
del RES_MAP[table][index]
|
||||
|
||||
def delete(self, model_obj):
|
||||
unlink_models(RES_MAP['routers'], model_obj, 'router_id', 'id',
|
||||
'attached_ports', 'port_id', 'id')
|
||||
self._cascade_delete(model_obj, 'port_id', 'ipallocations', 'id')
|
||||
for res_list in RES_MAP.values():
|
||||
delete_model(res_list, model_obj)
|
||||
|
||||
def flush(self):
|
||||
pass
|
||||
|
||||
|
||||
class FakePlugin(plugin.TricirclePlugin):
|
||||
def __init__(self):
|
||||
self.clients = {'pod_1': t_client.Client('pod_1'),
|
||||
'pod_2': t_client.Client('pod_2')}
|
||||
self.set_ipam_backend()
|
||||
|
||||
def _get_client(self, pod_name):
|
||||
return FakeClient(pod_name)
|
||||
|
||||
def _make_network_dict(self, network, fields=None,
|
||||
process_extensions=True, context=None):
|
||||
return network
|
||||
|
||||
def _make_subnet_dict(self, subnet, fields=None, context=None):
|
||||
return subnet
|
||||
|
||||
|
||||
def fake_get_context_from_neutron_context(q_context):
|
||||
|
@ -164,16 +445,36 @@ def fake_get_client(self, pod_name):
|
|||
return FakeClient(pod_name)
|
||||
|
||||
|
||||
def fake_get_ports_from_db_with_number(self, ctx, number,
|
||||
last_port_id, top_set):
|
||||
return [{'id': 'top_id_0'}]
|
||||
def fake_make_network_dict(self, network, fields=None,
|
||||
process_extensions=True, context=None):
|
||||
return network
|
||||
|
||||
|
||||
def fake_get_ports_from_top(self, context, top_bottom_map):
|
||||
return [{'id': 'top_id_0'}]
|
||||
def fake_make_subnet_dict(self, subnet, fields=None, context=None):
|
||||
return subnet
|
||||
|
||||
|
||||
class ModelsTest(unittest.TestCase):
|
||||
@staticmethod
|
||||
def fake_generate_ip(context, subnets):
|
||||
suffix = 1
|
||||
for allocation in TOP_IPALLOCATIONS:
|
||||
if allocation['subnet_id'] == subnets[0]['id']:
|
||||
ip = allocation['ip_address']
|
||||
current_suffix = int(ip[ip.rindex('.') + 1:])
|
||||
if current_suffix >= suffix:
|
||||
suffix = current_suffix
|
||||
suffix += 1
|
||||
cidr = subnets[0]['cidr']
|
||||
new_ip = cidr[:cidr.rindex('.') + 1] + ('%d' % suffix)
|
||||
return {'ip_address': new_ip, 'subnet_id': subnets[0]['id']}
|
||||
|
||||
|
||||
@staticmethod
|
||||
def _allocate_specific_ip(context, subnet_id, ip_address):
|
||||
pass
|
||||
|
||||
|
||||
class PluginTest(unittest.TestCase):
|
||||
def setUp(self):
|
||||
core.initialize()
|
||||
core.ModelBase.metadata.create_all(core.get_engine())
|
||||
|
@ -205,6 +506,14 @@ class ModelsTest(unittest.TestCase):
|
|||
core.create_resource(self.context, models.ResourceRouting, route1)
|
||||
core.create_resource(self.context, models.ResourceRouting, route2)
|
||||
|
||||
def _basic_port_setup(self):
|
||||
TOP_PORTS.extend([{'id': 'top_id_0', 'name': 'top'},
|
||||
{'id': 'top_id_1', 'name': 'top'},
|
||||
{'id': 'top_id_2', 'name': 'top'},
|
||||
{'id': 'top_id_3', 'name': 'top'}])
|
||||
BOTTOM1_PORTS.append({'id': 'bottom_id_1', 'name': 'bottom'})
|
||||
BOTTOM2_PORTS.append({'id': 'bottom_id_2', 'name': 'bottom'})
|
||||
|
||||
@patch.object(context, 'get_context_from_neutron_context',
|
||||
new=fake_get_context_from_neutron_context)
|
||||
@patch.object(plugin.TricirclePlugin, '_get_client',
|
||||
|
@ -212,6 +521,7 @@ class ModelsTest(unittest.TestCase):
|
|||
@patch.object(db_base_plugin_v2.NeutronDbPluginV2, 'get_port')
|
||||
def test_get_port(self, mock_plugin_method):
|
||||
self._basic_pod_route_setup()
|
||||
self._basic_port_setup()
|
||||
|
||||
fake_plugin = FakePlugin()
|
||||
neutron_context = FakeNeutronContext()
|
||||
|
@ -232,6 +542,7 @@ class ModelsTest(unittest.TestCase):
|
|||
new=fake_get_client)
|
||||
def test_get_ports_pagination(self):
|
||||
self._basic_pod_route_setup()
|
||||
self._basic_port_setup()
|
||||
|
||||
fake_plugin = FakePlugin()
|
||||
neutron_context = FakeNeutronContext()
|
||||
|
@ -260,6 +571,7 @@ class ModelsTest(unittest.TestCase):
|
|||
new=fake_get_client)
|
||||
def test_get_ports_filters(self):
|
||||
self._basic_pod_route_setup()
|
||||
self._basic_port_setup()
|
||||
|
||||
fake_plugin = FakePlugin()
|
||||
neutron_context = FakeNeutronContext()
|
||||
|
@ -275,10 +587,11 @@ class ModelsTest(unittest.TestCase):
|
|||
|
||||
@patch.object(context, 'get_context_from_neutron_context')
|
||||
@patch.object(db_base_plugin_v2.NeutronDbPluginV2, 'delete_port')
|
||||
@patch.object(t_client.Client, 'delete_resources')
|
||||
@patch.object(FakeClient, 'delete_ports')
|
||||
def test_delete_port(self, mock_client_method, mock_plugin_method,
|
||||
mock_context_method):
|
||||
self._basic_pod_route_setup()
|
||||
self._basic_port_setup()
|
||||
|
||||
fake_plugin = FakePlugin()
|
||||
neutron_context = FakeNeutronContext()
|
||||
|
@ -291,8 +604,347 @@ class ModelsTest(unittest.TestCase):
|
|||
calls = [mock.call(neutron_context, 'top_id_0'),
|
||||
mock.call(neutron_context, 'top_id_1')]
|
||||
mock_plugin_method.assert_has_calls(calls)
|
||||
mock_client_method.assert_called_once_with('port', tricircle_context,
|
||||
mock_client_method.assert_called_once_with(tricircle_context,
|
||||
'bottom_id_1')
|
||||
|
||||
@patch.object(context, 'get_context_from_neutron_context')
|
||||
@patch.object(db_base_plugin_v2.NeutronDbPluginV2, 'update_network')
|
||||
@patch.object(db_base_plugin_v2.NeutronDbPluginV2, 'create_network')
|
||||
def test_network_az(self, mock_create, mock_update, mock_context):
|
||||
self._basic_pod_route_setup()
|
||||
|
||||
fake_plugin = FakePlugin()
|
||||
neutron_context = FakeNeutronContext()
|
||||
tricircle_context = context.get_db_context()
|
||||
mock_context.return_value = tricircle_context
|
||||
|
||||
network = {'network': {
|
||||
'id': 'net_id', 'name': 'net_az',
|
||||
'availability_zone_hints': ['az_name_1', 'az_name_2']}}
|
||||
mock_create.return_value = {'id': 'net_id', 'name': 'net_az'}
|
||||
mock_update.return_value = network['network']
|
||||
fake_plugin.create_network(neutron_context, network)
|
||||
mock_update.assert_called_once_with(
|
||||
neutron_context, 'net_id',
|
||||
{'network': {
|
||||
'availability_zone_hints': '["az_name_1", "az_name_2"]'}})
|
||||
|
||||
err_network = {'network': {
|
||||
'id': 'net_id', 'name': 'net_az',
|
||||
'availability_zone_hints': ['az_name_1', 'az_name_3']}}
|
||||
mock_create.return_value = {'id': 'net_id', 'name': 'net_az'}
|
||||
self.assertRaises(az_ext.AvailabilityZoneNotFound,
|
||||
fake_plugin.create_network,
|
||||
neutron_context, err_network)
|
||||
|
||||
@patch.object(context, 'get_context_from_neutron_context')
|
||||
def test_create(self, mock_context):
|
||||
self._basic_pod_route_setup()
|
||||
|
||||
fake_plugin = FakePlugin()
|
||||
neutron_context = FakeNeutronContext()
|
||||
tricircle_context = context.get_db_context()
|
||||
mock_context.return_value = tricircle_context
|
||||
|
||||
network = {'network': {
|
||||
'id': 'net_id', 'name': 'net_az',
|
||||
'admin_state_up': True, 'shared': False,
|
||||
'availability_zone_hints': ['az_name_1', 'az_name_2']}}
|
||||
fake_plugin.create_network(neutron_context, network)
|
||||
|
||||
@patch.object(ipam_non_pluggable_backend.IpamNonPluggableBackend,
|
||||
'_generate_ip', new=fake_generate_ip)
|
||||
@patch.object(db_base_plugin_common.DbBasePluginCommon,
|
||||
'_make_subnet_dict', new=fake_make_subnet_dict)
|
||||
@patch.object(context, 'get_context_from_neutron_context')
|
||||
@patch.object(subnet_alloc.SubnetAllocator, '_lock_subnetpool',
|
||||
new=mock.Mock)
|
||||
def test_prepare_element(self, mock_context):
|
||||
self._basic_pod_route_setup()
|
||||
|
||||
fake_plugin = FakePlugin()
|
||||
q_ctx = FakeNeutronContext()
|
||||
t_ctx = context.get_db_context()
|
||||
mock_context.return_value = t_ctx
|
||||
|
||||
for pod in db_api.list_pods(t_ctx):
|
||||
if not pod['az_name']:
|
||||
t_pod = pod
|
||||
else:
|
||||
b_pod = pod
|
||||
|
||||
# test _prepare_top_element
|
||||
pool_id = fake_plugin._get_bridge_subnet_pool_id(
|
||||
t_ctx, q_ctx, 'project_id', t_pod)
|
||||
net, subnet = fake_plugin._get_bridge_network_subnet(
|
||||
t_ctx, q_ctx, 'project_id', t_pod, pool_id)
|
||||
port = fake_plugin._get_bridge_interface(
|
||||
t_ctx, q_ctx, 'project_id', pod, net['id'], 'b_router_id')
|
||||
|
||||
top_entry_map = {}
|
||||
with t_ctx.session.begin():
|
||||
for entry in core.query_resource(
|
||||
t_ctx, models.ResourceRouting,
|
||||
[{'key': 'pod_id', 'comparator': 'eq',
|
||||
'value': 'pod_id_0'}], []):
|
||||
top_entry_map[entry['resource_type']] = entry
|
||||
self.assertEqual(net['id'], subnet['network_id'])
|
||||
self.assertEqual(net['id'], port['network_id'])
|
||||
self.assertEqual(subnet['id'], port['fixed_ips'][0]['subnet_id'])
|
||||
self.assertEqual(top_entry_map['network']['bottom_id'], net['id'])
|
||||
self.assertEqual(top_entry_map['subnet']['bottom_id'], subnet['id'])
|
||||
self.assertEqual(top_entry_map['port']['bottom_id'], port['id'])
|
||||
|
||||
# test _prepare_bottom_element
|
||||
_, b_port_id = fake_plugin._get_bottom_bridge_elements(
|
||||
q_ctx, 'project_id', b_pod, net, subnet, port)
|
||||
b_port = fake_plugin._get_client(b_pod['pod_name']).get_ports(
|
||||
t_ctx, b_port_id)
|
||||
|
||||
bottom_entry_map = {}
|
||||
with t_ctx.session.begin():
|
||||
for entry in core.query_resource(
|
||||
t_ctx, models.ResourceRouting,
|
||||
[{'key': 'pod_id', 'comparator': 'eq',
|
||||
'value': b_pod['pod_id']}], []):
|
||||
bottom_entry_map[entry['resource_type']] = entry
|
||||
self.assertEqual(bottom_entry_map['network']['top_id'], net['id'])
|
||||
self.assertEqual(bottom_entry_map['network']['bottom_id'],
|
||||
b_port['network_id'])
|
||||
self.assertEqual(bottom_entry_map['subnet']['top_id'], subnet['id'])
|
||||
self.assertEqual(bottom_entry_map['subnet']['bottom_id'],
|
||||
b_port['fixed_ips'][0]['subnet_id'])
|
||||
self.assertEqual(bottom_entry_map['port']['top_id'], port['id'])
|
||||
self.assertEqual(bottom_entry_map['port']['bottom_id'], b_port_id)
|
||||
|
||||
def _prepare_router_test(self, tenant_id):
|
||||
t_net_id = uuidutils.generate_uuid()
|
||||
t_subnet_id = uuidutils.generate_uuid()
|
||||
t_router_id = uuidutils.generate_uuid()
|
||||
|
||||
t_net = {
|
||||
'id': t_net_id,
|
||||
'name': 'top_net',
|
||||
'availability_zone_hints': ['az_name_1'],
|
||||
'tenant_id': tenant_id
|
||||
}
|
||||
t_subnet = {
|
||||
'id': t_subnet_id,
|
||||
'network_id': t_net_id,
|
||||
'name': 'top_subnet',
|
||||
'ip_version': 4,
|
||||
'cidr': '10.0.0.0/24',
|
||||
'allocation_pools': [],
|
||||
'enable_dhcp': True,
|
||||
'gateway_ip': '10.0.0.1',
|
||||
'ipv6_address_mode': '',
|
||||
'ipv6_ra_mode': '',
|
||||
'tenant_id': tenant_id
|
||||
}
|
||||
t_router = {
|
||||
'id': t_router_id,
|
||||
'name': 'top_router',
|
||||
'distributed': False,
|
||||
'tenant_id': tenant_id,
|
||||
'attached_ports': []
|
||||
}
|
||||
TOP_NETS.append(DotDict(t_net))
|
||||
TOP_SUBNETS.append(DotDict(t_subnet))
|
||||
TOP_ROUTERS.append(DotDict(t_router))
|
||||
|
||||
return t_net_id, t_subnet_id, t_router_id
|
||||
|
||||
@patch.object(ipam_non_pluggable_backend.IpamNonPluggableBackend,
|
||||
'_allocate_specific_ip', new=_allocate_specific_ip)
|
||||
@patch.object(ipam_non_pluggable_backend.IpamNonPluggableBackend,
|
||||
'_generate_ip', new=fake_generate_ip)
|
||||
@patch.object(db_base_plugin_common.DbBasePluginCommon,
|
||||
'_make_subnet_dict', new=fake_make_subnet_dict)
|
||||
@patch.object(subnet_alloc.SubnetAllocator, '_lock_subnetpool',
|
||||
new=mock.Mock)
|
||||
@patch.object(FakeClient, 'action_routers')
|
||||
@patch.object(context, 'get_context_from_neutron_context')
|
||||
def test_add_interface(self, mock_context, mock_action):
|
||||
self._basic_pod_route_setup()
|
||||
|
||||
fake_plugin = FakePlugin()
|
||||
q_ctx = FakeNeutronContext()
|
||||
t_ctx = context.get_db_context()
|
||||
mock_context.return_value = t_ctx
|
||||
|
||||
tenant_id = 'test_tenant_id'
|
||||
t_net_id, t_subnet_id, t_router_id = self._prepare_router_test(
|
||||
tenant_id)
|
||||
|
||||
t_port_id = fake_plugin.add_router_interface(
|
||||
q_ctx, t_router_id, {'subnet_id': t_subnet_id})['port_id']
|
||||
_, b_port_id = db_api.get_bottom_mappings_by_top_id(
|
||||
t_ctx, t_port_id, 'port')[0]
|
||||
b_port = fake_plugin._get_client('pod_1').get_ports(q_ctx, b_port_id)
|
||||
b_net_id = b_port['network_id']
|
||||
b_subnet_id = b_port['fixed_ips'][0]['subnet_id']
|
||||
_, map_net_id = db_api.get_bottom_mappings_by_top_id(
|
||||
t_ctx, t_net_id, 'network')[0]
|
||||
_, map_subnet_id = db_api.get_bottom_mappings_by_top_id(
|
||||
t_ctx, t_subnet_id, 'subnet')[0]
|
||||
_, b_router_id = db_api.get_bottom_mappings_by_top_id(
|
||||
t_ctx, t_router_id, 'router')[0]
|
||||
|
||||
self.assertEqual(b_net_id, map_net_id)
|
||||
self.assertEqual(b_subnet_id, map_subnet_id)
|
||||
|
||||
bridge_port_name = constants.bridge_port_name % (tenant_id,
|
||||
b_router_id)
|
||||
_, t_bridge_port_id = db_api.get_bottom_mappings_by_top_id(
|
||||
t_ctx, bridge_port_name, 'port')[0]
|
||||
_, b_bridge_port_id = db_api.get_bottom_mappings_by_top_id(
|
||||
t_ctx, t_bridge_port_id, 'port')[0]
|
||||
|
||||
t_net_id = uuidutils.generate_uuid()
|
||||
t_subnet_id = uuidutils.generate_uuid()
|
||||
t_net = {
|
||||
'id': t_net_id,
|
||||
'name': 'another_top_net',
|
||||
'availability_zone_hints': ['az_name_1'],
|
||||
'tenant_id': tenant_id
|
||||
}
|
||||
t_subnet = {
|
||||
'id': t_subnet_id,
|
||||
'network_id': t_net_id,
|
||||
'name': 'another_top_subnet',
|
||||
'ip_version': 4,
|
||||
'cidr': '10.0.1.0/24',
|
||||
'allocation_pools': [],
|
||||
'enable_dhcp': True,
|
||||
'gateway_ip': '10.0.1.1',
|
||||
'ipv6_address_mode': '',
|
||||
'ipv6_ra_mode': '',
|
||||
'tenant_id': tenant_id
|
||||
}
|
||||
TOP_NETS.append(DotDict(t_net))
|
||||
TOP_SUBNETS.append(DotDict(t_subnet))
|
||||
|
||||
# action_routers is mocked, manually add device_id
|
||||
for port in BOTTOM1_PORTS:
|
||||
if port['id'] == b_bridge_port_id:
|
||||
port['device_id'] = b_router_id
|
||||
|
||||
another_t_port_id = fake_plugin.add_router_interface(
|
||||
q_ctx, t_router_id, {'subnet_id': t_subnet_id})['port_id']
|
||||
_, another_b_port_id = db_api.get_bottom_mappings_by_top_id(
|
||||
t_ctx, another_t_port_id, 'port')[0]
|
||||
another_b_port = fake_plugin._get_client('pod_1').get_ports(
|
||||
q_ctx, another_b_port_id)
|
||||
|
||||
calls = [mock.call(t_ctx, 'add_interface', b_router_id,
|
||||
{'port_id': b_bridge_port_id}),
|
||||
mock.call(t_ctx, 'add_interface', b_router_id,
|
||||
{'port_id': b_port['id']}),
|
||||
mock.call(t_ctx, 'add_interface', b_router_id,
|
||||
{'port_id': another_b_port['id']})]
|
||||
mock_action.assert_has_calls(calls)
|
||||
self.assertEqual(mock_action.call_count, 3)
|
||||
|
||||
@patch.object(ipam_non_pluggable_backend.IpamNonPluggableBackend,
|
||||
'_allocate_specific_ip', new=_allocate_specific_ip)
|
||||
@patch.object(ipam_non_pluggable_backend.IpamNonPluggableBackend,
|
||||
'_generate_ip', new=fake_generate_ip)
|
||||
@patch.object(db_base_plugin_common.DbBasePluginCommon,
|
||||
'_make_subnet_dict', new=fake_make_subnet_dict)
|
||||
@patch.object(subnet_alloc.SubnetAllocator, '_lock_subnetpool',
|
||||
new=mock.Mock)
|
||||
@patch.object(FakeClient, 'action_routers')
|
||||
@patch.object(context, 'get_context_from_neutron_context')
|
||||
def test_add_interface_exception(self, mock_context, mock_action):
|
||||
self._basic_pod_route_setup()
|
||||
|
||||
fake_plugin = FakePlugin()
|
||||
q_ctx = FakeNeutronContext()
|
||||
t_ctx = context.get_db_context()
|
||||
mock_context.return_value = t_ctx
|
||||
|
||||
tenant_id = 'test_tenant_id'
|
||||
t_net_id, t_subnet_id, t_router_id = self._prepare_router_test(
|
||||
tenant_id)
|
||||
|
||||
with t_ctx.session.begin():
|
||||
entries = core.query_resource(t_ctx, models.ResourceRouting,
|
||||
[{'key': 'resource_type',
|
||||
'comparator': 'eq',
|
||||
'value': 'port'}], [])
|
||||
entry_num = len(entries)
|
||||
|
||||
mock_action.side_effect = q_exceptions.ConnectionFailed
|
||||
self.assertRaises(q_exceptions.ConnectionFailed,
|
||||
fake_plugin.add_router_interface,
|
||||
q_ctx, t_router_id, {'subnet_id': t_subnet_id})
|
||||
self.assertEqual(0, len(TOP_ROUTERS[0]['attached_ports']))
|
||||
|
||||
with t_ctx.session.begin():
|
||||
entries = core.query_resource(t_ctx, models.ResourceRouting,
|
||||
[{'key': 'resource_type',
|
||||
'comparator': 'eq',
|
||||
'value': 'port'}], [])
|
||||
# two new entries, for top and bottom bridge ports
|
||||
self.assertEqual(entry_num + 2, len(entries))
|
||||
# top and bottom interface is deleted, only bridge port left
|
||||
self.assertEqual(1, len(TOP_PORTS))
|
||||
self.assertEqual(1, len(BOTTOM1_PORTS))
|
||||
|
||||
mock_action.side_effect = None
|
||||
fake_plugin.add_router_interface(q_ctx, t_router_id,
|
||||
{'subnet_id': t_subnet_id})
|
||||
# bottom interface and bridge port
|
||||
self.assertEqual(2, len(BOTTOM1_PORTS))
|
||||
with t_ctx.session.begin():
|
||||
entries = core.query_resource(t_ctx, models.ResourceRouting,
|
||||
[{'key': 'resource_type',
|
||||
'comparator': 'eq',
|
||||
'value': 'port'}], [])
|
||||
# one more entry, for bottom interface
|
||||
self.assertEqual(entry_num + 3, len(entries))
|
||||
|
||||
@patch.object(ipam_non_pluggable_backend.IpamNonPluggableBackend,
|
||||
'_allocate_specific_ip', new=_allocate_specific_ip)
|
||||
@patch.object(ipam_non_pluggable_backend.IpamNonPluggableBackend,
|
||||
'_generate_ip', new=fake_generate_ip)
|
||||
@patch.object(db_base_plugin_common.DbBasePluginCommon,
|
||||
'_make_subnet_dict', new=fake_make_subnet_dict)
|
||||
@patch.object(subnet_alloc.SubnetAllocator, '_lock_subnetpool',
|
||||
new=mock.Mock)
|
||||
@patch.object(FakeClient, 'delete_ports')
|
||||
@patch.object(FakeClient, 'action_routers')
|
||||
@patch.object(context, 'get_context_from_neutron_context')
|
||||
def test_add_interface_exception_port_left(self, mock_context,
|
||||
mock_action, mock_delete):
|
||||
self._basic_pod_route_setup()
|
||||
|
||||
fake_plugin = FakePlugin()
|
||||
q_ctx = FakeNeutronContext()
|
||||
t_ctx = context.get_db_context()
|
||||
mock_context.return_value = t_ctx
|
||||
|
||||
tenant_id = 'test_tenant_id'
|
||||
t_net_id, t_subnet_id, t_router_id = self._prepare_router_test(
|
||||
tenant_id)
|
||||
mock_action.side_effect = q_exceptions.ConnectionFailed
|
||||
mock_delete.side_effect = q_exceptions.ConnectionFailed
|
||||
self.assertRaises(q_exceptions.ConnectionFailed,
|
||||
fake_plugin.add_router_interface,
|
||||
q_ctx, t_router_id, {'subnet_id': t_subnet_id})
|
||||
# fail to delete bottom interface, so top interface is also there
|
||||
self.assertEqual(1, len(TOP_ROUTERS[0]['attached_ports']))
|
||||
|
||||
mock_action.side_effect = None
|
||||
mock_delete.side_effect = None
|
||||
t_port_id = TOP_ROUTERS[0]['attached_ports'][0]['port_id']
|
||||
# test that we can reuse the left interface to attach
|
||||
fake_plugin.add_router_interface(
|
||||
q_ctx, t_router_id, {'port_id': t_port_id})
|
||||
# bottom interface and bridge port
|
||||
self.assertEqual(2, len(BOTTOM1_PORTS))
|
||||
|
||||
def tearDown(self):
|
||||
core.ModelBase.metadata.drop_all(core.get_engine())
|
||||
for res in RES_LIST:
|
||||
del res[:]
|
||||
|
|
Loading…
Reference in New Issue