Central and local plugin (part 1, base)

1. What is the problem
As discussed in the feature specification[1], we are going to
move the process of networking automation from the Nova-APIGW
to Neutron server.

2. What is the solution to the problem
Implement a new Neutron core plugin which runs in local Neutron
server to finish the networking automation process. Also, the
original Neutron core plugin is renamed as central plugin and
needs some changes to work with local plugin.

3. Which features need to be implemented in the Tricircle
   to realize the solution
With this patch, users can boot a virtual machine directly via
the local Nova server. But security group support is not covered.

DevStack script and local.conf sample are also updated.

[1] https://github.com/openstack/tricircle/blob/master/specs/ocata/local-neutron-plugin.rst

Change-Id: I6a3dc5e9af395e3035a7d218264a08b6313a248d
This commit is contained in:
zhiyuan_cai 2016-09-05 09:23:36 +08:00 committed by joehuang
parent a20a6b0ebe
commit b0789882eb
12 changed files with 854 additions and 256 deletions

View File

@ -37,14 +37,11 @@ enable_plugin tricircle https://github.com/openstack/tricircle/
# Tricircle Services
enable_service t-api
enable_service t-ngw
enable_service t-cgw
enable_service t-job
# Use Neutron instead of nova-network
disable_service n-net
enable_service q-svc
enable_service q-svc1
enable_service q-dhcp
enable_service q-agt
@ -58,3 +55,22 @@ enable_service c-sch
disable_service c-bak
# disable_service tempest
disable_service horizon
CENTRAL_REGION_NAME=CentralRegion
TRICIRCLE_NEUTRON_PORT=20001
[[post-config|$NEUTRON_CONF]]
[DEFAULT]
core_plugin=tricircle.network.local_plugin.TricirclePlugin
[client]
admin_username=admin
admin_password=$ADMIN_PASSWORD
admin_tenant=demo
auto_refresh_endpoint=True
top_pod_name=$CENTRAL_REGION_NAME
[tricircle]
real_core_plugin=neutron.plugins.ml2.plugin.Ml2Plugin
central_neutron_url=http://127.0.0.1:$TRICIRCLE_NEUTRON_PORT

View File

@ -19,9 +19,9 @@ function create_tricircle_accounts {
if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
local tricircle_api=$(get_or_create_service "tricircle" \
"Cascading" "OpenStack Cascading Service")
"Tricircle" "Cross Neutron Networking Automation Service")
get_or_create_endpoint $tricircle_api \
"$REGION_NAME" \
"$CENTRAL_REGION_NAME" \
"$SERVICE_PROTOCOL://$TRICIRCLE_API_HOST:$TRICIRCLE_API_PORT/v1.0" \
"$SERVICE_PROTOCOL://$TRICIRCLE_API_HOST:$TRICIRCLE_API_PORT/v1.0" \
"$SERVICE_PROTOCOL://$TRICIRCLE_API_HOST:$TRICIRCLE_API_PORT/v1.0"
@ -29,79 +29,6 @@ function create_tricircle_accounts {
fi
}
# create_nova_apigw_accounts() - Set up the common required nova_apigw
# service account in keystone; the gateway works as the nova api service.
# Project               User         Roles
# -----------------------------------------------------------------
# $SERVICE_TENANT_NAME  nova_apigw   service
function create_nova_apigw_accounts {
    if [[ "$ENABLED_SERVICES" =~ "t-ngw" ]]; then
        create_service_user "nova_apigw"
        if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
            # keep declaration and assignment separate so the exit status
            # of get_or_create_service is not masked by "local"
            local tricircle_nova_apigw
            tricircle_nova_apigw=$(get_or_create_service "nova" \
                "compute" "Nova Compute Service")
            # drop stale compute endpoints before registering the gateway
            remove_old_endpoint_conf "$tricircle_nova_apigw"
            get_or_create_endpoint "$tricircle_nova_apigw" \
                "$REGION_NAME" \
                "$SERVICE_PROTOCOL://$TRICIRCLE_NOVA_APIGW_HOST:$TRICIRCLE_NOVA_APIGW_PORT/v2.1/"'$(tenant_id)s' \
                "$SERVICE_PROTOCOL://$TRICIRCLE_NOVA_APIGW_HOST:$TRICIRCLE_NOVA_APIGW_PORT/v2.1/"'$(tenant_id)s' \
                "$SERVICE_PROTOCOL://$TRICIRCLE_NOVA_APIGW_HOST:$TRICIRCLE_NOVA_APIGW_PORT/v2.1/"'$(tenant_id)s'
        fi
    fi
}
# create_cinder_apigw_accounts() - Set up the common required cinder_apigw
# service account in keystone; the gateway works as the cinder api service.
# Project               User           Roles
# ---------------------------------------------------------------------
# $SERVICE_TENANT_NAME  cinder_apigw   service
function create_cinder_apigw_accounts {
    if [[ "$ENABLED_SERVICES" =~ "t-cgw" ]]; then
        create_service_user "cinder_apigw"
        if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
            # keep declaration and assignment separate so the exit status
            # of get_or_create_service is not masked by "local"
            local tricircle_cinder_apigw
            tricircle_cinder_apigw=$(get_or_create_service "cinder" \
                "volumev2" "Cinder Volume Service")
            # drop stale volume endpoints before registering the gateway
            remove_old_endpoint_conf "$tricircle_cinder_apigw"
            get_or_create_endpoint "$tricircle_cinder_apigw" \
                "$REGION_NAME" \
                "$SERVICE_PROTOCOL://$TRICIRCLE_CINDER_APIGW_HOST:$TRICIRCLE_CINDER_APIGW_PORT/v2/"'$(tenant_id)s' \
                "$SERVICE_PROTOCOL://$TRICIRCLE_CINDER_APIGW_HOST:$TRICIRCLE_CINDER_APIGW_PORT/v2/"'$(tenant_id)s' \
                "$SERVICE_PROTOCOL://$TRICIRCLE_CINDER_APIGW_HOST:$TRICIRCLE_CINDER_APIGW_PORT/v2/"'$(tenant_id)s'
        fi
    fi
}
# common config-file configuration for tricircle services
# remove_old_endpoint_conf() - Delete the public/admin/internal keystone
# endpoints previously registered for the given service in $REGION_NAME so
# they can be re-created to point at the Tricircle API gateways.
function remove_old_endpoint_conf {
    local service=$1
    local endpoint_id
    local interface
    # was previously a global assignment; make it local so the helper does
    # not leak variables into the caller's environment
    local interface_list="public admin internal"
    for interface in $interface_list; do
        endpoint_id=$(openstack endpoint list \
            --service "$service" \
            --interface "$interface" \
            --region "$REGION_NAME" \
            -c ID -f value)
        if [[ -n "$endpoint_id" ]]; then
            # Delete endpoint
            openstack endpoint delete "$endpoint_id"
        fi
    done
}
# create_tricircle_cache_dir() - Set up cache dir for tricircle
function create_tricircle_cache_dir {
@ -125,7 +52,7 @@ function init_common_tricircle_conf {
iniset $conf_file client admin_password $ADMIN_PASSWORD
iniset $conf_file client admin_tenant demo
iniset $conf_file client auto_refresh_endpoint True
iniset $conf_file client top_pod_name $REGION_NAME
iniset $conf_file client top_pod_name $CENTRAL_REGION_NAME
iniset $conf_file oslo_concurrency lock_path $TRICIRCLE_STATE_PATH/lock
}
@ -154,66 +81,21 @@ function configure_tricircle_api {
fi
}
# configure_tricircle_nova_apigw() - Write the Tricircle Nova API gateway
# configuration file; no-op unless the t-ngw service is enabled.
function configure_tricircle_nova_apigw {
    if is_service_enabled t-ngw ; then
        echo "Configuring Tricircle Nova APIGW"
        # shared [client]/database options used by all tricircle services
        init_common_tricircle_conf $TRICIRCLE_NOVA_APIGW_CONF
        setup_colorized_logging $TRICIRCLE_NOVA_APIGW_CONF DEFAULT tenant_name
        if is_service_enabled keystone; then
            create_tricircle_cache_dir
            # Configure auth token middleware
            configure_auth_token_middleware $TRICIRCLE_NOVA_APIGW_CONF tricircle \
                $TRICIRCLE_AUTH_CACHE_DIR
        else
            # without keystone fall back to unauthenticated API access
            iniset $TRICIRCLE_NOVA_APIGW_CONF DEFAULT auth_strategy noauth
        fi
    fi
}
# configure_tricircle_cinder_apigw() - Write the Tricircle Cinder API
# gateway configuration file; no-op unless the t-cgw service is enabled.
function configure_tricircle_cinder_apigw {
    if ! is_service_enabled t-cgw ; then
        return 0
    fi
    echo "Configuring Tricircle Cinder APIGW"
    # shared [client]/database options used by all tricircle services
    init_common_tricircle_conf $TRICIRCLE_CINDER_APIGW_CONF
    setup_colorized_logging $TRICIRCLE_CINDER_APIGW_CONF DEFAULT tenant_name
    if is_service_enabled keystone; then
        create_tricircle_cache_dir
        # keystone is available: wire up the auth token middleware
        configure_auth_token_middleware $TRICIRCLE_CINDER_APIGW_CONF tricircle \
            $TRICIRCLE_AUTH_CACHE_DIR
    else
        # without keystone fall back to unauthenticated API access
        iniset $TRICIRCLE_CINDER_APIGW_CONF DEFAULT auth_strategy noauth
    fi
}
# configure_tricircle_xjob() - Write the Tricircle xjob daemon configuration
# file; no-op unless the t-job service is enabled.
function configure_tricircle_xjob {
    is_service_enabled t-job || return 0
    echo "Configuring Tricircle xjob"
    # shared [client]/database options used by all tricircle services
    init_common_tricircle_conf $TRICIRCLE_XJOB_CONF
    # xjob no longer drives the API gateways for networking automation
    iniset $TRICIRCLE_XJOB_CONF DEFAULT enable_api_gateway False
    setup_colorized_logging $TRICIRCLE_XJOB_CONF DEFAULT
}
function start_new_neutron_server {
local server_index=$1
local region_name=$2
local q_port=$3
function start_central_neutron_server {
local server_index=0
local region_name=$1
local q_port=$2
get_or_create_service "neutron" "network" "Neutron Service"
get_or_create_endpoint "network" \
@ -222,10 +104,30 @@ function start_new_neutron_server {
"$Q_PROTOCOL://$SERVICE_HOST:$q_port/" \
"$Q_PROTOCOL://$SERVICE_HOST:$q_port/"
# reconfigure central neutron server to use our own central plugin
echo "Configuring central Neutron plugin for Tricircle"
cp $NEUTRON_CONF $NEUTRON_CONF.$server_index
iniset $NEUTRON_CONF.$server_index database connection `database_connection_url $Q_DB_NAME$server_index`
iniset $NEUTRON_CONF.$server_index nova region_name $region_name
iniset $NEUTRON_CONF.$server_index DEFAULT bind_port $q_port
iniset $NEUTRON_CONF.$server_index DEFAULT core_plugin "tricircle.network.central_plugin.TricirclePlugin"
iniset $NEUTRON_CONF.$server_index DEFAULT service_plugins ""
iniset $NEUTRON_CONF.$server_index DEFAULT tricircle_db_connection `database_connection_url tricircle`
iniset $NEUTRON_CONF.$server_index DEFAULT notify_nova_on_port_data_changes False
iniset $NEUTRON_CONF.$server_index DEFAULT notify_nova_on_port_status_changes False
iniset $NEUTRON_CONF.$server_index client admin_username admin
iniset $NEUTRON_CONF.$server_index client admin_password $ADMIN_PASSWORD
iniset $NEUTRON_CONF.$server_index client admin_tenant demo
iniset $NEUTRON_CONF.$server_index client auto_refresh_endpoint True
iniset $NEUTRON_CONF.$server_index client top_pod_name $CENTRAL_REGION_NAME
if [ "$Q_ML2_PLUGIN_VLAN_TYPE_OPTIONS" != "" ]; then
iniset $NEUTRON_CONF.$server_index tricircle type_drivers local,shared_vlan
iniset $NEUTRON_CONF.$server_index tricircle tenant_network_types local,shared_vlan
iniset $NEUTRON_CONF.$server_index tricircle network_vlan_ranges `echo $Q_ML2_PLUGIN_VLAN_TYPE_OPTIONS | awk -F= '{print $2}'`
iniset $NEUTRON_CONF.$server_index tricircle bridge_network_type shared_vlan
iniset $NEUTRON_CONF.$server_index tricircle enable_api_gateway False
fi
recreate_database $Q_DB_NAME$server_index
$NEUTRON_BIN_DIR/neutron-db-manage --config-file $NEUTRON_CONF.$server_index --config-file /$Q_PLUGIN_CONF_FILE upgrade head
@ -245,11 +147,9 @@ if [[ "$Q_ENABLE_TRICIRCLE" == "True" ]]; then
export NEUTRON_CREATE_INITIAL_NETWORKS=False
sudo install -d -o $STACK_USER -m 755 $TRICIRCLE_CONF_DIR
enable_service t-api t-ngw t-cgw t-job
enable_service t-api t-job
configure_tricircle_api
configure_tricircle_nova_apigw
configure_tricircle_cinder_apigw
configure_tricircle_xjob
echo export PYTHONPATH=\$PYTHONPATH:$TRICIRCLE_DIR >> $RC_DIR/.localrc.auto
@ -260,29 +160,7 @@ if [[ "$Q_ENABLE_TRICIRCLE" == "True" ]]; then
python "$TRICIRCLE_DIR/cmd/manage.py" "$TRICIRCLE_API_CONF"
if is_service_enabled q-svc ; then
start_new_neutron_server 1 $POD_REGION_NAME $TRICIRCLE_NEUTRON_PORT
# reconfigure neutron server to use our own plugin
echo "Configuring Neutron plugin for Tricircle"
Q_PLUGIN_CLASS="tricircle.network.plugin.TricirclePlugin"
iniset $NEUTRON_CONF DEFAULT core_plugin "$Q_PLUGIN_CLASS"
iniset $NEUTRON_CONF DEFAULT service_plugins ""
iniset $NEUTRON_CONF DEFAULT tricircle_db_connection `database_connection_url tricircle`
iniset $NEUTRON_CONF DEFAULT notify_nova_on_port_data_changes False
iniset $NEUTRON_CONF DEFAULT notify_nova_on_port_status_changes False
iniset $NEUTRON_CONF client admin_username admin
iniset $NEUTRON_CONF client admin_password $ADMIN_PASSWORD
iniset $NEUTRON_CONF client admin_tenant demo
iniset $NEUTRON_CONF client auto_refresh_endpoint True
iniset $NEUTRON_CONF client top_pod_name $REGION_NAME
if [ "$Q_ML2_PLUGIN_VLAN_TYPE_OPTIONS" != "" ]; then
iniset $NEUTRON_CONF tricircle type_drivers local,shared_vlan
iniset $NEUTRON_CONF tricircle tenant_network_types local,shared_vlan
iniset $NEUTRON_CONF tricircle network_vlan_ranges `echo $Q_ML2_PLUGIN_VLAN_TYPE_OPTIONS | awk -F= '{print $2}'`
iniset $NEUTRON_CONF tricircle bridge_network_type shared_vlan
fi
start_central_neutron_server $CENTRAL_REGION_NAME $TRICIRCLE_NEUTRON_PORT
fi
elif [[ "$1" == "stack" && "$2" == "extra" ]]; then
@ -295,47 +173,6 @@ if [[ "$Q_ENABLE_TRICIRCLE" == "True" ]]; then
run_process t-api "python $TRICIRCLE_API --config-file $TRICIRCLE_API_CONF"
fi
if is_service_enabled t-ngw; then
create_nova_apigw_accounts
run_process t-ngw "python $TRICIRCLE_NOVA_APIGW --config-file $TRICIRCLE_NOVA_APIGW_CONF"
# Nova services are running, but we need to re-configure them to
# move them to bottom region
iniset $NOVA_CONF neutron region_name $POD_REGION_NAME
iniset $NOVA_CONF neutron url "$Q_PROTOCOL://$SERVICE_HOST:$TRICIRCLE_NEUTRON_PORT"
iniset $NOVA_CONF cinder os_region_name $POD_REGION_NAME
get_or_create_endpoint "compute" \
"$POD_REGION_NAME" \
"$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v2.1/"'$(tenant_id)s' \
"$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v2.1/"'$(tenant_id)s' \
"$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v2.1/"'$(tenant_id)s'
stop_process n-api
stop_process n-cpu
# remove previous failure flag file since we are going to restart service
rm -f "$SERVICE_DIR/$SCREEN_NAME"/n-api.failure
rm -f "$SERVICE_DIR/$SCREEN_NAME"/n-cpu.failure
sleep 20
run_process n-api "$NOVA_BIN_DIR/nova-api"
run_process n-cpu "$NOVA_BIN_DIR/nova-compute --config-file $NOVA_CONF" $LIBVIRT_GROUP
fi
if is_service_enabled t-cgw; then
create_cinder_apigw_accounts
run_process t-cgw "python $TRICIRCLE_CINDER_APIGW --config-file $TRICIRCLE_CINDER_APIGW_CONF"
get_or_create_endpoint "volumev2" \
"$POD_REGION_NAME" \
"$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v2/"'$(tenant_id)s' \
"$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v2/"'$(tenant_id)s' \
"$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v2/"'$(tenant_id)s'
fi
if is_service_enabled t-job; then
run_process t-job "python $TRICIRCLE_XJOB --config-file $TRICIRCLE_XJOB_CONF"
@ -348,20 +185,12 @@ if [[ "$Q_ENABLE_TRICIRCLE" == "True" ]]; then
stop_process t-api
fi
if is_service_enabled t-ngw; then
stop_process t-ngw
fi
if is_service_enabled t-cgw; then
stop_process t-cgw
fi
if is_service_enabled t-job; then
stop_process t-job
fi
if is_service_enabled q-svc1; then
stop_process q-svc1
if is_service_enabled q-svc0; then
stop_process q-svc0
fi
fi
fi

View File

@ -4,7 +4,7 @@ TRICIRCLE_DIR=$DEST/tricircle
TRICIRCLE_BRANCH=${TRICIRCLE_BRANCH:-master}
# common variables
POD_REGION_NAME=${POD_REGION_NAME:-Pod1}
CENTRAL_REGION_NAME=${CENTRAL_REGION_NAME:-CentralRegion}
TRICIRCLE_NEUTRON_PORT=${TRICIRCLE_NEUTRON_PORT:-20001}
TRICIRCLE_CONF_DIR=${TRICIRCLE_CONF_DIR:-/etc/tricircle}
TRICIRCLE_STATE_PATH=${TRICIRCLE_STATE_PATH:-/var/lib/tricircle}
@ -18,24 +18,6 @@ TRICIRCLE_API_HOST=${TRICIRCLE_API_HOST:-$SERVICE_HOST}
TRICIRCLE_API_PORT=${TRICIRCLE_API_PORT:-19999}
TRICIRCLE_API_PROTOCOL=${TRICIRCLE_API_PROTOCOL:-$SERVICE_PROTOCOL}
# tricircle nova_apigw
TRICIRCLE_NOVA_APIGW=$TRICIRCLE_DIR/cmd/nova_apigw.py
TRICIRCLE_NOVA_APIGW_CONF=$TRICIRCLE_CONF_DIR/nova_apigw.conf
TRICIRCLE_NOVA_APIGW_LISTEN_ADDRESS=${TRICIRCLE_NOVA_APIGW_LISTEN_ADDRESS:-0.0.0.0}
TRICIRCLE_NOVA_APIGW_HOST=${TRICIRCLE_NOVA_APIGW_HOST:-$SERVICE_HOST}
TRICIRCLE_NOVA_APIGW_PORT=${TRICIRCLE_NOVA_APIGW_PORT:-19998}
TRICIRCLE_NOVA_APIGW_PROTOCOL=${TRICIRCLE_NOVA_APIGW_PROTOCOL:-$SERVICE_PROTOCOL}
# tricircle cinder_apigw
TRICIRCLE_CINDER_APIGW=$TRICIRCLE_DIR/cmd/cinder_apigw.py
TRICIRCLE_CINDER_APIGW_CONF=$TRICIRCLE_CONF_DIR/cinder_apigw.conf
TRICIRCLE_CINDER_APIGW_LISTEN_ADDRESS=${TRICIRCLE_CINDER_APIGW_LISTEN_ADDRESS:-0.0.0.0}
TRICIRCLE_CINDER_APIGW_HOST=${TRICIRCLE_CINDER_APIGW_HOST:-$SERVICE_HOST}
TRICIRCLE_CINDER_APIGW_PORT=${TRICIRCLE_CINDER_APIGW_PORT:-19997}
TRICIRCLE_CINDER_APIGW_PROTOCOL=${TRICIRCLE_CINDER_APIGW_PROTOCOL:-$SERVICE_PROTOCOL}
# tricircle xjob
TRICIRCLE_XJOB=$TRICIRCLE_DIR/cmd/xjob.py
TRICIRCLE_XJOB_CONF=$TRICIRCLE_CONF_DIR/xjob.conf

View File

@ -72,6 +72,7 @@ JS_Fail = 'Fail'
SP_EXTRA_ID = '00000000-0000-0000-0000-000000000000'
TOP = 'top'
POD_NOT_SPECIFIED = 'not_specified_pod'
PROFILE_REGION = 'region'
# job type
JT_ROUTER = 'router'

View File

@ -127,6 +127,26 @@ def update_pod_service_configuration(context, config_id, update_dict):
context, models.PodServiceConfiguration, config_id, update_dict)
def create_resource_mapping(context, top_id, bottom_id, pod_id, project_id,
                            resource_type):
    """Create a routing entry mapping a top resource to a bottom resource.

    :param context: tricircle context carrying the DB session
    :param top_id: id of the resource in the top (central) pod
    :param bottom_id: id of the resource in the bottom pod
    :param pod_id: id of the bottom pod
    :param project_id: id of the owning project
    :param resource_type: type of the resource (network, subnet, port, ...)
    :returns: the created routing entry, or None when an identical entry
        already exists (duplicate insert is treated as success-by-another)
    """
    try:
        context.session.begin()
        route = core.create_resource(context, models.ResourceRouting,
                                     {'top_id': top_id,
                                      'bottom_id': bottom_id,
                                      'pod_id': pod_id,
                                      'project_id': project_id,
                                      'resource_type': resource_type})
        context.session.commit()
        return route
    except db_exc.DBDuplicateEntry:
        # entry has already been created
        context.session.rollback()
        return None
    finally:
        # always release the session, whether we committed or rolled back
        context.session.close()
def get_bottom_mappings_by_top_id(context, top_id, resource_type):
"""Get resource id and pod name on bottom

View File

@ -79,7 +79,10 @@ tricircle_opts = [
cfg.StrOpt('bridge_network_type',
default='',
help=_('Type of l3 bridge network, this type should be enabled '
'in tenant_network_types and is not local type.'))
'in tenant_network_types and is not local type.')),
cfg.BoolOpt('enable_api_gateway',
default=True,
help=_('Whether the Nova API gateway is enabled'))
]
tricircle_opt_group = cfg.OptGroup('tricircle')
@ -335,6 +338,14 @@ class TricirclePlugin(db_base_plugin_v2.NeutronDbPluginV2,
if network.get(external_net.EXTERNAL):
self._create_bottom_external_subnet(
context, res, network, res['id'])
if res['enable_dhcp']:
try:
t_ctx = t_context.get_context_from_neutron_context(context)
self.helper.prepare_top_dhcp_port(
t_ctx, context, res['tenant_id'], network['id'], res['id'])
except Exception:
self.delete_subnet(context, res['id'])
raise
return res
def _delete_pre_created_port(self, t_ctx, q_ctx, port_name):
@ -384,7 +395,26 @@ class TricirclePlugin(db_base_plugin_v2.NeutronDbPluginV2,
# router interface, we cannot directly update bottom port in this case,
# otherwise we will fail when attaching bottom port to bottom router
# because its device_id is not empty
return super(TricirclePlugin, self).update_port(context, port_id, port)
res = super(TricirclePlugin, self).update_port(context, port_id, port)
if t_constants.PROFILE_REGION in port['port'].get(
'binding:profile', {}):
region_name = port['port']['binding:profile'][
t_constants.PROFILE_REGION]
t_ctx = t_context.get_context_from_neutron_context(context)
pod = db_api.get_pod_by_name(t_ctx, region_name)
entries = [(ip['subnet_id'],
t_constants.RT_SUBNET) for ip in res['fixed_ips']]
entries.append((res['network_id'], t_constants.RT_NETWORK))
entries.append((res['id'], t_constants.RT_PORT))
for resource_id, resource_type in entries:
if db_api.get_bottom_id_by_top_id_pod_name(
t_ctx, resource_id, pod['pod_name'], resource_type):
continue
db_api.create_resource_mapping(t_ctx, resource_id, resource_id,
pod['pod_id'], res['tenant_id'],
resource_type)
return res
def delete_port(self, context, port_id, l3_port_check=True):
t_ctx = t_context.get_context_from_neutron_context(context)
@ -393,16 +423,20 @@ class TricirclePlugin(db_base_plugin_v2.NeutronDbPluginV2,
# ports, we just remove records in top pod and leave deletion of
# ports and routing entries in bottom pods to xjob
if port.get('device_owner') not in NON_VM_PORT_TYPES:
try:
mappings = db_api.get_bottom_mappings_by_top_id(
t_ctx, port_id, t_constants.RT_PORT)
if mappings:
pod_name = mappings[0][0]['pod_name']
bottom_port_id = mappings[0][1]
self._get_client(pod_name).delete_ports(
t_ctx, bottom_port_id)
except Exception:
raise
if cfg.CONF.tricircle.enable_api_gateway:
# NOTE(zhiyuan) this is a temporary check, after we remove all
# the networking process from nova api gateway, we can remove
# this option
try:
mappings = db_api.get_bottom_mappings_by_top_id(
t_ctx, port_id, t_constants.RT_PORT)
if mappings:
pod_name = mappings[0][0]['pod_name']
bottom_port_id = mappings[0][1]
self._get_client(pod_name).delete_ports(
t_ctx, bottom_port_id)
except Exception:
raise
with t_ctx.session.begin():
core.delete_resources(t_ctx, models.ResourceRouting,
filters=[{'key': 'top_id',

View File

@ -28,3 +28,7 @@ class DefaultGroupUpdateNotSupported(exceptions.InvalidInput):
class BottomPodOperationFailure(exceptions.NeutronException):
message = _('Operation for %(resource)s on bottom pod %(pod_name)s fails')
class DhcpPortNotFound(exceptions.NotFound):
    """Raised when the pre-created top DHCP port for a subnet is missing."""
    message = _('Dhcp port for subnet %(subnet_id)s not found')

View File

@ -472,21 +472,17 @@ class NetworkHelper(object):
}
return body
def prepare_dhcp_port(self, ctx, project_id, b_pod, t_net_id, t_subnet_id,
b_net_id, b_subnet_id):
"""Create top dhcp port and map it to bottom dhcp port
def prepare_top_dhcp_port(self, t_ctx, q_ctx, project_id, t_net_id,
t_subnet_id):
"""Create top dhcp port
:param ctx: tricircle context
:param t_ctx: tricircle context
:param q_ctx: neutron context
:param project_id: project id
:param b_pod: dict of bottom pod
:param t_net_id: top network id
:param t_subnet_id: top subnet id
:param b_net_id: bottom network id
:param b_subnet_id: bottom subnet id
:return: None
:return: top dhcp port id
"""
t_client = self._get_client()
t_dhcp_name = t_constants.dhcp_port_name % t_subnet_id
t_dhcp_port_body = {
'port': {
@ -509,8 +505,26 @@ class NetworkHelper(object):
# the same IP, each dnsmasq daemon only takes care of VM IPs in
# its own pod, VM will not receive incorrect dhcp response
_, t_dhcp_port_id = self.prepare_top_element(
ctx, None, project_id, db_api.get_top_pod(ctx),
t_ctx, q_ctx, project_id, db_api.get_top_pod(t_ctx),
{'id': t_dhcp_name}, t_constants.RT_PORT, t_dhcp_port_body)
return t_dhcp_port_id
def prepare_dhcp_port(self, ctx, project_id, b_pod, t_net_id, t_subnet_id,
b_net_id, b_subnet_id):
"""Create top dhcp port and map it to bottom dhcp port
:param ctx: tricircle context
:param project_id: project id
:param b_pod: dict of bottom pod
:param t_net_id: top network id
:param t_subnet_id: top subnet id
:param b_net_id: bottom network id
:param b_subnet_id: bottom subnet id
:return: None
"""
t_dhcp_port_id = self.prepare_top_dhcp_port(ctx, None, project_id,
t_net_id, t_subnet_id)
t_client = self._get_client()
t_dhcp_port = t_client.get_ports(ctx, t_dhcp_port_id)
dhcp_port_body = self._get_create_dhcp_port_body(
project_id, t_dhcp_port, b_subnet_id, b_net_id)

View File

@ -0,0 +1,363 @@
# Copyright 2015 Huawei Technologies Co., Ltd.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
from oslo_config import cfg
from oslo_log import log
import neutron_lib.constants as q_constants
import neutron_lib.exceptions as q_exceptions
from neutron.common import utils
from neutron.plugins.ml2 import plugin
from tricircle.common import client # noqa
import tricircle.common.constants as t_constants
import tricircle.common.context as t_context
from tricircle.common.i18n import _
from tricircle.common import resource_handle
import tricircle.network.exceptions as t_exceptions
from tricircle.network import helper
# configuration options consumed by the local Neutron plugin: which real
# core plugin to wrap and where the central Neutron server lives
tricircle_opts = [
    cfg.StrOpt('real_core_plugin', help=_('The core plugin the Tricircle '
                                          'local plugin will invoke.')),
    cfg.StrOpt('central_neutron_url', help=_('Central Neutron server url'))]

# register the options under the [tricircle] section
tricircle_opt_group = cfg.OptGroup('tricircle')
cfg.CONF.register_group(tricircle_opt_group)
cfg.CONF.register_opts(tricircle_opts, group=tricircle_opt_group)

LOG = log.getLogger(__name__)
class TricirclePlugin(plugin.Ml2Plugin):
    def __init__(self):
        """Load the wrapped core plugin and set up the central Neutron handle.

        The local plugin wraps the plugin named by the
        ``[tricircle] real_core_plugin`` option and proxies selected
        requests to the central Neutron server located at
        ``[tricircle] central_neutron_url``.
        """
        super(TricirclePlugin, self).__init__()
        core_plugins_namespace = 'neutron.core_plugins'
        plugin_provider = cfg.CONF.tricircle.real_core_plugin
        plugin_class = utils.load_class_by_alias_or_classname(
            core_plugins_namespace, plugin_provider)
        # the real core plugin that manages local (bottom) resources
        self.core_plugin = plugin_class()
        # handle used to talk to the central Neutron server
        self.neutron_handle = resource_handle.NeutronResourceHandle(
            cfg.CONF.client.auth_url)
        self.neutron_handle.endpoint_url = \
            cfg.CONF.tricircle.central_neutron_url
@staticmethod
def _adapt_network_body(network):
network_type = network.get('provider:network_type')
if network_type == t_constants.NT_LOCAL:
for key in ['provider:network_type', 'provider:physical_network',
'provider:segmentation_id']:
network.pop(key, None)
elif network_type == t_constants.NT_SHARED_VLAN:
network['provider:network_type'] = 'vlan'
@staticmethod
def _adapt_port_body_for_client(port):
port.pop('port_security_enabled', None)
port.pop('allowed_address_pairs', None)
remove_keys = []
for key, value in six.iteritems(port):
if value is q_constants.ATTR_NOT_SPECIFIED:
remove_keys.append(key)
for key in remove_keys:
port.pop(key)
@staticmethod
def _adapt_port_body_for_call(port):
if 'mac_address' not in port:
port['mac_address'] = q_constants.ATTR_NOT_SPECIFIED
if 'fixed_ips' not in port:
port['fixed_ips'] = q_constants.ATTR_NOT_SPECIFIED
@staticmethod
def _construct_params(filters, sorts, limit, marker, page_reverse):
params = {}
for key, value in six.iteritems(filters):
params[key] = value
if sorts:
params['sort_key'] = [s[0] for s in sorts]
if page_reverse:
params['sort_dir'] = ['desc' if s[1] else 'asc' for s in sorts]
else:
params['sort_dir'] = ['asc' if s[1] else 'desc' for s in sorts]
if limit:
params['limit'] = limit
if marker:
params['marker'] = marker
return params
def _ensure_network_subnet(self, context, port):
network_id = port['network_id']
# get_network will create bottom network if it doesn't exist, also
# create bottom subnets if they don't exist
self.get_network(context, network_id)
    def _ensure_subnet(self, context, network, is_top=True):
        """Make sure the subnets of a network exist in local Neutron.

        :param context: neutron context
        :param network: network dict whose 'subnets' are checked
        :param is_top: True when ``network`` is a central (top) network,
            False when it is a local (bottom) network
        :returns: list of subnet ids of the network
        """
        subnet_ids = network.get('subnets', [])
        if not is_top:
            if subnet_ids:
                # bottom network already records its subnets
                return subnet_ids
            else:
                # bottom network has no subnets recorded; fetch the top
                # network and ensure its subnets instead
                t_ctx = t_context.get_context_from_neutron_context(context)
                t_network = self.neutron_handle.handle_get(
                    t_ctx, 'network', network['id'])
                return self._ensure_subnet(context, t_network)
        if not subnet_ids:
            return []
        if len(subnet_ids) == 1:
            # get_subnet creates the missing bottom subnet on demand
            self.get_subnet(context, subnet_ids[0])
        else:
            # get_subnets creates missing bottom subnets on demand
            self.get_subnets(context, filters={'id': subnet_ids})
        return subnet_ids
    def _ensure_subnet_dhcp_port(self, t_ctx, q_ctx, b_subnet):
        """Create the local DHCP port for a subnet if it is missing.

        Looks up the pre-created top DHCP port (named after the subnet id)
        on the central Neutron server and replays it locally under the same
        port id so top and bottom ports stay mapped.

        :raises t_exceptions.DhcpPortNotFound: when no top DHCP port exists
        """
        b_dhcp_ports = self.core_plugin.get_ports(
            q_ctx, filters={'network_id': [b_subnet['network_id']],
                            'device_owner': ['network:dhcp']})
        if b_dhcp_ports:
            # local dhcp port already exists, nothing to do
            return
        raw_client = self.neutron_handle._get_client(t_ctx)
        params = {'name': t_constants.dhcp_port_name % b_subnet['id']}
        t_ports = raw_client.list_ports(**params)['ports']
        if not t_ports:
            raise t_exceptions.DhcpPortNotFound(subnet_id=b_subnet['id'])
        dhcp_port_body = \
            helper.NetworkHelper._get_create_dhcp_port_body(
                b_subnet['tenant_id'], t_ports[0], b_subnet['id'],
                b_subnet['network_id'])
        # reuse the top port id so the two ports share one identifier
        dhcp_port_body['port']['id'] = t_ports[0]['id']
        self.core_plugin.create_port(q_ctx, dhcp_port_body)
    def get_network(self, context, _id, fields=None):
        """Return a network, creating it locally from the top copy if needed."""
        try:
            b_network = self.core_plugin.get_network(context, _id, fields)
            subnet_ids = self._ensure_subnet(context, b_network, False)
        except q_exceptions.NotFound:
            # not in local Neutron yet: fetch from central Neutron and
            # replay the creation locally
            t_ctx = t_context.get_context_from_neutron_context(context)
            t_network = self.neutron_handle.handle_get(t_ctx, 'network', _id)
            if not t_network:
                raise q_exceptions.NetworkNotFound(net_id=_id)
            self._adapt_network_body(t_network)
            b_network = self.core_plugin.create_network(context,
                                                        {'network': t_network})
            subnet_ids = self._ensure_subnet(context, t_network)
        if subnet_ids:
            b_network['subnets'] = subnet_ids
        return self._fields(b_network, fields)
    def get_networks(self, context, filters=None, fields=None,
                     sorts=None, limit=None, marker=None, page_reverse=False):
        """List networks, replaying ones missing locally from central Neutron.
        """
        # if id is not specified in the filter, we just return network data in
        # local Neutron server, otherwise id is specified, we need to retrieve
        # network data from central Neutron server and create network which
        # doesn't exist in local Neutron server.
        if not filters or 'id' not in filters:
            return self.core_plugin.get_networks(
                context, filters, fields, sorts, limit, marker, page_reverse)
        b_networks = self.core_plugin.get_networks(
            context, filters, fields, sorts, limit, marker, page_reverse)
        for b_network in b_networks:
            subnet_ids = self._ensure_subnet(context, b_network, False)
            if subnet_ids:
                b_network['subnets'] = subnet_ids
        if len(b_networks) == len(filters['id']):
            # every requested network already exists locally
            return b_networks
        t_ctx = t_context.get_context_from_neutron_context(context)
        raw_client = self.neutron_handle._get_client(t_ctx)
        params = self._construct_params(filters, sorts, limit, marker,
                                        page_reverse)
        t_networks = raw_client.list_networks(**params)['networks']
        # replay only the networks present centrally but missing locally
        t_id_set = set([network['id'] for network in t_networks])
        b_id_set = set([network['id'] for network in b_networks])
        missing_id_set = t_id_set - b_id_set
        if missing_id_set:
            missing_networks = [network for network in t_networks if (
                network['id'] in missing_id_set)]
            for network in missing_networks:
                self._adapt_network_body(network)
                b_network = self.core_plugin.create_network(
                    context, {'network': network})
                subnet_ids = self._ensure_subnet(context, network)
                if subnet_ids:
                    b_network['subnets'] = subnet_ids
                b_networks.append(self._fields(b_network, fields))
        return b_networks
    def get_subnet(self, context, _id, fields=None):
        """Return a subnet, creating it locally from the top copy if needed."""
        t_ctx = t_context.get_context_from_neutron_context(context)
        try:
            b_subnet = self.core_plugin.get_subnet(context, _id, fields)
        except q_exceptions.NotFound:
            # not in local Neutron yet: fetch from central Neutron and
            # replay the creation locally
            t_subnet = self.neutron_handle.handle_get(t_ctx, 'subnet', _id)
            if not t_subnet:
                raise q_exceptions.SubnetNotFound(subnet_id=_id)
            b_subnet = self.core_plugin.create_subnet(context,
                                                      {'subnet': t_subnet})
            # newly replayed subnet also needs its local dhcp port
            if b_subnet['enable_dhcp']:
                self._ensure_subnet_dhcp_port(t_ctx, context, b_subnet)
        return self._fields(b_subnet, fields)
    def get_subnets(self, context, filters=None, fields=None, sorts=None,
                    limit=None, marker=None, page_reverse=False):
        """List subnets, replaying ones missing locally from central Neutron.
        """
        # if id is not specified in the filter, we just return subnet data in
        # local Neutron server, otherwise id is specified, we need to retrieve
        # subnet data from central Neutron server and create subnet which
        # doesn't exist in local Neutron server.
        if not filters or 'id' not in filters:
            return self.core_plugin.get_subnets(
                context, filters, fields, sorts, limit, marker, page_reverse)
        t_ctx = t_context.get_context_from_neutron_context(context)
        b_subnets = self.core_plugin.get_subnets(
            context, filters, fields, sorts, limit, marker, page_reverse)
        for b_subnet in b_subnets:
            # NOTE(review): this ensure runs unconditionally; for a subnet
            # with enable_dhcp False no top dhcp port exists, which looks
            # like it would raise DhcpPortNotFound -- confirm intended
            self._ensure_subnet_dhcp_port(t_ctx, context, b_subnet)
        if len(b_subnets) == len(filters['id']):
            # every requested subnet already exists locally
            return b_subnets
        raw_client = self.neutron_handle._get_client(t_ctx)
        params = self._construct_params(filters, sorts, limit, marker,
                                        page_reverse)
        t_subnets = raw_client.list_subnets(**params)['subnets']
        # replay only the subnets present centrally but missing locally
        t_id_set = set([subnet['id'] for subnet in t_subnets])
        b_id_set = set([subnet['id'] for subnet in b_subnets])
        missing_id_set = t_id_set - b_id_set
        if missing_id_set:
            missing_subnets = [subnet for subnet in t_subnets if (
                subnet['id'] in missing_id_set)]
            for subnet in missing_subnets:
                b_subnet = self.core_plugin.create_subnet(
                    context, {'subnet': subnet})
                if b_subnet['enable_dhcp']:
                    self._ensure_subnet_dhcp_port(t_ctx, context, b_subnet)
                b_subnets.append(self._fields(b_subnet, fields))
        return b_subnets
    def create_port(self, context, port):
        """Create a port locally, kept in sync with central Neutron.

        When a fixed IP is given, the matching top port must already exist
        (it was pre-created via central Neutron, e.g. by nova boot);
        otherwise a new top port is created first and then replayed
        locally.

        :raises q_exceptions.InvalidIpForNetwork: when no ip address is
            given in fixed_ips or no top port owns the requested address
        """
        port_body = port['port']
        network_id = port_body['network_id']
        # get_network will create bottom network if it doesn't exist
        self.get_network(context, network_id)
        t_ctx = t_context.get_context_from_neutron_context(context)
        raw_client = self.neutron_handle._get_client(t_ctx)
        if port_body['fixed_ips'] is not q_constants.ATTR_NOT_SPECIFIED:
            fixed_ip = port_body['fixed_ips'][0]
            ip_address = fixed_ip.get('ip_address')
            if not ip_address:
                # dhcp agent may request to create a dhcp port without
                # specifying ip address, we just raise an exception to reject
                # this request
                raise q_exceptions.InvalidIpForNetwork(ip_address='None')
            params = {'fixed_ips': 'ip_address=%s' % ip_address}
            t_ports = raw_client.list_ports(**params)['ports']
            if not t_ports:
                raise q_exceptions.InvalidIpForNetwork(
                    ip_address=fixed_ip['ip_address'])
            t_port = t_ports[0]
        else:
            # no fixed ip requested: create the port centrally first
            self._adapt_port_body_for_client(port['port'])
            t_port = raw_client.create_port(port)['port']
        subnet_id = t_port['fixed_ips'][0]['subnet_id']
        # get_subnet will create bottom subnet if it doesn't exist
        self.get_subnet(context, subnet_id)
        b_port = self.core_plugin.create_port(context, {'port': t_port})
        return b_port
    def update_port(self, context, _id, port):
        """Update a local port; report the hosting region to central Neutron.

        When the update comes from nova binding a VM port, the configured
        nova region name is pushed to the central port's binding:profile so
        the central plugin can create resource routing entries.
        """
        if port['port'].get('device_owner', '').startswith('compute') and (
                port['port'].get('binding:host_id')):
            # we check both "device_owner" and "binding:host_id" to ensure the
            # request comes from nova. and ovs agent will not call update_port.
            # it updates port status via rpc and direct db operation
            region_name = cfg.CONF.nova.region_name
            update_dict = {'binding:profile': {
                t_constants.PROFILE_REGION: region_name}}
            t_ctx = t_context.get_context_from_neutron_context(context)
            self.neutron_handle.handle_update(t_ctx, 'port', _id,
                                              {'port': update_dict})
        return self.core_plugin.update_port(context, _id, port)
def get_port(self, context, _id, fields=None):
    """Return a port, materializing it locally from central data if absent.

    :raises PortNotFound: when the port exists neither locally nor in
        central Neutron
    """
    try:
        b_port = self.core_plugin.get_port(context, _id, fields)
    except q_exceptions.NotFound:
        # Not known locally: fetch from central Neutron and mirror it.
        t_ctx = t_context.get_context_from_neutron_context(context)
        t_port = self.neutron_handle.handle_get(t_ctx, 'port', _id)
        if not t_port:
            raise q_exceptions.PortNotFound(port_id=_id)
        # Ensure the owning network/subnet exist locally, then strip
        # attributes the local core plugin cannot accept.
        self._ensure_network_subnet(context, t_port)
        self._adapt_port_body_for_call(t_port)
        b_port = self.core_plugin.create_port(context, {'port': t_port})
    return self._fields(b_port, fields)
def get_ports(self, context, filters=None, fields=None, sorts=None,
              limit=None, marker=None, page_reverse=False):
    """List ports, pulling ids missing locally from central Neutron.

    If "id" is not specified in the filters we just return port data held
    by the local Neutron server. Otherwise, ports in the id filter that
    are not found locally are retrieved one by one from the central
    Neutron server and created in the local server before returning.
    """
    if not filters or 'id' not in filters:
        return self.core_plugin.get_ports(context, filters, fields, sorts,
                                          limit, marker, page_reverse)
    b_ports = self.core_plugin.get_ports(context, filters, fields, sorts,
                                         limit, marker, page_reverse)
    # every requested id was found locally, nothing to fetch
    if len(b_ports) == len(filters['id']):
        return b_ports
    id_set = set(filters['id'])
    # NOTE(review): this assumes 'id' is present in the returned port
    # dicts; a caller passing a fields list without 'id' would raise
    # KeyError here — confirm callers always include ids.
    b_id_set = set([port['id'] for port in b_ports])
    missing_id_set = id_set - b_id_set
    t_ctx = t_context.get_context_from_neutron_context(context)
    raw_client = self.neutron_handle._get_client(t_ctx)
    t_ports = []
    for port_id in missing_id_set:
        # using list_ports here would cause an infinite API loop since
        # the central Neutron server also uses list_ports to retrieve
        # port information from the local Neutron server, so we
        # show_port one by one
        try:
            t_port = raw_client.show_port(port_id)['port']
            t_ports.append(t_port)
        except Exception:
            # the user passed a nonexistent port id; skip it silently
            pass
    for port in t_ports:
        self._ensure_network_subnet(context, port)
        self._adapt_port_body_for_call(port)
        b_port = self.core_plugin.create_port(context,
                                              {'port': port})
        b_ports.append(self._fields(b_port, fields))
    return b_ports
def delete_port(self, context, _id, l3_port_check=True):
    """Delete a port: central Neutron first, then the local copy."""
    top_ctx = t_context.get_context_from_neutron_context(context)
    self.neutron_handle.handle_delete(top_ctx, t_constants.RT_PORT, _id)
    self.core_plugin.delete_port(context, _id, l3_port_check)

View File

@ -43,7 +43,7 @@ curl -X POST http://127.0.0.1:19999/v1.0/pods \
# <name> <id> <ram> <disk> <vcpus>
# the following command is to create a flavor with name='test',
# id=1, ram=1024MB, disk=10GB, vcpu=1
nova flavor-create test 1 1024 10 1
# nova flavor-create test 1 1024 10 1
image_id=$(openstack image list | awk 'NR==4 {print $2}')
# preparation for the tests
@ -80,12 +80,12 @@ iniset $TEMPEST_CONF volume-feature-enabled api_v1 false
iniset $TEMPEST_CONF validation connect_method fixed
# Run the Compute Tempest tests
cd $TRICIRCLE_TEMPEST_PLUGIN_DIR
sudo BASE=$BASE ./tempest_compute.sh
# cd $TRICIRCLE_TEMPEST_PLUGIN_DIR
# sudo BASE=$BASE ./tempest_compute.sh
# Run the Volume Tempest tests
cd $TRICIRCLE_TEMPEST_PLUGIN_DIR
sudo BASE=$BASE ./tempest_volume.sh
# cd $TRICIRCLE_TEMPEST_PLUGIN_DIR
# sudo BASE=$BASE ./tempest_volume.sh
# Run the Network Tempest tests
cd $TRICIRCLE_TEMPEST_PLUGIN_DIR

View File

@ -50,11 +50,11 @@ from tricircle.common import exceptions
import tricircle.db.api as db_api
from tricircle.db import core
from tricircle.db import models
import tricircle.network.central_plugin as plugin
from tricircle.network.drivers import type_local
from tricircle.network.drivers import type_shared_vlan
from tricircle.network import helper
from tricircle.network import managers
from tricircle.network import plugin
from tricircle.tests.unit.network import test_security_groups
from tricircle.xjob import xmanager
@ -1020,7 +1020,8 @@ class PluginTest(unittest.TestCase,
core.initialize()
core.ModelBase.metadata.create_all(core.get_engine())
cfg.CONF.register_opts(q_config.core_opts)
plugin_path = 'tricircle.tests.unit.network.test_plugin.FakePlugin'
plugin_path = \
'tricircle.tests.unit.network.test_central_plugin.FakePlugin'
cfg.CONF.set_override('core_plugin', plugin_path)
self.context = context.Context()
self.save_method = manager.NeutronManager._get_default_service_plugins

View File

@ -0,0 +1,334 @@
# Copyright 2015 Huawei Technologies Co., Ltd.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import mock
from mock import patch
import six
import unittest
from oslo_utils import uuidutils
import neutron_lib.constants as q_constants
import neutron_lib.exceptions as q_exceptions
from tricircle.common import constants
import tricircle.common.context as t_context
import tricircle.network.local_plugin as plugin
# In-memory pools emulating resources stored in the central ("top") and
# local ("bottom") Neutron servers.
TOP_NETS = []
TOP_SUBNETS = []
TOP_PORTS = []
BOTTOM_NETS = []
BOTTOM_SUBNETS = []
BOTTOM_PORTS = []
# Every pool, in one list, so tearDown can wipe state between tests.
RES_LIST = [TOP_NETS, TOP_SUBNETS, TOP_PORTS,
            BOTTOM_NETS, BOTTOM_SUBNETS, BOTTOM_PORTS]
# resource type -> {is_top: backing pool}; shares the list objects above,
# so mutations through RES_MAP are visible via the named lists and RES_LIST.
RES_MAP = {'network': {True: TOP_NETS, False: BOTTOM_NETS},
           'subnet': {True: TOP_SUBNETS, False: BOTTOM_SUBNETS},
           'port': {True: TOP_PORTS, False: BOTTOM_PORTS}}
def create_resource(_type, is_top, body):
    """Store *body* in the top/bottom pool for resource type *_type*."""
    RES_MAP[_type][is_top].append(body)
def update_resource(_type, is_top, resource_id, body):
    """Merge *body* into the stored resource with *resource_id*.

    :returns: a deep copy of the updated resource
    :raises NotFound: when no resource carries *resource_id*
    """
    for entry in RES_MAP[_type][is_top]:
        if entry['id'] != resource_id:
            continue
        entry.update(body)
        return copy.deepcopy(entry)
    raise q_exceptions.NotFound()
def get_resource(_type, is_top, resource_id):
    """Return a deep copy of the stored resource with *resource_id*.

    :raises NotFound: when no resource carries *resource_id*
    """
    matches = [res for res in RES_MAP[_type][is_top]
               if res['id'] == resource_id]
    if matches:
        return copy.deepcopy(matches[0])
    raise q_exceptions.NotFound()
def list_resource(_type, is_top, filters=None):
    """Return deep copies of stored resources matching *filters*.

    :param filters: maps an attribute name to a collection of acceptable
        values; a resource matches when every attribute's value is in the
        corresponding collection. No filters means "return everything".
    """
    pool = RES_MAP[_type][is_top]
    if not filters:
        return [copy.deepcopy(res) for res in pool]

    def _matches(res):
        return all(res.get(key) in values
                   for key, values in six.iteritems(filters))

    return [copy.deepcopy(res) for res in pool if _matches(res)]
def delete_resource(_type, is_top, body):
    """Remove a resource from the top/bottom pool for *_type*.

    Bug fix: the original body appended *body* to the pool (a copy-paste
    of create_resource), so "deleting" actually grew the store. Now the
    matching entry is removed instead.

    :param body: either the resource id, or a dict carrying an 'id' key
    :raises NotFound: when no stored resource matches
    """
    res_id = body['id'] if isinstance(body, dict) else body
    pool = RES_MAP[_type][is_top]
    for res in pool:
        if res['id'] == res_id:
            pool.remove(res)
            return
    raise q_exceptions.NotFound()
class FakeCorePlugin(object):
    """Stand-in for the real local (bottom) core plugin.

    Every call delegates to the module-level helpers operating on the
    BOTTOM_* pools; the neutron context argument is ignored.
    """

    def create_network(self, context, network):
        body = network['network']
        create_resource('network', False, body)
        return body

    def get_network(self, context, _id, fields=None):
        return get_resource('network', False, _id)

    def get_networks(self, context, filters=None, fields=None, sorts=None,
                     limit=None, marker=None, page_reverse=False):
        return list_resource('network', False, filters)

    def create_subnet(self, context, subnet):
        body = subnet['subnet']
        create_resource('subnet', False, body)
        return body

    def update_subnet(self, context, _id, subnet):
        return update_resource('subnet', False, _id, subnet['subnet'])

    def get_subnet(self, context, _id, fields=None):
        return get_resource('subnet', False, _id)

    def create_port(self, context, port):
        body = port['port']
        create_resource('port', False, body)
        return body

    def get_port(self, context, _id, fields=None):
        return get_resource('port', False, _id)

    def get_ports(self, context, filters=None, fields=None, sorts=None,
                  limit=None, marker=None, page_reverse=False):
        return list_resource('port', False, filters)
class FakeSession(object):
    """Database session double supporting ``with session.begin():``."""

    class WithWrapper(object):
        # no-op context manager returned by begin()
        def __enter__(self):
            pass

        def __exit__(self, type, value, traceback):
            pass

    def begin(self, subtransactions=True):
        # subtransactions is accepted for signature compatibility only
        return self.WithWrapper()
class FakeContext(object):
    """Neutron context double exposing only the db session attribute."""

    def __init__(self):
        # session is the only attribute the plugin under test touches here
        self.session = FakeSession()
class FakeClient(object):
    """Double for the central Neutron REST client, backed by TOP_* pools."""

    def list_networks(self, **kwargs):
        return {'networks': list_resource('network', True, kwargs)}

    def create_port(self, port):
        body = port['port']
        if 'id' not in body:
            body['id'] = uuidutils.generate_uuid()
        if 'fixed_ips' not in body:
            # Fake ip allocation: hand out the ".3" address of the
            # network's subnet. NOTE: cidr[:-4] only works for the
            # x.y.z.0/24 cidrs these tests use.
            for subnet in TOP_SUBNETS:
                if subnet['network_id'] == body['network_id']:
                    body['fixed_ips'] = [
                        {'subnet_id': subnet['id'],
                         'ip_address': subnet['cidr'][:-4] + '3'}]
        create_resource('port', True, body)
        return port

    def show_port(self, port_id):
        return {'port': get_resource('port', True, port_id)}

    def list_ports(self, **kwargs):
        def has_ip(port, ip_address):
            # does any fixed ip of the port carry this address?
            return any(ip['ip_address'] == ip_address
                       for ip in port.get('fixed_ips', []))

        def matches(port):
            for key, value in six.iteritems(kwargs):
                if key == 'fixed_ips':
                    # value has the form 'ip_address=<addr>'
                    if not has_ip(port, value.split('=')[1]):
                        return False
                elif port.get(key) != value:
                    return False
            return True

        return {'ports': [copy.deepcopy(port) for port in TOP_PORTS
                          if matches(port)]}
class FakeNeutronHandle(object):
    """Double for the handle the local plugin uses to reach central Neutron."""

    def _get_client(self, context):
        # hand back the fake central Neutron REST client
        return FakeClient()

    def handle_get(self, context, _type, _id):
        # fetch a top-side (central) resource by id
        return get_resource(_type, True, _id)
class FakePlugin(plugin.TricirclePlugin):
    """TricirclePlugin wired to in-memory fakes.

    Deliberately does NOT call the parent __init__, so no real core
    plugin or neutron handle is constructed.
    """

    def __init__(self):
        self.core_plugin = FakeCorePlugin()
        self.neutron_handle = FakeNeutronHandle()
class PluginTest(unittest.TestCase):
    """Exercises the local TricirclePlugin against in-memory fakes."""

    def setUp(self):
        self.tenant_id = uuidutils.generate_uuid()
        self.plugin = FakePlugin()
        self.context = FakeContext()

    def _prepare_resource(self):
        # Seed the top (central) pools with a network, a dhcp-enabled
        # subnet and the subnet's reserved dhcp port.
        network_id = uuidutils.generate_uuid()
        subnet_id = uuidutils.generate_uuid()
        port_id = uuidutils.generate_uuid()
        t_net = {'id': network_id,
                 'tenant_id': self.tenant_id,
                 'name': 'net1',
                 'provider:network_type': constants.NT_SHARED_VLAN,
                 'subnets': [subnet_id]}
        t_subnet = {'id': subnet_id,
                    'tenant_id': self.tenant_id,
                    'name': 'subnet1',
                    'network_id': network_id,
                    'cidr': '10.0.1.0/24',
                    'enable_dhcp': True}
        t_port = {'id': port_id,
                  'tenant_id': self.tenant_id,
                  'admin_state_up': True,
                  'name': constants.dhcp_port_name % subnet_id,
                  'network_id': network_id,
                  'mac_address': 'fa:16:3e:96:41:02',
                  'device_owner': 'network:dhcp',
                  'device_id': 'reserved_dhcp_port',
                  'fixed_ips': [{'subnet_id': subnet_id,
                                 'ip_address': '10.0.1.2'}],
                  'binding:profile': {}}
        TOP_NETS.append(t_net)
        TOP_SUBNETS.append(t_subnet)
        TOP_PORTS.append(t_port)
        return t_net, t_subnet, t_port

    def _validate(self, net, subnet, port):
        # get_network should lazily mirror the top network, subnet and
        # dhcp port into the bottom pools.
        b_net = self.plugin.get_network(self.context, net['id'])
        net.pop('provider:network_type')
        b_net_type = b_net.pop('provider:network_type')
        b_subnet = get_resource('subnet', False, subnet['id'])
        b_port = get_resource('port', False, port['id'])
        # NOTE(review): project_id is popped from net/subnet but not from
        # port — presumably the bottom port body is stored verbatim;
        # confirm if this ever fails on a project_id mismatch.
        b_net.pop('project_id')
        b_subnet.pop('project_id')
        port.pop('name')
        b_port.pop('name')
        self.assertDictEqual(net, b_net)
        self.assertDictEqual(subnet, b_subnet)
        # top-side shared vlan maps to a plain vlan network on the bottom
        self.assertEqual('vlan', b_net_type)
        self.assertDictEqual(port, b_port)

    @patch.object(t_context, 'get_context_from_neutron_context', new=mock.Mock)
    def test_get_network(self):
        t_net, t_subnet, t_port = self._prepare_resource()
        self._validate(t_net, t_subnet, t_port)

    @patch.object(t_context, 'get_context_from_neutron_context', new=mock.Mock)
    def test_get_networks(self):
        # listing by id should mirror each existing network and silently
        # ignore the nonexistent one
        t_net1, t_subnet1, t_port1 = self._prepare_resource()
        t_net2, t_subnet2, t_port2 = self._prepare_resource()
        self.plugin.get_networks(self.context,
                                 {'id': [t_net1['id'], t_net2['id'],
                                         'fake_net_id']})
        self._validate(t_net1, t_subnet1, t_port1)
        self._validate(t_net2, t_subnet2, t_port2)

    @patch.object(t_context, 'get_context_from_neutron_context', new=mock.Mock)
    def test_create_port(self):
        # no fixed ips: port is created in central Neutron first, then
        # mirrored into the bottom pool
        t_net, t_subnet, t_port = self._prepare_resource()
        port = {
            'port': {'network_id': t_net['id'],
                     'fixed_ips': q_constants.ATTR_NOT_SPECIFIED}
        }
        t_port = self.plugin.create_port(self.context, port)
        b_port = get_resource('port', False, t_port['id'])
        self.assertDictEqual(t_port, b_port)

    @patch.object(t_context, 'get_context_from_neutron_context', new=mock.Mock)
    def test_create_port_ip_specified(self):
        t_net, t_subnet, t_port = self._prepare_resource()
        # requesting an ip no central port owns must be rejected
        port_body = {
            'port': {'network_id': t_net['id'],
                     'fixed_ips': [{'ip_address': '10.0.1.4'}]}
        }
        self.assertRaises(q_exceptions.InvalidIpForNetwork,
                          self.plugin.create_port, self.context, port_body)
        # once a central port with that ip exists, creation succeeds and
        # the central body is mirrored locally
        port_id = uuidutils.generate_uuid()
        t_port = {'id': port_id,
                  'tenant_id': self.tenant_id,
                  'admin_state_up': True,
                  'network_id': t_net['id'],
                  'mac_address': 'fa:16:3e:96:41:04',
                  'fixed_ips': [{'subnet_id': t_subnet['id'],
                                 'ip_address': '10.0.1.4'}],
                  'binding:profile': {}}
        TOP_PORTS.append(t_port)
        b_port = self.plugin.create_port(self.context, port_body)
        self.assertDictEqual(t_port, b_port)

    @patch.object(t_context, 'get_context_from_neutron_context', new=mock.Mock)
    def test_get_port(self):
        # a port only known to central Neutron is mirrored on first get
        t_net, t_subnet, t_port = self._prepare_resource()
        port_id = uuidutils.generate_uuid()
        t_port = {'id': port_id,
                  'tenant_id': self.tenant_id,
                  'admin_state_up': True,
                  'network_id': t_net['id'],
                  'mac_address': 'fa:16:3e:96:41:04',
                  'fixed_ips': [{'subnet_id': t_subnet['id'],
                                 'ip_address': '10.0.1.4'}],
                  'binding:profile': {}}
        TOP_PORTS.append(t_port)
        t_port = self.plugin.get_port(self.context, port_id)
        b_port = get_resource('port', False, t_port['id'])
        self.assertDictEqual(t_port, b_port)

    @patch.object(t_context, 'get_context_from_neutron_context', new=mock.Mock)
    def test_get_ports(self):
        # listing by id mirrors central-only ports and skips unknown ids
        t_net, t_subnet, t_port = self._prepare_resource()
        t_ports = []
        for i in (4, 5):
            port_id = uuidutils.generate_uuid()
            t_port = {'id': port_id,
                      'tenant_id': self.tenant_id,
                      'admin_state_up': True,
                      'network_id': t_net['id'],
                      'mac_address': 'fa:16:3e:96:41:04',
                      'fixed_ips': [{'subnet_id': t_subnet['id'],
                                     'ip_address': '10.0.1.%d' % i}],
                      'binding:profile': {}}
            TOP_PORTS.append(t_port)
            t_ports.append(t_port)
        self.plugin.get_ports(self.context,
                              {'id': [t_ports[0]['id'], t_ports[1]['id'],
                                      'fake_port_id']})
        for i in (0, 1):
            b_port = get_resource('port', False, t_ports[i]['id'])
            b_port.pop('project_id')
            self.assertDictEqual(t_ports[i], b_port)

    def tearDown(self):
        # wipe the shared in-memory pools between tests
        for res in RES_LIST:
            del res[:]