QoS implementation (Part 1: QoS plugin)

1. What is the problem?
Tricircle does not support the QoS service yet, so we should add QoS
service support.

2. What is the solution to the problem?
We implement the Tricircle QoS service by inheriting the Neutron QoS plugin.
To deploy QoS automatically in the local Neutron servers, we also implement
the corresponding QoS xjob jobs (see the sketch below).
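
A minimal sketch of the intended flow, based only on the XJobAPI methods added
in this change; the function name and the policy/network/project identifiers
are illustrative placeholders, not part of the patch:

    # Illustrative only: once a QoS policy is bound to a network that already
    # has bottom mappings, the central plugin asks XJob to mirror the policy
    # into the mapped local Neutron servers asynchronously.
    from tricircle.common import constants, context, xrpcapi

    def notify_local_pods(policy_id, network_id, project_id):
        ctx = context.get_admin_context()  # assumed admin-context helper
        xjob = xrpcapi.XJobAPI()
        xjob.create_qos_policy(
            ctx, project_id, policy_id,
            constants.POD_NOT_SPECIFIED,   # fan out to every mapped pod
            constants.RT_NETWORK, network_id)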

Change-Id: Ifbf453b57f7e18919318e1dae2ca2849e149a29b
Signed-off-by: xiaohan zhang <zhangxiaohan@szzt.com.cn>
zhangxiaohan 2017-11-06 11:09:16 +08:00 committed by 曹嵘晖
parent 1155347157
commit db679ef7cb
20 changed files with 2228 additions and 81 deletions


@@ -291,6 +291,11 @@ function start_central_neutron_server
        iniset $NEUTRON_CONF.$server_index sfc drivers tricircle_sfc
        iniset $NEUTRON_CONF.$server_index flowclassifier drivers tricircle_fc
    fi
    if [ "$TRICIRCLE_ENABLE_QOS" == "True" ]; then
        service_plugins+=",tricircle.network.central_qos_plugin.TricircleQosPlugin"
    fi

    if [ -n service_plugins ]; then
        service_plugins=$(echo $service_plugins| sed 's/^,//')
        iniset $NEUTRON_CONF.$server_index DEFAULT service_plugins "$service_plugins"
@@ -324,6 +329,17 @@ function start_central_neutron_server
    iniset $NEUTRON_CONF.$server_index tricircle enable_api_gateway False

    # default value of bridge_network_type is vxlan
    if [ "$TRICIRCLE_ENABLE_QOS" == "True" ]; then
        local p_exist=$(grep "^extension_drivers" /$Q_PLUGIN_CONF_FILE)
        if [[ $p_exist != "" ]];then
            if ! [[ $(echo $p_exist | grep "qos") ]];then
                sed -i "s/$p_exist/$p_exist,qos/g" /$Q_PLUGIN_CONF_FILE
            fi
        else
            sed -i "s/^\[ml2\]/\[ml2\]\nextension_drivers = qos/g" /$Q_PLUGIN_CONF_FILE
        fi
    fi

    recreate_database $Q_DB_NAME$server_index
    $NEUTRON_BIN_DIR/neutron-db-manage --config-file $NEUTRON_CONF.$server_index --config-file /$Q_PLUGIN_CONF_FILE upgrade head


@@ -13,6 +13,7 @@ TRICIRCLE_DEPLOY_WITH_CELL=${TRICIRCLE_DEPLOY_WITH_CELL:-False}
# extensions working with tricircle
TRICIRCLE_ENABLE_TRUNK=${TRICIRCLE_ENABLE_TRUNK:-False}
TRICIRCLE_ENABLE_SFC=${TRICIRCLE_ENABLE_SFC:-False}
TRICIRCLE_ENABLE_QOS=${TRICIRCLE_ENABLE_QOS:-False}

# these default settings are used for devstack based gate/check jobs
TRICIRCLE_DEFAULT_VLAN_BRIDGE=${TRICIRCLE_DEFAULT_VLAN_BRIDGE:-br-vlan}


@@ -0,0 +1,5 @@
---
features:
  - Provide the central Neutron QoS plugin and implement the QoS driver.
    Support QoS policy creation, update and deletion, and QoS policy binding
    to a network or port.
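
A hedged usage sketch against the central Neutron endpoint, shown only to
illustrate the feature described above; the keystone session and the network
identifier are placeholders and the rule values are arbitrary:

    from neutronclient.v2_0 import client as neutron_client

    # 'my_session' is an already-authenticated keystoneauth1 session pointing
    # at the central Neutron endpoint (setup omitted).
    neutron = neutron_client.Client(session=my_session)
    policy = neutron.create_qos_policy(
        {'policy': {'name': 'gold'}})['policy']
    neutron.create_bandwidth_limit_rule(
        policy['id'],
        {'bandwidth_limit_rule': {'max_kbps': 3000, 'max_burst_kbps': 300}})
    # Binding the policy to a central network triggers the asynchronous jobs
    # that mirror it into the mapped local Neutron servers.
    neutron.update_network(
        'my-network-id', {'network': {'qos_policy_id': policy['id']}})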


@@ -64,6 +64,8 @@ tricircle.network.type_drivers =
    vlan = tricircle.network.drivers.type_vlan:VLANTypeDriver
    vxlan = tricircle.network.drivers.type_vxlan:VxLANTypeDriver
    flat = tricircle.network.drivers.type_flat:FlatTypeDriver
tricircle.network.extension_drivers =
    qos = neutron.plugins.ml2.extensions.qos:QosExtensionDriver
networking_sfc.flowclassifier.drivers =
    tricircle_fc = tricircle.network.central_fc_driver:TricircleFcDriver
networking_sfc.sfc.drivers =


@@ -209,8 +209,14 @@ class Client(object):
                if (handle_obj.support_resource[resource] & index) == 0:
                    continue
                self.operation_resources_map[operation].add(resource)
                if resource == 'qos_policy':
                    setattr(self, '%s_qos_policies' % operation,
                            functools.partial(
                                getattr(self, '%s_resources' % operation),
                                resource))
                else:
                    setattr(self, '%s_%ss' % (operation, resource),
                            functools.partial(
                                getattr(self, '%s_resources' % operation),
                                resource))


@@ -36,6 +36,7 @@ RT_ROUTER = 'router'
RT_NS_ROUTER = 'ns_router'
RT_SG = 'security_group'
RT_FIP = 'floatingip'
RT_QOS = 'qos_policy'

REAL_SHADOW_TYPE_MAP = {
    RT_NETWORK: RT_SD_NETWORK,
@@ -48,7 +49,7 @@ REAL_SHADOW_TYPE_MAP = {

def is_valid_resource_type(resource_type):
    resource_type_table = [RT_NETWORK, RT_SUBNET, RT_PORT, RT_ROUTER, RT_SG,
                           RT_TRUNK, RT_PORT_PAIR, RT_PORT_PAIR_GROUP,
                           RT_FLOW_CLASSIFIER, RT_PORT_CHAIN, RT_QOS]
    return resource_type in resource_type_table
@@ -113,6 +114,10 @@ JT_SHADOW_PORT_SETUP = 'shadow_port_setup'
JT_TRUNK_SYNC = 'trunk_sync'
JT_SFC_SYNC = 'sfc_sync'
JT_RESOURCE_RECYCLE = 'resource_recycle'
JT_QOS_CREATE = 'qos_create'
JT_QOS_UPDATE = 'qos_update'
JT_QOS_DELETE = 'qos_delete'
JT_SYNC_QOS_RULE = 'sync_qos_rule'

# network type
NT_LOCAL = 'local'
@@ -148,7 +153,17 @@ job_resource_map = {
    JT_SFC_SYNC: [(None, "pod_id"),
                  (RT_PORT_CHAIN, "portchain_id"),
                  (RT_NETWORK, "network_id")],
    JT_RESOURCE_RECYCLE: [(None, "project_id")],
    JT_QOS_CREATE: [(None, "pod_id"),
                    (RT_QOS, "policy_id"),
                    (None, "res_type"),
                    (None, "res_id")],
    JT_QOS_UPDATE: [(None, "pod_id"),
                    (RT_QOS, "policy_id")],
    JT_QOS_DELETE: [(None, "pod_id"),
                    (RT_QOS, "policy_id")],
    JT_SYNC_QOS_RULE: [(None, "rule_id"),
                       (RT_QOS, "policy_id")]
}

# map raw job status to more human readable job status
@@ -173,7 +188,11 @@ job_handles = {
    JT_TRUNK_SYNC: "sync_trunk",
    JT_SHADOW_PORT_SETUP: "setup_shadow_ports",
    JT_SFC_SYNC: "sync_service_function_chain",
    JT_RESOURCE_RECYCLE: "recycle_resources",
    JT_QOS_CREATE: "create_qos_policy",
    JT_QOS_UPDATE: "update_qos_policy",
    JT_QOS_DELETE: "delete_qos_policy",
    JT_SYNC_QOS_RULE: "sync_qos_policy_rules"
}

# map job type to its primary resource and then we only validate the project_id
@@ -189,7 +208,11 @@ job_primary_resource_map = {
    JT_TRUNK_SYNC: (RT_TRUNK, "trunk_id"),
    JT_SHADOW_PORT_SETUP: (RT_NETWORK, "network_id"),
    JT_SFC_SYNC: (RT_PORT_CHAIN, "portchain_id"),
    JT_RESOURCE_RECYCLE: (None, "project_id"),
    JT_QOS_CREATE: (RT_QOS, "policy_id"),
    JT_QOS_UPDATE: (RT_QOS, "policy_id"),
    JT_QOS_DELETE: (RT_QOS, "policy_id"),
    JT_SYNC_QOS_RULE: (RT_QOS, "policy_id")
}

# admin API request path


@@ -35,6 +35,9 @@ LIST, CREATE, DELETE, GET, ACTION, UPDATE = 1, 2, 4, 8, 16, 32
operation_index_map = {'list': LIST, 'create': CREATE, 'delete': DELETE,
                       'get': GET, 'action': ACTION, 'update': UPDATE}

policy_rules = ('bandwidth_limit_rule', 'dscp_marking_rule',
                'minimum_bandwidth_rule')

LOG = logging.getLogger(__name__)
@@ -98,7 +101,11 @@ class NeutronResourceHandle(ResourceHandle):
        'port_chain': LIST | CREATE | DELETE | GET | UPDATE,
        'port_pair_group': LIST | CREATE | DELETE | GET | UPDATE,
        'port_pair': LIST | CREATE | DELETE | GET | UPDATE,
        'flow_classifier': LIST | CREATE | DELETE | GET | UPDATE,
        'qos_policy': LIST | CREATE | DELETE | GET | UPDATE,
        'bandwidth_limit_rule': LIST | CREATE | DELETE | GET | UPDATE,
        'dscp_marking_rule': LIST | CREATE | DELETE | GET | UPDATE,
        'minimum_bandwidth_rule': LIST | CREATE | DELETE | GET | UPDATE}

    def _get_client(self, cxt):
        token = cxt.auth_token
@@ -113,7 +120,10 @@ class NeutronResourceHandle(ResourceHandle):
    def handle_list(self, cxt, resource, filters):
        try:
            client = self._get_client(cxt)
            if resource == 'qos_policy':
                collection = 'qos_policies'
            else:
                collection = '%ss' % resource
            search_opts = _transform_filters(filters)
            return [res for res in getattr(
                client, 'list_%s' % collection)(**search_opts)[collection]]
@@ -126,6 +136,10 @@ class NeutronResourceHandle(ResourceHandle):
            client = self._get_client(cxt)
            ret = getattr(client, 'create_%s' % resource)(
                *args, **kwargs)
            if resource == 'qos_policy':
                return ret['policy']
            if resource in ret:
                return ret[resource]
            else:
@@ -137,6 +151,9 @@ class NeutronResourceHandle(ResourceHandle):
    def handle_update(self, cxt, resource, *args, **kwargs):
        try:
            client = self._get_client(cxt)
            if resource == 'qos_policy':
                return getattr(client, 'update_%s' % resource)(
                    *args, **kwargs)['policy']
            return getattr(client, 'update_%s' % resource)(
                *args, **kwargs)[resource]
        except q_exceptions.ConnectionFailed:
@@ -146,6 +163,13 @@ class NeutronResourceHandle(ResourceHandle):
    def handle_get(self, cxt, resource, resource_id):
        try:
            client = self._get_client(cxt)
            if resource == 'qos_policy':
                return getattr(client, 'show_%s' % resource)(
                    resource_id)['policy']
            if resource in policy_rules:
                (rule_id, policy_id) = resource_id.split('#')
                return getattr(client, 'show_%s' % resource)(
                    rule_id, policy_id)[resource]
            return getattr(client, 'show_%s' % resource)(resource_id)[resource]
        except q_exceptions.ConnectionFailed:
            raise exceptions.EndpointNotAvailable(
@@ -157,6 +181,10 @@ class NeutronResourceHandle(ResourceHandle):
    def handle_delete(self, cxt, resource, resource_id):
        try:
            client = self._get_client(cxt)
            if resource in policy_rules:
                (rule_id, policy_id) = resource_id.split('#')
                return getattr(client, 'delete_%s' % resource)(
                    rule_id, policy_id)
            return getattr(client, 'delete_%s' % resource)(resource_id)
        except q_exceptions.ConnectionFailed:
            raise exceptions.EndpointNotAvailable(


@@ -140,3 +140,26 @@ class XJobAPI(object):
            ctxt, project_id,
            constants.job_handles[constants.JT_RESOURCE_RECYCLE],
            constants.JT_RESOURCE_RECYCLE, project_id)

    def create_qos_policy(self, ctxt, project_id, policy_id, pod_id,
                          res_type, res_id=None):
        self.invoke_method(
            ctxt, project_id, constants.job_handles[constants.JT_QOS_CREATE],
            constants.JT_QOS_CREATE, '%s#%s#%s#%s' % (pod_id, policy_id,
                                                      res_type, res_id))

    def update_qos_policy(self, ctxt, project_id, policy_id, pod_id):
        self.invoke_method(
            ctxt, project_id, constants.job_handles[constants.JT_QOS_UPDATE],
            constants.JT_QOS_UPDATE, '%s#%s' % (pod_id, policy_id))

    def delete_qos_policy(self, ctxt, project_id, policy_id, pod_id):
        self.invoke_method(
            ctxt, project_id, constants.job_handles[constants.JT_QOS_DELETE],
            constants.JT_QOS_DELETE, '%s#%s' % (pod_id, policy_id))

    def sync_qos_policy_rules(self, ctxt, project_id, policy_id):
        self.invoke_method(
            ctxt, project_id,
            constants.job_handles[constants.JT_SYNC_QOS_RULE],
            constants.JT_SYNC_QOS_RULE, policy_id)


@@ -28,7 +28,9 @@ from neutron.callbacks import exceptions as callbacks_exc
from neutron.callbacks import registry
from neutron.callbacks import resources
import neutron.common.exceptions as ml2_exceptions
from neutron.conf.plugins.ml2 import config # noqa
from neutron.db import _resource_extend as resource_extend
from neutron.db import agents_db
from neutron.db import api as q_db_api
from neutron.db.availability_zone import router as router_az
from neutron.db import db_base_plugin_v2
@@ -44,6 +46,8 @@ from neutron.db import l3_hamode_db # noqa
from neutron.db import models_v2
from neutron.db import portbindings_db
from neutron.extensions import providernet as provider
from neutron.objects.qos import policy as policy_object
from neutron.plugins.ml2 import managers as n_managers
from neutron_lib.api.definitions import availability_zone as az_def
from neutron_lib.api.definitions import external_net
from neutron_lib.api.definitions import l3 as l3_apidef
@@ -72,6 +76,7 @@ from tricircle.db import models
import tricircle.network.exceptions as t_network_exc
from tricircle.network import helper
from tricircle.network import managers
from tricircle.network import qos_driver
from tricircle.network import security_groups
@@ -80,6 +85,11 @@ tricircle_opts = [
                default=['vxlan,local'],
                help=_('List of network type driver entry points to be loaded '
                       'from the tricircle.network.type_drivers namespace.')),
    cfg.ListOpt('extension_drivers',
                default=[],
                help=_('List of network extension driver entry points to be '
                       'loaded from the neutron.ml2.extension_drivers '
                       'namespace.')),
    cfg.ListOpt('tenant_network_types',
                default=['vxlan,local'],
                help=_('Ordered list of network_types to allocate as tenant '
@@ -128,6 +138,7 @@ NON_VM_PORT_TYPES = [constants.DEVICE_OWNER_ROUTER_INTF,

class TricirclePlugin(db_base_plugin_v2.NeutronDbPluginV2,
                      agents_db.AgentDbMixin,
                      security_groups.TricircleSecurityGroupMixin,
                      external_net_db.External_net_db_mixin,
                      portbindings_db.PortBindingMixin,
@@ -165,12 +176,15 @@ class TricirclePlugin(db_base_plugin_v2.NeutronDbPluginV2,
    def __init__(self):
        super(TricirclePlugin, self).__init__()
        LOG.info("Starting Tricircle Neutron Plugin")
        self.clients = {'top': t_client.Client()}
        self.xjob_handler = xrpcapi.XJobAPI()
        self._setup_rpc()
        self.type_manager = managers.TricircleTypeManager()
        self.extension_manager = n_managers.ExtensionManager()
        self.extension_manager.initialize()
        self.type_manager.initialize()
        self.helper = helper.NetworkHelper(self)
        qos_driver.register()

    def _setup_rpc(self):
        self.endpoints = []
@@ -303,6 +317,8 @@ class TricirclePlugin(db_base_plugin_v2.NeutronDbPluginV2,
            net_db = self.create_network_db(context, network)
            res = self._make_network_dict(net_db, process_extensions=False,
                                          context=context)
            self.extension_manager.process_create_network(context, net_data,
                                                          res)
            self._process_l3_create(context, res, net_data)
            net_data['id'] = res['id']
            self.type_manager.create_network_segments(context, net_data,
@@ -392,19 +408,55 @@ class TricirclePlugin(db_base_plugin_v2.NeutronDbPluginV2,
        self._raise_if_updates_external_attribute(net_data)

        with context.session.begin():
            original_network = super(TricirclePlugin, self).get_network(
                context, network_id)
            policy = policy_object.QosPolicy.get_network_policy(
                context, network_id)
            if policy:
                original_network['qos_policy_id'] = policy['id']
            else:
                original_network['qos_policy_id'] = None

            updated_network = super(
                TricirclePlugin, self).update_network(
                context, network_id, network)
            self.extension_manager.process_update_network(
                context, net_data, updated_network)
            self.type_manager.extend_network_dict_provider(context,
                                                           updated_network)
            updated_network = self.get_network(context, network_id)

            if net_data.get('qos_policy_id', None):
                updated_network['qos_policy_id'] = net_data['qos_policy_id']
            if not updated_network.get('qos_policy_id', None):
                updated_network['qos_policy_id'] = None
            need_network_update_notify = (
                'qos_policy_id' in net_data and
                original_network['qos_policy_id'] !=
                updated_network['qos_policy_id'])

            t_ctx = t_context.get_context_from_neutron_context(context)
            mappings = db_api.get_bottom_mappings_by_top_id(
                t_ctx, network_id, t_constants.RT_NETWORK)
            if mappings:
                self.xjob_handler.update_network(
                    t_ctx, updated_network['tenant_id'], network_id,
                    t_constants.POD_NOT_SPECIFIED)

            if need_network_update_notify and \
                    updated_network['qos_policy_id'] and mappings:
                t_policy_id = updated_network['qos_policy_id']
                self.xjob_handler.create_qos_policy(
                    t_ctx, updated_network['tenant_id'], t_policy_id,
                    t_constants.POD_NOT_SPECIFIED, t_constants.RT_NETWORK,
                    updated_network['id'])

            return updated_network

    def _convert_az2region_for_nets(self, context, nets):
        for net in nets:
@@ -419,6 +471,11 @@ class TricirclePlugin(db_base_plugin_v2.NeutronDbPluginV2,
    def _convert_az2region(self, t_ctx, az_hints):
        return self.helper.convert_az2region(t_ctx, az_hints)

    def _get_network_qos_info(self, context, net_id):
        policy = policy_object.QosPolicy.get_network_policy(
            context.elevated(), net_id)
        return policy['id'] if policy else None

    def get_network(self, context, network_id, fields=None):
        net = super(TricirclePlugin, self).get_network(context, network_id,
                                                       fields)
@@ -427,6 +484,9 @@ class TricirclePlugin(db_base_plugin_v2.NeutronDbPluginV2,
        self._convert_az2region_for_net(context, net)

        net['qos_policy_id'] = \
            self._get_network_qos_info(context.elevated(), net['id'])

        return net

    def get_networks(self, context, filters=None, fields=None,
@@ -438,6 +498,11 @@ class TricirclePlugin(db_base_plugin_v2.NeutronDbPluginV2,
        self.type_manager.extend_networks_dict_provider(context, nets)
        self._convert_az2region_for_nets(context, nets)

        for net in nets:
            net['qos_policy_id'] = \
                self._get_network_qos_info(context.elevated(), net['id'])

        return nets

    def create_subnet(self, context, subnet):
@@ -561,6 +626,7 @@ class TricirclePlugin(db_base_plugin_v2.NeutronDbPluginV2,
            self._ensure_default_security_group_on_port(context, port)
            sgids = self._get_security_groups_on_port(context, port)
            result = self._make_port_dict(db_port)
            self.extension_manager.process_create_port(context, port_body, result)
            self._process_port_create_security_group(context, result, sgids)
        return result
@@ -671,7 +737,7 @@ class TricirclePlugin(db_base_plugin_v2.NeutronDbPluginV2,
    def update_port(self, context, port_id, port):
        t_ctx = t_context.get_context_from_neutron_context(context)
        top_port = super(TricirclePlugin, self).get_port(context, port_id)
        updated_port = None
        # be careful that l3_db will call update_port to update device_id of
        # router interface, we cannot directly update bottom port in this case,
        # otherwise we will fail when attaching bottom port to bottom router
@@ -679,15 +745,17 @@ class TricirclePlugin(db_base_plugin_v2.NeutronDbPluginV2,
        if t_constants.PROFILE_REGION in port['port'].get(
                'binding:profile', {}):
            # this update request comes from local Neutron
            updated_port = super(TricirclePlugin, self).update_port(context,
                                                                    port_id,
                                                                    port)
            profile_dict = port['port']['binding:profile']
            region_name = profile_dict[t_constants.PROFILE_REGION]
            device_name = profile_dict[t_constants.PROFILE_DEVICE]

            t_ctx = t_context.get_context_from_neutron_context(context)
            pod = db_api.get_pod_by_name(t_ctx, region_name)

            net = self.get_network(context, updated_port['network_id'])
            is_vxlan_network = (
                net[provider_net.NETWORK_TYPE] == t_constants.NT_VxLAN)
            if is_vxlan_network:
@@ -700,20 +768,41 @@ class TricirclePlugin(db_base_plugin_v2.NeutronDbPluginV2,
            # gateway port, but we only need to create resource routing
            # entries, trigger xjob and configure security group rules for
            # instance port
            self._create_mapping_for_vm_port(t_ctx, updated_port, pod)
            self._process_trunk_port(context, t_ctx,
                                     updated_port, pod, profile_dict)
            # only trigger setup_bottom_router job
            self._trigger_router_xjob_for_vm_port(context, updated_port,
                                                  pod)
            self.xjob_handler.configure_security_group_rules(
                t_ctx, updated_port['tenant_id'])
            if is_vxlan_network and (
                    cfg.CONF.client.cross_pod_vxlan_mode in (
                        t_constants.NM_P2P, t_constants.NM_L2GW)):
                self.xjob_handler.setup_shadow_ports(
                    t_ctx, updated_port['tenant_id'], pod['pod_id'],
                    updated_port['network_id'])

            network_binding_policy = \
                policy_object.QosPolicy.get_network_policy(
                    context, updated_port['network_id'])
            port_binding_policy = policy_object.QosPolicy.get_port_policy(
                context, port_id)

            if network_binding_policy:
                t_policy_id = network_binding_policy['id']
                self.xjob_handler.create_qos_policy(
                    t_ctx, t_ctx.project_id, t_policy_id, pod['pod_id'],
                    t_constants.RT_NETWORK, updated_port['network_id'])

            if port_binding_policy:
                t_policy_id = port_binding_policy['id']
                self.xjob_handler.create_qos_policy(
                    t_ctx, t_ctx.project_id, t_policy_id, pod['pod_id'],
                    t_constants.RT_PORT, port_id)

        # for vm port or port with empty device_owner, update top port and
        # bottom port
        elif top_port.get('device_owner') not in NON_VM_PORT_TYPES:
@@ -722,6 +811,9 @@ class TricirclePlugin(db_base_plugin_v2.NeutronDbPluginV2,
            request_body = port[attributes.PORT]
            if mappings:
                with context.session.begin():
                    original_qos_policy_id = \
                        self._get_port_qos_info(context, port_id)

                    b_pod, b_port_id = mappings[0]
                    b_region_name = b_pod['region_name']
                    b_client = self._get_client(region_name=b_region_name)
@@ -733,36 +825,58 @@ class TricirclePlugin(db_base_plugin_v2.NeutronDbPluginV2,
                        self._handle_bottom_security_group(
                            t_ctx, request_body['security_groups'], b_pod)

                    updated_port = super(TricirclePlugin, self).update_port(
                        context, port_id, port)
                    self.extension_manager.process_update_port(
                        context, request_body, updated_port)
                    updated_port = \
                        super(TricirclePlugin, self).get_port(context, port_id)
                    # name is not allowed to be updated, because it is used by
                    # lock_handle to retrieve bottom/local resources that have
                    # been created but not registered in the resource routing
                    # table
                    request_body.pop('name', None)

                    request_body_policy_id = \
                        request_body.get('qos_policy_id', None)
                    if request_body_policy_id:
                        request_body.pop('qos_policy_id')

                    if request_body:
                        try:
                            b_client.update_ports(t_ctx, b_port_id, port)
                        except q_cli_exceptions.NotFound:
                            LOG.error(
                                ('port: %(port_id)s not found, '
                                 'region name: %(name)s'),
                                {'port_id': b_port_id, 'name': b_region_name})

                    if request_body.get('security_groups', None):
                        self.xjob_handler.configure_security_group_rules(
                            t_ctx, updated_port['tenant_id'])

                    updated_port['qos_policy_id'] = request_body_policy_id
                    if request_body_policy_id and \
                            original_qos_policy_id != \
                            request_body_policy_id:
                        t_policy_id = updated_port['qos_policy_id']
                        self.xjob_handler.create_qos_policy(
                            t_ctx, t_ctx.project_id,
                            t_policy_id, b_pod['pod_id'],
                            t_constants.RT_PORT, b_port_id)
            else:
                self._filter_unsupported_attrs(request_body)
                updated_port = super(TricirclePlugin, self).update_port(
                    context, port_id, port)
                self.extension_manager.process_update_port(
                    context, request_body, updated_port)
        else:
            # for router interface, router gw, dhcp port, not directly
            # update bottom port
            updated_port = super(TricirclePlugin, self).update_port(
                context, port_id, port)
        self._log_update_port_sensitive_attrs(port_id, port)
        return updated_port

    def _pre_delete_port(self, context, port_id, port_check):
        """Do some preliminary operations before deleting the port."""
@@ -816,6 +930,10 @@ class TricirclePlugin(db_base_plugin_v2.NeutronDbPluginV2,
                              'value': port_id}])
        super(TricirclePlugin, self).delete_port(context, port_id)

    def _get_port_qos_info(self, context, port_id):
        policy = policy_object.QosPolicy.get_port_policy(context, port_id)
        return policy['id'] if policy else None

    def get_port(self, context, port_id, fields=None):
        t_ctx = t_context.get_context_from_neutron_context(context)
        mappings = db_api.get_bottom_mappings_by_top_id(
@@ -830,28 +948,30 @@ class TricirclePlugin(db_base_plugin_v2.NeutronDbPluginV2,
            if fields:
                port = dict(
                    [(k, v) for k, v in six.iteritems(port) if k in fields])
            if 'network_id' in port or 'fixed_ips' in port:
                bottom_top_map = {}
                with t_ctx.session.begin():
                    for resource in (t_constants.RT_SUBNET,
                                     t_constants.RT_NETWORK,
                                     t_constants.RT_ROUTER):
                        route_filters = [{'key': 'resource_type',
                                          'comparator': 'eq',
                                          'value': resource}]
                        routes = core.query_resource(
                            t_ctx, models.ResourceRouting, route_filters, [])
                        for route in routes:
                            if route['bottom_id']:
                                bottom_top_map[
                                    route['bottom_id']] = route['top_id']
                self._map_port_from_bottom_to_top(port, bottom_top_map)
        else:
            port = super(TricirclePlugin, self).get_port(context,
                                                         port_id, fields)

        port['qos_policy_id'] = \
            self._get_port_qos_info(context, port_id)
        return port

    @staticmethod
    def _apply_ports_filters(query, model, filters):
        if not filters:
@@ -1051,8 +1171,13 @@ class TricirclePlugin(db_base_plugin_v2.NeutronDbPluginV2,
        if not filters or 'id' not in filters:
            # if filter is empty or "id" is not in the filter, no special
            # handle is required
            ports = self._get_ports(context, filters, fields, sorts, limit,
                                    marker, page_reverse)
            for port in ports:
                port['qos_policy_id'] = \
                    self._get_port_qos_info(context, port['id'])
            return ports
        if len(filters) == 1:
            # only "id" is in the filter, we use get_port to get all the ports
            ports = []
@@ -1068,9 +1193,14 @@ class TricirclePlugin(db_base_plugin_v2.NeutronDbPluginV2,
            id_filters = filters.pop('id')
            ports = self._get_ports(context, filters, None, sorts, limit,
                                    marker, page_reverse)
            ports = [super(TricirclePlugin,
                           self)._fields(
                p, fields) for p in ports if p['id'] in id_filters]
            for port in ports:
                port['qos_policy_id'] = \
                    self._get_port_qos_info(context, port['id'])
            return ports

    def _get_ports(self, context, filters=None, fields=None, sorts=None,
                   limit=None, marker=None, page_reverse=False):


@@ -0,0 +1,83 @@
# Copyright 2017 Hunan University.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.objects import ports as ports_object
from neutron.services.qos import qos_plugin
from neutron_lib.api.definitions import portbindings
from oslo_log import log
import tricircle.common.client as t_client
import tricircle.common.constants as t_constants
import tricircle.common.context as t_context
import tricircle.db.api as db_api
LOG = log.getLogger(__name__)
class TricircleQosPlugin(qos_plugin.QoSPlugin):
def __init__(self):
super(TricircleQosPlugin, self).__init__()
self.clients = {'top': t_client.Client()}
def _get_client(self, region_name):
if region_name not in self.clients:
self.clients[region_name] = t_client.Client(region_name)
return self.clients[region_name]
def _get_ports_with_policy(self, context, policy):
networks_ids = policy.get_bound_networks()
ports_with_net_policy = ports_object.Port.get_objects(
context, network_id=networks_ids)
# Filter only these ports which don't have overwritten policy
ports_with_net_policy = [
port for port in ports_with_net_policy if
port.qos_policy_id is None
]
ports_ids = policy.get_bound_ports()
ports_with_policy = ports_object.Port.get_objects(
context, id=ports_ids)
t_ports = list(set(ports_with_policy + ports_with_net_policy))
t_ctx = t_context.get_context_from_neutron_context(context)
for t_port in t_ports:
mappings = db_api.get_bottom_mappings_by_top_id(
t_ctx, t_port.id, t_constants.RT_PORT)
if mappings:
b_pod, b_port_id = mappings[0]
b_region_name = b_pod['region_name']
b_client = self._get_client(region_name=b_region_name)
b_port = b_client.get_ports(t_ctx, b_port_id)
new_binding = ports_object.PortBinding(
port_id=t_port.id,
vif_type=b_port.get('binding:vif_type',
portbindings.VIF_TYPE_UNBOUND),
vnic_type=b_port.get('binding:vnic_type',
portbindings.VNIC_NORMAL)
)
t_port.binding = new_binding
else:
new_binding = ports_object.PortBinding(
port_id=t_port.id,
vif_type=portbindings.VIF_TYPE_UNBOUND,
vnic_type=portbindings.VNIC_NORMAL
)
t_port.binding = new_binding
return t_ports


@@ -245,6 +245,9 @@ class TricirclePlugin(plugin.Ml2Plugin):
                                            net_body['name'])
        if net_id:
            net_body['id'] = net_id

        net_body.pop('qos_policy_id', None)

        b_network = self.core_plugin.create_network(context,
                                                    {'network': net_body})
        return b_network
@@ -349,6 +352,8 @@ class TricirclePlugin(plugin.Ml2Plugin):
                continue
            self._adapt_network_body(network)

            network.pop('qos_policy_id', None)
            b_network = self.core_plugin.create_network(
                context, {'network': network})
            subnet_ids = self._ensure_subnet(context, network)
@@ -391,6 +396,7 @@ class TricirclePlugin(plugin.Ml2Plugin):
        if not t_network:
            raise q_exceptions.NetworkNotFound(net_id=_id)
        self._adapt_network_body(t_network)
        t_network.pop('qos_policy_id', None)
        b_network = self.core_plugin.create_network(context,
                                                    {'network': t_network})
        return t_network, b_network
@@ -594,6 +600,8 @@ class TricirclePlugin(plugin.Ml2Plugin):
        self._handle_security_group(t_ctx, context, t_port)
        self._create_shadow_agent(context, port_body)

        t_port.pop('qos_policy_id', None)
        b_port = self.core_plugin.create_port(context, {'port': t_port})
        return b_port
@@ -779,6 +787,7 @@ class TricirclePlugin(plugin.Ml2Plugin):
            self._ensure_network_subnet(context, t_port)
            self._adapt_port_body_for_call(t_port)
            self._handle_security_group(t_ctx, context, t_port)
            t_port.pop('qos_policy_id', None)
            b_port = self.core_plugin.create_port(context, {'port': t_port})
            self._ensure_trunk(context, t_ctx, _id)
@@ -822,6 +831,7 @@ class TricirclePlugin(plugin.Ml2Plugin):
                self._ensure_network_subnet(context, port)
                self._adapt_port_body_for_call(port)
                self._handle_security_group(t_ctx, context, port)
                port.pop('qos_policy_id', None)
                b_port = self.core_plugin.create_port(context,
                                                      {'port': port})
                b_ports.append(self._fields(b_port, fields))


@@ -0,0 +1,144 @@
# Copyright 2017 Hunan University Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from neutron_lib.api.definitions import portbindings
from neutron_lib import constants
from neutron_lib.db import constants as db_constants
from neutron_lib.services.qos import base
from neutron_lib.services.qos import constants as qos_consts
from oslo_log import log as logging
from tricircle.common import constants as t_constants
from tricircle.common import context
from tricircle.common import xrpcapi
from tricircle.db import api as db_api
LOG = logging.getLogger(__name__)
DRIVER = None
SUPPORTED_RULES = {
qos_consts.RULE_TYPE_BANDWIDTH_LIMIT: {
qos_consts.MAX_KBPS: {
'type:range': [0, db_constants.DB_INTEGER_MAX_VALUE]},
qos_consts.MAX_BURST: {
'type:range': [0, db_constants.DB_INTEGER_MAX_VALUE]},
qos_consts.DIRECTION: {
'type:values': constants.VALID_DIRECTIONS}
},
qos_consts.RULE_TYPE_DSCP_MARKING: {
qos_consts.DSCP_MARK: {'type:values': constants.VALID_DSCP_MARKS}
},
qos_consts.RULE_TYPE_MINIMUM_BANDWIDTH: {
qos_consts.MIN_KBPS: {
'type:range': [0, db_constants.DB_INTEGER_MAX_VALUE]},
qos_consts.DIRECTION: {'type:values': [constants.EGRESS_DIRECTION]}
}
}
VIF_TYPES = [portbindings.VIF_TYPE_OVS,
portbindings.VIF_TYPE_VHOST_USER,
portbindings.VIF_TYPE_UNBOUND]
class TricircleQoSDriver(base.DriverBase):
def __init__(self, name, vif_types, vnic_types,
supported_rules,
requires_rpc_notifications):
super(TricircleQoSDriver, self).__init__(name, vif_types, vnic_types,
supported_rules,
requires_rpc_notifications)
self.xjob_handler = xrpcapi.XJobAPI()
@staticmethod
def create():
return TricircleQoSDriver(
name='tricircle',
vif_types=VIF_TYPES,
vnic_types=portbindings.VNIC_TYPES,
supported_rules=SUPPORTED_RULES,
requires_rpc_notifications=False)
def create_policy(self, q_context, policy):
"""Create policy invocation.
:param q_context: current running context information
:param policy: a QoSPolicy object being created, which will have no
rules.
"""
pass
def create_policy_precommit(self, q_context, policy):
"""Create policy precommit.
:param q_context: current running context information
:param policy: a QoSPolicy object being created, which will have no
rules.
"""
pass
def update_policy(self, q_context, policy):
"""Update policy invocation.
:param q_context: current running context information
:param policy: a QoSPolicy object being updated.
"""
pass
def update_policy_precommit(self, q_context, policy):
"""Update policy precommit.
:param q_context: current running context information
:param policy: a QoSPolicy object being updated.
"""
t_context = context.get_context_from_neutron_context(q_context)
policy_id = policy['id']
mappings = db_api.get_bottom_mappings_by_top_id(
t_context, policy_id, t_constants.RT_QOS)
if mappings:
self.xjob_handler.update_qos_policy(
t_context, t_context.project_id, policy_id,
t_constants.POD_NOT_SPECIFIED)
self.xjob_handler.sync_qos_policy_rules(
t_context, t_context.project_id, policy_id)
def delete_policy(self, q_context, policy):
"""Delete policy invocation.
:param q_context: current running context information
:param policy: a QoSPolicy object being deleted
"""
def delete_policy_precommit(self, q_context, policy):
"""Delete policy precommit.
:param q_context: current running context information
:param policy: a QoSPolicy object being deleted
"""
t_context = context.get_context_from_neutron_context(q_context)
policy_id = policy['id']
self.xjob_handler.delete_qos_policy(
t_context, t_context.project_id, policy_id,
t_constants.POD_NOT_SPECIFIED)
def register():
"""Register the driver."""
global DRIVER
if not DRIVER:
DRIVER = TricircleQoSDriver.create()
LOG.debug('Tricircle QoS driver registered')


@@ -48,12 +48,15 @@ function _setup_tricircle_multinode {
        export DEVSTACK_LOCAL_CONFIG+=$'\n'"TRICIRCLE_START_SERVICES=True"
        export DEVSTACK_LOCAL_CONFIG+=$'\n'"TRICIRCLE_ENABLE_TRUNK=True"
        export DEVSTACK_LOCAL_CONFIG+=$'\n'"TRICIRCLE_ENABLE_SFC=True"
        export DEVSTACK_LOCAL_CONFIG+=$'\n'"TRICIRCLE_ENABLE_QOS=True"
        export DEVSTACK_LOCAL_CONFIG+=$'\n'"REGION_NAME=RegionOne"
        export DEVSTACK_LOCAL_CONFIG+=$'\n'"HOST_IP=$PRIMARY_NODE_IP"

        ML2_CONFIG=$'\n'"ML2_L3_PLUGIN=tricircle.network.local_l3_plugin.TricircleL3Plugin"
        ML2_CONFIG+=$'\n'"ML2_L3_PLUGIN+=,neutron.services.qos.qos_plugin.QoSPlugin"
        ML2_CONFIG+=$'\n'"[[post-config|/"'$Q_PLUGIN_CONF_FILE]]'
        ML2_CONFIG+=$'\n'"[ml2]"
        ML2_CONFIG+=$'\n'"extension_drivers = port_security,qos"
        ML2_CONFIG+=$'\n'"mechanism_drivers = openvswitch,linuxbridge,l2population"
        ML2_CONFIG+=$'\n'"[agent]"
        ML2_CONFIG+=$'\n'"extensions=sfc"


@@ -0,0 +1,861 @@
- task_set_id: preparation
tasks:
- task_id: policy1
type: qos_policy
region: central
params:
name: policy1
- task_id: bandwidth_limit_rule1
region: central
type: qos_bandwidth_limit_rule
depend: [policy1]
params:
max_kbps: 3000
max_burst_kbps: 300
qos_policy: policy1@id
- task_id: policy2
type: qos_policy
region: central
params:
name: policy2
- task_id: bandwidth_limit_rule2
region: central
type: qos_bandwidth_limit_rule
depend: [policy2]
params:
max_kbps: 3000
max_burst_kbps: 300
qos_policy: policy2@id
- task_id: policy3
type: qos_policy
region: central
params:
name: policy3
- task_id: policy4
type: qos_policy
region: central
params:
name: policy4
- task_id: policy5
type: qos_policy
region: central
params:
name: policy5
- task_id: bandwidth_limit_rule5
region: central
type: qos_bandwidth_limit_rule
depend: [policy5]
params:
max_kbps: 3000
max_burst_kbps: 300
qos_policy: policy5@id
- task_id: dscp_marking_rule1
region: central
type: qos_dscp_marking_rule
depend: [policy1]
params:
dscp_mark: 30
qos_policy: policy1@id
- task_id: net1
region: central
type: network
params:
name: net1
- task_id: subnet1
region: central
type: subnet
depend: [net1]
params:
name: subnet1
ip_version: 4
cidr: 10.0.1.0/24
network_id: net1@id
- task_id: port1
region: central
type: port
depend:
- net1
- subnet1
params:
name: port1
network_id: net1@id
- task_id: net2
region: central
type: network
params:
name: net2
- task_id: subnet2
region: central
type: subnet
depend: [net2]
params:
name: subnet2
ip_version: 4
cidr: 10.0.2.0/24
network_id: net2@id
- task_id: port2
region: central
type: port
depend:
- net2
- subnet2
params:
name: port2
network_id: net2@id
- task_id: net3
region: central
type: network
params:
name: net3
- task_id: subnet3
region: central
type: subnet
depend: [net3]
params:
name: subnet3
ip_version: 4
cidr: 10.0.3.0/24
network_id: net3@id
- task_id: port3
region: central
type: port
depend:
- net3
- subnet3
params:
name: port3
network_id: net3@id
- task_id: net4
region: central
type: network
params:
name: net4
- task_id: subnet4
region: central
type: subnet
depend: [net4]
params:
name: subnet4
ip_version: 4
cidr: 10.0.4.0/24
network_id: net4@id
- task_id: port4
region: central
type: port
depend:
- net4
- subnet4
params:
name: port4
network_id: net4@id
- task_id: net5
region: central
type: network
params:
name: net5
- task_id: image1
region: region1
type: image
query:
get_one: true
- task_set_id: check_qos_create
depend: [preparation]
tasks:
- task_id: check_policy1_central
region: central
type: qos_policy
validate:
predicate: any
condition:
- name: policy1
- task_id: check_bandwidth_limit_rule1
region: central
type: qos_bandwidth_limit_rule
params:
qos_policy: preparation@policy1@id
validate:
predicate: any
condition:
- id: preparation@bandwidth_limit_rule1@id
- task_id: check_dscp_marking_rule1
region: central
type: qos_dscp_marking_rule
params:
qos_policy: preparation@policy1@id
validate:
predicate: any
condition:
- id: preparation@dscp_marking_rule1@id
- task_id: check_policy1_region
region: region1
type: qos_policy
validate:
predicate: all
condition:
- name: invalid-name
- task_set_id: policy_update_only_central
depend: [preparation]
tasks:
- task_id: policy1_update_only_central
region: central
type: qos_policy
action:
target: preparation@policy1@id
method: update
params:
name: policy1_update_only_central
- task_id: bandwidth_limit_rule1_update_only_central
region: central
type: qos_bandwidth_limit_rule
action:
target: preparation@bandwidth_limit_rule1@id
method: update
params:
qos_policy: preparation@policy1@id
max_kbps: 4000
- task_id: dscp_marking_rule1_update_only_central
region: central
type: qos_dscp_marking_rule
action:
target: preparation@dscp_marking_rule1@id
method: update
params:
qos_policy: preparation@policy1@id
dscp_mark: 40
- task_set_id: check_qos_update_only_central
depend: [preparation]
tasks:
- task_id: check_policy1_update_only_central
region: central
type: qos_policy
validate:
predicate: any
condition:
- name: policy1_update_only_central
- task_id: check_limit_rule1_update_only_central
region: central
type: qos_bandwidth_limit_rule
params:
qos_policy: preparation@policy1@id
validate:
predicate: any
condition:
- id: preparation@bandwidth_limit_rule1@id
max_kbps: 4000
- task_id: check_dscp_rule1_update_only_central
region: central
type: qos_dscp_marking_rule
params:
qos_policy: preparation@policy1@id
validate:
predicate: any
condition:
- id: preparation@dscp_marking_rule1@id
dscp_mark: 40
- task_set_id: central_bound_policy
depend: [preparation]
tasks:
- task_id: net1_policy
region: central
type: network
action:
target: preparation@net1@id
method: update
params:
qos_policy_id: preparation@policy1@id
- task_id: net5_policy
region: central
type: network
action:
target: preparation@net5@id
method: update
params:
qos_policy_id: preparation@policy5@id
- task_id: port3_policy
region: central
type: port
action:
target: preparation@port3@id
method: update
params:
qos_policy_id: preparation@policy3@id
- task_set_id: create_vm
depend: [preparation]
tasks:
- task_id: vm1
region: region1
type: server
params:
flavor_id: 1
image_id: preparation@image1@id
name: vm1
networks:
- uuid: preparation@net1@id
port: preparation@port1@id
- task_id: vm2
region: region1
type: server
params:
flavor_id: 1
image_id: preparation@image1@id
name: vm2
networks:
- uuid: preparation@net2@id
- task_id: vm3
region: region1
type: server
params:
flavor_id: 1
image_id: preparation@image1@id
name: vm3
networks:
- uuid: preparation@net3@id
port: preparation@port3@id
- task_id: vm4
region: region1
type: server
params:
flavor_id: 1
image_id: preparation@image1@id
name: vm4
networks:
- uuid: preparation@net4@id
port: preparation@port4@id
- task_set_id: check_vm
depend: [preparation]
tasks:
- task_id: check_vm1
region: region1
type: server
validate:
predicate: any
retries: 10
condition:
- status: ACTIVE
name: vm1
- task_id: check_vm2
region: region1
type: server
validate:
predicate: any
retries: 10
condition:
- status: ACTIVE
name: vm2
- task_id: check_vm3
region: region1
type: server
validate:
predicate: any
retries: 10
condition:
- status: ACTIVE
name: vm3
- task_id: check_vm4
region: region1
type: server
validate:
predicate: any
retries: 10
condition:
- status: ACTIVE
name: vm4
- task_set_id: wait_for_vm
tasks:
- task_id: check_job_vm
region: central
type: job
validate:
predicate: all
retries: 10
condition:
- status: SUCCESS
- task_set_id: local_bound_policy
depend: [preparation]
tasks:
- task_id: net2_policy
region: central
type: network
action:
target: preparation@net2@id
method: update
params:
qos_policy_id: preparation@policy2@id
- task_id: port4_policy
region: central
type: port
action:
target: preparation@port4@id
method: update
params:
qos_policy_id: preparation@policy4@id
- task_set_id: wait_for_bound
tasks:
- task_id: check_job_bound
region: central
type: job
validate:
predicate: all
retries: 10
condition:
- status: SUCCESS
- task_set_id: check_bound_policy
depend: [preparation]
tasks:
- task_id: check_net1_policy_central
region: central
type: network
validate:
predicate: any
condition:
- qos_policy_id: preparation@policy1@id
- task_id: check_net2_policy_central
region: central
type: network
validate:
predicate: any
condition:
- qos_policy_id: preparation@policy2@id
- task_id: check_net5_policy_central
region: central
type: network
validate:
predicate: any
condition:
- qos_policy_id: preparation@policy5@id
- task_id: check_port3_policy_central
region: central
type: port
validate:
predicate: any
condition:
- qos_policy_id: preparation@policy3@id
- task_id: check_port4_policy_central
region: central
type: port
validate:
predicate: any
condition:
- qos_policy_id: preparation@policy4@id
- task_id: check_policy1_region
region: region1
type: qos_policy
validate:
predicate: any
condition:
- name: policy1_update_only_central
- task_id: check_policy2_region
region: region1
type: qos_policy
validate:
predicate: any
condition:
- name: policy2
- task_id: check_policy3_region
region: region1
type: qos_policy
validate:
predicate: any
condition:
- name: policy3
- task_id: check_policy4_region
region: region1
type: qos_policy
validate:
predicate: any
condition:
- name: policy4
- task_set_id: policy_update_with_local
depend: [preparation]
tasks:
- task_id: policy4_update_with_local
region: central
type: qos_policy
action:
target: preparation@policy4@id
method: update
params:
name: policy4_update_with_local
- task_id: bandwidth_limit_rule2_update_with_local
region: central
type: qos_bandwidth_limit_rule
action:
target: preparation@bandwidth_limit_rule2@id
method: update
params:
qos_policy: preparation@policy2@id
max_kbps: 5000
- task_set_id: wait_for_job_update
tasks:
- task_id: check_job_update
region: central
type: job
validate:
predicate: all
retries: 10
condition:
- status: SUCCESS
- task_set_id: check_qos_update_with_local
depend: [preparation]
tasks:
- task_id: check_policy4_update_with_local
region: central
type: qos_policy
validate:
predicate: any
condition:
- name: policy4_update_with_local
- task_id: check_policy4_update_region
region: region1
type: qos_policy
validate:
predicate: any
condition:
- name: policy4_update_with_local
- task_id: check_limit_rule2_update_with_local
region: central
type: qos_bandwidth_limit_rule
params:
qos_policy: preparation@policy2@id
validate:
predicate: any
condition:
- id: preparation@bandwidth_limit_rule2@id
max_kbps: 5000
- task_set_id: unbound_policy
depend: [preparation]
tasks:
- task_id: net1_no_policy
region: central
type: network
action:
target: preparation@net1@id
method: update
params:
qos_policy_id:
- task_id: net2_no_policy
region: central
type: network
action:
target: preparation@net2@id
method: update
params:
qos_policy_id:
- task_id: port3_no_policy
region: central
type: port
action:
target: preparation@port3@id
method: update
params:
qos_policy_id:
- task_id: port4_no_policy
region: central
type: port
action:
target: preparation@port4@id
method: update
params:
qos_policy_id:
- task_id: net5_no_policy
region: central
type: network
action:
target: preparation@net5@id
method: update
params:
qos_policy_id:
- task_set_id: wait_for_qos_unbound
tasks:
- task_id: check_job_qos_unbound
region: central
type: job
validate:
predicate: all
retries: 10
condition:
- status: SUCCESS
- task_set_id: qos-rule-delete
depend: [preparation]
tasks:
- task_id: bandwidth_limit_rule1_delete
region: central
type: qos_bandwidth_limit_rule
action:
target: preparation@bandwidth_limit_rule1@id
method: delete
params:
qos_policy: preparation@policy1@id
- task_id: bandwidth_limit_rule2_delete
region: central
type: qos_bandwidth_limit_rule
action:
target: preparation@bandwidth_limit_rule2@id
method: delete
params:
qos_policy: preparation@policy2@id
- task_id: dscp_marking_rule1_delete
region: central
type: qos_dscp_marking_rule
action:
target: preparation@dscp_marking_rule1@id
method: delete
params:
qos_policy: preparation@policy1@id
- task_id: bandwidth_limit_rule5_delete
region: central
type: qos_bandwidth_limit_rule
action:
target: preparation@bandwidth_limit_rule5@id
method: delete
params:
qos_policy: preparation@policy5@id
- task_set_id: wait_for_rule_delete
depend: [preparation]
tasks:
- task_id: check_job_rule_delete
region: central
type: job
validate:
predicate: all
retries: 10
condition:
- status: SUCCESS
- task_id: check_for_bandwidth_limit1_delete_central
region: central
type: qos_bandwidth_limit_rule
params:
qos_policy: preparation@policy1@id
validate:
predicate: all
retries: 10
condition:
id: invalid-id
- task_id: check_for_bandwidth_limit2_delete_central
region: central
type: qos_bandwidth_limit_rule
params:
qos_policy: preparation@policy2@id
validate:
predicate: all
retries: 10
condition:
id: invalid-id
- task_id: check_for_bandwidth_limit5_delete_central
region: central
type: qos_bandwidth_limit_rule
params:
qos_policy: preparation@policy5@id
validate:
predicate: all
retries: 10
condition:
id: invalid-id
- task_id: check_for_dscp_marking1_delete_central
region: central
type: qos_dscp_marking_rule
params:
qos_policy: preparation@policy1@id
validate:
predicate: all
retries: 10
condition:
id: invalid-id
- task_set_id: qos-policy-delete
depend: [preparation]
tasks:
- task_id: policy1_delete
region: central
type: qos_policy
action:
target: preparation@policy1@id
method: delete
- task_id: policy2_delete
region: central
type: qos_policy
action:
target: preparation@policy2@id
method: delete
- task_id: policy3_delete
region: central
type: qos_policy
action:
target: preparation@policy3@id
method: delete
- task_id: policy4_delete
region: central
type: qos_policy
action:
target: preparation@policy4@id
method: delete
- task_id: policy5_delete
region: central
type: qos_policy
action:
target: preparation@policy5@id
method: delete
- task_set_id: wait_for_policy_delete
tasks:
- task_id: check_for_policy_delete_central
region: central
type: qos_policy
validate:
predicate: all
retries: 10
condition:
name: invalid-name
- task_id: check_for_policy_delete_region
region: region1
type: qos_policy
validate:
predicate: all
retries: 10
condition:
name: invalid-name
- task_set_id: delete_vm
depend: [create_vm]
tasks:
- task_id: delete_vm1
region: region1
type: server
action:
target: create_vm@vm1@id
method: delete
- task_id: delete_vm2
region: region1
type: server
action:
target: create_vm@vm2@id
method: delete
- task_id: delete_vm3
region: region1
type: server
action:
target: create_vm@vm3@id
method: delete
- task_id: delete_vm4
region: region1
type: server
action:
target: create_vm@vm4@id
method: delete
- task_set_id: wait_for_vm_delete
tasks:
- task_id: check_for_vm_delete
region: region1
type: server
validate:
predicate: all
retries: 10
condition:
name: invalid-name
- task_set_id: delete_net
depend: [preparation]
tasks:
- task_id: delete_port1
region: central
type: port
action:
target: preparation@port1@id
method: delete
- task_id: delete_port2
region: central
type: port
action:
target: preparation@port2@id
method: delete
- task_id: delete_port3
region: central
type: port
action:
target: preparation@port3@id
method: delete
- task_id: delete_port4
region: central
type: port
action:
target: preparation@port4@id
method: delete
- task_id: delete_subnet1
region: central
type: subnet
depend: [delete_port1]
action:
target: preparation@subnet1@id
method: delete
retries: 3
- task_id: delete_subnet2
region: central
type: subnet
depend: [delete_port2]
action:
target: preparation@subnet2@id
method: delete
retries: 3
- task_id: delete_subnet3
region: central
type: subnet
depend: [delete_port3]
action:
target: preparation@subnet3@id
method: delete
retries: 3
- task_id: delete_subnet4
region: central
type: subnet
depend: [delete_port4]
action:
target: preparation@subnet4@id
method: delete
retries: 3
- task_id: delete_net1
region: central
type: network
depend: [delete_subnet1]
action:
target: preparation@net1@id
method: delete
- task_id: delete_net2
region: central
type: network
depend: [delete_subnet2]
action:
target: preparation@net2@id
method: delete
- task_id: delete_net3
region: central
type: network
depend: [delete_subnet3]
action:
target: preparation@net3@id
method: delete
- task_id: delete_net4
region: central
type: network
depend: [delete_subnet4]
action:
target: preparation@net4@id
method: delete
- task_id: delete_net5
region: central
type: network
action:
target: preparation@net5@id
method: delete
- task_set_id: check_net_delete
tasks:
- task_id: check_net_delete_job
region: region1
type: network
validate:
predicate: all
condition:
- name: invalid-name
- task_id: check-jobs
region: central
type: job
validate:
predicate: all
retries: 10
condition:
- status: SUCCESS
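The validate blocks above follow a small convention: "predicate" selects between any/all matching over the queried resources, each entry under "condition" is a set of field/value pairs, and "retries" re-runs the check until it passes. The following is a minimal sketch of that evaluation, not the actual runner code; it assumes 'any' means at least one queried resource satisfies a condition and 'all' means every one does.

import time


def match_condition(resource, condition):
    # A resource satisfies a condition when every listed field matches.
    return all(resource.get(key) == value for key, value in condition.items())


def validate(resources, predicate, conditions, retries=0, interval=0):
    combine = any if predicate == 'any' else all
    for _ in range(retries + 1):
        # every condition in the list must hold under the chosen predicate
        if all(combine(match_condition(res, cond) for res in resources)
               for cond in conditions):
            return True
        time.sleep(interval)
    return False


# Mirrors check_net1_policy_central: the central network carries policy1.
nets = [{'id': 'net1-id', 'qos_policy_id': 'policy1-id'}]
print(validate(nets, 'any', [{'qos_policy_id': 'policy1-id'}]))  # True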

View File

@ -25,3 +25,8 @@ fi
#if [ $? != 0 ]; then
# die $LINENO "Smoke test fails, error in service function chain test"
#fi
echo "Start to run qos policy function test"
python run_yaml_test.py qos_policy_rule_test.yaml "$OS_AUTH_URL" "$OS_TENANT_NAME" "$OS_USERNAME" "$OS_PASSWORD"
if [ $? != 0 ]; then
die $LINENO "Smoke test fails, error in qos policy function test"
fi

View File

@ -77,12 +77,15 @@ class SDKRunner(object):
serv_reslist_map = {
'network_sdk': ['network', 'subnet', 'port', 'router', 'fip', 'trunk',
'flow_classifier', 'port_pair', 'port_pair_group',
'port_chain', 'qos_policy', 'qos_bandwidth_limit_rule',
'qos_dscp_marking_rule', 'qos_minimum_bandwidth_rule'],
'compute': ['server'],
'image': ['image'],
'tricircle_sdk': ['job']}
res_alias_map = {
'fip': 'ip'}
type_plural_map = {
'qos_policy': 'qos_policie'}
def __init__(self, auth_url, project, username, password):
self.res_serv_map = {}
@ -133,6 +136,7 @@ class SDKRunner(object):
serv = self.res_serv_map[_type]
_type = self.res_alias_map.get(_type, _type)
proxy = getattr(conn, serv)
_type = self.type_plural_map.get(_type, _type)
_list = list(getattr(proxy, '%ss' % _type)(**params))
if get_one:
return _list[0]
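The listing call above is built as getattr(proxy, '%ss' % _type), so a plain 's' suffix would turn 'qos_policy' into the non-existent 'qos_policys'; type_plural_map rewrites the type to 'qos_policie' first so the lookup resolves to the SDK proxy's qos_policies() call. A small standalone illustration of that name derivation (the helper name here is hypothetical):

res_alias_map = {'fip': 'ip'}
type_plural_map = {'qos_policy': 'qos_policie'}


def proxy_list_attr(_type):
    # Reproduce the attribute-name derivation used when listing resources.
    _type = res_alias_map.get(_type, _type)
    _type = type_plural_map.get(_type, _type)
    return '%ss' % _type


print(proxy_list_attr('network'))     # networks
print(proxy_list_attr('qos_policy'))  # qos_policies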

View File

@ -38,6 +38,9 @@ from neutron.db import ipam_pluggable_backend
from neutron.db import l3_db
from neutron.db import models_v2
from neutron.db import rbac_db_models as rbac_db
from neutron.services.qos.drivers import manager as q_manager
from neutron.plugins.ml2 import managers as n_managers
from neutron.ipam import driver
from neutron.ipam import exceptions as ipam_exc
@ -60,12 +63,15 @@ import tricircle.db.api as db_api
from tricircle.db import core
from tricircle.db import models
import tricircle.network.central_plugin as plugin
from tricircle.network import central_qos_plugin
from tricircle.network.drivers import type_flat
from tricircle.network.drivers import type_local
from tricircle.network.drivers import type_vlan
from tricircle.network.drivers import type_vxlan
from tricircle.network import helper
from tricircle.network import managers
from tricircle.network import qos_driver
from tricircle.tests.unit.network import test_qos
from tricircle.tests.unit.network import test_security_groups
import tricircle.tests.unit.utils as test_utils
from tricircle.xjob import xmanager
@ -84,18 +90,24 @@ TOP_SEGMENTS = _resource_store.TOP_NETWORKSEGMENTS
TOP_FLOATINGIPS = _resource_store.TOP_FLOATINGIPS
TOP_SGS = _resource_store.TOP_SECURITYGROUPS
TOP_SG_RULES = _resource_store.TOP_SECURITYGROUPRULES
TOP_POLICIES = _resource_store.TOP_QOS_POLICIES
TOP_POLICY_RULES = _resource_store.TOP_QOS_BANDWIDTH_LIMIT_RULES
BOTTOM1_NETS = _resource_store.BOTTOM1_NETWORKS
BOTTOM1_SUBNETS = _resource_store.BOTTOM1_SUBNETS
BOTTOM1_PORTS = _resource_store.BOTTOM1_PORTS
BOTTOM1_SGS = _resource_store.BOTTOM1_SECURITYGROUPS
BOTTOM1_FIPS = _resource_store.BOTTOM1_FLOATINGIPS
BOTTOM1_ROUTERS = _resource_store.BOTTOM1_ROUTERS
BOTTOM1_POLICIES = _resource_store.BOTTOM1_QOS_POLICIES
BOTTOM1_POLICY_RULES = _resource_store.BOTTOM1_QOS_BANDWIDTH_LIMIT_RULES
BOTTOM2_NETS = _resource_store.BOTTOM2_NETWORKS
BOTTOM2_SUBNETS = _resource_store.BOTTOM2_SUBNETS
BOTTOM2_PORTS = _resource_store.BOTTOM2_PORTS
BOTTOM2_SGS = _resource_store.BOTTOM2_SECURITYGROUPS
BOTTOM2_FIPS = _resource_store.BOTTOM2_FLOATINGIPS
BOTTOM2_ROUTERS = _resource_store.BOTTOM2_ROUTERS
BOTTOM2_POLICIES = _resource_store.BOTTOM2_QOS_POLICIES
BOTTOM2_POLICY_RULES = _resource_store.BOTTOM2_QOS_BANDWIDTH_LIMIT_RULES
TEST_TENANT_ID = test_utils.TEST_TENANT_ID
FakeNeutronContext = test_utils.FakeNeutronContext
@ -271,7 +283,9 @@ class FakeClient(test_utils.FakeClient):
if 'gateway_ip' not in body[_type]:
cidr = body[_type]['cidr']
body[_type]['gateway_ip'] = cidr[:cidr.rindex('.')] + '.1'
if _type == 'qos_policy':
body['policy']['id'] = uuidutils.generate_uuid()
elif 'id' not in body[_type]:
body[_type]['id'] = uuidutils.generate_uuid()
return super(FakeClient, self).create_resources(_type, ctx, body)
@ -419,7 +433,7 @@ class FakeClient(test_utils.FakeClient):
elif action == 'remove_interface':
return self.remove_interface_routers(ctx, *args, **kwargs)
def _is_bridge_network_attached(self):
pass
def create_floatingips(self, ctx, body):
@ -484,7 +498,86 @@ class FakeClient(test_utils.FakeClient):
# group
return copy.deepcopy(sg)
def get_security_group(self, ctx, _id, fields=None, tenant_id=None):
pass
def get_qos_policies(self, ctx, policy_id):
rules = {'rules': []}
rule_list = \
self._res_map[self.region_name]['qos_bandwidth_limit_rules']
for rule in rule_list:
if rule['qos_policy_id'] == policy_id:
rules['rules'].append(rule)
res_list = self._res_map[self.region_name]['qos_policy']
for policy in res_list:
if policy['id'] == policy_id:
policy['rules'] = rules['rules']
return policy
def update_qos_policies(self, ctx, policy_id, body):
self.update_resources('policy', ctx, policy_id, body)
def delete_qos_policies(self, ctx, policy_id):
self.delete_resources('policy', ctx, policy_id)
def list_bandwidth_limit_rules(self, ctx, filters):
policy_id = filters[0].get("value")
if self.region_name == 'top':
res_list = \
self._res_map[self.region_name]['qos_bandwidth_limit_rules']
else:
res_list = self._res_map[self.region_name]['qos_policy']
for policy in res_list:
if policy['id'] == policy_id:
res_list = policy.get('rules', [])
ret_rules = []
for rule in res_list:
if rule['qos_policy_id'] == policy_id:
ret_rules.append(rule)
return ret_rules
def list_dscp_marking_rules(self, ctx, filters):
return []
def list_minimum_bandwidth_rules(self, ctx, filters):
return []
def create_bandwidth_limit_rules(self, ctx, policy_id, body):
res_list = self._res_map[self.region_name]['qos_policy']
for policy in res_list:
if policy['id'] == policy_id:
rule_id = uuidutils.generate_uuid()
body['bandwidth_limit_rule']['id'] = rule_id
body['bandwidth_limit_rule']['qos_policy_id'] = policy_id
policy['rules'].append(body['bandwidth_limit_rule'])
return body
raise q_exceptions.Conflict()
def create_dscp_marking_rules(self, ctx, policy_id, body):
pass
def create_minimum_bandwidth_rules(self, ctx, policy_id, body):
pass
def delete_bandwidth_limit_rules(self, ctx, combined_id):
(rule_id, policy_id) = combined_id.split('#')
res_list = self._res_map[self.region_name]['qos_policy']
for policy in res_list:
if policy['id'] == policy_id:
for rule in policy['rules']:
if rule['id'] == rule_id:
policy['rules'].remove(rule)
return
raise q_exceptions.Conflict()
def delete_dscp_marking_rules(self, ctx, combined_id):
pass
def delete_minimum_bandwidth_rules(self, ctx, combined_id):
pass
def list_security_groups(self, ctx, sg_filters):
@ -585,6 +678,26 @@ class FakeBaseRPCAPI(object):
def setup_shadow_ports(self, ctxt, project_id, pod_id, net_id):
pass
def create_qos_policy(self, ctxt, project_id, policy_id, pod_id,
res_type, res_id=None):
combine_id = '%s#%s#%s#%s' % (pod_id, policy_id, res_type, res_id)
self.xmanager.create_qos_policy(
ctxt, payload={constants.JT_QOS_CREATE: combine_id})
def update_qos_policy(self, ctxt, project_id, policy_id, pod_id):
combine_id = '%s#%s' % (pod_id, policy_id)
self.xmanager.update_qos_policy(
ctxt, payload={constants.JT_QOS_UPDATE: combine_id})
def delete_qos_policy(self, ctxt, project_id, policy_id, pod_id):
combine_id = '%s#%s' % (pod_id, policy_id)
self.xmanager.delete_qos_policy(
ctxt, payload={constants.JT_QOS_DELETE: combine_id})
def sync_qos_policy_rules(self, ctxt, project_id, policy_id):
self.xmanager.sync_qos_policy_rules(
ctxt, payload={constants.JT_SYNC_QOS_RULE: policy_id})
class FakeRPCAPI(FakeBaseRPCAPI):
def __init__(self, fake_plugin):
@ -659,12 +772,46 @@ class FakeTypeManager(managers.TricircleTypeManager):
break
class FakeExtensionManager(n_managers.ExtensionManager):
def __init__(self):
super(FakeExtensionManager, self).__init__()
class FakeTricircleQoSDriver(qos_driver.TricircleQoSDriver):
def __init__(self, name, vif_types, vnic_types,
supported_rules,
requires_rpc_notifications):
super(FakeTricircleQoSDriver, self).__init__(
name, vif_types, vnic_types, supported_rules,
requires_rpc_notifications)
self.xjob_handler = FakeRPCAPI(self)
@staticmethod
def create():
return FakeTricircleQoSDriver(
name='tricircle',
vif_types=qos_driver.VIF_TYPES,
vnic_types=portbindings.VNIC_TYPES,
supported_rules=qos_driver.SUPPORTED_RULES,
requires_rpc_notifications=False)
class FakeQosServiceDriverManager(q_manager.QosServiceDriverManager):
def __init__(self):
self._drivers = [FakeTricircleQoSDriver.create()]
self.rpc_notifications_required = False
class FakePlugin(plugin.TricirclePlugin,
central_qos_plugin.TricircleQosPlugin):
def __init__(self):
self.set_ipam_backend()
self.helper = FakeHelper(self)
self.xjob_handler = FakeRPCAPI(self)
self.type_manager = FakeTypeManager()
self.extension_manager = FakeExtensionManager()
self.extension_manager.initialize()
self.driver_manager = FakeQosServiceDriverManager()
def _get_client(self, region_name):
return FakeClient(region_name)
@ -809,7 +956,8 @@ class FakeTrunkPlugin(object):
class PluginTest(unittest.TestCase,
test_security_groups.TricircleSecurityGroupTestMixin,
test_qos.TricircleQosTestMixin):
def setUp(self):
core.initialize()
core.ModelBase.metadata.create_all(core.get_engine())
@ -909,10 +1057,14 @@ class PluginTest(unittest.TestCase,
port2 = fake_plugin.get_port(neutron_context, 'top_id_2')
fake_plugin.get_port(neutron_context, 'top_id_3')
self.assertEqual({'id': 'top_id_1', 'name': 'bottom',
'qos_policy_id': None}, port1)
self.assertEqual({'id': 'top_id_2', 'name': 'bottom',
'qos_policy_id': None}, port2)
calls = [mock.call(neutron_context, 'top_id_0', None),
mock.call().__setitem__('qos_policy_id', None),
mock.call(neutron_context, 'top_id_3', None),
mock.call().__setitem__('qos_policy_id', None)]
mock_plugin_method.assert_has_calls(calls)
@patch.object(context, 'get_context_from_neutron_context',
@ -934,11 +1086,15 @@ class PluginTest(unittest.TestCase,
marker=ports3[-1]['id'])
ports = []
expected_ports = [{'id': 'top_id_0', 'name': 'top',
'qos_policy_id': None,
'fixed_ips': [{'subnet_id': 'top_subnet_id',
'ip_address': '10.0.0.1'}]},
{'id': 'top_id_1', 'name': 'bottom',
'qos_policy_id': None},
{'id': 'top_id_2', 'name': 'bottom',
'qos_policy_id': None},
{'id': 'top_id_3', 'name': 'top',
'qos_policy_id': None}]
for _ports in (ports1, ports2, ports3, ports4):
ports.extend(_ports)
six.assertCountEqual(self, expected_ports, ports)
@ -963,9 +1119,11 @@ class PluginTest(unittest.TestCase,
ports3 = fake_plugin.get_ports(neutron_context,
filters={'id': ['top_id_4']})
self.assertEqual([{'id': 'top_id_0', 'name': 'top',
'qos_policy_id': None,
'fixed_ips': [{'subnet_id': 'top_subnet_id',
'ip_address': '10.0.0.1'}]}], ports1)
self.assertEqual([{'id': 'top_id_1', 'name': 'bottom',
'qos_policy_id': None}], ports2)
self.assertEqual([], ports3)
TOP_ROUTERS.append({'id': 'router_id'})
@ -990,9 +1148,9 @@ class PluginTest(unittest.TestCase,
ports = fake_plugin.get_ports(neutron_context,
filters={'device_id': ['router_id']})
expected = [{'id': 'top_id_1', 'name': 'bottom',
'qos_policy_id': None, 'device_id': 'router_id'},
{'id': 'top_id_2', 'name': 'bottom',
'qos_policy_id': None, 'device_id': 'router_id'}]
six.assertCountEqual(self, expected, ports)
@patch.object(context, 'get_context_from_neutron_context')
@ -3372,6 +3530,97 @@ class PluginTest(unittest.TestCase,
'pod_id_1', TOP_SGS,
TOP_SG_RULES, BOTTOM1_SGS)
@patch.object(context, 'get_context_from_neutron_context')
def test_create_policy(self, mock_context):
fake_plugin = FakePlugin()
q_ctx = FakeNeutronContext()
t_ctx = context.get_db_context()
mock_context.return_value = t_ctx
self._test_create_policy(fake_plugin, q_ctx, t_ctx)
@patch.object(context, 'get_context_from_neutron_context')
def test_update_policy(self, mock_context):
self._basic_pod_route_setup()
fake_plugin = FakePlugin()
q_ctx = FakeNeutronContext()
t_ctx = context.get_db_context()
mock_context.return_value = t_ctx
self._test_update_policy(fake_plugin, q_ctx, t_ctx, 'pod_id_1',
BOTTOM1_POLICIES)
@patch.object(context, 'get_context_from_neutron_context')
def test_delete_policy(self, mock_context):
self._basic_pod_route_setup()
fake_plugin = FakePlugin()
q_ctx = FakeNeutronContext()
t_ctx = context.get_db_context()
mock_context.return_value = t_ctx
self._test_delete_policy(fake_plugin, q_ctx, t_ctx, 'pod_id_1',
BOTTOM1_POLICIES)
@patch.object(context, 'get_context_from_neutron_context')
def test_create_policy_rule(self, mock_context):
self._basic_pod_route_setup()
fake_plugin = FakePlugin()
q_ctx = FakeNeutronContext()
t_ctx = context.get_db_context()
mock_context.return_value = t_ctx
self._test_create_policy_rule(fake_plugin, q_ctx, t_ctx, 'pod_id_1',
BOTTOM1_POLICIES)
@patch.object(context, 'get_context_from_neutron_context')
def test_delete_policy_rule(self, mock_context):
self._basic_pod_route_setup()
fake_plugin = FakePlugin()
q_ctx = FakeNeutronContext()
t_ctx = context.get_db_context()
mock_context.return_value = t_ctx
self._test_delete_policy_rule(fake_plugin, q_ctx, t_ctx, 'pod_id_1',
BOTTOM1_POLICIES)
@patch.object(context, 'get_context_from_neutron_context')
def test_update_network_with_qos_policy(self, mock_context):
self._basic_pod_route_setup()
t_ctx = context.get_db_context()
fake_plugin = FakePlugin()
fake_client = FakeClient('pod_1')
q_ctx = FakeNeutronContext()
mock_context.return_value = t_ctx
tenant_id = TEST_TENANT_ID
net_id, _, _, _ = \
self._prepare_network_subnet(tenant_id, t_ctx, 'pod_1', 1)
self._test_update_network_with_qos_policy(fake_plugin, fake_client,
q_ctx, t_ctx, 'pod_id_1',
net_id, BOTTOM1_POLICIES)
@patch.object(context, 'get_context_from_neutron_context')
def test_update_port_with_qos_policy(self, mock_context):
project_id = TEST_TENANT_ID
self._basic_pod_route_setup()
q_ctx = FakeNeutronContext()
fake_client = FakeClient('pod_1')
t_ctx = context.get_db_context()
fake_plugin = FakePlugin()
mock_context.return_value = t_ctx
(t_net_id, t_subnet_id,
b_net_id, b_subnet_id) = self._prepare_network_subnet(
project_id, t_ctx, 'pod_1', 1)
t_port_id, b_port_id = self._prepare_port_test(
project_id, t_ctx, 'pod_1', 1, t_net_id, b_net_id,
t_subnet_id, b_subnet_id)
self._test_update_port_with_qos_policy(fake_plugin, fake_client,
q_ctx, t_ctx,
'pod_id_1', t_port_id,
b_port_id, BOTTOM1_POLICIES)
@patch.object(FakeBaseRPCAPI, 'setup_shadow_ports')
@patch.object(FakeClient, 'update_ports')
@patch.object(context, 'get_context_from_neutron_context')
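The fake RPC layer above hands QoS work to the xjob manager by packing identifiers into one '#'-separated payload string (pod id, policy id, resource type and resource id for create; pod id and policy id for update/delete), the same convention the handlers in xmanager split apart. A standalone sketch of that round trip, with placeholder values:

def pack_create(pod_id, policy_id, res_type, res_id):
    # Build the payload for a qos_create job.
    return '%s#%s#%s#%s' % (pod_id, policy_id, res_type, res_id)


def unpack_create(combine_id):
    # Recover the four fields on the job-handler side.
    return combine_id.split('#')


payload = pack_create('pod_id_1', 'policy-uuid', 'network', 'net-uuid')
print(unpack_create(payload))  # ['pod_id_1', 'policy-uuid', 'network', 'net-uuid']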

View File

@ -0,0 +1,249 @@
# Copyright 2015 Huawei Technologies Co., Ltd.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.objects.qos import rule
from oslo_utils import uuidutils
from tricircle.common import constants
from tricircle.db import api as db_api
class TricircleQosTestMixin(object):
def _test_create_policy(self, plugin, q_ctx, t_ctx):
project_id = 'test_project_id'
t_policy = {
'policy': {
'name': 'test_qos',
'description': 'This policy limits the ports to 10Mbit max.',
'project_id': project_id
}
}
res = plugin.create_policy(q_ctx, t_policy)
res1 = plugin.get_policy(q_ctx, res['id'])
self.assertEqual('test_qos', res['name'])
self.assertEqual(res1['id'], res['id'])
self.assertEqual(res1['name'], res['name'])
self.assertEqual(res1['description'], res['description'])
def _test_update_policy(self, plugin, q_ctx, t_ctx,
pod_id, bottom_policy):
project_id = 'test_project_id'
t_policy = {
'policy': {
'name': 'test_qos',
'description': 'This policy limits the ports to 10Mbit max.',
'project_id': project_id
}
}
res = plugin.create_policy(q_ctx, t_policy)
updated_qos = {
'policy': {
'name': 'test_updated_qos'
}
}
updated_res = plugin.update_policy(q_ctx, res['id'], updated_qos)
self.assertEqual(res['id'], updated_res['id'])
self.assertEqual('test_updated_qos', updated_res['name'])
b_policy_id = uuidutils.generate_uuid()
b_policy = {
'id': b_policy_id, 'name': b_policy_id, 'description': '',
'tenant_id': project_id
}
bottom_policy.append(b_policy)
db_api.create_resource_mapping(t_ctx, res['id'], b_policy_id,
pod_id, project_id, constants.RT_QOS)
updated_qos = {
'policy': {
'name': 'test_policy'
}
}
updated_res = plugin.update_policy(q_ctx, res['id'], updated_qos)
self.assertEqual('test_policy', updated_res['name'])
self.assertEqual('test_policy', bottom_policy[0]['name'])
def _test_delete_policy(self, plugin, q_ctx,
t_ctx, pod_id, bottom_policy):
project_id = 'test_project_id'
t_policy = {
'policy': {
'name': 'test_qos',
'description': 'This policy limits the ports to 10Mbit max.',
'project_id': project_id
}
}
res = plugin.create_policy(q_ctx, t_policy)
b_policy_id = uuidutils.generate_uuid()
b_policy = {
'id': b_policy_id, 'name': b_policy_id, 'description': '',
'tenant_id': project_id
}
bottom_policy.append(b_policy)
db_api.create_resource_mapping(t_ctx, res['id'], b_policy_id,
pod_id, project_id, constants.RT_QOS)
self.assertEqual(1, len(bottom_policy))
plugin.delete_policy(q_ctx, res['id'])
self.assertEqual(0, len(bottom_policy))
def _test_create_policy_rule(self, plugin, q_ctx,
t_ctx, pod_id, bottom_policy):
project_id = 'test_project_id'
t_policy = {
'policy': {
'name': 'test_qos',
'description': 'This policy limits the ports to 10Mbit max.',
'project_id': project_id
}
}
res = plugin.create_policy(q_ctx, t_policy)
rule_data = {
"bandwidth_limit_rule": {
"max_kbps": "10000"
}
}
t_rule = plugin.create_policy_rule(
q_ctx, rule.QosBandwidthLimitRule, res['id'], rule_data)
res1 = plugin.get_policy(q_ctx, res['id'])
self.assertEqual(1, len(res1['rules']))
self.assertEqual(t_rule['id'], res1['rules'][0]['id'])
b_policy_id = uuidutils.generate_uuid()
b_policy = {'id': b_policy_id, 'name': b_policy_id, 'description': '',
'tenant_id': project_id, 'rules': []}
bottom_policy.append(b_policy)
db_api.create_resource_mapping(t_ctx, res['id'], b_policy_id,
pod_id, project_id, constants.RT_QOS)
rule_data = {
"bandwidth_limit_rule": {
"max_kbps": "10000",
"max_burst_kbps": "20000"
}
}
t_rule = plugin.create_policy_rule(
q_ctx, rule.QosBandwidthLimitRule, res['id'], rule_data)
self.assertEqual(2, len(bottom_policy[0]['rules']))
b_rule = bottom_policy[0]['rules'][0]
self.assertEqual(b_policy_id, b_rule['qos_policy_id'])
b_rule = bottom_policy[0]['rules'][1]
self.assertEqual(b_policy_id, b_rule['qos_policy_id'])
def _test_delete_policy_rule(self, plugin, q_ctx,
t_ctx, pod_id, bottom_policy):
project_id = 'test_project_id'
t_policy = {
'policy': {
'name': 'test_qos',
'description': 'This policy limits the ports to 10Mbit max.',
'project_id': project_id
}
}
res = plugin.create_policy(q_ctx, t_policy)
b_policy_id = uuidutils.generate_uuid()
b_policy = {
'id': b_policy_id, 'name': b_policy_id, 'description': '',
'tenant_id': project_id, 'rules': []
}
bottom_policy.append(b_policy)
db_api.create_resource_mapping(t_ctx, res['id'], b_policy_id,
pod_id, project_id, constants.RT_QOS)
rule_data = {
"bandwidth_limit_rule": {
"max_kbps": "10000"
}
}
res1 = plugin.create_policy_rule(
q_ctx, rule.QosBandwidthLimitRule, res['id'], rule_data)
self.assertEqual(1, len(bottom_policy[0]['rules']))
b_rule = bottom_policy[0]['rules'][0]
self.assertEqual(b_policy_id, b_rule['qos_policy_id'])
plugin.delete_policy_rule(
q_ctx, rule.QosBandwidthLimitRule, res1['id'], res['id'])
self.assertEqual(0, len(bottom_policy[0]['rules']))
@staticmethod
def _create_policy_in_top(self, plugin, q_ctx, t_ctx,
pod_id, bottom_policy):
project_id = 'test_project_id'
t_policy = {
'policy': {
'name': 'test_qos',
'description': 'This policy limits the ports to 10Mbit max.',
'project_id': project_id,
}
}
return plugin.create_policy(q_ctx, t_policy)
def _test_update_network_with_qos_policy(self, plugin, client, q_ctx,
t_ctx, pod_id, t_net_id,
bottom_policy):
res = \
self._create_policy_in_top(self, plugin, q_ctx, t_ctx,
pod_id, bottom_policy)
update_body = {
'network': {
'qos_policy_id': res['id']}
}
top_net = plugin.update_network(q_ctx, t_net_id, update_body)
self.assertEqual(top_net['qos_policy_id'], res['id'])
route_res = \
db_api.get_bottom_mappings_by_top_id(t_ctx, res['id'],
constants.RT_QOS)
bottom_net = client.get_networks(q_ctx, t_net_id)
self.assertEqual(bottom_net['qos_policy_id'], route_res[0][1])
def _test_update_port_with_qos_policy(self, plugin, client, q_ctx,
t_ctx, pod_id, t_port_id,
b_port_id, bottom_policy):
res = \
self._create_policy_in_top(self, plugin, q_ctx, t_ctx,
pod_id, bottom_policy)
update_body = {
'port': {
'qos_policy_id': res['id']}
}
top_port = plugin.update_port(q_ctx, t_port_id, update_body)
self.assertEqual(top_port['qos_policy_id'], res['id'])
route_res = \
db_api.get_bottom_mappings_by_top_id(t_ctx, res['id'],
constants.RT_QOS)
bottom_port = client.get_ports(q_ctx, b_port_id)
self.assertEqual(bottom_port['qos_policy_id'], route_res[0][1])
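The assertions above lean on the resource-routing table: create_resource_mapping records which bottom id a top id maps to in a given pod, and get_bottom_mappings_by_top_id later returns (pod, bottom_id) pairs, which is why the checks read route_res[0][1]. A simplified stand-in for that behaviour (not the tricircle.db API):

mappings = []  # each entry: (pod, top_id, bottom_id, resource_type)


def create_resource_mapping(pod, top_id, bottom_id, res_type):
    mappings.append((pod, top_id, bottom_id, res_type))


def get_bottom_mappings_by_top_id(top_id, res_type):
    return [(pod, b_id) for pod, t_id, b_id, r_type in mappings
            if t_id == top_id and r_type == res_type]


create_resource_mapping({'pod_id': 'pod_id_1'}, 'top-policy-id',
                        'bottom-policy-id', 'qos_policy')
route_res = get_bottom_mappings_by_top_id('top-policy-id', 'qos_policy')
print(route_res[0][1])  # bottom-policy-id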

View File

@ -55,7 +55,10 @@ class ResourceStore(object):
('sfc_port_pairs', constants.RT_PORT_PAIR),
('sfc_port_pair_groups', constants.RT_PORT_PAIR_GROUP),
('sfc_port_chains', constants.RT_PORT_CHAIN),
('sfc_flow_classifiers', constants.RT_FLOW_CLASSIFIER),
('qos_policies', constants.RT_QOS),
('qos_bandwidth_limit_rules',
'qos_bandwidth_limit_rules')]
def __init__(self):
self.store_list = []
@ -556,9 +559,13 @@ class FakeClient(object):
def create_resources(self, _type, ctx, body):
res_list = self._res_map[self.region_name][_type]
if _type == 'qos_policy':
_type = 'policy'
res = dict(body[_type])
if 'id' not in res:
res['id'] = uuidutils.generate_uuid()
if _type == 'policy' and 'rules' not in res:
res['rules'] = []
res_list.append(res)
return res
@ -586,6 +593,8 @@ class FakeClient(object):
return None
def delete_resources(self, _type, ctx, _id):
if _type == 'policy':
_type = 'qos_policy'
index = -1
res_list = self._res_map[self.region_name][_type]
for i, res in enumerate(res_list):
@ -595,7 +604,10 @@ class FakeClient(object):
del res_list[index]
def update_resources(self, _type, ctx, _id, body):
if _type == 'policy':
res_list = self._res_map[self.region_name]['qos_policy']
else:
res_list = self._res_map[self.region_name][_type]
updated = False
for res in res_list:
if res['id'] == _id:
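The special-casing above exists because the Neutron request body wraps a QoS policy under the key 'policy', while the fake resource store files it under the 'qos_policy' type, so create, update and delete have to translate between the two names. A toy illustration of that mismatch (simplified store, not the real FakeClient):

store = {'qos_policy': []}


def create_qos_policy(body):
    # Store key is 'qos_policy', request body key is 'policy'.
    res = dict(body['policy'])
    res.setdefault('rules', [])
    store['qos_policy'].append(res)
    return res


create_qos_policy({'policy': {'id': 'policy-1', 'name': 'test_qos'}})
print(store['qos_policy'][0]['name'])  # test_qos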

View File

@ -159,7 +159,12 @@ class XManager(PeriodicTasks):
constants.JT_NETWORK_UPDATE: self.update_network,
constants.JT_SUBNET_UPDATE: self.update_subnet,
constants.JT_SHADOW_PORT_SETUP: self.setup_shadow_ports,
constants.JT_TRUNK_SYNC: self.sync_trunk,
constants.JT_QOS_CREATE: self.create_qos_policy,
constants.JT_QOS_UPDATE: self.update_qos_policy,
constants.JT_QOS_DELETE: self.delete_qos_policy,
constants.JT_SYNC_QOS_RULE: self.sync_qos_policy_rules
}
self.helper = helper.NetworkHelper()
self.xjob_handler = xrpcapi.XJobAPI()
super(XManager, self).__init__()
@ -898,8 +903,9 @@ class XManager(PeriodicTasks):
ctx, t_network_id, constants.RT_NETWORK)
b_pods = [mapping[0] for mapping in mappings]
for b_pod in b_pods:
self.xjob_handler.update_network(
ctx, project_id, t_network_id,
b_pod['pod_id'])
return
b_pod = db_api.get_pod(ctx, b_pod_id)
@ -918,6 +924,9 @@ class XManager(PeriodicTasks):
}
}
if not t_network.get('qos_policy_id', None):
body['network']['qos_policy_id'] = None
try:
b_client.update_networks(ctx, b_network_id, body)
except q_cli_exceptions.NotFound:
@ -1527,3 +1536,287 @@ class XManager(PeriodicTasks):
fc_id=b_fc_ids[0])
self.xjob_handler.recycle_resources(ctx, t_pc['project_id'])
@_job_handle(constants.JT_QOS_CREATE)
def create_qos_policy(self, ctx, payload):
(b_pod_id, t_policy_id, res_type, res_id) = payload[
constants.JT_QOS_CREATE].split('#')
t_client = self._get_client()
t_policy = t_client.get_qos_policies(ctx, t_policy_id)
if not t_policy:
# we just end this job if top policy no longer exists
return
project_id = t_policy['tenant_id']
if b_pod_id == constants.POD_NOT_SPECIFIED:
mappings = db_api.get_bottom_mappings_by_top_id(
ctx, res_id, res_type)
for b_pod, _ in mappings:
self.xjob_handler.create_qos_policy(ctx, project_id,
t_policy_id,
b_pod['pod_id'],
res_type,
res_id)
return
b_pod = db_api.get_pod(ctx, b_pod_id)
b_region_name = b_pod['region_name']
b_client = self._get_client(region_name=b_region_name)
body = {
'policy': {
'description': t_policy.get('description', ''),
'tenant_id': t_policy.get('tenant_id', ''),
'project_id': t_policy.get('project_id', ''),
'shared': t_policy.get('shared', False),
'name': t_policy.get('name', '')
}
}
try:
_, b_policy_id = self.helper.prepare_bottom_element(
ctx, project_id, b_pod, t_policy, constants.RT_QOS, body)
if res_id:
if res_type == constants.RT_NETWORK:
body = {
"network": {
"qos_policy_id": b_policy_id
}
}
b_client.update_networks(ctx, res_id, body)
if res_type == constants.RT_PORT:
body = {
"port": {
"qos_policy_id": b_policy_id
}
}
b_client.update_ports(ctx, res_id, body)
if t_policy['rules']:
self.xjob_handler.sync_qos_policy_rules(
ctx, project_id, t_policy_id)
except q_cli_exceptions.NotFound:
LOG.error('qos policy: %(policy_id)s not found, '
'pod name: %(name)s',
{'policy_id': t_policy_id, 'name': b_region_name})
@_job_handle(constants.JT_QOS_UPDATE)
def update_qos_policy(self, ctx, payload):
(b_pod_id, t_policy_id) = payload[
constants.JT_QOS_UPDATE].split('#')
t_client = self._get_client()
t_policy = t_client.get_qos_policies(ctx, t_policy_id)
if not t_policy:
return
project_id = t_policy['tenant_id']
if b_pod_id == constants.POD_NOT_SPECIFIED:
mappings = db_api.get_bottom_mappings_by_top_id(
ctx, t_policy_id, constants.RT_QOS)
for b_pod, _ in mappings:
self.xjob_handler.update_qos_policy(ctx, project_id,
t_policy_id,
b_pod['pod_id'])
return
b_pod = db_api.get_pod(ctx, b_pod_id)
b_region_name = b_pod['region_name']
b_client = self._get_client(region_name=b_region_name)
b_policy_id = db_api.get_bottom_id_by_top_id_region_name(
ctx, t_policy_id, b_region_name, constants.RT_QOS)
if not b_policy_id:
return
body = {
'policy': {
'description': t_policy.get('description', ''),
'shared': t_policy.get('shared', ''),
'name': t_policy.get('name', '')
}
}
try:
b_client.update_qos_policies(ctx, b_policy_id, body)
except q_cli_exceptions.NotFound:
LOG.error('qos policy: %(policy_id)s not found, '
'pod name: %(name)s',
{'policy_id': t_policy_id, 'name': b_region_name})
@_job_handle(constants.JT_QOS_DELETE)
def delete_qos_policy(self, ctx, payload):
(b_pod_id, t_policy_id) = payload[
constants.JT_QOS_DELETE].split('#')
project_id = ctx.tenant_id
if b_pod_id == constants.POD_NOT_SPECIFIED:
mappings = db_api.get_bottom_mappings_by_top_id(
ctx, t_policy_id, constants.RT_QOS)
for b_pod, _ in mappings:
self.xjob_handler.delete_qos_policy(ctx, project_id,
t_policy_id,
b_pod['pod_id'])
return
b_pod = db_api.get_pod(ctx, b_pod_id)
b_region_name = b_pod['region_name']
b_client = self._get_client(region_name=b_region_name)
b_policy_id = db_api.get_bottom_id_by_top_id_region_name(
ctx, t_policy_id, b_region_name, constants.RT_QOS)
try:
b_client.delete_qos_policies(ctx, b_policy_id)
db_api.delete_mappings_by_bottom_id(ctx, b_policy_id)
except q_cli_exceptions.NotFound:
LOG.error('qos policy: %(policy_id)s not found, '
'pod name: %(name)s',
{'policy_id': t_policy_id, 'name': b_region_name})
@staticmethod
def _safe_create_policy_rule(
t_context, client, rule_type, policy_id, body):
try:
return getattr(client, 'create_%s_rules' % rule_type)(
t_context, policy_id, body)
except q_exceptions.Conflict:
return
@staticmethod
def _safe_get_policy_rule(t_context, client, rule_type, rule_id,
policy_id):
combine_id = '%s#%s' % (rule_id, policy_id)
return getattr(client, 'get_%s_rules' % rule_type)(
t_context, combine_id)
@staticmethod
def _safe_delete_policy_rule(t_context, client, rule_type, rule_id,
policy_id):
combine_id = '%s#%s' % (rule_id, policy_id)
getattr(client, 'delete_%s_rules' % rule_type)(
t_context, combine_id)
@staticmethod
def _construct_bottom_bandwidth_limit_rule(t_rule):
return {
"max_kbps": t_rule["max_kbps"],
"max_burst_kbps": t_rule["max_burst_kbps"]
}
@staticmethod
def _construct_bottom_dscp_marking_rule(t_rule):
return {
"dscp_mark": t_rule["dscp_mark"]
}
@staticmethod
def _construct_bottom_minimum_bandwidth_rule(t_rule):
return {
"min_kbps": t_rule["min_kbps"],
"direction": t_rule["direction"]
}
def _construct_bottom_policy_rule(self, rule_type, rule_data):
return getattr(self, '_construct_bottom_%s_rule' % rule_type)(
rule_data)
@staticmethod
def _compare_bandwidth_limit_rule(rule1, rule2):
for key in ('max_kbps', 'max_burst_kbps'):
if rule1[key] != rule2[key]:
return False
return True
@staticmethod
def _compare_dscp_marking_rule(rule1, rule2):
for key in ('dscp_mark',):
if rule1[key] != rule2[key]:
return False
return True
@staticmethod
def _compare_minimum_bandwidth_rule(rule1, rule2):
for key in ('min_kbps', 'direction'):
if rule1[key] != rule2[key]:
return False
return True
def _compare_policy_rule(self, rule_type, rule1, rule2):
return getattr(self, '_compare_%s_rule' % rule_type)(rule1, rule2)
@_job_handle(constants.JT_SYNC_QOS_RULE)
def sync_qos_policy_rules(self, ctx, payload):
policy_id = payload[constants.JT_SYNC_QOS_RULE]
top_client = self._get_client()
bandwidth_limit_rules = \
top_client.list_bandwidth_limit_rules(ctx, filters=[{
'key': 'policy_id', 'comparator': 'eq', 'value': policy_id}])
dscp_marking_rules = \
top_client.list_dscp_marking_rules(ctx, filters=[{
'key': 'policy_id', 'comparator': 'eq', 'value': policy_id}])
minimum_bandwidth_rules = \
top_client.list_minimum_bandwidth_rules(ctx, filters=[{
'key': 'policy_id', 'comparator': 'eq', 'value': policy_id}])
mappings = db_api.get_bottom_mappings_by_top_id(
ctx, policy_id, constants.RT_QOS)
self._sync_policy_rules(
ctx, mappings, 'bandwidth_limit_rule', bandwidth_limit_rules)
self._sync_policy_rules(
ctx, mappings, 'dscp_marking_rule', dscp_marking_rules)
self._sync_policy_rules(
ctx, mappings, 'minimum_bandwidth_rule', minimum_bandwidth_rules)
def _sync_policy_rules(self, ctx, mappings, rule_type, rules):
if rule_type == 'bandwidth_limit_rule':
rule_types = 'bandwidth_limit_rules'
prefix = 'bandwidth_limit'
elif rule_type == 'dscp_marking_rule':
rule_types = 'dscp_marking_rules'
prefix = 'dscp_marking'
else:
rule_types = 'minimum_bandwidth_rules'
prefix = 'minimum_bandwidth'
new_b_rules = []
for t_rule in rules:
new_b_rules.append(
getattr(self, '_construct_bottom_%s' % rule_type)(t_rule))
for pod, b_policy_id in mappings:
client = self._get_client(pod['region_name'])
b_rules = getattr(client, 'list_%s' % rule_types)(
ctx, filters=[{'key': 'policy_id',
'comparator': 'eq',
'value': b_policy_id}])
add_rules = []
del_rules = []
match_index = set()
for b_rule in b_rules:
match = False
for i, rule in enumerate(new_b_rules):
if getattr(self, '_compare_%s' % rule_type)(b_rule, rule):
match = True
match_index.add(i)
break
if not match:
del_rules.append(b_rule)
for i, rule in enumerate(new_b_rules):
if i not in match_index:
add_rules.append(rule)
for del_rule in del_rules:
self._safe_delete_policy_rule(
ctx, client, prefix, del_rule['id'],
b_policy_id)
if add_rules:
for new_rule in add_rules:
rule_body = {rule_type: new_rule}
self._safe_create_policy_rule(
ctx, client, prefix,
b_policy_id, rule_body)
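_sync_policy_rules above reconciles each bottom pod against the top policy: bottom rules that match no desired rule are deleted, desired rules not yet present are created, and matching compares the type-specific fields. A standalone sketch of that diff step using the bandwidth-limit fields (the real code dispatches per rule type):

def compare_bandwidth_limit(rule1, rule2):
    return all(rule1[k] == rule2[k] for k in ('max_kbps', 'max_burst_kbps'))


def diff_rules(bottom_rules, desired_rules, compare):
    matched = set()
    to_delete = []
    for b_rule in bottom_rules:
        for i, d_rule in enumerate(desired_rules):
            if compare(b_rule, d_rule):
                matched.add(i)
                break
        else:
            to_delete.append(b_rule)          # stale bottom rule
    to_add = [r for i, r in enumerate(desired_rules) if i not in matched]
    return to_add, to_delete


bottom = [{'id': 'b1', 'max_kbps': 10000, 'max_burst_kbps': 0}]
desired = [{'max_kbps': 5000, 'max_burst_kbps': 0}]
print(diff_rules(bottom, desired, compare_bandwidth_limit))
# the 5000 kbps rule is created, the stale 10000 kbps rule is deleted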