Merge remote-tracking branch 'origin/master' into merge-master-into-pecan

Change-Id: I28d09f217464c1abef6fc38907071a65c17da86c
Doug Wiegley 2015-09-02 15:40:45 -06:00
commit 6f2849c916
58 changed files with 794 additions and 480 deletions

View File

@ -1,5 +1,4 @@
# Defines configuration options for SRIOV NIC Switch MechanismDriver
# and Agent
[ml2_sriov]
# (ListOpt) Comma-separated list of
@ -13,22 +12,3 @@
# DEPRECATED: This option is deprecated in the Liberty release
# and will be removed in the Mitaka release. From Mitaka the agent will
# always be required.
[sriov_nic]
# (ListOpt) Comma-separated list of <physical_network>:<network_device>
# tuples mapping physical network names to the agent's node-specific
# physical network device interfaces of SR-IOV physical function to be used
# for VLAN networks. All physical networks listed in network_vlan_ranges on
# the server should have mappings to appropriate interfaces on each agent.
#
# physical_device_mappings =
# Example: physical_device_mappings = physnet1:eth1
#
# (ListOpt) Comma-separated list of <network_device>:<vfs_to_exclude>
# tuples, mapping network_device to the agent's node-specific list of virtual
# functions that should not be used for virtual networking.
# vfs_to_exclude is a semicolon-separated list of virtual
# functions to exclude from network_device. The network_device in the
# mapping should appear in the physical_device_mappings list.
# exclude_devices =
# Example: exclude_devices = eth1:0000:07:00.2; 0000:07:00.3

View File

@ -0,0 +1,20 @@
# Defines configuration options for SRIOV NIC Switch Agent
[sriov_nic]
# (ListOpt) Comma-separated list of <physical_network>:<network_device>
# tuples mapping physical network names to the agent's node-specific
# physical network device interfaces of SR-IOV physical function to be used
# for VLAN networks. All physical networks listed in network_vlan_ranges on
# the server should have mappings to appropriate interfaces on each agent.
#
# physical_device_mappings =
# Example: physical_device_mappings = physnet1:eth1
#
# (ListOpt) Comma-separated list of <network_device>:<vfs_to_exclude>
# tuples, mapping network_device to the agent's node-specific list of virtual
# functions that should not be used for virtual networking.
# vfs_to_exclude is a semicolon-separated list of virtual
# functions to exclude from network_device. The network_device in the
# mapping should appear in the physical_device_mappings list.
# exclude_devices =
# Example: exclude_devices = eth1:0000:07:00.2; 0000:07:00.3
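Both mapping options above use the same comma-separated <key>:<value> tuple syntax. A minimal, illustrative sketch of parsing those two formats (not the agent's actual parser; the helper names below are made up):

# Illustrative parsing of the mapping formats documented above; the real
# agent has its own config helpers under neutron.plugins.ml2.drivers.mech_sriov.
def parse_mappings(entries):
    """Turn ['physnet1:eth1'] into {'physnet1': 'eth1'}."""
    mappings = {}
    for entry in entries:
        key, sep, value = entry.partition(':')
        if not sep:
            raise ValueError('invalid mapping: %r' % entry)
        mappings[key.strip()] = value.strip()
    return mappings

def parse_exclude_devices(entries):
    """Turn ['eth1:0000:07:00.2; 0000:07:00.3'] into {'eth1': set of VF PCI slots}."""
    exclude = {}
    for entry in entries:
        device, _sep, vfs = entry.partition(':')
        exclude[device.strip()] = {vf.strip() for vf in vfs.split(';') if vf.strip()}
    return exclude

print(parse_mappings(['physnet1:eth1']))
print(parse_exclude_devices(['eth1:0000:07:00.2; 0000:07:00.3']))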

View File

@ -94,7 +94,7 @@ def get_log_args(conf, log_file_name, **kwargs):
if log_dir:
cmd_args.append('--log-dir=%s' % log_dir)
if kwargs.get('metadata_proxy_watch_log') is False:
cmd_args.append('--metadata_proxy_watch_log=false')
cmd_args.append('--nometadata_proxy_watch_log')
else:
if conf.use_syslog:
cmd_args.append('--use-syslog')

View File

@ -152,6 +152,10 @@ class OVSBridge(BaseOVS):
super(OVSBridge, self).__init__()
self.br_name = br_name
self.datapath_type = datapath_type
self.agent_uuid_stamp = '0x0'
def set_agent_uuid_stamp(self, val):
self.agent_uuid_stamp = val
def set_controller(self, controllers):
self.ovsdb.set_controller(self.br_name,
@ -260,6 +264,10 @@ class OVSBridge(BaseOVS):
self.br_name, 'datapath_id')
def do_action_flows(self, action, kwargs_list):
if action != 'del':
for kw in kwargs_list:
if 'cookie' not in kw:
kw['cookie'] = self.agent_uuid_stamp
flow_strs = [_build_flow_expr_str(kw, action) for kw in kwargs_list]
self.run_ofctl('%s-flows' % action, ['-'], '\n'.join(flow_strs))
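The change above stamps every add/mod flow with the bridge's agent_uuid_stamp unless the caller already supplied a cookie, so the agent can later tell its own flows apart; delete actions are left alone so stale flows can still be removed. A standalone sketch of that defaulting logic (stand-in values, not the real OVSBridge):

# Illustrative default-cookie logic, mirroring do_action_flows above.
AGENT_UUID_STAMP = '0x2a'  # hypothetical per-agent stamp

def stamp_flows(action, kwargs_list, stamp=AGENT_UUID_STAMP):
    if action != 'del':
        for kw in kwargs_list:
            if 'cookie' not in kw:
                kw['cookie'] = stamp
    return kwargs_list

print(stamp_flows('add', [{'priority': 1, 'actions': 'drop'}]))
# the delete path is untouched, so deletes match flows regardless of cookie:
print(stamp_flows('del', [{'priority': 1}]))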

View File

@ -344,8 +344,10 @@ class RouterInfo(object):
for existing_port in existing_ports:
current_port = current_ports_dict.get(existing_port['id'])
if current_port:
if sorted(existing_port['fixed_ips']) != (
sorted(current_port['fixed_ips'])):
if (sorted(existing_port['fixed_ips'],
key=common_utils.safe_sort_key) !=
sorted(current_port['fixed_ips'],
key=common_utils.safe_sort_key)):
updated_ports[current_port['id']] = current_port
return updated_ports

View File

@ -502,30 +502,29 @@ class IpRouteCommand(IpDeviceCommandBase):
"""Return an instance of IpRouteCommand which works on given table"""
return IpRouteCommand(self._parent, table)
def _table_args(self):
def _table_args(self, override=None):
if override:
return ['table', override]
return ['table', self._table] if self._table else []
def _dev_args(self):
return ['dev', self.name] if self.name else []
def add_gateway(self, gateway, metric=None, table=None):
ip_version = get_ip_version(gateway)
args = ['replace', 'default', 'via', gateway]
if metric:
args += ['metric', metric]
args += ['dev', self.name]
if table:
args += ['table', table]
else:
args += self._table_args()
args += self._dev_args()
args += self._table_args(table)
self._as_root([ip_version], tuple(args))
def delete_gateway(self, gateway, table=None):
ip_version = get_ip_version(gateway)
args = ['del', 'default',
'via', gateway,
'dev', self.name]
if table:
args += ['table', table]
else:
args += self._table_args()
'via', gateway]
args += self._dev_args()
args += self._table_args(table)
try:
self._as_root([ip_version], tuple(args))
except RuntimeError as rte:
@ -537,7 +536,9 @@ class IpRouteCommand(IpDeviceCommandBase):
def list_onlink_routes(self, ip_version):
def iterate_routes():
args = ['list', 'dev', self.name, 'scope', 'link']
args = ['list']
args += self._dev_args()
args += ['scope', 'link']
args += self._table_args()
output = self._run([ip_version], tuple(args))
for line in output.split('\n'):
@ -549,20 +550,25 @@ class IpRouteCommand(IpDeviceCommandBase):
def add_onlink_route(self, cidr):
ip_version = get_ip_version(cidr)
args = ['replace', cidr, 'dev', self.name, 'scope', 'link']
args = ['replace', cidr]
args += self._dev_args()
args += ['scope', 'link']
args += self._table_args()
self._as_root([ip_version], tuple(args))
def delete_onlink_route(self, cidr):
ip_version = get_ip_version(cidr)
args = ['del', cidr, 'dev', self.name, 'scope', 'link']
args = ['del', cidr]
args += self._dev_args()
args += ['scope', 'link']
args += self._table_args()
self._as_root([ip_version], tuple(args))
def get_gateway(self, scope=None, filters=None, ip_version=None):
options = [ip_version] if ip_version else []
args = ['list', 'dev', self.name]
args = ['list']
args += self._dev_args()
args += self._table_args()
if filters:
args += filters
@ -640,19 +646,26 @@ class IpRouteCommand(IpDeviceCommandBase):
def add_route(self, cidr, ip, table=None):
ip_version = get_ip_version(cidr)
args = ['replace', cidr, 'via', ip, 'dev', self.name]
if table:
args += ['table', table]
args = ['replace', cidr, 'via', ip]
args += self._dev_args()
args += self._table_args(table)
self._as_root([ip_version], tuple(args))
def delete_route(self, cidr, ip, table=None):
ip_version = get_ip_version(cidr)
args = ['del', cidr, 'via', ip, 'dev', self.name]
if table:
args += ['table', table]
args = ['del', cidr, 'via', ip]
args += self._dev_args()
args += self._table_args(table)
self._as_root([ip_version], tuple(args))
class IPRoute(SubProcessBase):
def __init__(self, namespace=None, table=None):
super(IPRoute, self).__init__(namespace=namespace)
self.name = None
self.route = IpRouteCommand(self, table=table)
class IpNeighCommand(IpDeviceCommandBase):
COMMAND = 'neigh'
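The refactoring above routes the optional device and table arguments through _dev_args() and _table_args(override) so the same builders serve both the device-bound IpRouteCommand and the new device-less IPRoute wrapper. A standalone sketch of how the `ip route` argument list is composed (illustrative names, not the real classes):

# Illustrative composition of `ip route` arguments, mirroring _dev_args()
# and _table_args(override) above.
def build_route_args(action, cidr, via=None, dev=None, table=None,
                     default_table=None):
    args = [action, cidr]
    if via:
        args += ['via', via]
    args += ['dev', dev] if dev else []      # _dev_args()
    if table:                                # _table_args(override=table)
        args += ['table', table]
    elif default_table:                      # fall back to the bound table
        args += ['table', default_table]
    return args

print(build_route_args('replace', '10.0.0.0/24', via='10.0.0.1', dev='eth0'))
print(build_route_args('del', 'default', via='10.0.0.1', table='16'))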

View File

@ -28,6 +28,7 @@ from neutron.agent.linux import iptables_comments as ic
from neutron.agent.linux import iptables_manager
from neutron.agent.linux import utils
from neutron.common import constants
from neutron.common import exceptions as n_exc
from neutron.common import ipv6_utils
from neutron.extensions import portsecurity as psec
from neutron.i18n import _LI
@ -817,7 +818,6 @@ class IptablesFirewallDriver(firewall.FirewallDriver):
try:
return self._device_zone_map[short_port_id]
except KeyError:
self._free_zones_from_removed_ports()
return self._generate_device_zone(short_port_id)
def _free_zones_from_removed_ports(self):
@ -833,7 +833,13 @@ class IptablesFirewallDriver(firewall.FirewallDriver):
def _generate_device_zone(self, short_port_id):
"""Generates a unique conntrack zone for the passed in ID."""
zone = self._find_open_zone()
try:
zone = self._find_open_zone()
except n_exc.CTZoneExhaustedError:
# Free some zones and try again, repeat failure will not be caught
self._free_zones_from_removed_ports()
zone = self._find_open_zone()
self._device_zone_map[short_port_id] = zone
LOG.debug("Assigned CT zone %(z)s to port %(dev)s.",
{'z': zone, 'dev': short_port_id})
@ -854,8 +860,7 @@ class IptablesFirewallDriver(firewall.FirewallDriver):
# gap found, let's use it!
return index + 1
# conntrack zones exhausted :( :(
raise RuntimeError("iptables conntrack zones exhausted. "
"iptables rules cannot be applied.")
raise n_exc.CTZoneExhaustedError()
class OVSHybridIptablesFirewallDriver(IptablesFirewallDriver):
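With the change above, get_device_zone() no longer frees zones on every cache miss; instead _generate_device_zone() tries to allocate first and only runs the cleanup when CTZoneExhaustedError is raised, retrying once (a second failure propagates). A hedged sketch of that allocate/cleanup/retry pattern with a toy zone limit:

# Illustrative allocate-then-cleanup-and-retry pattern, mirroring
# _generate_device_zone above (the real limit is 65535 conntrack zones).
class ZonesExhausted(Exception):
    """Stand-in for neutron.common.exceptions.CTZoneExhaustedError."""

def allocate_zone(zone_map, max_zones, cleanup):
    def find_open_zone():
        used = set(zone_map.values())
        for candidate in range(1, max_zones + 1):
            if candidate not in used:
                return candidate
        raise ZonesExhausted()

    try:
        return find_open_zone()
    except ZonesExhausted:
        cleanup()                # free zones of removed ports
        return find_open_zone()  # a repeated failure is not caught

zones = {'port-a': 1, 'port-b': 2}
print(allocate_zone(zones, max_zones=2, cleanup=zones.clear))  # -> 1 after cleanup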

View File

@ -104,6 +104,8 @@ class SimpleInterfaceMonitor(OvsdbMonitor):
for ovs_id, action, name, ofport, external_ids in json:
if external_ids:
external_ids = ovsdb.val_to_py(external_ids)
if ofport:
ofport = ovsdb.val_to_py(ofport)
device = {'name': name,
'ofport': ofport,
'external_ids': external_ids}

View File

@ -53,9 +53,11 @@ class MetadataDriver(object):
@classmethod
def metadata_mangle_rules(cls, mark):
return [('PREROUTING', '-d 169.254.169.254/32 '
'-i %(interface_name)s '
'-p tcp -m tcp --dport 80 '
'-j MARK --set-xmark %(value)s/%(mask)s' %
{'value': mark,
{'interface_name': namespaces.INTERNAL_DEV_PREFIX + '+',
'value': mark,
'mask': constants.ROUTER_MARK_MASK})]
@classmethod
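The template above now interpolates the internal-device prefix (qr-+) together with the mark and mask, relying on Python's adjacent-literal concatenation happening before the % substitution. A quick sketch of the resulting rule string with placeholder values (the mask value is assumed here, not taken from the diff):

# Illustrative rendering of the mangle rule template above.
INTERNAL_DEV_PREFIX = 'qr-'   # namespaces.INTERNAL_DEV_PREFIX
ROUTER_MARK_MASK = '0xffff'   # assumed value of constants.ROUTER_MARK_MASK
mark = '0x1'

rule = ('PREROUTING', '-d 169.254.169.254/32 '
        '-i %(interface_name)s '
        '-p tcp -m tcp --dport 80 '
        '-j MARK --set-xmark %(value)s/%(mask)s' %
        {'interface_name': INTERNAL_DEV_PREFIX + '+',
         'value': mark,
         'mask': ROUTER_MARK_MASK})
print(rule[1])  # '-d 169.254.169.254/32 -i qr-+ -p tcp -m tcp --dport 80 ...'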

View File

@ -27,10 +27,10 @@ import webob.dec
import webob.exc
from neutron.common import exceptions
from neutron.common import repos
import neutron.extensions
from neutron.i18n import _LE, _LI, _LW
from neutron import manager
from neutron.services import provider_configuration
from neutron import wsgi
@ -580,8 +580,9 @@ class PluginAwareExtensionManager(ExtensionManager):
@classmethod
def get_instance(cls):
if cls._instance is None:
cls._instance = cls(get_extensions_path(),
manager.NeutronManager.get_service_plugins())
service_plugins = manager.NeutronManager.get_service_plugins()
cls._instance = cls(get_extensions_path(service_plugins),
service_plugins)
return cls._instance
def get_supported_extension_aliases(self):
@ -648,31 +649,30 @@ class ResourceExtension(object):
# Returns the extension paths from a config entry and the __path__
# of neutron.extensions
def get_extensions_path():
paths = neutron.extensions.__path__
def get_extensions_path(service_plugins=None):
paths = collections.OrderedDict()
neutron_mods = repos.NeutronModules()
for x in neutron_mods.installed_list():
try:
paths += neutron_mods.module(x).extensions.__path__
except AttributeError:
# Occurs normally if module has no extensions sub-module
pass
# Add Neutron core extensions
paths[neutron.extensions.__path__[0]] = 1
if service_plugins:
# Add Neutron *-aas extensions
for plugin in service_plugins.values():
neutron_mod = provider_configuration.NeutronModule(
plugin.__module__.split('.')[0])
try:
paths[neutron_mod.module().extensions.__path__[0]] = 1
except AttributeError:
# Occurs normally if module has no extensions sub-module
pass
# Add external/other plugins extensions
if cfg.CONF.api_extensions_path:
paths.append(cfg.CONF.api_extensions_path)
# If the path has dups in it, from discovery + conf file, the duplicate
# import of the same module and super() do not play nicely, so weed
# out the duplicates, preserving search order.
z = collections.OrderedDict()
for x in paths:
z[x] = 1
paths = z.keys()
for path in cfg.CONF.api_extensions_path.split(":"):
paths[path] = 1
LOG.debug("get_extension_paths = %s", paths)
# Re-build the extension string
path = ':'.join(paths)
return path
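The rewrite above collects candidate extension directories into an OrderedDict so duplicates coming from core, *-aas plugins and api_extensions_path are dropped while the original search order is kept, then joins the keys with ':'. A small sketch of that dedup-and-join step with made-up paths:

import collections

# Illustrative dedup of extension paths, mirroring get_extensions_path above.
paths = collections.OrderedDict()
for path in ['/opt/neutron/extensions',         # core extensions
             '/opt/neutron_lbaas/extensions',   # *-aas plugin extensions
             'path1', 'path1', 'path2']:        # cfg.CONF.api_extensions_path
    paths[path] = 1

print(':'.join(paths))
# -> /opt/neutron/extensions:/opt/neutron_lbaas/extensions:path1:path2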

View File

@ -110,11 +110,6 @@ class ResourcesPullRpcCallback(object):
def pull(self, context, resource_type, version, resource_id):
obj = prod_registry.pull(resource_type, resource_id, context=context)
if obj:
#TODO(QoS): Remove in the future with new version of
# versionedobjects containing
# https://review.openstack.org/#/c/207998/
if version == obj.VERSION:
version = None
return obj.obj_to_primitive(target_version=version)

View File

@ -26,17 +26,21 @@ from neutron.i18n import _LE, _LW
LOG = logging.getLogger(__name__)
cfg.CONF.import_group('AGENT', 'neutron.plugins.ml2.drivers.openvswitch.'
'agent.common.config')
cfg.CONF.import_group('OVS', 'neutron.plugins.ml2.drivers.openvswitch.'
'agent.common.config')
cfg.CONF.import_group('VXLAN', 'neutron.plugins.ml2.drivers.linuxbridge.'
'agent.common.config')
cfg.CONF.import_group('ml2', 'neutron.plugins.ml2.config')
cfg.CONF.import_group('ml2_sriov',
'neutron.plugins.ml2.drivers.mech_sriov.mech_driver')
dhcp_agent.register_options()
cfg.CONF.register_opts(l3_hamode_db.L3_HA_OPTS)
def setup_conf():
cfg.CONF.import_group('AGENT', 'neutron.plugins.ml2.drivers.openvswitch.'
'agent.common.config')
cfg.CONF.import_group('OVS', 'neutron.plugins.ml2.drivers.openvswitch.'
'agent.common.config')
cfg.CONF.import_group('VXLAN', 'neutron.plugins.ml2.drivers.linuxbridge.'
'agent.common.config')
cfg.CONF.import_group('ml2', 'neutron.plugins.ml2.config')
cfg.CONF.import_group('ml2_sriov',
'neutron.plugins.ml2.drivers.mech_sriov.mech_driver.'
'mech_driver')
dhcp_agent.register_options()
cfg.CONF.register_opts(l3_hamode_db.L3_HA_OPTS)
class BoolOptCallback(cfg.BoolOpt):
@ -260,6 +264,7 @@ def all_tests_passed():
def main():
setup_conf()
cfg.CONF.register_cli_opts(OPTS)
cfg.CONF.set_override('use_stderr', True)
config.setup_logging()

View File

@ -521,3 +521,8 @@ class NetworkSubnetPoolAffinityError(BadRequest):
class ObjectActionError(NeutronException):
message = _('Object action %(action)s failed because: %(reason)s')
class CTZoneExhaustedError(NeutronException):
message = _("IPtables conntrack zones exhausted, iptables rules cannot "
"be applied.")

View File

@ -1,99 +0,0 @@
# Copyright (c) 2015, A10 Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib
import os
from oslo_config import cfg
from oslo_log import log as logging
from six.moves import configparser as ConfigParser
LOG = logging.getLogger(__name__)
class NeutronModules(object):
MODULES = {
'neutron_fwaas': {
'alembic-name': 'fwaas',
},
'neutron_lbaas': {
'alembic-name': 'lbaas',
},
'neutron_vpnaas': {
'alembic-name': 'vpnaas',
},
}
def __init__(self):
self.repos = {}
for repo in self.MODULES:
self.repos[repo] = {}
self.repos[repo]['mod'] = self._import_or_none(repo)
self.repos[repo]['ini'] = None
def _import_or_none(self, module):
try:
return importlib.import_module(module)
except ImportError:
return None
def installed_list(self):
z = filter(lambda k: self.repos[k]['mod'] is not None, self.repos)
LOG.debug("NeutronModules related repos installed = %s", z)
return z
def module(self, module):
return self.repos[module]['mod']
def alembic_name(self, module):
return self.MODULES[module]['alembic-name']
# Return an INI parser for the child module. oslo.config is a bit too
# magical in its INI loading, and in one notable case, we need to merge
# together the [service_providers] section across at least four
# repositories.
def ini(self, module):
if self.repos[module]['ini'] is None:
neutron_dir = None
try:
neutron_dir = cfg.CONF.config_dir
except cfg.NoSuchOptError:
pass
if neutron_dir is None:
neutron_dir = '/etc/neutron'
ini = ConfigParser.SafeConfigParser()
ini_path = os.path.join(neutron_dir, '%s.conf' % module)
if os.path.exists(ini_path):
ini.read(ini_path)
self.repos[module]['ini'] = ini
return self.repos[module]['ini']
def service_providers(self, module):
ini = self.ini(module)
sp = []
try:
for name, value in ini.items('service_providers'):
if name == 'service_provider':
sp.append(value)
except ConfigParser.NoSectionError:
pass
return sp

View File

@ -18,6 +18,7 @@
"""Utilities and helper functions."""
import collections
import datetime
import decimal
import errno
@ -250,6 +251,13 @@ def compare_elements(a, b):
return set(a) == set(b)
def safe_sort_key(value):
"""Return value hash or build one for dictionaries."""
if isinstance(value, collections.Mapping):
return sorted(value.items())
return value
def dict2str(dic):
return ','.join("%s=%s" % (key, val)
for key, val in sorted(six.iteritems(dic)))
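safe_sort_key exists because lists of dicts (a port's fixed_ips, for example) cannot be ordered directly on Python 3; the helper turns each mapping into a sorted list of (key, value) tuples, which can. A hedged re-implementation and usage sketch (the real helper lives in neutron.common.utils):

import collections.abc

# Re-implementation of safe_sort_key for illustration only; the original
# uses collections.Mapping, which predates collections.abc.
def safe_sort_key(value):
    """Return value itself, or a sortable representation for mappings."""
    if isinstance(value, collections.abc.Mapping):
        return sorted(value.items())
    return value

fixed_ips = [{'subnet_id': 'b', 'ip_address': '10.0.0.2'},
             {'subnet_id': 'a', 'ip_address': '10.0.0.1'}]
# sorted(fixed_ips) raises TypeError on Python 3; with the key it works:
print(sorted(fixed_ips, key=safe_sort_key))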

View File

@ -270,7 +270,6 @@ class IpamNonPluggableBackend(ipam_backend_mixin.IpamBackendMixin):
# listed explicitly here by subnet ID) are associated
# with the port.
if (device_owner in constants.ROUTER_INTERFACE_OWNERS_SNAT or
ipv6_utils.is_ipv6_pd_enabled(subnet) or
not is_auto_addr_subnet):
fixed_ip_set.append({'subnet_id': subnet['id']})

View File

@ -39,14 +39,6 @@ from neutron.plugins.common import utils as p_utils
LOG = logging.getLogger(__name__)
DEVICE_OWNER_DVR_INTERFACE = l3_const.DEVICE_OWNER_DVR_INTERFACE
DEVICE_OWNER_DVR_SNAT = l3_const.DEVICE_OWNER_ROUTER_SNAT
FLOATINGIP_AGENT_INTF_KEY = l3_const.FLOATINGIP_AGENT_INTF_KEY
DEVICE_OWNER_AGENT_GW = l3_const.DEVICE_OWNER_AGENT_GW
SNAT_ROUTER_INTF_KEY = l3_const.SNAT_ROUTER_INTF_KEY
router_distributed_opts = [
cfg.BoolOpt('router_distributed',
default=False,
@ -62,9 +54,9 @@ class L3_NAT_with_dvr_db_mixin(l3_db.L3_NAT_db_mixin,
router_device_owners = (
l3_db.L3_NAT_db_mixin.router_device_owners +
(DEVICE_OWNER_DVR_INTERFACE,
DEVICE_OWNER_DVR_SNAT,
DEVICE_OWNER_AGENT_GW))
(l3_const.DEVICE_OWNER_DVR_INTERFACE,
l3_const.DEVICE_OWNER_ROUTER_SNAT,
l3_const.DEVICE_OWNER_AGENT_GW))
extra_attributes = (
l3_attrs_db.ExtraAttributesMixin.extra_attributes + [{
@ -117,7 +109,7 @@ class L3_NAT_with_dvr_db_mixin(l3_db.L3_NAT_db_mixin,
"""Update the model to support the dvr case of a router."""
if data.get('distributed'):
old_owner = l3_const.DEVICE_OWNER_ROUTER_INTF
new_owner = DEVICE_OWNER_DVR_INTERFACE
new_owner = l3_const.DEVICE_OWNER_DVR_INTERFACE
for rp in router_db.attached_ports.filter_by(port_type=old_owner):
rp.port_type = new_owner
rp.port.device_owner = new_owner
@ -178,7 +170,7 @@ class L3_NAT_with_dvr_db_mixin(l3_db.L3_NAT_db_mixin,
if router_is_uuid:
router = self._get_router(context, router)
if is_distributed_router(router):
return DEVICE_OWNER_DVR_INTERFACE
return l3_const.DEVICE_OWNER_DVR_INTERFACE
return super(L3_NAT_with_dvr_db_mixin,
self)._get_device_owner(context, router)
@ -321,7 +313,7 @@ class L3_NAT_with_dvr_db_mixin(l3_db.L3_NAT_db_mixin,
def _port_has_ipv6_address(self, port):
"""Overridden to return False if DVR SNAT port."""
if port['device_owner'] == DEVICE_OWNER_DVR_SNAT:
if port['device_owner'] == l3_const.DEVICE_OWNER_ROUTER_SNAT:
return False
return super(L3_NAT_with_dvr_db_mixin,
self)._port_has_ipv6_address(port)
@ -379,7 +371,7 @@ class L3_NAT_with_dvr_db_mixin(l3_db.L3_NAT_db_mixin,
qry = context.session.query(l3_db.RouterPort)
qry = qry.filter(
l3_db.RouterPort.router_id.in_(router_ids),
l3_db.RouterPort.port_type == DEVICE_OWNER_DVR_SNAT
l3_db.RouterPort.port_type == l3_const.DEVICE_OWNER_ROUTER_SNAT
)
interfaces = collections.defaultdict(list)
for rp in qry:
@ -426,7 +418,7 @@ class L3_NAT_with_dvr_db_mixin(l3_db.L3_NAT_db_mixin,
if router['gw_port_id']:
snat_router_intfs = snat_intfs_by_router_id[router['id']]
LOG.debug("SNAT ports returned: %s ", snat_router_intfs)
router[SNAT_ROUTER_INTF_KEY] = snat_router_intfs
router[l3_const.SNAT_ROUTER_INTF_KEY] = snat_router_intfs
return routers_dict
def _process_floating_ips_dvr(self, context, routers_dict,
@ -455,7 +447,7 @@ class L3_NAT_with_dvr_db_mixin(l3_db.L3_NAT_db_mixin,
if not fip_agent_id:
return []
filters = {'device_id': [fip_agent_id],
'device_owner': [DEVICE_OWNER_AGENT_GW]}
'device_owner': [l3_const.DEVICE_OWNER_AGENT_GW]}
interfaces = self._core_plugin.get_ports(context.elevated(), filters)
LOG.debug("Return the FIP ports: %s ", interfaces)
return interfaces
@ -483,8 +475,8 @@ class L3_NAT_with_dvr_db_mixin(l3_db.L3_NAT_db_mixin,
ports_to_populate.append(router['gw_port'])
if router.get(l3_const.FLOATINGIP_AGENT_INTF_KEY):
ports_to_populate += router[l3_const.FLOATINGIP_AGENT_INTF_KEY]
if router.get(SNAT_ROUTER_INTF_KEY):
ports_to_populate += router[SNAT_ROUTER_INTF_KEY]
if router.get(l3_const.SNAT_ROUTER_INTF_KEY):
ports_to_populate += router[l3_const.SNAT_ROUTER_INTF_KEY]
ports_to_populate += interfaces
self._populate_subnets_for_ports(context, ports_to_populate)
self._process_interfaces(routers_dict, interfaces)
@ -495,7 +487,7 @@ class L3_NAT_with_dvr_db_mixin(l3_db.L3_NAT_db_mixin,
vm_port_db = port or self._core_plugin.get_port(context, port_id)
device_owner = vm_port_db['device_owner'] if vm_port_db else ""
if (n_utils.is_dvr_serviced(device_owner) or
device_owner == DEVICE_OWNER_AGENT_GW):
device_owner == l3_const.DEVICE_OWNER_AGENT_GW):
return vm_port_db[portbindings.HOST_ID]
def _get_agent_gw_ports_exist_for_network(
@ -508,7 +500,7 @@ class L3_NAT_with_dvr_db_mixin(l3_db.L3_NAT_db_mixin,
filters = {
'network_id': [network_id],
'device_id': [agent_id],
'device_owner': [DEVICE_OWNER_AGENT_GW]
'device_owner': [l3_const.DEVICE_OWNER_AGENT_GW]
}
ports = self._core_plugin.get_ports(context, filters)
if ports:
@ -543,7 +535,7 @@ class L3_NAT_with_dvr_db_mixin(l3_db.L3_NAT_db_mixin,
self, context, host_id, ext_net_id):
"""Function to delete FIP gateway port with given ext_net_id."""
# delete any fip agent gw port
device_filter = {'device_owner': [DEVICE_OWNER_AGENT_GW],
device_filter = {'device_owner': [l3_const.DEVICE_OWNER_AGENT_GW],
'network_id': [ext_net_id]}
ports = self._core_plugin.get_ports(context,
filters=device_filter)
@ -573,7 +565,7 @@ class L3_NAT_with_dvr_db_mixin(l3_db.L3_NAT_db_mixin,
port_data = {'tenant_id': '',
'network_id': network_id,
'device_id': l3_agent_db['id'],
'device_owner': DEVICE_OWNER_AGENT_GW,
'device_owner': l3_const.DEVICE_OWNER_AGENT_GW,
'binding:host_id': host,
'admin_state_up': True,
'name': ''}
@ -593,7 +585,7 @@ class L3_NAT_with_dvr_db_mixin(l3_db.L3_NAT_db_mixin,
qry = context.session.query(l3_db.RouterPort)
qry = qry.filter_by(
router_id=router_id,
port_type=DEVICE_OWNER_DVR_SNAT
port_type=l3_const.DEVICE_OWNER_ROUTER_SNAT
)
ports = [self._core_plugin._make_port_dict(rp.port, None)
@ -607,7 +599,7 @@ class L3_NAT_with_dvr_db_mixin(l3_db.L3_NAT_db_mixin,
'network_id': network_id,
'fixed_ips': [{'subnet_id': subnet_id}],
'device_id': router.id,
'device_owner': DEVICE_OWNER_DVR_SNAT,
'device_owner': l3_const.DEVICE_OWNER_ROUTER_SNAT,
'admin_state_up': True,
'name': ''}
snat_port = p_utils.create_port(self._core_plugin, context,
@ -620,7 +612,7 @@ class L3_NAT_with_dvr_db_mixin(l3_db.L3_NAT_db_mixin,
router_port = l3_db.RouterPort(
port_id=snat_port['id'],
router_id=router.id,
port_type=DEVICE_OWNER_DVR_SNAT
port_type=l3_const.DEVICE_OWNER_ROUTER_SNAT
)
context.session.add(router_port)
@ -645,7 +637,7 @@ class L3_NAT_with_dvr_db_mixin(l3_db.L3_NAT_db_mixin,
int_ports = (
rp.port for rp in
router.attached_ports.filter_by(
port_type=DEVICE_OWNER_DVR_INTERFACE
port_type=l3_const.DEVICE_OWNER_DVR_INTERFACE
)
)
LOG.info(_LI('SNAT interface port list does not exist,'
@ -680,7 +672,7 @@ class L3_NAT_with_dvr_db_mixin(l3_db.L3_NAT_db_mixin,
filters = {'fixed_ips': {'subnet_id': [subnet]}}
ports = self._core_plugin.get_ports(context, filters=filters)
for port in ports:
if port['device_owner'] == DEVICE_OWNER_DVR_INTERFACE:
if port['device_owner'] == l3_const.DEVICE_OWNER_DVR_INTERFACE:
router_id = port['device_id']
router_dict = self._get_router(context, router_id)
if router_dict.extra_attributes.distributed:
@ -704,7 +696,8 @@ class L3_NAT_with_dvr_db_mixin(l3_db.L3_NAT_db_mixin,
# changeset size since it is late in cycle
ports = (
rp.port.id for rp in
router.attached_ports.filter_by(port_type=DEVICE_OWNER_DVR_SNAT)
router.attached_ports.filter_by(
port_type=l3_const.DEVICE_OWNER_ROUTER_SNAT)
if rp.port
)

View File

@ -13,6 +13,8 @@
# License for the specific language governing permissions and limitations
# under the License.
from itertools import chain
from oslo_log import log as logging
import sqlalchemy as sa
@ -42,14 +44,36 @@ class ServiceTypeManager(object):
return cls._instance
def __init__(self):
self._load_conf()
self.config = {}
# TODO(armax): remove these as soon as *-aaS start using
# the newly introduced add_provider_configuration API
self.config['LOADBALANCER'] = (
pconf.ProviderConfiguration('neutron_lbaas'))
self.config['LOADBALANCERV2'] = (
pconf.ProviderConfiguration('neutron_lbaas'))
self.config['FIREWALL'] = (
pconf.ProviderConfiguration('neutron_fwaas'))
self.config['VPN'] = (
pconf.ProviderConfiguration('neutron_vpnaas'))
def _load_conf(self):
self.conf = pconf.ProviderConfiguration(
pconf.parse_service_provider_opt())
def add_provider_configuration(self, service_type, configuration):
"""Add or update the provider configuration for the service type."""
LOG.debug('Adding provider configuration for service %s', service_type)
self.config.update({service_type: configuration})
def get_service_providers(self, context, filters=None, fields=None):
return self.conf.get_service_providers(filters, fields)
if filters and 'service_type' in filters:
return list(
chain.from_iterable(self.config[svc_type].
get_service_providers(filters, fields)
for svc_type in filters['service_type']
if svc_type in self.config)
)
return list(
chain.from_iterable(
self.config[p].get_service_providers(filters, fields)
for p in self.config)
)
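get_service_providers now fans out over the per-service-type ProviderConfiguration objects registered in self.config and flattens the results with itertools.chain.from_iterable; when a service_type filter is supplied, only the matching configurations are consulted. A standalone sketch with made-up provider data:

from itertools import chain

# Illustrative fan-out over per-service-type provider configs, mirroring
# get_service_providers above (the data below is invented).
config = {
    'LOADBALANCERV2': [{'name': 'haproxy', 'service_type': 'LOADBALANCERV2'}],
    'VPN': [{'name': 'openswan', 'service_type': 'VPN'}],
}

def get_service_providers(filters=None):
    if filters and 'service_type' in filters:
        return list(chain.from_iterable(
            config[svc_type] for svc_type in filters['service_type']
            if svc_type in config))
    return list(chain.from_iterable(config.values()))

print(get_service_providers({'service_type': ['VPN']}))
print(get_service_providers())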
def get_default_service_provider(self, context, service_type):
"""Return the default provider for a given service type."""
@ -65,7 +89,7 @@ class ServiceTypeManager(object):
def add_resource_association(self, context, service_type, provider_name,
resource_id):
r = self.conf.get_service_providers(
r = self.get_service_providers(context,
filters={'service_type': [service_type], 'name': [provider_name]})
if not r:
raise pconf.ServiceProviderNotFound(provider=provider_name,

View File

@ -67,15 +67,8 @@ VHOST_USER_OVS_PLUG = 'vhostuser_ovs_plug'
VIF_TYPE_UNBOUND = 'unbound'
VIF_TYPE_BINDING_FAILED = 'binding_failed'
VIF_TYPE_DISTRIBUTED = 'distributed'
VIF_TYPE_IOVISOR = 'iovisor'
VIF_TYPE_OVS = 'ovs'
VIF_TYPE_VHOST_USER = 'vhostuser'
VIF_TYPE_IVS = 'ivs'
VIF_TYPE_DVS = 'dvs'
VIF_TYPE_BRIDGE = 'bridge'
VIF_TYPE_802_QBG = '802.1qbg'
VIF_TYPE_802_QBH = '802.1qbh'
VIF_TYPE_MIDONET = 'midonet'
VIF_TYPE_OTHER = 'other'
VNIC_NORMAL = 'normal'

View File

@ -211,7 +211,8 @@ class IpamSubnet(driver.Subnet):
class SubnetPoolReader(object):
'''Class to assist with reading a subnetpool, loading defaults, and
inferring IP version from prefix list. Provides a common way of
reading a stored model or a create request with defaultable attributes.
reading a stored model or a create request with default table
attributes.
'''
MIN_PREFIX_TYPE = 'min'
MAX_PREFIX_TYPE = 'max'

View File

@ -64,6 +64,18 @@ class QosPolicy(base.NeutronDbObject):
setattr(self, 'rules', rules)
self.obj_reset_changes(['rules'])
def get_rule_by_id(self, rule_id):
"""Return rule specified by rule_id.
@raise QosRuleNotFound: if there is no such rule in the policy.
"""
for rule in self.rules:
if rule_id == rule.id:
return rule
raise exceptions.QosRuleNotFound(policy_id=self.id,
rule_id=rule_id)
@staticmethod
def _is_policy_accessible(context, db_obj):
#TODO(QoS): Look at I3426b13eede8bfa29729cf3efea3419fb91175c4 for

View File

@ -60,10 +60,13 @@ class NetworkContext(MechanismDriverContext, api.NetworkContext):
class SubnetContext(MechanismDriverContext, api.SubnetContext):
def __init__(self, plugin, plugin_context, subnet, original_subnet=None):
def __init__(self, plugin, plugin_context, subnet, network,
original_subnet=None):
super(SubnetContext, self).__init__(plugin, plugin_context)
self._subnet = subnet
self._original_subnet = original_subnet
self._network_context = NetworkContext(plugin, plugin_context,
network)
@property
def current(self):
@ -73,6 +76,10 @@ class SubnetContext(MechanismDriverContext, api.SubnetContext):
def original(self):
return self._original_subnet
@property
def network(self):
return self._network_context
class PortContext(MechanismDriverContext, api.PortContext):

View File

@ -1 +0,0 @@
networking_arista>=2015.1.1,<2015.2.1

View File

@ -1 +0,0 @@
networking-cisco

View File

@ -34,10 +34,6 @@ _keywords = {
class OpenFlowSwitchMixin(object):
"""Mixin to provide common convenient routines for an openflow switch."""
agent_uuid_stamp = '0x0'
def set_agent_uuid_stamp(self, val):
self.agent_uuid_stamp = val
@staticmethod
def _conv_args(kwargs):
@ -88,14 +84,6 @@ class OpenFlowSwitchMixin(object):
else:
super(OpenFlowSwitchMixin, self).remove_all_flows()
def add_flow(self, **kwargs):
kwargs['cookie'] = self.agent_uuid_stamp
super(OpenFlowSwitchMixin, self).add_flow(**self._conv_args(kwargs))
def mod_flow(self, **kwargs):
kwargs['cookie'] = self.agent_uuid_stamp
super(OpenFlowSwitchMixin, self).mod_flow(**self._conv_args(kwargs))
def _filter_flows(self, flows):
LOG.debug("Agent uuid stamp used to filter flows: %s",
self.agent_uuid_stamp)

View File

@ -13,6 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
import collections
import hashlib
import signal
import sys
@ -129,7 +130,8 @@ class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin,
# 1.1 Support Security Group RPC
# 1.2 Support DVR (Distributed Virtual Router) RPC
# 1.3 Added param devices_to_update to security_groups_provider_updated
target = oslo_messaging.Target(version='1.3')
# 1.4 Added support for network_update
target = oslo_messaging.Target(version='1.4')
def __init__(self, bridge_classes, integ_br, tun_br, local_ip,
bridge_mappings, polling_interval, tunnel_types=None,
@ -231,6 +233,8 @@ class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin,
self.updated_ports = set()
# Stores port delete notifications
self.deleted_ports = set()
self.network_ports = collections.defaultdict(set)
# keeps association between ports and ofports to detect ofport change
self.vifname_to_ofport_map = {}
self.setup_rpc()
@ -368,7 +372,8 @@ class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin,
[constants.TUNNEL, topics.UPDATE],
[constants.TUNNEL, topics.DELETE],
[topics.SECURITY_GROUP, topics.UPDATE],
[topics.DVR, topics.UPDATE]]
[topics.DVR, topics.UPDATE],
[topics.NETWORK, topics.UPDATE]]
if self.l2_pop:
consumers.append([topics.L2POPULATION,
topics.UPDATE, self.conf.host])
@ -401,8 +406,27 @@ class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin,
def port_delete(self, context, **kwargs):
port_id = kwargs.get('port_id')
self.deleted_ports.add(port_id)
self.updated_ports.discard(port_id)
LOG.debug("port_delete message processed for port %s", port_id)
def network_update(self, context, **kwargs):
network_id = kwargs['network']['id']
for port_id in self.network_ports[network_id]:
# notifications could arrive out of order, if the port is deleted
# we don't want to update it anymore
if port_id not in self.deleted_ports:
self.updated_ports.add(port_id)
LOG.debug("network_update message processed for network "
"%(network_id)s, with ports: %(ports)s",
{'network_id': network_id,
'ports': self.network_ports[network_id]})
def _clean_network_ports(self, port_id):
for port_set in self.network_ports.values():
if port_id in port_set:
port_set.remove(port_id)
break
def process_deleted_ports(self, port_info):
# don't try to process removed ports as deleted ports since
# they are already gone
@ -414,6 +438,7 @@ class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin,
# longer have access to the network
self.sg_agent.remove_devices_filter([port_id])
port = self.int_br.get_vif_port_by_id(port_id)
self._clean_network_ports(port_id)
self.ext_manager.delete_port(self.context,
{"vif_port": port,
"port_id": port_id})
@ -1304,7 +1329,8 @@ class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin,
has_sgs = 'security_groups' in details
if not port_security or not has_sgs:
security_disabled_devices.append(device)
self._update_port_network(details['port_id'],
details['network_id'])
self.ext_manager.handle_port(self.context, details)
else:
LOG.warn(_LW("Device %s not defined on plugin"), device)
@ -1312,6 +1338,10 @@ class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin,
self.port_dead(port)
return skipped_devices, need_binding_devices, security_disabled_devices
def _update_port_network(self, port_id, network_id):
self._clean_network_ports(port_id)
self.network_ports[network_id].add(port_id)
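network_ports above is a collections.defaultdict(set) keyed by network id; _update_port_network drops the port from whichever set it was in before recording the new association, so a network_update RPC can mark exactly the ports on that network as updated. A small standalone sketch of that bookkeeping:

import collections

# Illustrative network_id -> set(port_ids) bookkeeping, mirroring
# _clean_network_ports / _update_port_network above.
network_ports = collections.defaultdict(set)

def clean_network_ports(port_id):
    for port_set in network_ports.values():
        if port_id in port_set:
            port_set.remove(port_id)
            break

def update_port_network(port_id, network_id):
    clean_network_ports(port_id)
    network_ports[network_id].add(port_id)

update_port_network('port-1', 'net-a')
update_port_network('port-1', 'net-b')   # port re-associated with another network
print(dict(network_ports))               # {'net-a': set(), 'net-b': {'port-1'}}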
def treat_ancillary_devices_added(self, devices):
devices_details_list = (
self.plugin_rpc.get_devices_details_list_and_failed_devices(

View File

@ -665,6 +665,13 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
self._process_l3_update(context, updated_network, net_data)
self.type_manager.extend_network_dict_provider(context,
updated_network)
# TODO(QoS): Move out to the extension framework somehow.
need_network_update_notify = (
qos_consts.QOS_POLICY_ID in net_data and
original_network[qos_consts.QOS_POLICY_ID] !=
updated_network[qos_consts.QOS_POLICY_ID])
mech_context = driver_context.NetworkContext(
self, context, updated_network,
original_network=original_network)
@ -675,6 +682,8 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
# now the error is propagated to the caller, which is expected to
# either undo/retry the operation or delete the resource.
self.mechanism_manager.update_network_postcommit(mech_context)
if need_network_update_notify:
self.notifier.network_update(context, updated_network)
return updated_network
def get_network(self, context, id, fields=None):
@ -814,7 +823,9 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
result = super(Ml2Plugin, self).create_subnet(context, subnet)
self.extension_manager.process_create_subnet(
context, subnet[attributes.SUBNET], result)
mech_context = driver_context.SubnetContext(self, context, result)
network = self.get_network(context, result['network_id'])
mech_context = driver_context.SubnetContext(self, context,
result, network)
self.mechanism_manager.create_subnet_precommit(mech_context)
return result, mech_context
@ -842,8 +853,10 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
context, id, subnet)
self.extension_manager.process_update_subnet(
context, subnet[attributes.SUBNET], updated_subnet)
network = self.get_network(context, updated_subnet['network_id'])
mech_context = driver_context.SubnetContext(
self, context, updated_subnet, original_subnet=original_subnet)
self, context, updated_subnet, network,
original_subnet=original_subnet)
self.mechanism_manager.update_subnet_precommit(mech_context)
# TODO(apech) - handle errors raised by update_subnet, potentially
@ -920,8 +933,10 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
# If allocated is None, then all the IPAllocation were
# correctly deleted during the previous pass.
if not allocated:
network = self.get_network(context, subnet['network_id'])
mech_context = driver_context.SubnetContext(self, context,
subnet)
subnet,
network)
self.mechanism_manager.delete_subnet_precommit(
mech_context)
@ -932,7 +947,7 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
break
for a in allocated:
if a.port_id:
if a.port:
# calling update_port() for each allocation to remove the
# IP from the port and call the MechanismDrivers
data = {attributes.PORT:
@ -942,6 +957,8 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
if ip.subnet_id != id]}}
try:
self.update_port(context, a.port_id, data)
except exc.PortNotFound:
LOG.debug("Port %s deleted concurrently", a.port_id)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Exception deleting fixed_ip "

View File

@ -279,7 +279,7 @@ class AgentNotifierApi(dvr_rpc.DVRAgentRpcApiMixin,
1.0 - Initial version.
1.1 - Added get_active_networks_info, create_dhcp_port,
update_dhcp_port, and removed get_dhcp_port methods.
1.4 - Added network_update
"""
def __init__(self, topic):
@ -293,6 +293,9 @@ class AgentNotifierApi(dvr_rpc.DVRAgentRpcApiMixin,
self.topic_port_delete = topics.get_topic_name(topic,
topics.PORT,
topics.DELETE)
self.topic_network_update = topics.get_topic_name(topic,
topics.NETWORK,
topics.UPDATE)
target = oslo_messaging.Target(topic=topic, version='1.0')
self.client = n_rpc.get_client(target)
@ -314,3 +317,8 @@ class AgentNotifierApi(dvr_rpc.DVRAgentRpcApiMixin,
cctxt = self.client.prepare(topic=self.topic_port_delete,
fanout=True)
cctxt.cast(context, 'port_delete', port_id=port_id)
def network_update(self, context, network):
cctxt = self.client.prepare(topic=self.topic_network_update,
fanout=True, version='1.4')
cctxt.cast(context, 'network_update', network=network)

View File

@ -1 +0,0 @@
vmware-nsx

View File

@ -13,14 +13,16 @@
# License for the specific language governing permissions and limitations
# under the License.
import importlib
import os
from oslo_config import cfg
from oslo_log import log as logging
from six.moves import configparser as ConfigParser
import stevedore
from neutron.common import exceptions as n_exc
from neutron.common import repos
from neutron.i18n import _LW
from neutron.plugins.common import constants
LOG = logging.getLogger(__name__)
@ -36,6 +38,67 @@ serviceprovider_opts = [
cfg.CONF.register_opts(serviceprovider_opts, 'service_providers')
class NeutronModule(object):
"""A Neutron extension module."""
def __init__(self, service_module):
self.module_name = service_module
self.repo = {
'mod': self._import_or_none(),
'ini': None
}
def _import_or_none(self):
try:
return importlib.import_module(self.module_name)
except ImportError:
return None
def installed(self):
LOG.debug("NeutronModule installed = %s", self.module_name)
return self.module_name
def module(self):
return self.repo['mod']
# Return an INI parser for the child module. oslo.config is a bit too
# magical in its INI loading, and in one notable case, we need to merge
# together the [service_providers] section across at least four
# repositories.
def ini(self):
if self.repo['ini'] is None:
neutron_dir = None
try:
neutron_dir = cfg.CONF.config_dir
except cfg.NoSuchOptError:
pass
if neutron_dir is None:
neutron_dir = '/etc/neutron'
ini = ConfigParser.SafeConfigParser()
ini_path = os.path.join(neutron_dir, '%s.conf' % self.module_name)
if os.path.exists(ini_path):
ini.read(ini_path)
self.repo['ini'] = ini
return self.repo['ini']
def service_providers(self):
ini = self.ini()
sp = []
try:
for name, value in ini.items('service_providers'):
if name == 'service_provider':
sp.append(value)
except ConfigParser.NoSectionError:
pass
return sp
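The ini()/service_providers() pair above reads the module's own configuration file (for example /etc/neutron/neutron_lbaas.conf) and collects every service_provider entry from its [service_providers] section. A hedged sketch of that parsing with the oslo/six plumbing left out and an in-memory config (the provider line below is only an example):

import configparser
import io

# Illustrative stand-in for NeutronModule.service_providers().
sample = """
[service_providers]
service_provider = LOADBALANCERV2:Haproxy:neutron_lbaas.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default
"""

ini = configparser.ConfigParser()
ini.read_file(io.StringIO(sample))

providers = [value for name, value in ini.items('service_providers')
             if name == 'service_provider']
print(providers)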
#global scope function that should be used in service APIs
def normalize_provider_name(name):
return name.lower()
@ -65,32 +128,16 @@ def get_provider_driver_class(driver, namespace=SERVICE_PROVIDERS):
return new_driver
def parse_service_provider_opt():
def parse_service_provider_opt(service_module='neutron'):
"""Parse service definition opts and returns result."""
def validate_name(name):
if len(name) > 255:
raise n_exc.Invalid(
_("Provider name is limited by 255 characters: %s") % name)
# TODO(dougwig) - phase out the neutron.conf location for service
# providers a cycle or two after Kilo.
# Look in neutron.conf for service providers first (legacy mode)
try:
svc_providers_opt = cfg.CONF.service_providers.service_provider
except cfg.NoSuchOptError:
svc_providers_opt = []
# Look in neutron-*aas.conf files for service provider configs
if svc_providers_opt:
LOG.warning(_LW("Reading service_providers from legacy location in "
"neutron.conf, and ignoring values in "
"neutron_*aas.conf files; this override will be "
"going away soon."))
else:
neutron_mods = repos.NeutronModules()
for x in neutron_mods.installed_list():
svc_providers_opt += neutron_mods.service_providers(x)
neutron_mod = NeutronModule(service_module)
svc_providers_opt = neutron_mod.service_providers()
LOG.debug("Service providers = %s", svc_providers_opt)
@ -113,14 +160,7 @@ def parse_service_provider_opt():
prov_def)
LOG.error(msg)
raise n_exc.Invalid(msg)
ALLOWED_SERVICES = constants.EXT_TO_SERVICE_MAPPING.values()
if svc_type not in ALLOWED_SERVICES:
msg = (_("Service type '%(svc_type)s' is not allowed, "
"allowed types: %(allowed)s") %
{'svc_type': svc_type,
'allowed': ALLOWED_SERVICES})
LOG.error(msg)
raise n_exc.Invalid(msg)
driver = get_provider_driver_class(driver)
res.append({'service_type': svc_type,
'name': name,
@ -145,9 +185,10 @@ class ServiceProviderAlreadyAssociated(n_exc.Conflict):
class ProviderConfiguration(object):
def __init__(self, prov_data):
def __init__(self, svc_module='neutron'):
self.providers = {}
for prov in prov_data:
for prov in parse_service_provider_opt(svc_module):
self.add_provider(prov)
def _ensure_driver_unique(self, driver):

View File

@ -109,6 +109,8 @@ class QoSPlugin(qos.QoSPluginBase):
with db_api.autonested_transaction(context.session):
# first, validate that we have access to the policy
policy = self._get_policy_obj(context, policy_id)
# check if the rule belong to the policy
policy.get_rule_by_id(rule_id)
rule = rule_object.QosBandwidthLimitRule(
context, **bandwidth_limit_rule['bandwidth_limit_rule'])
rule.id = rule_id
@ -122,8 +124,7 @@ class QoSPlugin(qos.QoSPluginBase):
with db_api.autonested_transaction(context.session):
# first, validate that we have access to the policy
policy = self._get_policy_obj(context, policy_id)
rule = rule_object.QosBandwidthLimitRule(context)
rule.id = rule_id
rule = policy.get_rule_by_id(rule_id)
rule.delete()
policy.reload_rules()
self.notification_driver_manager.update_policy(context, policy)

View File

@ -33,8 +33,11 @@ class TestOVSAgent(base.OVSAgentTestFramework):
self.wait_until_ports_state(self.ports, up=False)
def test_datapath_type_system(self):
expected = constants.OVS_DATAPATH_SYSTEM
def _check_datapath_type_netdev(self, expected, default=False):
if not default:
self.config.set_override('datapath_type',
expected,
"OVS")
agent = self.create_agent()
self.start_agent(agent)
actual = self.ovs.db_get_val('Bridge',
@ -47,20 +50,16 @@ class TestOVSAgent(base.OVSAgentTestFramework):
self.assertEqual(expected, actual)
def test_datapath_type_netdev(self):
expected = constants.OVS_DATAPATH_NETDEV
self.config.set_override('datapath_type',
expected,
"OVS")
agent = self.create_agent()
self.start_agent(agent)
actual = self.ovs.db_get_val('Bridge',
agent.int_br.br_name,
'datapath_type')
self.assertEqual(expected, actual)
actual = self.ovs.db_get_val('Bridge',
agent.tun_br.br_name,
'datapath_type')
self.assertEqual(expected, actual)
self._check_datapath_type_netdev(
constants.OVS_DATAPATH_NETDEV)
def test_datapath_type_system(self):
self._check_datapath_type_netdev(
constants.OVS_DATAPATH_SYSTEM)
def test_datapath_type_default(self):
self._check_datapath_type_netdev(
constants.OVS_DATAPATH_SYSTEM, default=True)
def test_resync_devices_set_up_after_exception(self):
self.setup_agent_and_ports(

View File

@ -96,6 +96,10 @@ class L3AgentTestFramework(base.BaseSudoTestCase):
get_temp_file_path = functools.partial(self.get_temp_file_path,
root=temp_dir)
conf.set_override('state_path', temp_dir.path)
# NOTE(cbrandily): log_file or log_dir must be set otherwise
# metadata_proxy_watch_log has no effect
conf.set_override('log_file',
get_temp_file_path('log_file'))
conf.set_override('metadata_proxy_socket',
get_temp_file_path('metadata_proxy'))
conf.set_override('ha_confs_path',

View File

@ -14,7 +14,6 @@
from neutron.api.v2 import attributes
from neutron.common import constants as l3_const
from neutron.db import l3_dvr_db
from neutron.extensions import external_net
from neutron.tests.common import helpers
from neutron.tests.unit.plugins.ml2 import base as ml2_test_base
@ -44,13 +43,13 @@ class L3DvrTestCase(ml2_test_base.ML2TestFramework):
def test_get_device_owner_distributed_router_object(self):
router = self._create_router()
self.assertEqual(
l3_dvr_db.DEVICE_OWNER_DVR_INTERFACE,
l3_const.DEVICE_OWNER_DVR_INTERFACE,
self.l3_plugin._get_device_owner(self.context, router))
def test_get_device_owner_distributed_router_id(self):
router = self._create_router()
self.assertEqual(
l3_dvr_db.DEVICE_OWNER_DVR_INTERFACE,
l3_const.DEVICE_OWNER_DVR_INTERFACE,
self.l3_plugin._get_device_owner(self.context, router['id']))
def test_get_device_owner_centralized(self):
@ -123,7 +122,7 @@ class L3DvrTestCase(ml2_test_base.ML2TestFramework):
'mac_address': attributes.ATTR_NOT_SPECIFIED,
'fixed_ips': attributes.ATTR_NOT_SPECIFIED,
'device_id': self.l3_agent['id'],
'device_owner': l3_dvr_db.DEVICE_OWNER_AGENT_GW,
'device_owner': l3_const.DEVICE_OWNER_AGENT_GW,
'binding:host_id': '',
'admin_state_up': True,
'name': ''}})

View File

@ -87,6 +87,8 @@ expected_calls_and_values is a list of (expected_call, return_value):
import unittest
from neutron.common import utils
def setup_mock_calls(mocked_call, expected_calls_and_values):
return_values = [call[1] for call in expected_calls_and_values]
@ -114,7 +116,8 @@ class UnorderedList(list):
def __eq__(self, other):
if not isinstance(other, list):
return False
return sorted(self) == sorted(other)
return (sorted(self, key=utils.safe_sort_key) ==
sorted(other, key=utils.safe_sort_key))
def __neq__(self, other):
return not self == other

View File

@ -852,3 +852,19 @@ class TestDeferredOVSBridge(base.BaseTestCase):
def test_getattr_unallowed_attr_failure(self):
with ovs_lib.DeferredOVSBridge(self.br) as deferred_br:
self.assertRaises(AttributeError, getattr, deferred_br, 'failure')
def test_cookie_passed_to_addmod(self):
self.br = ovs_lib.OVSBridge("br-tun")
self.br.set_agent_uuid_stamp(1234)
expected_calls = [
mock.call('add-flows', ['-'],
'hard_timeout=0,idle_timeout=0,priority=1,'
'cookie=1234,actions=drop'),
mock.call('mod-flows', ['-'],
'cookie=1234,actions=drop')
]
with mock.patch.object(self.br, 'run_ofctl') as f:
with ovs_lib.DeferredOVSBridge(self.br) as deferred_br:
deferred_br.add_flow(actions='drop')
deferred_br.mod_flow(actions='drop')
f.assert_has_calls(expected_calls)

View File

@ -541,13 +541,13 @@ class TestIPCmdBase(base.BaseTestCase):
self.parent.name = 'eth0'
def _assert_call(self, options, args):
self.parent.assert_has_calls([
mock.call._run(options, self.command, args)])
self.parent._run.assert_has_calls([
mock.call(options, self.command, args)])
def _assert_sudo(self, options, args, use_root_namespace=False):
self.parent.assert_has_calls(
[mock.call._as_root(options, self.command, args,
use_root_namespace=use_root_namespace)])
self.parent._as_root.assert_has_calls(
[mock.call(options, self.command, args,
use_root_namespace=use_root_namespace)])
class TestIpRuleCommand(TestIPCmdBase):
@ -975,6 +975,53 @@ class TestIPv6IpRouteCommand(TestIpRouteCommand):
'metric': 1024}}]
class TestIPRoute(TestIpRouteCommand):
"""Leverage existing tests for IpRouteCommand for IPRoute
This test leverages the tests written for IpRouteCommand. The difference
is that the 'dev' argument should not be passed for each of the commands.
So, this test removes the dev argument from the expected arguments in each
assert.
"""
def setUp(self):
super(TestIPRoute, self).setUp()
self.parent = ip_lib.IPRoute()
self.parent._run = mock.Mock()
self.parent._as_root = mock.Mock()
self.route_cmd = self.parent.route
self.check_dev_args = False
def _remove_dev_args(self, args):
def args_without_dev():
previous = None
for arg in args:
if 'dev' not in (arg, previous):
yield arg
previous = arg
return tuple(arg for arg in args_without_dev())
def _assert_call(self, options, args):
if not self.check_dev_args:
args = self._remove_dev_args(args)
super(TestIPRoute, self)._assert_call(options, args)
def _assert_sudo(self, options, args, use_root_namespace=False):
if not self.check_dev_args:
args = self._remove_dev_args(args)
super(TestIPRoute, self)._assert_sudo(options, args)
def test_pullup_route(self):
# This method gets the interface name passed to it as an argument. So,
# don't remove it from the expected arguments.
self.check_dev_args = True
super(TestIPRoute, self).test_pullup_route()
def test_del_gateway_cannot_find_device(self):
# This test doesn't make sense for this case since dev won't be passed
pass
class TestIpNetnsCommand(TestIPCmdBase):
def setUp(self):
super(TestIpNetnsCommand, self).setUp()

View File

@ -26,6 +26,7 @@ from neutron.agent.linux import iptables_comments as ic
from neutron.agent.linux import iptables_firewall
from neutron.agent import securitygroups_rpc as sg_cfg
from neutron.common import constants
from neutron.common import exceptions as n_exc
from neutron.tests import base
from neutron.tests.unit.api.v2 import test_base
@ -1079,7 +1080,8 @@ class IptablesFirewallTestCase(BaseIptablesFirewallTestCase):
cmd.extend(['-d', 'fe80::1'])
else:
cmd.extend(['-s', 'fe80::1'])
cmd.extend(['-w', 1])
# initial data has 1, 2, and 9 in use, CT zone will start at 10.
cmd.extend(['-w', 10])
calls = [
mock.call(cmd, run_as_root=True, check_exit_code=True,
extra_ok_codes=[1])]
@ -1108,12 +1110,13 @@ class IptablesFirewallTestCase(BaseIptablesFirewallTestCase):
self.firewall.filtered_ports[port['device']] = new_port
self.firewall.filter_defer_apply_off()
calls = [
# initial data has 1, 2, and 9 in use, CT zone will start at 10.
mock.call(['conntrack', '-D', '-f', 'ipv4', '-d', '10.0.0.1',
'-w', 1],
'-w', 10],
run_as_root=True, check_exit_code=True,
extra_ok_codes=[1]),
mock.call(['conntrack', '-D', '-f', 'ipv6', '-d', 'fe80::1',
'-w', 1],
'-w', 10],
run_as_root=True, check_exit_code=True,
extra_ok_codes=[1])]
self.utils_exec.assert_has_calls(calls)
@ -1836,11 +1839,12 @@ class OVSHybridIptablesFirewallTestCase(BaseIptablesFirewallTestCase):
def setUp(self):
super(OVSHybridIptablesFirewallTestCase, self).setUp()
self.firewall = iptables_firewall.OVSHybridIptablesFirewallDriver()
# initial data has 1, 2, and 9 in use, see RAW_TABLE_OUTPUT above.
self._dev_zone_map = {'61634509-31': 2, '8f46cf18-12': 9,
'95c24827-02': 2, 'e804433b-61': 1}
def test__populate_initial_zone_map(self):
expected = {'61634509-31': 2, '8f46cf18-12': 9,
'95c24827-02': 2, 'e804433b-61': 1}
self.assertEqual(expected, self.firewall._device_zone_map)
self.assertEqual(self._dev_zone_map, self.firewall._device_zone_map)
def test__generate_device_zone(self):
# initial data has 1, 2, and 9 in use.
@ -1863,12 +1867,17 @@ class OVSHybridIptablesFirewallTestCase(BaseIptablesFirewallTestCase):
# fill it up and then make sure an extra throws an error
for i in range(1, 65536):
self.firewall._device_zone_map['dev-%s' % i] = i
with testtools.ExpectedException(RuntimeError):
with testtools.ExpectedException(n_exc.CTZoneExhaustedError):
self.firewall._find_open_zone()
# with it full, try again, this should trigger a cleanup and return 1
self.assertEqual(1, self.firewall._generate_device_zone('p12'))
self.assertEqual({'p12': 1}, self.firewall._device_zone_map)
def test_get_device_zone(self):
# calling get_device_zone should clear out all of the other entries
# since they aren't in the filtered ports list
self.assertEqual(1, self.firewall.get_device_zone('12345678901234567'))
# initial data has 1, 2, and 9 in use.
self.assertEqual(10,
self.firewall.get_device_zone('12345678901234567'))
# should have been truncated to 11 chars
self.assertEqual({'12345678901': 1}, self.firewall._device_zone_map)
self._dev_zone_map.update({'12345678901': 10})
self.assertEqual(self._dev_zone_map, self.firewall._device_zone_map)

View File

@ -14,6 +14,7 @@
import mock
from neutron.agent.common import ovs_lib
from neutron.agent.linux import ovsdb_monitor
from neutron.tests import base
@ -93,3 +94,13 @@ class TestSimpleInterfaceMonitor(base.BaseTestCase):
self.monitor.get_events()
self.assertTrue(process_events.called)
self.assertFalse(self.monitor.has_updates)
def process_event_unassigned_of_port(self):
output = '{"data":[["e040fbec-0579-4990-8324-d338da33ae88","insert",'
output += '"m50",["set",[]],["map",[]]]],"headings":["row","action",'
output += '"name","ofport","external_ids"]}'
with mock.patch.object(
self.monitor, 'iter_stdout', return_value=[output]):
self.monitor.process_events()
self.assertEqual(self.monitor.new_events['added'][0]['ofport'],
ovs_lib.UNASSIGNED_OFPORT)

View File

@ -48,7 +48,7 @@ class TestMetadataDriverRules(base.BaseTestCase):
metadata_driver.MetadataDriver.metadata_filter_rules(8775, '0x1'))
def test_metadata_mangle_rules(self):
rule = ('PREROUTING', '-d 169.254.169.254/32 '
rule = ('PREROUTING', '-d 169.254.169.254/32 -i qr-+ '
'-p tcp -m tcp --dport 80 '
'-j MARK --set-xmark 0x1/%s' %
constants.ROUTER_MARK_MASK)
@ -121,7 +121,7 @@ class TestMetadataDriverProcess(base.BaseTestCase):
router_id]
if not watch_log:
netns_execute_args.append(
'--metadata_proxy_watch_log=false')
'--nometadata_proxy_watch_log')
ip_mock.assert_has_calls([
mock.call(namespace=router_ns),
mock.call().netns.execute(netns_execute_args, addl_env=None,

View File

@ -146,16 +146,6 @@ class ResourcesPullRpcCallbackTestCase(ResourcesRpcBaseTestCase):
primitive['versioned_object.data'])
self.assertEqual(self.resource_obj.obj_to_primitive(), primitive)
@mock.patch.object(FakeResource, 'obj_to_primitive')
def test_pull_no_backport_for_latest_version(self, to_prim_mock):
with mock.patch.object(resources_rpc.prod_registry, 'pull',
return_value=self.resource_obj):
self.callbacks.pull(
self.context, resource_type=FakeResource.obj_name(),
version=FakeResource.VERSION,
resource_id=self.resource_obj.id)
to_prim_mock.assert_called_with(target_version=None)
@mock.patch.object(FakeResource, 'obj_to_primitive')
def test_pull_backports_to_older_version(self, to_prim_mock):
with mock.patch.object(resources_rpc.prod_registry, 'pull',

View File

@ -67,6 +67,40 @@ class FakePluginWithExtension(object):
self._log("method_to_support_foxnsox_extension", context)
class ExtensionPathTest(base.BaseTestCase):
def setUp(self):
self.base_path = extensions.get_extensions_path()
super(ExtensionPathTest, self).setUp()
def test_get_extensions_path_with_plugins(self):
path = extensions.get_extensions_path(
{constants.CORE: FakePluginWithExtension()})
self.assertEqual(path,
'%s:neutron/tests/unit/extensions' % self.base_path)
def test_get_extensions_path_no_extensions(self):
# Reset to default value, as it's overridden by base class
cfg.CONF.set_override('api_extensions_path', '')
path = extensions.get_extensions_path()
self.assertEqual(path, self.base_path)
def test_get_extensions_path_single_extension(self):
cfg.CONF.set_override('api_extensions_path', 'path1')
path = extensions.get_extensions_path()
self.assertEqual(path, '%s:path1' % self.base_path)
def test_get_extensions_path_multiple_extensions(self):
cfg.CONF.set_override('api_extensions_path', 'path1:path2')
path = extensions.get_extensions_path()
self.assertEqual(path, '%s:path1:path2' % self.base_path)
def test_get_extensions_path_duplicate_extensions(self):
cfg.CONF.set_override('api_extensions_path', 'path1:path1')
path = extensions.get_extensions_path()
self.assertEqual(path, '%s:path1' % self.base_path)
class PluginInterfaceTest(base.BaseTestCase):
def test_issubclass_hook(self):
class A(object):

View File

@ -0,0 +1,24 @@
# Copyright 2015 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.cmd import sanity_check
from neutron.tests import base
class TestSanityCheck(base.BaseTestCase):
def test_setup_conf(self):
# verify that configuration can be successfully imported
sanity_check.setup_conf()

View File

@ -728,8 +728,9 @@ class NeutronDbPluginV2TestCase(testlib_api.WebTestCase):
for k in keys:
self.assertIn(k, resource[res_name])
if isinstance(keys[k], list):
self.assertEqual(sorted(resource[res_name][k]),
sorted(keys[k]))
self.assertEqual(
sorted(resource[res_name][k], key=utils.safe_sort_key),
sorted(keys[k], key=utils.safe_sort_key))
else:
self.assertEqual(resource[res_name][k], keys[k])
@ -1703,9 +1704,16 @@ fixed_ips=ip_address%%3D%s&fixed_ips=ip_address%%3D%s&fixed_ips=subnet_id%%3D%s
self.assertEqual(1, len(fixed_ips))
self.assertEqual('10.0.0.2', fixed_ips[0]['ip_address'])
def _make_v6_subnet(self, network, ra_addr_mode):
return (self._make_subnet(self.fmt, network, gateway='fe80::1',
cidr='fe80::/64', ip_version=6,
def _make_v6_subnet(self, network, ra_addr_mode, ipv6_pd=False):
cidr = 'fe80::/64'
gateway = 'fe80::1'
if ipv6_pd:
cidr = None
gateway = None
cfg.CONF.set_override('default_ipv6_subnet_pool',
constants.IPV6_PD_POOL_ID)
return (self._make_subnet(self.fmt, network, gateway=gateway,
cidr=cidr, ip_version=6,
ipv6_ra_mode=ra_addr_mode,
ipv6_address_mode=ra_addr_mode))
@ -1725,10 +1733,11 @@ fixed_ips=ip_address%%3D%s&fixed_ips=ip_address%%3D%s&fixed_ips=subnet_id%%3D%s
self.assertEqual(self._calc_ipv6_addr_by_EUI64(port, subnet),
port['port']['fixed_ips'][0]['ip_address'])
def _test_create_port_with_ipv6_subnet_in_fixed_ips(self, addr_mode):
def _test_create_port_with_ipv6_subnet_in_fixed_ips(self, addr_mode,
ipv6_pd=False):
"""Test port create with an IPv6 subnet incl in fixed IPs."""
with self.network(name='net') as network:
subnet = self._make_v6_subnet(network, addr_mode)
subnet = self._make_v6_subnet(network, addr_mode, ipv6_pd)
subnet_id = subnet['subnet']['id']
fixed_ips = [{'subnet_id': subnet_id}]
with self.port(subnet=subnet, fixed_ips=fixed_ips) as port:
@ -1745,6 +1754,10 @@ fixed_ips=ip_address%%3D%s&fixed_ips=ip_address%%3D%s&fixed_ips=subnet_id%%3D%s
self._test_create_port_with_ipv6_subnet_in_fixed_ips(
addr_mode=constants.IPV6_SLAAC)
def test_create_port_with_ipv6_pd_subnet_in_fixed_ips(self):
self._test_create_port_with_ipv6_subnet_in_fixed_ips(
addr_mode=constants.IPV6_SLAAC, ipv6_pd=True)
def test_create_port_with_ipv6_dhcp_stateful_subnet_in_fixed_ips(self):
self._test_create_port_with_ipv6_subnet_in_fixed_ips(
addr_mode=constants.DHCPV6_STATEFUL)
@ -2504,9 +2517,9 @@ class TestNetworksV2(NeutronDbPluginV2TestCase):
def test_list_networks_with_sort_native(self):
if self._skip_native_sorting:
self.skipTest("Skip test for not implemented sorting feature")
with self.network(admin_status_up=True, name='net1') as net1,\
self.network(admin_status_up=False, name='net2') as net2,\
self.network(admin_status_up=False, name='net3') as net3:
with self.network(admin_state_up=True, name='net1') as net1,\
self.network(admin_state_up=False, name='net2') as net2,\
self.network(admin_state_up=False, name='net3') as net3:
self._test_list_with_sort('network', (net3, net2, net1),
[('admin_state_up', 'asc'),
('name', 'desc')])
@ -2514,9 +2527,9 @@ class TestNetworksV2(NeutronDbPluginV2TestCase):
def test_list_networks_with_sort_extended_attr_native_returns_400(self):
if self._skip_native_sorting:
self.skipTest("Skip test for not implemented sorting feature")
with self.network(admin_status_up=True, name='net1'),\
self.network(admin_status_up=False, name='net2'),\
self.network(admin_status_up=False, name='net3'):
with self.network(admin_state_up=True, name='net1'),\
self.network(admin_state_up=False, name='net2'),\
self.network(admin_state_up=False, name='net3'):
req = self.new_list_request(
'networks',
params='sort_key=provider:segmentation_id&sort_dir=asc')
@ -2526,9 +2539,9 @@ class TestNetworksV2(NeutronDbPluginV2TestCase):
def test_list_networks_with_sort_remote_key_native_returns_400(self):
if self._skip_native_sorting:
self.skipTest("Skip test for not implemented sorting feature")
with self.network(admin_status_up=True, name='net1'),\
self.network(admin_status_up=False, name='net2'),\
self.network(admin_status_up=False, name='net3'):
with self.network(admin_state_up=True, name='net1'),\
self.network(admin_state_up=False, name='net2'),\
self.network(admin_state_up=False, name='net3'):
req = self.new_list_request(
'networks', params='sort_key=subnets&sort_dir=asc')
res = req.get_response(self.api)
@ -2539,9 +2552,9 @@ class TestNetworksV2(NeutronDbPluginV2TestCase):
'neutron.api.v2.base.Controller._get_sorting_helper',
new=_fake_get_sorting_helper)
helper_patcher.start()
with self.network(admin_status_up=True, name='net1') as net1,\
self.network(admin_status_up=False, name='net2') as net2,\
self.network(admin_status_up=False, name='net3') as net3:
with self.network(admin_state_up=True, name='net1') as net1,\
self.network(admin_state_up=False, name='net2') as net2,\
self.network(admin_state_up=False, name='net3') as net3:
self._test_list_with_sort('network', (net3, net2, net1),
[('admin_state_up', 'asc'),
('name', 'desc')])
@ -3988,8 +4001,9 @@ class TestSubnetsV2(NeutronDbPluginV2TestCase):
req = self.new_update_request('subnets', data,
res['subnet']['id'])
res = self.deserialize(self.fmt, req.get_response(self.api))
self.assertEqual(sorted(res['subnet']['host_routes']),
sorted(host_routes))
self.assertEqual(
sorted(res['subnet']['host_routes'], key=utils.safe_sort_key),
sorted(host_routes, key=utils.safe_sort_key))
self.assertEqual(res['subnet']['dns_nameservers'],
dns_nameservers)
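
A note on the recurring change above: these hunks replace plain sorted(list_of_dicts) calls with sorted(..., key=utils.safe_sort_key), because dicts are not orderable on Python 3 and a bare sorted() over a list of dicts raises TypeError, so the tests supply an explicit key. The sketch below only illustrates the idea and may differ from the actual neutron.common.utils.safe_sort_key implementation; the host_routes values are illustrative.

def safe_sort_key(value):
    """Map ``value`` to something orderable on both Python 2 and 3."""
    if isinstance(value, dict):
        # Represent a dict as its sorted items so two dicts compare
        # deterministically; recurse in case the values are dicts too.
        return sorted((k, safe_sort_key(v)) for k, v in value.items())
    return value

host_routes = [{'destination': '135.207.0.0/16', 'nexthop': '10.0.0.11'},
               {'destination': '12.0.0.0/8', 'nexthop': '10.0.0.10'}]
# Same deterministic order regardless of the input order.
assert (sorted(host_routes, key=safe_sort_key) ==
        sorted(list(reversed(host_routes)), key=safe_sort_key))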

View File

@ -152,7 +152,7 @@ class L3DvrTestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
def test__get_device_owner_distributed(self):
self._test_get_device_owner(
is_distributed=True,
expected=l3_dvr_db.DEVICE_OWNER_DVR_INTERFACE,
expected=l3_const.DEVICE_OWNER_DVR_INTERFACE,
pass_router_id=False)
def _test__is_distributed_router(self, router, expected):

View File

@ -19,6 +19,7 @@ from oslo_utils import uuidutils
from webob import exc
from neutron.common import constants
from neutron.common import utils
from neutron.db import extraroute_db
from neutron.extensions import extraroute
from neutron.extensions import l3
@ -134,8 +135,10 @@ class ExtraRouteDBTestCaseBase(object):
body = self._routes_update_prepare(r['router']['id'],
None, p['port']['id'],
routes)
self.assertEqual(sorted(body['router']['routes']),
sorted(routes))
self.assertEqual(
sorted(body['router']['routes'],
key=utils.safe_sort_key),
sorted(routes, key=utils.safe_sort_key))
self._routes_update_cleanup(p['port']['id'],
None, r['router']['id'], [])
@ -180,14 +183,18 @@ class ExtraRouteDBTestCaseBase(object):
body = self._routes_update_prepare(r['router']['id'],
None, p['port']['id'],
routes_orig)
self.assertEqual(sorted(body['router']['routes']),
sorted(routes_orig))
self.assertEqual(
sorted(body['router']['routes'],
key=utils.safe_sort_key),
sorted(routes_orig, key=utils.safe_sort_key))
body = self._routes_update_prepare(r['router']['id'],
None, p['port']['id'],
routes_left,
skip_add=True)
self.assertEqual(sorted(body['router']['routes']),
sorted(routes_left))
self.assertEqual(
sorted(body['router']['routes'],
key=utils.safe_sort_key),
sorted(routes_left, key=utils.safe_sort_key))
self._routes_update_cleanup(p['port']['id'],
None, r['router']['id'], [])

View File

@ -38,65 +38,63 @@ _get_path = test_base._get_path
class ServiceTypeManagerTestCase(testlib_api.SqlTestCase):
def setUp(self):
self.service_providers = mock.patch.object(
provconf.NeutronModule, 'service_providers').start()
super(ServiceTypeManagerTestCase, self).setUp()
st_db.ServiceTypeManager._instance = None
self.manager = st_db.ServiceTypeManager.get_instance()
self.ctx = context.get_admin_context()
def _set_override(self, service_providers):
self.service_providers.return_value = service_providers
st_db.ServiceTypeManager._instance = None
self.manager = st_db.ServiceTypeManager.get_instance()
for provider in service_providers:
self.manager.add_provider_configuration(
provider.split(':')[0], provconf.ProviderConfiguration())
def test_service_provider_driver_not_unique(self):
cfg.CONF.set_override('service_provider',
[constants.LOADBALANCER +
':lbaas:driver'],
'service_providers')
self._set_override([constants.LOADBALANCER + ':lbaas:driver'])
prov = {'service_type': constants.LOADBALANCER,
'name': 'name2',
'driver': 'driver',
'default': False}
self.manager._load_conf()
self.assertRaises(
n_exc.Invalid, self.manager.conf.add_provider, prov)
n_exc.Invalid,
self.manager.config['LOADBALANCER'].add_provider, prov)
def test_get_service_providers(self):
cfg.CONF.set_override('service_provider',
[constants.LOADBALANCER +
':lbaas:driver_path',
constants.DUMMY + ':dummy:dummy_dr'],
'service_providers')
"""Test that get_service_providers filters correctly."""
self._set_override(
[constants.LOADBALANCER +
':lbaas:driver_path1',
constants.FIREWALL +
':fwaas:driver_path2'])
ctx = context.get_admin_context()
provconf.parse_service_provider_opt()
self.manager._load_conf()
res = self.manager.get_service_providers(ctx)
self.assertEqual(len(res), 2)
res = self.manager.get_service_providers(
ctx,
filters=dict(service_type=[constants.DUMMY])
)
self.assertEqual(len(res), 1)
res = self.manager.get_service_providers(
ctx,
filters=dict(service_type=[constants.LOADBALANCER])
)
self.assertEqual(len(res), 1)
res = self.manager.get_service_providers(
ctx,
filters=dict(service_type=[constants.FIREWALL])
)
self.assertEqual(len(res), 1)
def test_multiple_default_providers_specified_for_service(self):
cfg.CONF.set_override('service_provider',
[constants.LOADBALANCER +
':lbaas1:driver_path:default',
constants.LOADBALANCER +
':lbaas2:driver_path:default'],
'service_providers')
self.assertRaises(n_exc.Invalid, self.manager._load_conf)
self.assertRaises(
n_exc.Invalid,
self._set_override,
[constants.LOADBALANCER +
':lbaas1:driver_path:default',
constants.LOADBALANCER +
':lbaas2:driver_path:default'])
def test_get_default_provider(self):
cfg.CONF.set_override('service_provider',
[constants.LOADBALANCER +
':lbaas1:driver_path:default',
constants.DUMMY +
':lbaas2:driver_path2'],
'service_providers')
self.manager._load_conf()
self._set_override([constants.LOADBALANCER +
':lbaas1:driver_path:default',
constants.DUMMY +
':lbaas2:driver_path2'])
# can pass None as a context
p = self.manager.get_default_service_provider(None,
constants.LOADBALANCER)
@ -112,13 +110,10 @@ class ServiceTypeManagerTestCase(testlib_api.SqlTestCase):
)
def test_add_resource_association(self):
cfg.CONF.set_override('service_provider',
[constants.LOADBALANCER +
':lbaas1:driver_path:default',
constants.DUMMY +
':lbaas2:driver_path2'],
'service_providers')
self.manager._load_conf()
self._set_override([constants.LOADBALANCER +
':lbaas1:driver_path:default',
constants.DUMMY +
':lbaas2:driver_path2'])
ctx = context.get_admin_context()
self.manager.add_resource_association(ctx,
constants.LOADBALANCER,
@ -130,13 +125,10 @@ class ServiceTypeManagerTestCase(testlib_api.SqlTestCase):
ctx.session.delete(assoc)
def test_invalid_resource_association(self):
cfg.CONF.set_override('service_provider',
[constants.LOADBALANCER +
':lbaas1:driver_path:default',
constants.DUMMY +
':lbaas2:driver_path2'],
'service_providers')
self.manager._load_conf()
self._set_override([constants.LOADBALANCER +
':lbaas1:driver_path:default',
constants.DUMMY +
':lbaas2:driver_path2'])
ctx = context.get_admin_context()
self.assertRaises(provconf.ServiceProviderNotFound,
self.manager.add_resource_association,
@ -200,13 +192,19 @@ class ServiceTypeExtensionTestCase(ServiceTypeExtensionTestCaseBase):
class ServiceTypeManagerExtTestCase(ServiceTypeExtensionTestCaseBase):
"""Tests ServiceTypemanager as a public API."""
def setUp(self):
self.service_providers = mock.patch.object(
provconf.NeutronModule, 'service_providers').start()
service_providers = [
constants.LOADBALANCER + ':lbaas:driver_path',
constants.DUMMY + ':dummy:dummy_dr'
]
self.service_providers.return_value = service_providers
# Blank out service type manager instance
st_db.ServiceTypeManager._instance = None
cfg.CONF.set_override('service_provider',
[constants.LOADBALANCER +
':lbaas:driver_path',
constants.DUMMY + ':dummy:dummy_dr'],
'service_providers')
self.manager = st_db.ServiceTypeManager.get_instance()
for provider in service_providers:
self.manager.add_provider_configuration(
provider.split(':')[0], provconf.ProviderConfiguration())
super(ServiceTypeManagerExtTestCase, self).setUp()
def _list_service_providers(self):
@ -217,4 +215,4 @@ class ServiceTypeManagerExtTestCase(ServiceTypeExtensionTestCaseBase):
self.assertEqual(res.status_int, webexc.HTTPOk.code)
data = self.deserialize(res)
self.assertIn('service_providers', data)
self.assertEqual(len(data['service_providers']), 2)
self.assertGreaterEqual(len(data['service_providers']), 2)

View File

@ -17,6 +17,7 @@ import webob.exc
from neutron.api import extensions as neutron_extensions
from neutron.api.v2 import attributes
from neutron.common import utils
from neutron import context
import neutron.db.api as db
from neutron.extensions import portbindings
@ -945,7 +946,7 @@ class TestN1kvPolicyProfiles(N1kvPluginTestCase):
is_admin=False)
res = self._list(resource, neutron_context=ctx)
self.assertEqual(len(expected_profiles), len(res[resource]))
profiles = sorted(res[resource])
profiles = sorted(res[resource], key=utils.safe_sort_key)
for i in range(len(profiles)):
self.assertEqual(expected_profiles[i].id,
profiles[i]['id'])
@ -1179,8 +1180,10 @@ class TestN1kvSubnets(test_plugin.TestSubnetsV2,
req = self.new_update_request('subnets', data,
subnet['subnet']['id'])
subnet = self.deserialize(self.fmt, req.get_response(self.api))
self.assertEqual(sorted(subnet['subnet']['host_routes']),
sorted(host_routes))
self.assertEqual(
sorted(subnet['subnet']['host_routes'],
key=utils.safe_sort_key),
sorted(host_routes, key=utils.safe_sort_key))
self.assertEqual(sorted(subnet['subnet']['dns_nameservers']),
sorted(dns_nameservers))
# In N1K we need to delete the subnet before the network

View File

@ -62,6 +62,9 @@ class TestMechanismDriver(api.MechanismDriver):
assert(context.current['id'] == context.original['id'])
else:
assert(not context.original)
network_context = context.network
assert(isinstance(network_context, api.NetworkContext))
self._check_network_context(network_context, False)
def create_subnet_precommit(self, context):
self._check_subnet_context(context, False)

View File

@ -43,6 +43,13 @@ FAKE_MAC = '00:11:22:33:44:55'
FAKE_IP1 = '10.0.0.1'
FAKE_IP2 = '10.0.0.2'
TEST_PORT_ID1 = 'port-id-1'
TEST_PORT_ID2 = 'port-id-2'
TEST_PORT_ID3 = 'port-id-3'
TEST_NETWORK_ID1 = 'net-id-1'
TEST_NETWORK_ID2 = 'net-id-2'
class FakeVif(object):
ofport = 99
@ -660,15 +667,67 @@ class TestOvsNeutronAgent(object):
self.agent.agent_state, True)
def test_port_update(self):
port = {"id": "123",
"network_id": "124",
port = {"id": TEST_PORT_ID1,
"network_id": TEST_NETWORK_ID1,
"admin_state_up": False}
self.agent.port_update("unused_context",
port=port,
network_type="vlan",
segmentation_id="1",
physical_network="physnet")
self.assertEqual(set(['123']), self.agent.updated_ports)
self.assertEqual(set([TEST_PORT_ID1]), self.agent.updated_ports)
def test_port_delete_after_update(self):
"""Make sure a port is not marked for delete and update."""
port = {'id': TEST_PORT_ID1}
self.agent.port_update(context=None, port=port)
self.agent.port_delete(context=None, port_id=port['id'])
self.assertEqual(set(), self.agent.updated_ports)
self.assertEqual(set([port['id']]), self.agent.deleted_ports)
def test_process_deleted_ports_cleans_network_ports(self):
self.agent._update_port_network(TEST_PORT_ID1, TEST_NETWORK_ID1)
self.agent.port_delete(context=None, port_id=TEST_PORT_ID1)
self.agent.sg_agent = mock.Mock()
self.agent.int_br = mock.Mock()
self.agent.process_deleted_ports(port_info={})
self.assertEqual(set(), self.agent.network_ports[TEST_NETWORK_ID1])
def test_network_update(self):
"""Network update marks port for update. """
network = {'id': TEST_NETWORK_ID1}
port = {'id': TEST_PORT_ID1, 'network_id': network['id']}
self.agent._update_port_network(port['id'], port['network_id'])
self.agent.network_update(context=None, network=network)
self.assertEqual(set([port['id']]), self.agent.updated_ports)
def test_network_update_outoforder(self):
"""Network update arrives later than port_delete.
        The main agent loop has not yet processed the ports,
        so we ensure the deleted port is not marked for update.
"""
network = {'id': TEST_NETWORK_ID1}
port = {'id': TEST_PORT_ID1, 'network_id': network['id']}
self.agent._update_port_network(port['id'], port['network_id'])
self.agent.port_delete(context=None, port_id=port['id'])
self.agent.network_update(context=None, network=network)
self.assertEqual(set(), self.agent.updated_ports)
def test_update_port_network(self):
"""Ensure ports are associated and moved across networks correctly."""
self.agent._update_port_network(TEST_PORT_ID1, TEST_NETWORK_ID1)
self.agent._update_port_network(TEST_PORT_ID2, TEST_NETWORK_ID1)
self.agent._update_port_network(TEST_PORT_ID3, TEST_NETWORK_ID2)
self.agent._update_port_network(TEST_PORT_ID1, TEST_NETWORK_ID2)
self.assertEqual(set([TEST_PORT_ID2]),
self.agent.network_ports[TEST_NETWORK_ID1])
self.assertEqual(set([TEST_PORT_ID1, TEST_PORT_ID3]),
self.agent.network_ports[TEST_NETWORK_ID2])
def test_port_delete(self):
vif = FakeVif()
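
The new OVS agent tests above exercise per-network port bookkeeping (_update_port_network, network_ports, deleted_ports). The agent code itself is not part of this diff, so the sketch below is only a rough model of the behaviour the tests assert: a port is removed from whatever network it was previously tracked under before being recorded under its current one. PortNetworkTracker is a made-up name used purely for illustration.

import collections

class PortNetworkTracker(object):
    """Toy model of the network_ports bookkeeping the tests rely on."""

    def __init__(self):
        self.network_ports = collections.defaultdict(set)

    def _update_port_network(self, port_id, network_id):
        # A port belongs to exactly one network: drop any stale
        # membership first, then record the current association.
        for port_ids in self.network_ports.values():
            port_ids.discard(port_id)
        self.network_ports[network_id].add(port_id)

tracker = PortNetworkTracker()
tracker._update_port_network('port-id-1', 'net-id-1')
tracker._update_port_network('port-id-1', 'net-id-2')
assert tracker.network_ports['net-id-1'] == set()
assert tracker.network_ports['net-id-2'] == set(['port-id-1'])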

View File

@ -98,19 +98,48 @@ class TestQosPlugin(base.BaseQosTestCase):
self._validate_notif_driver_params('update_policy')
def test_update_policy_rule(self):
_policy = policy_object.QosPolicy(
self.ctxt, **self.policy_data['policy'])
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_by_id',
return_value=self.policy):
return_value=_policy):
setattr(_policy, "rules", [self.rule])
self.qos_plugin.update_policy_bandwidth_limit_rule(
self.ctxt, self.rule.id, self.policy.id, self.rule_data)
self._validate_notif_driver_params('update_policy')
def test_delete_policy_rule(self):
def test_update_policy_rule_bad_policy(self):
_policy = policy_object.QosPolicy(
self.ctxt, **self.policy_data['policy'])
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_by_id',
return_value=self.policy):
return_value=_policy):
setattr(_policy, "rules", [])
self.assertRaises(
n_exc.QosRuleNotFound,
self.qos_plugin.update_policy_bandwidth_limit_rule,
self.ctxt, self.rule.id, self.policy.id,
self.rule_data)
def test_delete_policy_rule(self):
_policy = policy_object.QosPolicy(
self.ctxt, **self.policy_data['policy'])
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_by_id',
return_value=_policy):
setattr(_policy, "rules", [self.rule])
self.qos_plugin.delete_policy_bandwidth_limit_rule(
self.ctxt, self.rule.id, self.policy.id)
self.ctxt, self.rule.id, _policy.id)
self._validate_notif_driver_params('update_policy')
def test_delete_policy_rule_bad_policy(self):
_policy = policy_object.QosPolicy(
self.ctxt, **self.policy_data['policy'])
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_by_id',
return_value=_policy):
setattr(_policy, "rules", [])
self.assertRaises(
n_exc.QosRuleNotFound,
self.qos_plugin.delete_policy_bandwidth_limit_rule,
self.ctxt, self.rule.id, _policy.id)
def test_get_policy_bandwidth_limit_rules_for_policy(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_by_id',
return_value=self.policy):

View File

@ -12,6 +12,8 @@
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_config import cfg
from neutron.common import exceptions as n_exc
@ -22,15 +24,22 @@ from neutron.tests import base
class ParseServiceProviderConfigurationTestCase(base.BaseTestCase):
def setUp(self):
super(ParseServiceProviderConfigurationTestCase, self).setUp()
self.service_providers = mock.patch.object(
provconf.NeutronModule, 'service_providers').start()
def _set_override(self, service_providers):
self.service_providers.return_value = service_providers
def test_default_service_provider_configuration(self):
providers = cfg.CONF.service_providers.service_provider
self.assertEqual(providers, [])
def test_parse_single_service_provider_opt(self):
cfg.CONF.set_override('service_provider',
[constants.LOADBALANCER +
':lbaas:driver_path'],
'service_providers')
self._set_override([constants.LOADBALANCER +
':lbaas:driver_path'])
expected = {'service_type': constants.LOADBALANCER,
'name': 'lbaas',
'driver': 'driver_path',
@ -40,10 +49,8 @@ class ParseServiceProviderConfigurationTestCase(base.BaseTestCase):
self.assertEqual(res, [expected])
def test_parse_single_default_service_provider_opt(self):
cfg.CONF.set_override('service_provider',
[constants.LOADBALANCER +
':lbaas:driver_path:default'],
'service_providers')
self._set_override([constants.LOADBALANCER +
':lbaas:driver_path:default'])
expected = {'service_type': constants.LOADBALANCER,
'name': 'lbaas',
'driver': 'driver_path',
@ -53,56 +60,46 @@ class ParseServiceProviderConfigurationTestCase(base.BaseTestCase):
self.assertEqual(res, [expected])
def test_parse_multi_service_provider_opt(self):
cfg.CONF.set_override('service_provider',
[constants.LOADBALANCER +
':lbaas:driver_path',
constants.LOADBALANCER + ':name1:path1',
constants.LOADBALANCER +
':name2:path2:default'],
'service_providers')
self._set_override([constants.LOADBALANCER +
':lbaas:driver_path',
constants.LOADBALANCER + ':name1:path1',
constants.LOADBALANCER +
':name2:path2:default'])
res = provconf.parse_service_provider_opt()
# This parsing crosses repos if additional projects are installed,
# so check that at least what we expect is there; there may be more.
self.assertTrue(len(res) >= 3)
def test_parse_service_provider_opt_not_allowed_raises(self):
cfg.CONF.set_override('service_provider',
[constants.LOADBALANCER +
':lbaas:driver_path',
'svc_type:name1:path1'],
'service_providers')
self.assertRaises(n_exc.Invalid, provconf.parse_service_provider_opt)
def test_parse_service_provider_invalid_format(self):
cfg.CONF.set_override('service_provider',
[constants.LOADBALANCER +
':lbaas:driver_path',
'svc_type:name1:path1:def'],
'service_providers')
self._set_override([constants.LOADBALANCER +
':lbaas:driver_path',
'svc_type:name1:path1:def'])
self.assertRaises(n_exc.Invalid, provconf.parse_service_provider_opt)
cfg.CONF.set_override('service_provider',
[constants.LOADBALANCER +
':',
'svc_type:name1:path1:def'],
'service_providers')
self._set_override([constants.LOADBALANCER +
':',
'svc_type:name1:path1:def'])
self.assertRaises(n_exc.Invalid, provconf.parse_service_provider_opt)
def test_parse_service_provider_name_too_long(self):
name = 'a' * 256
cfg.CONF.set_override('service_provider',
[constants.LOADBALANCER +
':' + name + ':driver_path',
'svc_type:name1:path1:def'],
'service_providers')
self._set_override([constants.LOADBALANCER +
':' + name + ':driver_path',
'svc_type:name1:path1:def'])
self.assertRaises(n_exc.Invalid, provconf.parse_service_provider_opt)
class ProviderConfigurationTestCase(base.BaseTestCase):
def setUp(self):
super(ProviderConfigurationTestCase, self).setUp()
self.service_providers = mock.patch.object(
provconf.NeutronModule, 'service_providers').start()
def _set_override(self, service_providers):
self.service_providers.return_value = service_providers
def test_ensure_driver_unique(self):
pconf = provconf.ProviderConfiguration([])
pconf = provconf.ProviderConfiguration()
pconf.providers[('svctype', 'name')] = {'driver': 'driver',
'default': True}
self.assertRaises(n_exc.Invalid,
@ -110,7 +107,7 @@ class ProviderConfigurationTestCase(base.BaseTestCase):
self.assertIsNone(pconf._ensure_driver_unique('another_driver1'))
def test_ensure_default_unique(self):
pconf = provconf.ProviderConfiguration([])
pconf = provconf.ProviderConfiguration()
pconf.providers[('svctype', 'name')] = {'driver': 'driver',
'default': True}
self.assertRaises(n_exc.Invalid,
@ -121,7 +118,7 @@ class ProviderConfigurationTestCase(base.BaseTestCase):
self.assertIsNone(pconf._ensure_default_unique('svctype1', False))
def test_add_provider(self):
pconf = provconf.ProviderConfiguration([])
pconf = provconf.ProviderConfiguration()
prov = {'service_type': constants.LOADBALANCER,
'name': 'name',
'driver': 'path',
@ -134,7 +131,7 @@ class ProviderConfigurationTestCase(base.BaseTestCase):
[{'driver': 'path', 'default': False}])
def test_add_duplicate_provider(self):
pconf = provconf.ProviderConfiguration([])
pconf = provconf.ProviderConfiguration()
prov = {'service_type': constants.LOADBALANCER,
'name': 'name',
'driver': 'path',
@ -144,6 +141,10 @@ class ProviderConfigurationTestCase(base.BaseTestCase):
self.assertEqual(len(pconf.providers), 1)
def test_get_service_providers(self):
self._set_override([constants.LOADBALANCER + ':name:path',
constants.LOADBALANCER + ':name2:path2',
'st2:name:driver:default',
'st3:name2:driver2:default'])
provs = [{'service_type': constants.LOADBALANCER,
'name': 'name',
'driver': 'path',
@ -161,7 +162,7 @@ class ProviderConfigurationTestCase(base.BaseTestCase):
'name': 'name2',
'driver': 'driver2',
'default': True}]
pconf = provconf.ProviderConfiguration(provs)
pconf = provconf.ProviderConfiguration()
for prov in provs:
p = pconf.get_service_providers(
filters={'name': [prov['name']],
@ -170,6 +171,8 @@ class ProviderConfigurationTestCase(base.BaseTestCase):
self.assertEqual(p, [prov])
def test_get_service_providers_with_fields(self):
self._set_override([constants.LOADBALANCER + ":name:path",
constants.LOADBALANCER + ":name2:path2"])
provs = [{'service_type': constants.LOADBALANCER,
'name': 'name',
'driver': 'path',
@ -178,7 +181,7 @@ class ProviderConfigurationTestCase(base.BaseTestCase):
'name': 'name2',
'driver': 'path2',
'default': False}]
pconf = provconf.ProviderConfiguration(provs)
pconf = provconf.ProviderConfiguration()
for prov in provs:
p = pconf.get_service_providers(
filters={'name': [prov['name']],
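
Both service-provider test files above move away from cfg.CONF.set_override('service_provider', ...) and instead patch provconf.NeutronModule.service_providers, driving its return value per test. The started-patcher idiom they rely on is shown below as a standalone sketch; FakeNeutronModule is a stand-in class, not Neutron's real NeutronModule.

import mock  # the standalone mock library; unittest.mock on Python 3

class FakeNeutronModule(object):
    def service_providers(self):
        return []

patcher = mock.patch.object(FakeNeutronModule, 'service_providers')
mocked = patcher.start()
mocked.return_value = ['LOADBALANCER:lbaas:driver_path']
try:
    # Anything that consults service_providers() now sees the injected list.
    assert (FakeNeutronModule().service_providers() ==
            ['LOADBALANCER:lbaas:driver_path'])
finally:
    patcher.stop()  # test base classes normally arrange this via addCleanup()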

View File

@ -15,7 +15,7 @@ httplib2>=0.7.5
requests>=2.5.2
Jinja2>=2.6 # BSD License (3 clause)
keystonemiddleware>=2.0.0
netaddr>=0.7.12
netaddr!=0.7.16,>=0.7.12
python-neutronclient<3,>=2.6.0
retrying!=1.3.0,>=1.2.3 # Apache-2.0
SQLAlchemy<1.1.0,>=0.9.7
@ -37,7 +37,7 @@ oslo.rootwrap>=2.0.0 # Apache-2.0
oslo.serialization>=1.4.0 # Apache-2.0
oslo.service>=0.7.0 # Apache-2.0
oslo.utils>=2.0.0 # Apache-2.0
oslo.versionedobjects>=0.6.0
oslo.versionedobjects>=0.9.0
python-novaclient>=2.26.0

View File

@ -118,7 +118,9 @@ commands = python setup.py test --testr-args='{posargs: \
neutron.tests.unit.plugins.brocade.test_brocade_db \
neutron.tests.unit.plugins.brocade.test_brocade_plugin \
neutron.tests.unit.plugins.brocade.test_brocade_vlan \
neutron.tests.unit.plugins.embrane.test_embrane_neutron_plugin \
neutron.tests.unit.plugins.oneconvergence.test_nvsd_agent \
neutron.tests.unit.plugins.oneconvergence.test_nvsd_plugin \
neutron.tests.unit.plugins.oneconvergence.test_plugin_helper \
neutron.tests.unit.plugins.oneconvergence.test_nvsdlib \
neutron.tests.unit.plugins.ibm.test_sdnve_agent \
@ -158,9 +160,11 @@ commands = python setup.py test --testr-args='{posargs: \
neutron.tests.unit.scheduler.test_dhcp_agent_scheduler \
neutron.tests.unit.db.test_agentschedulers_db \
neutron.tests.unit.db.test_allowedaddresspairs_db \
neutron.tests.unit.db.test_db_base_plugin_v2 \
neutron.tests.unit.db.test_ipam_backend_mixin \
neutron.tests.unit.db.test_l3_dvr_db \
neutron.tests.unit.db.test_l3_hamode_db \
neutron.tests.unit.db.test_ipam_pluggable_backend \
neutron.tests.unit.db.test_migration \
neutron.tests.unit.db.test_agents_db \
neutron.tests.unit.db.quota.test_api \
@ -230,6 +234,7 @@ commands = python setup.py test --testr-args='{posargs: \
neutron.tests.unit.extensions.test_flavors \
neutron.tests.unit.extensions.test_l3_ext_gw_mode \
neutron.tests.unit.extensions.test_extra_dhcp_opt \
neutron.tests.unit.extensions.test_extraroute \
neutron.tests.unit.extensions.test_netmtu \
neutron.tests.unit.extensions.test_vlantransparent \
neutron.tests.unit.extensions.extendedattribute \