From 40783cee5e520173a4cc37c23dd97be2d37b65fd Mon Sep 17 00:00:00 2001 From: Angus Lees Date: Mon, 2 Mar 2015 17:06:54 +1100 Subject: [PATCH 001/292] Ensure tests run under python2.7 tox uses whatever python version that tox ran with as the default python version for environments (py27,py3x, etc are given a more specific default). Debian's python-tox (for example) uses python3, but we require python2.7 - and a naive `apt-get install python-tox; tox -epep8` fails with py3-related errors on Debian. This change explicitly sets `basepython = python2.7` in several testing environments that would otherwise have used the default basepython. Change-Id: I377ac6d72dec5b85c105d8a1a74f6974efb84dcf --- tox.ini | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tox.ini b/tox.ini index 54d91612310..e09a4042260 100644 --- a/tox.ini +++ b/tox.ini @@ -26,9 +26,11 @@ commands = setenv = VIRTUAL_ENV={envdir} [testenv:api] +basepython = python2.7 setenv = OS_TEST_PATH=./neutron/tests/api [testenv:functional] +basepython = python2.7 setenv = OS_TEST_PATH=./neutron/tests/functional OS_TEST_TIMEOUT=90 deps = @@ -36,6 +38,7 @@ deps = -r{toxinidir}/neutron/tests/functional/requirements.txt [testenv:dsvm-functional] +basepython = python2.7 setenv = OS_TEST_PATH=./neutron/tests/functional OS_SUDO_TESTING=1 OS_ROOTWRAP_CMD=sudo {envbindir}/neutron-rootwrap {envdir}/etc/neutron/rootwrap.conf @@ -50,6 +53,7 @@ sitepackages = True downloadcache = ~/cache/pip [testenv:pep8] +basepython = python2.7 deps = {[testenv]deps} pylint @@ -62,6 +66,7 @@ commands= whitelist_externals = sh [testenv:cover] +basepython = python2.7 commands = python setup.py testr --coverage --testr-args='{posargs}' From 857322c30ecfed11e770f0aea7b234f8ffdc0db1 Mon Sep 17 00:00:00 2001 From: Ann Kamyshnikova Date: Wed, 4 Mar 2015 15:50:07 +0300 Subject: [PATCH 002/292] Use accessors instead of private attributes for Ml2 plugin Start using accessors instead of private attributes of PortContext and NetworkContext where it is 
possible. Closes-bug: #1424587 Change-Id: Ie3d963ae6cee9782d65ba683936072602c78ebaa --- neutron/plugins/ml2/managers.py | 4 ++-- neutron/plugins/ml2/plugin.py | 15 ++++++++------- 2 files changed, 10 insertions(+), 9 deletions(-) diff --git a/neutron/plugins/ml2/managers.py b/neutron/plugins/ml2/managers.py index 4fc271d5ef6..6c3743e1937 100644 --- a/neutron/plugins/ml2/managers.py +++ b/neutron/plugins/ml2/managers.py @@ -595,7 +595,7 @@ class MechanismManager(stevedore.named.NamedExtensionManager): def _bind_port_level(self, context, level, segments_to_bind): binding = context._binding - port_id = context._port['id'] + port_id = context.current['id'] LOG.debug("Attempting to bind port %(port)s on host %(host)s " "at level %(level)s using segments %(segments)s", {'port': port_id, @@ -652,7 +652,7 @@ class MechanismManager(stevedore.named.NamedExtensionManager): driver.name) binding.vif_type = portbindings.VIF_TYPE_BINDING_FAILED LOG.error(_LE("Failed to bind port %(port)s on host %(host)s"), - {'port': context._port['id'], + {'port': context.current['id'], 'host': binding.host}) def _check_driver_to_bind(self, driver, segments_to_bind, binding_levels): diff --git a/neutron/plugins/ml2/plugin.py b/neutron/plugins/ml2/plugin.py index 25b8be482d8..421c1379c62 100644 --- a/neutron/plugins/ml2/plugin.py +++ b/neutron/plugins/ml2/plugin.py @@ -247,7 +247,7 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2, def _bind_port_if_needed(self, context, allow_notify=False, need_notify=False): plugin_context = context._plugin_context - port_id = context._port['id'] + port_id = context.current['id'] # Since the mechanism driver bind_port() calls must be made # outside a DB transaction locking the port state, it is @@ -316,7 +316,7 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2, def _bind_port(self, orig_context): # Construct a new PortContext from the one from the previous # transaction. 
- port = orig_context._port + port = orig_context.current orig_binding = orig_context._binding new_binding = models.PortBinding( host=orig_binding.host, @@ -328,7 +328,7 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2, self._update_port_dict_binding(port, new_binding) new_context = driver_context.PortContext( self, orig_context._plugin_context, port, - orig_context._network_context._network, new_binding, None) + orig_context.network.current, new_binding, None) # Attempt to bind the port and return the context with the # result. @@ -516,7 +516,7 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2, '_ml2_port_result_filter_hook') def _notify_port_updated(self, mech_context): - port = mech_context._port + port = mech_context.current segment = mech_context.bottom_bound_segment if not segment: # REVISIT(rkukura): This should notify agent to unplug port @@ -958,7 +958,8 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2, LOG.error(_LE("_bind_port_if_needed " "failed, deleting port '%s'"), result['id']) self.delete_port(context, result['id']) - return bound_context._port + + return bound_context.current def create_port_bulk(self, context, ports): objects = self._create_bulk_ml2(attributes.PORT, context, ports) @@ -979,7 +980,7 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2, for obj in objects: obj['bound_context'] = self._bind_port_if_needed( obj['mech_context']) - return [obj['bound_context']._port for obj in objects] + return [obj['bound_context'].current for obj in objects] except ml2_exc.MechanismDriverError: with excutils.save_and_reraise_exception(): resource_ids = [res['result']['id'] for res in objects] @@ -1056,7 +1057,7 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2, mech_context, allow_notify=True, need_notify=need_port_update_notify) - return bound_context._port + return bound_context.current def _process_dvr_port_binding(self, mech_context, context, attrs): session = mech_context._plugin_context.session From 
55536a4ecb6c71e5451b8a9664d87e32146f071d Mon Sep 17 00:00:00 2001 From: Ihar Hrachyshka Date: Fri, 10 Apr 2015 15:07:33 +0200 Subject: [PATCH 003/292] Removed ml2_conf_odl.ini config file The file is already packaged into decomposed networking-odl repo [1]. [1]: https://git.openstack.org/cgit/stackforge/networking-odl/tree/etc/neutron/plugins/ml2/ml2_conf_odl.ini Closes-Bug: #1442615 Change-Id: Ic280454190aab4e3b881cde15a882808b652861e (cherry picked from commit b3334eca0ae9f9c64ccd646035e69081f669e3e4) --- etc/neutron/plugins/ml2/ml2_conf_odl.ini | 30 ------------------------ setup.cfg | 1 - 2 files changed, 31 deletions(-) delete mode 100644 etc/neutron/plugins/ml2/ml2_conf_odl.ini diff --git a/etc/neutron/plugins/ml2/ml2_conf_odl.ini b/etc/neutron/plugins/ml2/ml2_conf_odl.ini deleted file mode 100644 index 9e88c1bbfa5..00000000000 --- a/etc/neutron/plugins/ml2/ml2_conf_odl.ini +++ /dev/null @@ -1,30 +0,0 @@ -# Configuration for the OpenDaylight MechanismDriver - -[ml2_odl] -# (StrOpt) OpenDaylight REST URL -# If this is not set then no HTTP requests will be made. -# -# url = -# Example: url = http://192.168.56.1:8080/controller/nb/v2/neutron - -# (StrOpt) Username for HTTP basic authentication to ODL. -# -# username = -# Example: username = admin - -# (StrOpt) Password for HTTP basic authentication to ODL. -# -# password = -# Example: password = admin - -# (IntOpt) Timeout in seconds to wait for ODL HTTP request completion. -# This is an optional parameter, default value is 10 seconds. -# -# timeout = 10 -# Example: timeout = 15 - -# (IntOpt) Timeout in minutes to wait for a Tomcat session timeout. -# This is an optional parameter, default value is 30 minutes. 
-# -# session_timeout = 30 -# Example: session_timeout = 60 diff --git a/setup.cfg b/setup.cfg index e689f10e2d0..6fe838f178e 100644 --- a/setup.cfg +++ b/setup.cfg @@ -69,7 +69,6 @@ data_files = etc/neutron/plugins/ml2/ml2_conf_cisco.ini etc/neutron/plugins/ml2/ml2_conf_mlnx.ini etc/neutron/plugins/ml2/ml2_conf_ncs.ini - etc/neutron/plugins/ml2/ml2_conf_odl.ini etc/neutron/plugins/ml2/ml2_conf_ofa.ini etc/neutron/plugins/ml2/ml2_conf_fslsdn.ini etc/neutron/plugins/ml2/ml2_conf_sriov.ini From 1dc98e414f200a78a6b1dc78f222c588646e6935 Mon Sep 17 00:00:00 2001 From: Dane LeBlanc Date: Thu, 9 Apr 2015 10:32:33 -0400 Subject: [PATCH 004/292] IPv6 SLAAC subnet create should update ports on net If ports are first created on a network, and then an IPv6 SLAAC or DHCPv6-stateless subnet is created on that network, then the ports created prior to the subnet create are not getting automatically updated (associated) with addresses for the SLAAC/DHCPv6-stateless subnet, as required. Change-Id: I88d04a13ce5b8ed4c88eac734e589e8a90e986a0 Closes-Bug: 1427474 Closes-Bug: 1441382 Closes-Bug: 1440183 (cherry picked from commit bd1044ba0e9d7d0f4752c891ac340b115f0019c4) --- neutron/db/db_base_plugin_v2.py | 68 ++++++++++++++---- .../tests/unit/db/test_db_base_plugin_v2.py | 69 +++++++++++++++++++ 2 files changed, 125 insertions(+), 12 deletions(-) diff --git a/neutron/db/db_base_plugin_v2.py b/neutron/db/db_base_plugin_v2.py index f7bcf8db538..dcf7adc6f6f 100644 --- a/neutron/db/db_base_plugin_v2.py +++ b/neutron/db/db_base_plugin_v2.py @@ -472,9 +472,9 @@ class NeutronDbPluginV2(neutron_plugin_base_v2.NeutronPluginBaseV2, # from subnet else: if is_auto_addr: - prefix = subnet['cidr'] - ip_address = ipv6_utils.get_ipv6_addr_by_EUI64( - prefix, mac_address) + ip_address = self._calculate_ipv6_eui64_addr(context, + subnet, + mac_address) ips.append({'ip_address': ip_address.format(), 'subnet_id': subnet['id']}) else: @@ -531,6 +531,17 @@ class 
NeutronDbPluginV2(neutron_plugin_base_v2.NeutronPluginBaseV2, ips = self._allocate_fixed_ips(context, to_add, mac_address) return ips, prev_ips + def _calculate_ipv6_eui64_addr(self, context, subnet, mac_addr): + prefix = subnet['cidr'] + network_id = subnet['network_id'] + ip_address = ipv6_utils.get_ipv6_addr_by_EUI64( + prefix, mac_addr).format() + if not self._check_unique_ip(context, network_id, + subnet['id'], ip_address): + raise n_exc.IpAddressInUse(net_id=network_id, + ip_address=ip_address) + return ip_address + def _allocate_ips_for_port(self, context, port): """Allocate IP addresses for the port. @@ -585,13 +596,8 @@ class NeutronDbPluginV2(neutron_plugin_base_v2.NeutronPluginBaseV2, for subnet in v6_stateless: # IP addresses for IPv6 SLAAC and DHCPv6-stateless subnets # are implicitly included. - prefix = subnet['cidr'] - ip_address = ipv6_utils.get_ipv6_addr_by_EUI64(prefix, - p['mac_address']) - if not self._check_unique_ip(context, p['network_id'], - subnet['id'], ip_address.format()): - raise n_exc.IpAddressInUse(net_id=p['network_id'], - ip_address=ip_address.format()) + ip_address = self._calculate_ipv6_eui64_addr(context, subnet, + p['mac_address']) ips.append({'ip_address': ip_address.format(), 'subnet_id': subnet['id']}) @@ -1343,8 +1349,46 @@ class NeutronDbPluginV2(neutron_plugin_base_v2.NeutronPluginBaseV2, 'subnet pool') raise n_exc.BadRequest(resource='subnets', msg=msg) # Create subnet from the implicit(AKA null) pool - return self._create_subnet_from_implicit_pool(context, subnet) - return self._create_subnet_from_pool(context, subnet, subnetpool_id) + created_subnet = self._create_subnet_from_implicit_pool(context, + subnet) + else: + created_subnet = self._create_subnet_from_pool(context, subnet, + subnetpool_id) + + # If this subnet supports auto-addressing, then update any + # internal ports on the network with addresses for this subnet. 
+ if ipv6_utils.is_auto_address_subnet(created_subnet): + self._add_auto_addrs_on_network_ports(context, created_subnet) + + return created_subnet + + def _add_auto_addrs_on_network_ports(self, context, subnet): + """For an auto-address subnet, add addrs for ports on the net.""" + with context.session.begin(subtransactions=True): + network_id = subnet['network_id'] + port_qry = context.session.query(models_v2.Port) + for port in port_qry.filter( + and_(models_v2.Port.network_id == network_id, + models_v2.Port.device_owner != + constants.DEVICE_OWNER_ROUTER_SNAT, + ~models_v2.Port.device_owner.in_( + constants.ROUTER_INTERFACE_OWNERS))): + ip_address = self._calculate_ipv6_eui64_addr( + context, subnet, port['mac_address']) + allocated = models_v2.IPAllocation(network_id=network_id, + port_id=port['id'], + ip_address=ip_address, + subnet_id=subnet['id']) + try: + # Do the insertion of each IP allocation entry within + # the context of a nested transaction, so that the entry + # is rolled back independently of other entries whenever + # the corresponding port has been deleted. + with context.session.begin_nested(): + context.session.add(allocated) + except db_exc.DBReferenceError: + LOG.debug("Port %s was deleted while updating it with an " + "IPv6 auto-address. 
Ignoring.", port['id']) def _update_subnet_dns_nameservers(self, context, id, s): old_dns_list = self._get_dns_by_subnet(context, id) diff --git a/neutron/tests/unit/db/test_db_base_plugin_v2.py b/neutron/tests/unit/db/test_db_base_plugin_v2.py index 0dc18001dd4..e8ef97e8fdb 100644 --- a/neutron/tests/unit/db/test_db_base_plugin_v2.py +++ b/neutron/tests/unit/db/test_db_base_plugin_v2.py @@ -20,7 +20,9 @@ import itertools import mock import netaddr from oslo_config import cfg +from oslo_db import exception as db_exc from oslo_utils import importutils +from sqlalchemy import orm from testtools import matchers import webob.exc @@ -3811,6 +3813,71 @@ class TestSubnetsV2(NeutronDbPluginV2TestCase): self.assertEqual(ctx_manager.exception.code, webob.exc.HTTPClientError.code) + def _test_create_subnet_ipv6_auto_addr_with_port_on_network( + self, addr_mode, device_owner=DEVICE_OWNER_COMPUTE, + insert_db_reference_error=False): + # Create a network with one IPv4 subnet and one port + with self.network() as network,\ + self.subnet(network=network) as v4_subnet,\ + self.port(subnet=v4_subnet, device_owner=device_owner) as port: + if insert_db_reference_error: + def db_ref_err_for_ipalloc(instance): + if instance.__class__.__name__ == 'IPAllocation': + raise db_exc.DBReferenceError( + 'dummy_table', 'dummy_constraint', + 'dummy_key', 'dummy_key_table') + mock.patch.object(orm.Session, 'add', + side_effect=db_ref_err_for_ipalloc).start() + # Add an IPv6 auto-address subnet to the network + v6_subnet = self._make_subnet(self.fmt, network, 'fe80::1', + 'fe80::/64', ip_version=6, + ipv6_ra_mode=addr_mode, + ipv6_address_mode=addr_mode) + if (insert_db_reference_error + or device_owner == constants.DEVICE_OWNER_ROUTER_SNAT + or device_owner in constants.ROUTER_INTERFACE_OWNERS): + # DVR SNAT and router interfaces should not have been + # updated with addresses from the new auto-address subnet + self.assertEqual(1, len(port['port']['fixed_ips'])) + else: + # Confirm that the port 
has been updated with an address + # from the new auto-address subnet + req = self.new_show_request('ports', port['port']['id'], + self.fmt) + sport = self.deserialize(self.fmt, req.get_response(self.api)) + fixed_ips = sport['port']['fixed_ips'] + self.assertEqual(2, len(fixed_ips)) + self.assertIn(v6_subnet['subnet']['id'], + [fixed_ip['subnet_id'] for fixed_ip + in fixed_ips]) + + def test_create_subnet_ipv6_slaac_with_port_on_network(self): + self._test_create_subnet_ipv6_auto_addr_with_port_on_network( + constants.IPV6_SLAAC) + + def test_create_subnet_dhcpv6_stateless_with_port_on_network(self): + self._test_create_subnet_ipv6_auto_addr_with_port_on_network( + constants.DHCPV6_STATELESS) + + def test_create_subnet_ipv6_slaac_with_dhcp_port_on_network(self): + self._test_create_subnet_ipv6_auto_addr_with_port_on_network( + constants.IPV6_SLAAC, + device_owner=constants.DEVICE_OWNER_DHCP) + + def test_create_subnet_ipv6_slaac_with_router_intf_on_network(self): + self._test_create_subnet_ipv6_auto_addr_with_port_on_network( + constants.IPV6_SLAAC, + device_owner=constants.DEVICE_OWNER_ROUTER_INTF) + + def test_create_subnet_ipv6_slaac_with_snat_intf_on_network(self): + self._test_create_subnet_ipv6_auto_addr_with_port_on_network( + constants.IPV6_SLAAC, + device_owner=constants.DEVICE_OWNER_ROUTER_SNAT) + + def test_create_subnet_ipv6_slaac_with_db_reference_error(self): + self._test_create_subnet_ipv6_auto_addr_with_port_on_network( + constants.IPV6_SLAAC, insert_db_reference_error=True) + def test_update_subnet_no_gateway(self): with self.subnet() as subnet: data = {'subnet': {'gateway_ip': '10.0.0.1'}} @@ -5330,6 +5397,7 @@ class TestNeutronDbPluginV2(base.BaseTestCase): 'enable_dhcp': True, 'gateway_ip': u'2001:100::1', 'id': u'd1a28edd-bd83-480a-bd40-93d036c89f13', + 'network_id': 'fbb9b578-95eb-4b79-a116-78e5c4927176', 'ip_version': 6, 'ipv6_address_mode': None, 'ipv6_ra_mode': u'slaac'}, @@ -5338,6 +5406,7 @@ class 
TestNeutronDbPluginV2(base.BaseTestCase): 'enable_dhcp': True, 'gateway_ip': u'2001:200::1', 'id': u'dc813d3d-ed66-4184-8570-7325c8195e28', + 'network_id': 'fbb9b578-95eb-4b79-a116-78e5c4927176', 'ip_version': 6, 'ipv6_address_mode': None, 'ipv6_ra_mode': u'slaac'}] From ffc48f286e1756302d9259dc514dd562d3c251ba Mon Sep 17 00:00:00 2001 From: Henry Gessau Date: Thu, 16 Apr 2015 13:38:46 -0400 Subject: [PATCH 005/292] Add Kilo release milestone Change-Id: Id7d969c92b7c757b766760681357ac13c8079ca3 --- .../alembic_migrations/versions/HEAD | 2 +- .../versions/kilo_release.py | 29 +++++++++++++++++++ 2 files changed, 30 insertions(+), 1 deletion(-) create mode 100644 neutron/db/migration/alembic_migrations/versions/kilo_release.py diff --git a/neutron/db/migration/alembic_migrations/versions/HEAD b/neutron/db/migration/alembic_migrations/versions/HEAD index d9e9459801f..062799591c1 100644 --- a/neutron/db/migration/alembic_migrations/versions/HEAD +++ b/neutron/db/migration/alembic_migrations/versions/HEAD @@ -1 +1 @@ -20c469a5f920 +kilo diff --git a/neutron/db/migration/alembic_migrations/versions/kilo_release.py b/neutron/db/migration/alembic_migrations/versions/kilo_release.py new file mode 100644 index 00000000000..fd0911d273a --- /dev/null +++ b/neutron/db/migration/alembic_migrations/versions/kilo_release.py @@ -0,0 +1,29 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +"""kilo + +Revision ID: kilo +Revises: 20c469a5f920 +Create Date: 2015-04-16 00:00:00.000000 + +""" + +# revision identifiers, used by Alembic. +revision = 'kilo' +down_revision = '20c469a5f920' + + +def upgrade(): + """A no-op migration for marking the Kilo release.""" + pass From 2add4e5ad4d12c817737d04ddb973b3aeeb25af3 Mon Sep 17 00:00:00 2001 From: Kevin Benton Date: Thu, 16 Apr 2015 16:27:38 -0700 Subject: [PATCH 006/292] Update .gitreview to point to stable/kilo This is the stable/kilo branch. When people make changes here it's highly likely that they want to propose them to stable/kilo on gerrit. Change-Id: Ie61a9f0c0b0b4896da33a201e42b1c4bc4bae49b --- .gitreview | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitreview b/.gitreview index 184583f0d66..df092ef8a47 100644 --- a/.gitreview +++ b/.gitreview @@ -2,3 +2,4 @@ host=review.openstack.org port=29418 project=openstack/neutron.git +defaultbranch=stable/kilo From 8b8095e43a143426c501669167490d7867a55749 Mon Sep 17 00:00:00 2001 From: Kevin Benton Date: Tue, 31 Mar 2015 08:53:56 -0700 Subject: [PATCH 007/292] Revert "Add ipset element and hashsize tunables" This reverts commit b5b919a7a3569ccb93c3d7d523c1edfaeddb7cb9. The current ipset manager code isn't robust enough to handle ipsets that already exist with different parameters. This reverts the ability to change the parameters so we don't break upgrades to Kilo. 
Conflicts: neutron/agent/linux/ipset_manager.py neutron/tests/unit/agent/linux/test_ipset_manager.py Change-Id: I538714df52424f0502cb75daea310517d1142c42 Closes-Bug: #1444201 (cherry picked from commit 03be14a569d240865dabff8b4c30385abf1dbe62) --- etc/neutron.conf | 9 ----- neutron/agent/common/config.py | 15 -------- neutron/agent/linux/ipset_manager.py | 20 +--------- .../unit/agent/linux/test_ipset_manager.py | 37 ++----------------- 4 files changed, 6 insertions(+), 75 deletions(-) diff --git a/etc/neutron.conf b/etc/neutron.conf index 5d8640f90ea..2983cc6c0ff 100644 --- a/etc/neutron.conf +++ b/etc/neutron.conf @@ -660,15 +660,6 @@ # each rule's purpose. (System must support the iptables comments module.) # comment_iptables_rules = True -# Maximum number of elements which can be stored in an IPset. -# If None is specified, the system default will be used. -# ipset_maxelem = 131072 - -# Initial hash size for an IPset. Must be a power of 2, -# else the kernel will round it up automatically. -# If None is specified, the system default will be used. -# ipset_hashsize = 2048 - # Root helper daemon application to use when possible. # root_helper_daemon = diff --git a/neutron/agent/common/config.py b/neutron/agent/common/config.py index efc1ca47602..7e63ea38789 100644 --- a/neutron/agent/common/config.py +++ b/neutron/agent/common/config.py @@ -63,17 +63,6 @@ IPTABLES_OPTS = [ help=_("Add comments to iptables rules.")), ] -IPSET_OPTS = [ - cfg.IntOpt('ipset_maxelem', default=131072, - help=_("Maximum number of elements which can be stored in " - "an IPset. If None is specified, the system default " - "will be used.")), - cfg.IntOpt('ipset_hashsize', default=2048, - help=_("Initial hash size for an IPset. Must be a power of 2, " - "else the kernel will round it up automatically. 
If " - "None is specified, the system default will be used.")), -] - PROCESS_MONITOR_OPTS = [ cfg.StrOpt('check_child_processes_action', default='respawn', choices=['respawn', 'exit'], @@ -133,10 +122,6 @@ def register_iptables_opts(conf): conf.register_opts(IPTABLES_OPTS, 'AGENT') -def register_ipset_opts(conf): - conf.register_opts(IPSET_OPTS, 'AGENT') - - def register_process_monitor_opts(conf): conf.register_opts(PROCESS_MONITOR_OPTS, 'AGENT') diff --git a/neutron/agent/linux/ipset_manager.py b/neutron/agent/linux/ipset_manager.py index 33b6379b586..e5ab7a01e9c 100644 --- a/neutron/agent/linux/ipset_manager.py +++ b/neutron/agent/linux/ipset_manager.py @@ -11,9 +11,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -from oslo_config import cfg - -from neutron.agent.common import config from neutron.agent.linux import utils as linux_utils from neutron.common import utils @@ -32,7 +29,6 @@ class IpsetManager(object): def __init__(self, execute=None, namespace=None): self.execute = execute or linux_utils.execute self.namespace = namespace - config.register_ipset_opts(cfg.CONF) self.ipset_sets = {} @staticmethod @@ -43,15 +39,6 @@ class IpsetManager(object): name = ethertype + id return name[:IPSET_NAME_MAX_LENGTH] - @staticmethod - def get_hashargs(): - args = [] - if cfg.CONF.AGENT.ipset_hashsize: - args.extend(['hashsize', str(cfg.CONF.AGENT.ipset_hashsize)]) - if cfg.CONF.AGENT.ipset_maxelem: - args.extend(['maxelem', str(cfg.CONF.AGENT.ipset_maxelem)]) - return args - def set_exists(self, id, ethertype): """Returns true if the id+ethertype pair is known to the manager.""" set_name = self.get_name(id, ethertype) @@ -98,10 +85,8 @@ class IpsetManager(object): def _refresh_set(self, set_name, member_ips, ethertype): new_set_name = set_name + SWAP_SUFFIX set_type = self._get_ipset_set_type(ethertype) - hash_args = ' '.join(self.get_hashargs()) - process_input = ["create %s hash:ip family %s %s" % 
(new_set_name, - set_type, - hash_args)] + process_input = ["create %s hash:ip family %s" % (new_set_name, + set_type)] for ip in member_ips: process_input.append("add %s %s" % (new_set_name, ip)) @@ -118,7 +103,6 @@ class IpsetManager(object): def _create_set(self, set_name, ethertype): cmd = ['ipset', 'create', '-exist', set_name, 'hash:ip', 'family', self._get_ipset_set_type(ethertype)] - cmd.extend(self.get_hashargs()) self._apply(cmd) self.ipset_sets[set_name] = [] diff --git a/neutron/tests/unit/agent/linux/test_ipset_manager.py b/neutron/tests/unit/agent/linux/test_ipset_manager.py index 19fbb7e20e6..cbd156218ff 100644 --- a/neutron/tests/unit/agent/linux/test_ipset_manager.py +++ b/neutron/tests/unit/agent/linux/test_ipset_manager.py @@ -12,9 +12,7 @@ # limitations under the License. import mock -from oslo_config import cfg -from neutron.agent.common import config as a_cfg from neutron.agent.linux import ipset_manager from neutron.tests import base @@ -27,13 +25,8 @@ FAKE_IPS = ['10.0.0.1', '10.0.0.2', '10.0.0.3', '10.0.0.4', class BaseIpsetManagerTest(base.BaseTestCase): - def setUp(self, maxelem=None, hashsize=None): + def setUp(self): super(BaseIpsetManagerTest, self).setUp() - cfg.CONF.register_opts(a_cfg.IPSET_OPTS, 'AGENT') - cfg.CONF.set_override('ipset_maxelem', maxelem, 'AGENT') - cfg.CONF.set_override('ipset_hashsize', hashsize, 'AGENT') - self.maxelem = maxelem - self.hashsize = hashsize self.ipset = ipset_manager.IpsetManager() self.execute = mock.patch.object(self.ipset, "execute").start() self.expected_calls = [] @@ -43,13 +36,7 @@ class BaseIpsetManagerTest(base.BaseTestCase): self.execute.assert_has_calls(self.expected_calls, any_order=False) def expect_set(self, addresses): - hash_args = [] - if self.hashsize: - hash_args.extend(['hashsize', str(self.hashsize)]) - if self.maxelem: - hash_args.extend(['maxelem', str(self.maxelem)]) - temp_input = ['create IPv4fake_sgid-new hash:ip family inet %s' % - ' '.join(hash_args)] + temp_input = 
['create IPv4fake_sgid-new hash:ip family inet'] temp_input.extend('add IPv4fake_sgid-new %s' % ip for ip in addresses) input = '\n'.join(temp_input) self.expected_calls.extend([ @@ -76,14 +63,9 @@ class BaseIpsetManagerTest(base.BaseTestCase): run_as_root=True) for ip in addresses) def expect_create(self): - ipset_call = ['ipset', 'create', '-exist', TEST_SET_NAME, - 'hash:ip', 'family', 'inet'] - if self.hashsize: - ipset_call.extend(['hashsize', str(self.hashsize)]) - if self.maxelem: - ipset_call.extend(['maxelem', str(self.maxelem)]) self.expected_calls.append( - mock.call(ipset_call, + mock.call(['ipset', 'create', '-exist', TEST_SET_NAME, + 'hash:ip', 'family', 'inet'], process_input=None, run_as_root=True)) @@ -103,10 +85,6 @@ class BaseIpsetManagerTest(base.BaseTestCase): class IpsetManagerTestCase(BaseIpsetManagerTest): - """Run all tests, but with maxelem/hashsize values not configured - """ - def setUp(self): - super(IpsetManagerTestCase, self).setUp() def test_set_exists(self): self.add_first_ip() @@ -139,10 +117,3 @@ class IpsetManagerTestCase(BaseIpsetManagerTest): self.expect_destroy() self.ipset.destroy(TEST_SET_ID, ETHERTYPE) self.verify_mock_calls() - - -class IpsetManagerTestCaseHashArgs(IpsetManagerTestCase): - """Run all the above tests, but with maxelem/hashsize values configured - """ - def setUp(self): - super(IpsetManagerTestCase, self).setUp(maxelem=131072, hashsize=2048) From a6b2c22dcea73754dbfd0ef39c60ad28ab2dbb73 Mon Sep 17 00:00:00 2001 From: Kevin Benton Date: Mon, 30 Mar 2015 23:52:56 -0700 Subject: [PATCH 008/292] Set IPset hash type to 'net' instead of 'ip' The previous hash type was 'ip' and this caused a major issue with the allowed address pairs extension since it results in CIDRs being passed to ipset. When the hash type is 'ip', a CIDR is completely enumerated into all of its addresses so 10.100.0.0/16 results in ~65k entries. This meant a single allowed_address_pairs entry could easily exhaust an entire set. 
This patch changes the hash type to 'net', which is designed to handle a CIDRs as a single entry. This patch also changes the names of the ipsets because creating an ipset with different parameters will cause an error and our ipset manager code isn't robust enough to handle that at this time. There is another ongoing patch to fix that but it won't be ready in time.[1] The related bug was closed by increasing the set limit, which did alleviate the problem. However, this change would also address the issue because the gate tests run an allowed address pairs extension test with the CIDR mentioned above. 1. I59e2e1c090cb95ee1bd14dbb53b6ff2c5e2713fd Related-Bug: #1439817 Closes-Bug: #1444397 Change-Id: I8177699b157cd3eac46e2f481f47b5d966c49b07 (cherry picked from commit a38b5df5cd3c47672705aad4c30e789ae11ec958) --- neutron/agent/linux/ipset_manager.py | 6 +++--- neutron/tests/unit/agent/linux/test_ipset_manager.py | 7 ++++--- neutron/tests/unit/agent/test_securitygroups_rpc.py | 10 +++++----- 3 files changed, 12 insertions(+), 11 deletions(-) diff --git a/neutron/agent/linux/ipset_manager.py b/neutron/agent/linux/ipset_manager.py index e5ab7a01e9c..0f764185239 100644 --- a/neutron/agent/linux/ipset_manager.py +++ b/neutron/agent/linux/ipset_manager.py @@ -36,7 +36,7 @@ class IpsetManager(object): """Returns the given ipset name for an id+ethertype pair. This reference can be used from iptables. 
""" - name = ethertype + id + name = 'NET' + ethertype + id return name[:IPSET_NAME_MAX_LENGTH] def set_exists(self, id, ethertype): @@ -85,7 +85,7 @@ class IpsetManager(object): def _refresh_set(self, set_name, member_ips, ethertype): new_set_name = set_name + SWAP_SUFFIX set_type = self._get_ipset_set_type(ethertype) - process_input = ["create %s hash:ip family %s" % (new_set_name, + process_input = ["create %s hash:net family %s" % (new_set_name, set_type)] for ip in member_ips: process_input.append("add %s %s" % (new_set_name, ip)) @@ -101,7 +101,7 @@ class IpsetManager(object): self.ipset_sets[set_name].remove(member_ip) def _create_set(self, set_name, ethertype): - cmd = ['ipset', 'create', '-exist', set_name, 'hash:ip', 'family', + cmd = ['ipset', 'create', '-exist', set_name, 'hash:net', 'family', self._get_ipset_set_type(ethertype)] self._apply(cmd) self.ipset_sets[set_name] = [] diff --git a/neutron/tests/unit/agent/linux/test_ipset_manager.py b/neutron/tests/unit/agent/linux/test_ipset_manager.py index cbd156218ff..44840086f60 100644 --- a/neutron/tests/unit/agent/linux/test_ipset_manager.py +++ b/neutron/tests/unit/agent/linux/test_ipset_manager.py @@ -36,8 +36,9 @@ class BaseIpsetManagerTest(base.BaseTestCase): self.execute.assert_has_calls(self.expected_calls, any_order=False) def expect_set(self, addresses): - temp_input = ['create IPv4fake_sgid-new hash:ip family inet'] - temp_input.extend('add IPv4fake_sgid-new %s' % ip for ip in addresses) + temp_input = ['create NETIPv4fake_sgid-new hash:net family inet'] + temp_input.extend('add NETIPv4fake_sgid-new %s' % ip + for ip in addresses) input = '\n'.join(temp_input) self.expected_calls.extend([ mock.call(['ipset', 'restore', '-exist'], @@ -65,7 +66,7 @@ class BaseIpsetManagerTest(base.BaseTestCase): def expect_create(self): self.expected_calls.append( mock.call(['ipset', 'create', '-exist', TEST_SET_NAME, - 'hash:ip', 'family', 'inet'], + 'hash:net', 'family', 'inet'], process_input=None, 
run_as_root=True)) diff --git a/neutron/tests/unit/agent/test_securitygroups_rpc.py b/neutron/tests/unit/agent/test_securitygroups_rpc.py index feabbcbfdbd..6a4e2990f41 100644 --- a/neutron/tests/unit/agent/test_securitygroups_rpc.py +++ b/neutron/tests/unit/agent/test_securitygroups_rpc.py @@ -1776,7 +1776,7 @@ IPSET_FILTER_1 = """# Generated by iptables_manager [0:0] -A %(bn)s-i_port1 -s 10.0.0.2/32 -p udp -m udp --sport 67 --dport 68 \ -j RETURN [0:0] -A %(bn)s-i_port1 -p tcp -m tcp --dport 22 -j RETURN -[0:0] -A %(bn)s-i_port1 -m set --match-set IPv4security_group1 src -j \ +[0:0] -A %(bn)s-i_port1 -m set --match-set NETIPv4security_group1 src -j \ RETURN [0:0] -A %(bn)s-i_port1 -j %(bn)s-sg-fallback [0:0] -A %(bn)s-FORWARD %(physdev_mod)s --physdev-EGRESS tap_port1 \ @@ -1935,7 +1935,7 @@ IPSET_FILTER_2 = """# Generated by iptables_manager [0:0] -A %(bn)s-i_%(port1)s -s 10.0.0.2/32 -p udp -m udp --sport 67 \ --dport 68 -j RETURN [0:0] -A %(bn)s-i_%(port1)s -p tcp -m tcp --dport 22 -j RETURN -[0:0] -A %(bn)s-i_%(port1)s -m set --match-set IPv4security_group1 src -j \ +[0:0] -A %(bn)s-i_%(port1)s -m set --match-set NETIPv4security_group1 src -j \ RETURN [0:0] -A %(bn)s-i_%(port1)s -j %(bn)s-sg-fallback [0:0] -A %(bn)s-FORWARD %(physdev_mod)s --physdev-EGRESS tap_%(port1)s \ @@ -1963,7 +1963,7 @@ RETURN [0:0] -A %(bn)s-i_%(port2)s -s 10.0.0.2/32 -p udp -m udp --sport 67 \ --dport 68 -j RETURN [0:0] -A %(bn)s-i_%(port2)s -p tcp -m tcp --dport 22 -j RETURN -[0:0] -A %(bn)s-i_%(port2)s -m set --match-set IPv4security_group1 src -j \ +[0:0] -A %(bn)s-i_%(port2)s -m set --match-set NETIPv4security_group1 src -j \ RETURN [0:0] -A %(bn)s-i_%(port2)s -j %(bn)s-sg-fallback [0:0] -A %(bn)s-FORWARD %(physdev_mod)s --physdev-EGRESS tap_%(port2)s \ @@ -2018,7 +2018,7 @@ IPSET_FILTER_2_3 = """# Generated by iptables_manager [0:0] -A %(bn)s-i_%(port1)s -s 10.0.0.2/32 -p udp -m udp --sport 67 \ --dport 68 -j RETURN [0:0] -A %(bn)s-i_%(port1)s -p tcp -m tcp --dport 22 -j RETURN 
-[0:0] -A %(bn)s-i_%(port1)s -m set --match-set IPv4security_group1 src -j \ +[0:0] -A %(bn)s-i_%(port1)s -m set --match-set NETIPv4security_group1 src -j \ RETURN [0:0] -A %(bn)s-i_%(port1)s -p icmp -j RETURN [0:0] -A %(bn)s-i_%(port1)s -j %(bn)s-sg-fallback @@ -2047,7 +2047,7 @@ RETURN [0:0] -A %(bn)s-i_%(port2)s -s 10.0.0.2/32 -p udp -m udp --sport 67 \ --dport 68 -j RETURN [0:0] -A %(bn)s-i_%(port2)s -p tcp -m tcp --dport 22 -j RETURN -[0:0] -A %(bn)s-i_%(port2)s -m set --match-set IPv4security_group1 src -j \ +[0:0] -A %(bn)s-i_%(port2)s -m set --match-set NETIPv4security_group1 src -j \ RETURN [0:0] -A %(bn)s-i_%(port2)s -p icmp -j RETURN [0:0] -A %(bn)s-i_%(port2)s -j %(bn)s-sg-fallback From e2f6902315de76a1020aa87ea161c8fdc6697ed7 Mon Sep 17 00:00:00 2001 From: Swaminathan Vasudevan Date: Tue, 14 Apr 2015 21:34:33 -0700 Subject: [PATCH 009/292] Fixes race condition and boosts the scheduling performance This patch fixes a race-condition that occurs when the scheduler tries to check for dvr serviceable ports before it schedules a router when a subnet is associated with a router. Sometimes the dhcp port creation is delayed and so the router is not scheduled to the l3-agent. Also it boosts the scheduling performance on dvr-snat node for scheduling a router. This patch will provide a work around to fix this race condition and to boost the scheduling performance by scheduling a router on a dvr-snat when dhcp is enabled on the provided subnet, instead of checking all the available ports on the subnet. 
Closes-Bug: #1442494 Change-Id: I089fefdd8535bdc9ed90b3230438ab0bfb6aab4f (cherry picked from commit c65d3ab6ad4589e6e4a6b488d2eb5d1e4cfee138) --- neutron/db/l3_agentschedulers_db.py | 19 ++++++++++++++ .../unit/scheduler/test_l3_agent_scheduler.py | 26 +++++++++++++++++++ 2 files changed, 45 insertions(+) diff --git a/neutron/db/l3_agentschedulers_db.py b/neutron/db/l3_agentschedulers_db.py index 931436bddcb..f661dcc6221 100644 --- a/neutron/db/l3_agentschedulers_db.py +++ b/neutron/db/l3_agentschedulers_db.py @@ -379,6 +379,25 @@ class L3AgentSchedulerDbMixin(l3agentscheduler.L3AgentSchedulerPluginBase, return False core_plugin = manager.NeutronManager.get_plugin() + # NOTE(swami):Before checking for existence of dvr + # serviceable ports on the host managed by the l3 + # agent, let's verify if at least one subnet has + # dhcp enabled. If so, then the host will have a + # dvr serviceable port, which is in fact the DHCP + # port. + # This optimization is valid assuming that the L3 + # DVR_SNAT node will be the one hosting the DHCP + # Agent. 
+ agent_conf = self.get_configuration_dict(l3_agent) + agent_mode = agent_conf.get(constants.L3_AGENT_MODE, + constants.L3_AGENT_MODE_LEGACY) + + for subnet_id in subnet_ids: + subnet_dict = core_plugin.get_subnet(context, subnet_id) + if (subnet_dict['enable_dhcp'] and ( + agent_mode == constants.L3_AGENT_MODE_DVR_SNAT)): + return True + filter = {'fixed_ips': {'subnet_id': subnet_ids}} ports = core_plugin.get_ports(context, filters=filter) for port in ports: diff --git a/neutron/tests/unit/scheduler/test_l3_agent_scheduler.py b/neutron/tests/unit/scheduler/test_l3_agent_scheduler.py index a68d2e70874..58b53471ed3 100644 --- a/neutron/tests/unit/scheduler/test_l3_agent_scheduler.py +++ b/neutron/tests/unit/scheduler/test_l3_agent_scheduler.py @@ -671,6 +671,29 @@ class L3SchedulerTestBaseMixin(object): l3_agent, router['id']) self.assertFalse(val) + def test_check_ports_exist_on_l3agent_with_dhcp_enabled_subnets(self): + self._register_l3_dvr_agents() + router = self._make_router(self.fmt, + tenant_id=str(uuid.uuid4()), + name='r2') + router['external_gateway_info'] = None + router['id'] = str(uuid.uuid4()) + router['distributed'] = True + + agent_list = [self.l3_dvr_snat_agent] + subnet = {'id': str(uuid.uuid4()), + 'enable_dhcp': True} + + self.get_subnet_ids_on_router = mock.Mock( + return_value=[subnet['id']]) + + self.plugin.get_subnet = mock.Mock(return_value=subnet) + self.plugin.get_ports = mock.Mock() + val = self.check_ports_exist_on_l3agent( + self.adminContext, agent_list[0], router['id']) + self.assertTrue(val) + self.assertFalse(self.plugin.get_ports.called) + def test_check_ports_exist_on_l3agent_if_no_subnets_then_return(self): l3_agent, router = self._prepare_check_ports_exist_tests() with mock.patch.object(manager.NeutronManager, @@ -698,9 +721,12 @@ class L3SchedulerTestBaseMixin(object): 'binding:host_id': 'host_1', 'device_owner': 'compute:', 'id': 1234} + subnet = {'id': str(uuid.uuid4()), + 'enable_dhcp': False} 
self.plugin.get_ports.return_value = [port] self.get_subnet_ids_on_router = mock.Mock( return_value=[port['subnet_id']]) + self.plugin.get_subnet = mock.Mock(return_value=subnet) val = self.check_ports_exist_on_l3agent(self.adminContext, l3_agent, router['id']) self.assertTrue(val) From 952e556e4e6b0e25ca584481ef6745fc08ace970 Mon Sep 17 00:00:00 2001 From: Matt Riedemann Date: Fri, 17 Apr 2015 08:47:35 -0700 Subject: [PATCH 010/292] Register ibm-db-alembic import for DB2 migrations This patch adds the optional runtime dependency to support db2 for alembic migration, will not break the default mysql path. This is needed for the IBM DB2 third party CI to work on this project. Closes-Bug: 1442524 Change-Id: I9db112dbdc2d02df88ad676a6b21018f1d6f1724 --- neutron/db/migration/alembic_migrations/env.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/neutron/db/migration/alembic_migrations/env.py b/neutron/db/migration/alembic_migrations/env.py index 9966f55e797..7ea6c5ca51e 100644 --- a/neutron/db/migration/alembic_migrations/env.py +++ b/neutron/db/migration/alembic_migrations/env.py @@ -23,6 +23,13 @@ from sqlalchemy import event from neutron.db.migration.models import head # noqa from neutron.db import model_base +try: + # NOTE(mriedem): This is to register the DB2 alembic code which + # is an optional runtime dependency. + from ibm_db_alembic.ibm_db import IbmDbImpl # noqa # pylint: disable=unused-import +except ImportError: + pass + MYSQL_ENGINE = None From f7ae3a04b541767c638fc4c8ff1e0db78ab94996 Mon Sep 17 00:00:00 2001 From: Andreas Jaeger Date: Mon, 20 Apr 2015 11:07:37 +0200 Subject: [PATCH 011/292] Release Import of Translations from Transifex Manual import of Translations from Transifex. This change also removes all po files that are less than 66 per cent translated since such partially translated files will not help users. This updates also recreates all pot (translation source files) to reflect the state of the repository. 
This change needs to be done manually since the automatic import does not handle the proposed branches and we need to sync with latest translations. Change-Id: I1b7bd1773bcd12ab282e77ee0dc41c27846fb66b --- .../locale/de/LC_MESSAGES/neutron-log-info.po | 947 ----------------- .../locale/es/LC_MESSAGES/neutron-log-info.po | 948 ----------------- .../locale/fr/LC_MESSAGES/neutron-log-info.po | 954 ------------------ .../locale/it/LC_MESSAGES/neutron-log-info.po | 942 ----------------- .../locale/ja/LC_MESSAGES/neutron-log-info.po | 944 ----------------- .../ko_KR/LC_MESSAGES/neutron-log-info.po | 937 ----------------- neutron/locale/neutron-log-error.pot | 172 ++-- neutron/locale/neutron-log-info.pot | 107 +- neutron/locale/neutron.pot | 369 ++++--- .../pt_BR/LC_MESSAGES/neutron-log-info.po | 943 ----------------- .../zh_CN/LC_MESSAGES/neutron-log-info.po | 936 ----------------- .../zh_TW/LC_MESSAGES/neutron-log-info.po | 934 ----------------- 12 files changed, 359 insertions(+), 8774 deletions(-) delete mode 100644 neutron/locale/de/LC_MESSAGES/neutron-log-info.po delete mode 100644 neutron/locale/es/LC_MESSAGES/neutron-log-info.po delete mode 100644 neutron/locale/fr/LC_MESSAGES/neutron-log-info.po delete mode 100644 neutron/locale/it/LC_MESSAGES/neutron-log-info.po delete mode 100644 neutron/locale/ja/LC_MESSAGES/neutron-log-info.po delete mode 100644 neutron/locale/ko_KR/LC_MESSAGES/neutron-log-info.po delete mode 100644 neutron/locale/pt_BR/LC_MESSAGES/neutron-log-info.po delete mode 100644 neutron/locale/zh_CN/LC_MESSAGES/neutron-log-info.po delete mode 100644 neutron/locale/zh_TW/LC_MESSAGES/neutron-log-info.po diff --git a/neutron/locale/de/LC_MESSAGES/neutron-log-info.po b/neutron/locale/de/LC_MESSAGES/neutron-log-info.po deleted file mode 100644 index 7f268a3acd5..00000000000 --- a/neutron/locale/de/LC_MESSAGES/neutron-log-info.po +++ /dev/null @@ -1,947 +0,0 @@ -# Translations template for neutron. 
-# Copyright (C) 2015 ORGANIZATION -# This file is distributed under the same license as the neutron project. -# -# Translators: -# Carsten Duch , 2014 -msgid "" -msgstr "" -"Project-Id-Version: Neutron\n" -"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2015-04-03 06:13+0000\n" -"PO-Revision-Date: 2015-03-31 22:26+0000\n" -"Last-Translator: openstackjenkins \n" -"Language-Team: German (http://www.transifex.com/projects/p/neutron/language/" -"de/)\n" -"Language: de\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"Generated-By: Babel 1.3\n" -"Plural-Forms: nplurals=2; plural=(n != 1);\n" - -#: neutron/manager.py:115 -#, python-format -msgid "Loading core plugin: %s" -msgstr "" - -#: neutron/manager.py:155 -#, python-format -msgid "Service %s is supported by the core plugin" -msgstr "" - -#: neutron/manager.py:173 -#, python-format -msgid "Loading Plugin: %s" -msgstr "Laden von Plug-in: %s" - -#: neutron/policy.py:114 -#, python-format -msgid "" -"Inserting policy:%(new_policy)s in place of deprecated policy:%(old_policy)s" -msgstr "" - -#: neutron/quota.py:215 -msgid "" -"ConfDriver is used as quota_driver because the loaded plugin does not " -"support 'quotas' table." -msgstr "" - -#: neutron/quota.py:220 -#, python-format -msgid "Loaded quota_driver: %s." 
-msgstr "" - -#: neutron/service.py:178 -#, python-format -msgid "Neutron service started, listening on %(host)s:%(port)s" -msgstr "" - -#: neutron/wsgi.py:779 -#, python-format -msgid "%(method)s %(url)s" -msgstr "%(method)s %(url)s" - -#: neutron/wsgi.py:796 -#, python-format -msgid "HTTP exception thrown: %s" -msgstr "Ausgelöste HTTP-Ausnahme: %s" - -#: neutron/wsgi.py:812 -#, python-format -msgid "%(url)s returned with HTTP %(status)d" -msgstr "%(url)s mit HTTP %(status)d zurückgegeben" - -#: neutron/wsgi.py:815 -#, python-format -msgid "%(url)s returned a fault: %(exception)s" -msgstr "%(url)s hat einen Fehler zurückgegeben: %(exception)s" - -#: neutron/agent/securitygroups_rpc.py:82 -msgid "Disabled security-group extension." -msgstr "Sicherheitsgruppenerweiterung wurde inaktiviert." - -#: neutron/agent/securitygroups_rpc.py:84 -msgid "Disabled allowed-address-pairs extension." -msgstr "" - -#: neutron/agent/securitygroups_rpc.py:214 -#, python-format -msgid "" -"Skipping method %s as firewall is disabled or configured as " -"NoopFirewallDriver." 
-msgstr "" - -#: neutron/agent/securitygroups_rpc.py:226 -#, python-format -msgid "Preparing filters for devices %s" -msgstr "Vorbereiten von Filtern für Geräte %s" - -#: neutron/agent/securitygroups_rpc.py:256 -#, python-format -msgid "Security group rule updated %r" -msgstr "Sicherheitsgruppenregel aktualisiert %r" - -#: neutron/agent/securitygroups_rpc.py:263 -#, python-format -msgid "Security group member updated %r" -msgstr "Sicherheitsgruppenmitglied aktualisiert %r" - -#: neutron/agent/securitygroups_rpc.py:285 -msgid "Provider rule updated" -msgstr "Provider-Regel aktualisiert" - -#: neutron/agent/securitygroups_rpc.py:297 -#, python-format -msgid "Remove device filter for %r" -msgstr "Gerätefilter für %r entfernen" - -#: neutron/agent/securitygroups_rpc.py:307 -msgid "Refresh firewall rules" -msgstr "Firewallregeln aktualisieren" - -#: neutron/agent/securitygroups_rpc.py:311 -msgid "No ports here to refresh firewall" -msgstr "" - -#: neutron/agent/common/ovs_lib.py:393 -#, python-format -msgid "Port %(port_id)s not present in bridge %(br_name)s" -msgstr "" - -#: neutron/agent/dhcp/agent.py:93 neutron/agent/dhcp/agent.py:589 -msgid "DHCP agent started" -msgstr "DHCP-Agent gestartet" - -#: neutron/agent/dhcp/agent.py:144 -msgid "Synchronizing state" -msgstr "Synchronisation von Status" - -#: neutron/agent/dhcp/agent.py:165 -msgid "Synchronizing state complete" -msgstr "" - -#: neutron/agent/dhcp/agent.py:586 neutron/agent/l3/agent.py:622 -#: neutron/services/metering/agents/metering_agent.py:286 -#, python-format -msgid "agent_updated by server side %s!" -msgstr "'agent_updated' (Agent aktualisiert) durch Serverseite %s!" - -#: neutron/agent/l3/agent.py:551 -msgid "L3 agent started" -msgstr "Agent der Ebene 3 gestartet" - -#: neutron/agent/l3/ha.py:114 -#, python-format -msgid "Router %(router_id)s transitioned to %(state)s" -msgstr "" - -#: neutron/agent/l3/ha.py:124 -#, python-format -msgid "" -"Router %s is not managed by this agent. 
It was possibly deleted concurrently." -msgstr "" - -#: neutron/agent/linux/daemon.py:102 -#, python-format -msgid "Process runs with uid/gid: %(uid)s/%(gid)s" -msgstr "" - -#: neutron/agent/linux/dhcp.py:656 -#, python-format -msgid "" -"Cannot apply dhcp option %(opt)s because it's ip_version %(version)d is not " -"in port's address IP versions" -msgstr "" - -#: neutron/agent/linux/interface.py:268 neutron/agent/linux/interface.py:319 -#: neutron/agent/linux/interface.py:377 neutron/agent/linux/interface.py:420 -#, python-format -msgid "Device %s already exists" -msgstr "Gerät %s ist bereits vorhanden" - -#: neutron/agent/linux/iptables_firewall.py:114 -#, python-format -msgid "Attempted to update port filter which is not filtered %s" -msgstr "Versuch, ungefilterten Portfilter %s zu aktualisieren" - -#: neutron/agent/linux/iptables_firewall.py:125 -#, python-format -msgid "Attempted to remove port filter which is not filtered %r" -msgstr "Versuch, ungefilterten Portfilter %r zu entfernen" - -#: neutron/api/extensions.py:404 -msgid "Initializing extension manager." -msgstr "Erweiterungsmanager wird initialisiert." 
- -#: neutron/api/extensions.py:562 -#, python-format -msgid "Loaded extension: %s" -msgstr "Geladene Erweiterung: %s" - -#: neutron/api/v2/base.py:93 -msgid "" -"Allow sorting is enabled because native pagination requires native sorting" -msgstr "" -"Das Zulassen der Sortierung ist aktiviert, da die native Paginierung die " -"native Sortierung erfordert" - -#: neutron/api/v2/resource.py:94 -#, python-format -msgid "%(action)s failed (client error): %(exc)s" -msgstr "" - -#: neutron/callbacks/manager.py:135 -#, python-format -msgid "Notify callbacks for %(resource)s, %(event)s" -msgstr "" - -#: neutron/callbacks/manager.py:142 -#, python-format -msgid "Calling callback %s" -msgstr "" - -#: neutron/cmd/ovs_cleanup.py:73 -#, python-format -msgid "Deleting port: %s" -msgstr "" - -#: neutron/cmd/ovs_cleanup.py:103 -#, python-format -msgid "Cleaning bridge: %s" -msgstr "" - -#: neutron/cmd/ovs_cleanup.py:110 -msgid "OVS cleanup completed successfully" -msgstr "OVS-Bereinigungsprozedur erfolgreich abgeschlossen" - -#: neutron/cmd/eventlet/plugins/hyperv_neutron_agent.py:43 -#: neutron/plugins/ibm/agent/sdnve_neutron_agent.py:261 -#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:1017 -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1634 -#: neutron/plugins/sriovnicagent/sriov_nic_agent.py:345 -msgid "Agent initialized successfully, now running... " -msgstr "Agent erfolgreich initialisiert, läuft jetzt... " - -#: neutron/common/config.py:204 -msgid "Logging enabled!" -msgstr "Protokollfunktion aktiviert!" - -#: neutron/common/config.py:205 -#, python-format -msgid "%(prog)s version %(version)s" -msgstr "" - -#: neutron/common/config.py:224 -#, python-format -msgid "Config paste file: %s" -msgstr "Konfigurations-Paste-Datei: %s" - -#: neutron/common/ipv6_utils.py:63 -msgid "IPv6 is not enabled on this system." 
-msgstr "" - -#: neutron/db/agentschedulers_db.py:161 -msgid "" -"Skipping periodic DHCP agent status check because automatic network " -"rescheduling is disabled." -msgstr "" - -#: neutron/db/agentschedulers_db.py:196 -#, python-format -msgid "Scheduling unhosted network %s" -msgstr "" - -#: neutron/db/agentschedulers_db.py:203 -#, python-format -msgid "" -"Failed to schedule network %s, no eligible agents or it might be already " -"scheduled by another server" -msgstr "" - -#: neutron/db/agentschedulers_db.py:211 -#, python-format -msgid "Adding network %(net)s to agent %(agent)%s on host %(host)s" -msgstr "" - -#: neutron/db/db_base_plugin_v2.py:630 -#, python-format -msgid "" -"Validation for CIDR: %(new_cidr)s failed - overlaps with subnet " -"%(subnet_id)s (CIDR: %(cidr)s)" -msgstr "" -"Überprüfung für CIDR: %(new_cidr)s fehlgeschlagen - Überschneidung mit " -"Teilnetz %(subnet_id)s (CIDR: %(cidr)s)" - -#: neutron/db/db_base_plugin_v2.py:657 -#, python-format -msgid "Found invalid IP address in pool: %(start)s - %(end)s:" -msgstr "Ungültige IP-Adresse in Pool gefunden: %(start)s - %(end)s:" - -#: neutron/db/db_base_plugin_v2.py:664 -msgid "Specified IP addresses do not match the subnet IP version" -msgstr "" -"Angegebene IP-Adressen stimmen nicht mit der Teilnetz-IP-Version überein" - -#: neutron/db/db_base_plugin_v2.py:668 -#, python-format -msgid "Start IP (%(start)s) is greater than end IP (%(end)s)" -msgstr "" -"Anfangs-IP-Adresse (%(start)s) ist größer als Ende-IP-Adresse (%(end)s)" - -#: neutron/db/db_base_plugin_v2.py:673 -#, python-format -msgid "Found pool larger than subnet CIDR:%(start)s - %(end)s" -msgstr "Pool gefunden, der größer ist als Teilnetz-CIDR:%(start)s - %(end)s" - -#: neutron/db/db_base_plugin_v2.py:697 -#, python-format -msgid "Found overlapping ranges: %(l_range)s and %(r_range)s" -msgstr "Überschneidungen bei Bereichen gefunden: %(l_range)s und %(r_range)s" - -#: neutron/db/db_base_plugin_v2.py:1507 -#, python-format -msgid "Found 
IP allocation %(alloc)s on subnet %(subnet)s, cannot delete" -msgstr "" - -#: neutron/db/l3_agentschedulers_db.py:78 -msgid "" -"Skipping period L3 agent status check because automatic router rescheduling " -"is disabled." -msgstr "" - -#: neutron/db/l3_db.py:1114 -#, python-format -msgid "Skipping port %s as no IP is configure on it" -msgstr "" -"Port %s wird übersprungen, da keine IP-Adresse auf ihm konfiguriert ist" - -#: neutron/db/l3_dvr_db.py:86 -#, python-format -msgid "Centralizing distributed router %s is not supported" -msgstr "" - -#: neutron/db/l3_dvr_db.py:535 -#, python-format -msgid "Agent Gateway port does not exist, so create one: %s" -msgstr "" - -#: neutron/db/l3_dvr_db.py:619 -#, python-format -msgid "SNAT interface port list does not exist, so create one: %s" -msgstr "" - -#: neutron/db/l3_dvrscheduler_db.py:312 -msgid "SNAT already bound to a service node." -msgstr "" - -#: neutron/db/l3_hamode_db.py:188 -#, python-format -msgid "" -"Attempt %(count)s to allocate a VRID in the network %(network)s for the " -"router %(router)s" -msgstr "" - -#: neutron/db/l3_hamode_db.py:271 -#, python-format -msgid "" -"Number of available agents lower than max_l3_agents_per_router. 
L3 agents " -"available: %s" -msgstr "" - -#: neutron/db/migration/alembic_migrations/heal_script.py:221 -#, python-format -msgid "Table %(old_t)r was renamed to %(new_t)r" -msgstr "" - -#: neutron/debug/commands.py:107 -#, python-format -msgid "%d probe(s) deleted" -msgstr "" - -#: neutron/notifiers/nova.py:266 -#, python-format -msgid "Nova event response: %s" -msgstr "" - -#: neutron/openstack/common/eventlet_backdoor.py:146 -#, python-format -msgid "Eventlet backdoor listening on %(port)s for process %(pid)d" -msgstr "Eventlet backdoor hört auf %(port)s für Prozess %(pid)d" - -#: neutron/openstack/common/periodic_task.py:120 -#, python-format -msgid "Skipping periodic task %(task)s because its interval is negative" -msgstr "" -"Überspringe periodische Aufgabe %(task)s weil der Intervall negativ ist" - -#: neutron/openstack/common/periodic_task.py:125 -#, python-format -msgid "Skipping periodic task %(task)s because it is disabled" -msgstr "Überspringe periodische Aufgabe %(task)s weil sie deaktiviert ist" - -#: neutron/openstack/common/service.py:173 -#, python-format -msgid "Caught %s, exiting" -msgstr "%s abgefangen. Vorgang wird beendet" - -#: neutron/openstack/common/service.py:231 -msgid "Parent process has died unexpectedly, exiting" -msgstr "" -"Übergeordneter Prozess wurde unerwartet abgebrochen. 
Vorgang wird beendet" - -#: neutron/openstack/common/service.py:262 -#, python-format -msgid "Child caught %s, exiting" -msgstr "Untergeordnetes Element %s abgefangen; Vorgang wird beendet" - -#: neutron/openstack/common/service.py:301 -msgid "Forking too fast, sleeping" -msgstr "Verzweigung zu schnell; im Ruhemodus" - -#: neutron/openstack/common/service.py:320 -#, python-format -msgid "Started child %d" -msgstr "Untergeordnetes Element %d gestartet" - -#: neutron/openstack/common/service.py:330 -#, python-format -msgid "Starting %d workers" -msgstr "Starten von %d Workers" - -#: neutron/openstack/common/service.py:347 -#, python-format -msgid "Child %(pid)d killed by signal %(sig)d" -msgstr "Untergeordnetes Element %(pid)d durch Signal %(sig)d abgebrochen" - -#: neutron/openstack/common/service.py:351 -#, python-format -msgid "Child %(pid)s exited with status %(code)d" -msgstr "Untergeordnete %(pid)s mit Status %(code)d beendet" - -#: neutron/openstack/common/service.py:390 -#, python-format -msgid "Caught %s, stopping children" -msgstr "%s abgefangen, untergeordnete Elemente werden gestoppt" - -#: neutron/openstack/common/service.py:399 -msgid "Wait called after thread killed. Cleaning up." 
-msgstr "" - -#: neutron/openstack/common/service.py:415 -#, python-format -msgid "Waiting on %d children to exit" -msgstr "Warten auf Beenden von %d untergeordneten Elementen" - -#: neutron/plugins/brocade/NeutronPlugin.py:307 -#, python-format -msgid "Allocated vlan (%d) from the pool" -msgstr "Zugeordnetes VLAN (%d) aus dem Pool" - -#: neutron/plugins/cisco/models/virt_phy_sw_v2.py:117 -#, python-format -msgid "No %s Plugin loaded" -msgstr "Kein %s-Plug-in geladen" - -#: neutron/plugins/cisco/models/virt_phy_sw_v2.py:118 -#, python-format -msgid "%(plugin_key)s: %(function_name)s with args %(args)s ignored" -msgstr "%(plugin_key)s: %(function_name)s mit Argumenten %(args)s ignoriert" - -#: neutron/plugins/embrane/common/utils.py:44 -msgid "No ip allocation set" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api.py:77 -#, python-format -msgid "The IP addr of available SDN-VE controllers: %s" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api.py:80 -#, python-format -msgid "The SDN-VE controller IP address: %s" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api.py:236 -msgid "Bad resource for forming a list request" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api.py:246 -msgid "Bad resource for forming a show request" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api.py:256 -msgid "Bad resource for forming a create request" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api.py:268 -msgid "Bad resource for forming a update request" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api.py:279 -msgid "Bad resource for forming a delete request" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api.py:307 -#, python-format -msgid "Non matching tenant and network types: %(ttype)s %(ntype)s" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api_fake.py:32 -msgid "Fake SDNVE controller initialized" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api_fake.py:35 -msgid "Fake SDNVE controller: list" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api_fake.py:39 -msgid "Fake SDNVE controller: show" -msgstr "" - -#: 
neutron/plugins/ibm/sdnve_api_fake.py:43 -msgid "Fake SDNVE controller: create" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api_fake.py:47 -msgid "Fake SDNVE controller: update" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api_fake.py:51 -msgid "Fake SDNVE controller: delete" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api_fake.py:55 -msgid "Fake SDNVE controller: get tenant by id" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api_fake.py:59 -msgid "Fake SDNVE controller: check and create tenant" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api_fake.py:63 -msgid "Fake SDNVE controller: get controller" -msgstr "" - -#: neutron/plugins/ibm/sdnve_neutron_plugin.py:147 -msgid "Set a new controller if needed." -msgstr "" - -#: neutron/plugins/ibm/sdnve_neutron_plugin.py:153 -#, python-format -msgid "Set the controller to a new controller: %s" -msgstr "" - -#: neutron/plugins/ibm/agent/sdnve_neutron_agent.py:189 -#, python-format -msgid "" -"Mapping physical network %(physical_network)s to interface %(interface)s" -msgstr "" - -#: neutron/plugins/ibm/agent/sdnve_neutron_agent.py:220 -#, python-format -msgid "" -"Loop iteration exceeded interval (%(polling_interval)s vs. %(elapsed)s)!" -msgstr "" -"Schleifeniteration hat Intervall (%(polling_interval)s contra %(elapsed)s) " -"überschritten!" - -#: neutron/plugins/ibm/agent/sdnve_neutron_agent.py:234 -#, python-format -msgid "Controller IPs: %s" -msgstr "" - -#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:793 -#: neutron/plugins/oneconvergence/agent/nvsd_neutron_agent.py:89 -#: neutron/plugins/sriovnicagent/sriov_nic_agent.py:99 -#, python-format -msgid "RPC agent_id: %s" -msgstr "RPC-'agent_id': %s" - -#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:863 -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1155 -#: neutron/plugins/sriovnicagent/sriov_nic_agent.py:209 -#, python-format -msgid "Port %(device)s updated. Details: %(details)s" -msgstr "Port %(device)s aktualisiert. 
Details: %(details)s" - -#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:896 -#, python-format -msgid "Device %s not defined on plugin" -msgstr "Gerät %s nicht für Plug-in definiert" - -#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:903 -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1210 -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1227 -#, python-format -msgid "Attachment %s removed" -msgstr "Zusatzeinheit %s entfernt" - -#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:915 -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1239 -#: neutron/plugins/sriovnicagent/sriov_nic_agent.py:235 -#, python-format -msgid "Port %s updated." -msgstr "Port %s aktualisiert." - -#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:968 -msgid "LinuxBridge Agent RPC Daemon Started!" -msgstr "RPC-Dämon für Linux-Brückenagent gestartet!" - -#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:978 -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1429 -#: neutron/plugins/sriovnicagent/sriov_nic_agent.py:251 -msgid "Agent out of sync with plugin!" -msgstr "Agent nicht synchron mit Plug-in!" 
- -#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:1012 -#: neutron/plugins/ml2/drivers/mlnx/agent/eswitch_neutron_agent.py:43 -#, python-format -msgid "Interface mappings: %s" -msgstr "Schnittstellenzuordnungen: %s" - -#: neutron/plugins/ml2/db.py:60 -#, python-format -msgid "" -"Added segment %(id)s of type %(network_type)s for network %(network_id)s" -msgstr "" - -#: neutron/plugins/ml2/managers.py:43 -#, python-format -msgid "Configured type driver names: %s" -msgstr "" - -#: neutron/plugins/ml2/managers.py:48 -#, python-format -msgid "Loaded type driver names: %s" -msgstr "" - -#: neutron/plugins/ml2/managers.py:64 -#, python-format -msgid "Registered types: %s" -msgstr "" - -#: neutron/plugins/ml2/managers.py:75 -#, python-format -msgid "Tenant network_types: %s" -msgstr "" - -#: neutron/plugins/ml2/managers.py:161 -#, python-format -msgid "Initializing driver for type '%s'" -msgstr "" - -#: neutron/plugins/ml2/managers.py:270 -#, python-format -msgid "Configured mechanism driver names: %s" -msgstr "" - -#: neutron/plugins/ml2/managers.py:276 -#, python-format -msgid "Loaded mechanism driver names: %s" -msgstr "" - -#: neutron/plugins/ml2/managers.py:288 -#, python-format -msgid "Registered mechanism drivers: %s" -msgstr "" - -#: neutron/plugins/ml2/managers.py:293 -#, python-format -msgid "Initializing mechanism driver '%s'" -msgstr "" - -#: neutron/plugins/ml2/managers.py:699 -#, python-format -msgid "Configured extension driver names: %s" -msgstr "" - -#: neutron/plugins/ml2/managers.py:705 -#, python-format -msgid "Loaded extension driver names: %s" -msgstr "" - -#: neutron/plugins/ml2/managers.py:716 -#, python-format -msgid "Registered extension drivers: %s" -msgstr "" - -#: neutron/plugins/ml2/managers.py:722 -#, python-format -msgid "Initializing extension driver '%s'" -msgstr "" - -#: neutron/plugins/ml2/managers.py:730 -#, python-format -msgid "Got %(alias)s extension from driver '%(drv)s'" -msgstr "" - -#: 
neutron/plugins/ml2/managers.py:779 -#, python-format -msgid "Extended network dict for driver '%(drv)s'" -msgstr "" - -#: neutron/plugins/ml2/managers.py:786 -#, python-format -msgid "Extended subnet dict for driver '%(drv)s'" -msgstr "" - -#: neutron/plugins/ml2/managers.py:793 -#, python-format -msgid "Extended port dict for driver '%(drv)s'" -msgstr "" - -#: neutron/plugins/ml2/plugin.py:144 -msgid "Modular L2 Plugin initialization complete" -msgstr "" - -#: neutron/plugins/ml2/plugin.py:281 -#, python-format -msgid "Attempt %(count)s to bind port %(port)s" -msgstr "" - -#: neutron/plugins/ml2/plugin.py:678 -#, python-format -msgid "Port %s was deleted concurrently" -msgstr "" - -#: neutron/plugins/ml2/plugin.py:689 -#, python-format -msgid "Subnet %s was deleted concurrently" -msgstr "" - -#: neutron/plugins/ml2/plugin.py:1329 -#, python-format -msgid "" -"Binding info for port %s was not found, it might have been deleted already." -msgstr "" - -#: neutron/plugins/ml2/drivers/type_flat.py:72 -msgid "Arbitrary flat physical_network names allowed" -msgstr "" - -#: neutron/plugins/ml2/drivers/type_flat.py:78 -#, python-format -msgid "Allowable flat physical_network names: %s" -msgstr "" - -#: neutron/plugins/ml2/drivers/type_flat.py:85 -msgid "ML2 FlatTypeDriver initialization complete" -msgstr "" - -#: neutron/plugins/ml2/drivers/type_local.py:37 -msgid "ML2 LocalTypeDriver initialization complete" -msgstr "" - -#: neutron/plugins/ml2/drivers/type_tunnel.py:116 -#, python-format -msgid "%(type)s ID ranges: %(range)s" -msgstr "" - -#: neutron/plugins/ml2/drivers/type_vlan.py:99 -#, python-format -msgid "Network VLAN ranges: %s" -msgstr "Bereiche für Netz-VLAN: %s" - -#: neutron/plugins/ml2/drivers/type_vlan.py:166 -msgid "VlanTypeDriver initialization complete" -msgstr "" - -#: neutron/plugins/ml2/drivers/arista/mechanism_arista.py:104 -#, python-format -msgid "Network %s is not created as it is not found in Arista DB" -msgstr "" - -#: 
neutron/plugins/ml2/drivers/arista/mechanism_arista.py:117 -#, python-format -msgid "Network name changed to %s" -msgstr "" - -#: neutron/plugins/ml2/drivers/arista/mechanism_arista.py:147 -#, python-format -msgid "Network %s is not updated as it is not found in Arista DB" -msgstr "" - -#: neutron/plugins/ml2/drivers/arista/mechanism_arista.py:243 -#, python-format -msgid "VM %s is not created as it is not found in Arista DB" -msgstr "" - -#: neutron/plugins/ml2/drivers/arista/mechanism_arista.py:257 -#, python-format -msgid "Port name changed to %s" -msgstr "" - -#: neutron/plugins/ml2/drivers/arista/mechanism_arista.py:310 -#, python-format -msgid "VM %s is not updated as it is not found in Arista DB" -msgstr "" - -#: neutron/plugins/ml2/drivers/cisco/apic/apic_topology.py:78 -msgid "APIC service agent starting ..." -msgstr "" - -#: neutron/plugins/ml2/drivers/cisco/apic/apic_topology.py:95 -msgid "APIC service agent started" -msgstr "" - -#: neutron/plugins/ml2/drivers/cisco/apic/apic_topology.py:179 -#, python-format -msgid "APIC host agent: agent starting on %s" -msgstr "" - -#: neutron/plugins/ml2/drivers/cisco/apic/apic_topology.py:199 -#, python-format -msgid "APIC host agent: started on %s" -msgstr "" - -#: neutron/plugins/ml2/drivers/freescale/mechanism_fslsdn.py:40 -msgid "Initializing CRD client... " -msgstr "" - -#: neutron/plugins/ml2/drivers/mlnx/agent/eswitch_neutron_agent.py:54 -msgid "Agent initialised successfully, now running... " -msgstr "" - -#: neutron/plugins/ml2/extensions/port_security.py:33 -msgid "PortSecurityExtensionDriver initialization complete" -msgstr "" - -#: neutron/plugins/oneconvergence/agent/nvsd_neutron_agent.py:153 -msgid "NVSD Agent initialized successfully, now running... 
" -msgstr "" - -#: neutron/plugins/openvswitch/agent/ovs_dvr_neutron_agent.py:204 -#, python-format -msgid "L2 Agent operating in DVR Mode with MAC %s" -msgstr "" - -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:518 -#, python-format -msgid "Assigning %(vlan_id)s as local vlan for net-id=%(net_uuid)s" -msgstr "" -"Zuweisung von %(vlan_id)s als lokale VLAN-Adresse für net-id=%(net_uuid)s" - -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:619 -#, python-format -msgid "Reclaiming vlan = %(vlan_id)s from net-id = %(net_uuid)s" -msgstr "Zurückfordern von vlan = %(vlan_id)s von net-id = %(net_uuid)s" - -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:714 -#, python-format -msgid "port_unbound(): net_uuid %s not in local_vlan_map" -msgstr "" - -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:783 -#, python-format -msgid "Adding %s to list of bridges." -msgstr "" - -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:926 -#, python-format -msgid "Mapping physical network %(physical_network)s to bridge %(bridge)s" -msgstr "" -"Zuordnung von physischem Netz %(physical_network)s zu Brücke %(bridge)s" - -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1035 -#, python-format -msgid "Port '%(port_name)s' has lost its vlan tag '%(vlan_tag)d'!" -msgstr "" - -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1149 -#, python-format -msgid "" -"Port %s was not found on the integration bridge and will therefore not be " -"processed" -msgstr "" - -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1179 -#, python-format -msgid "Configuration for device %s completed." -msgstr "" - -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1198 -#, python-format -msgid "Ancillary Port %s added" -msgstr "" - -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1457 -msgid "Agent tunnel out of sync with plugin!" -msgstr "Agententunnel nicht synchron mit Plug-in!" 
- -#: neutron/plugins/sriovnicagent/sriov_nic_agent.py:191 -#, python-format -msgid "No device with MAC %s defined on agent." -msgstr "" - -#: neutron/plugins/sriovnicagent/sriov_nic_agent.py:216 -#, python-format -msgid "Device with MAC %s not defined on plugin" -msgstr "" - -#: neutron/plugins/sriovnicagent/sriov_nic_agent.py:223 -#, python-format -msgid "Removing device with mac_address %s" -msgstr "" - -#: neutron/plugins/sriovnicagent/sriov_nic_agent.py:244 -msgid "SRIOV NIC Agent RPC Daemon Started!" -msgstr "" - -#: neutron/plugins/sriovnicagent/sriov_nic_agent.py:333 -#, python-format -msgid "Physical Devices mappings: %s" -msgstr "" - -#: neutron/plugins/sriovnicagent/sriov_nic_agent.py:334 -#, python-format -msgid "Exclude Devices: %s" -msgstr "" - -#: neutron/scheduler/dhcp_agent_scheduler.py:110 -#, python-format -msgid "Agent %s already present" -msgstr "" - -#: neutron/server/__init__.py:50 -msgid "RPC was already started in parent process by plugin." -msgstr "" - -#: neutron/services/service_base.py:99 -#, python-format -msgid "Default provider is not specified for service type %s" -msgstr "" - -#: neutron/services/l3_router/l3_arista.py:247 -msgid "Syncing Neutron Router DB <-> EOS" -msgstr "" - -#: neutron/services/metering/agents/metering_agent.py:96 -#, python-format -msgid "Loading Metering driver %s" -msgstr "" - -#: neutron/services/metering/drivers/iptables/iptables_driver.py:89 -#, python-format -msgid "Loading interface driver %s" -msgstr "" diff --git a/neutron/locale/es/LC_MESSAGES/neutron-log-info.po b/neutron/locale/es/LC_MESSAGES/neutron-log-info.po deleted file mode 100644 index a8ef8fe790b..00000000000 --- a/neutron/locale/es/LC_MESSAGES/neutron-log-info.po +++ /dev/null @@ -1,948 +0,0 @@ -# Translations template for neutron. -# Copyright (C) 2015 ORGANIZATION -# This file is distributed under the same license as the neutron project. 
-# -# Translators: -msgid "" -msgstr "" -"Project-Id-Version: Neutron\n" -"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2015-04-03 06:13+0000\n" -"PO-Revision-Date: 2015-03-31 22:26+0000\n" -"Last-Translator: openstackjenkins \n" -"Language-Team: Spanish (http://www.transifex.com/projects/p/neutron/language/" -"es/)\n" -"Language: es\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"Generated-By: Babel 1.3\n" -"Plural-Forms: nplurals=2; plural=(n != 1);\n" - -#: neutron/manager.py:115 -#, python-format -msgid "Loading core plugin: %s" -msgstr "" - -#: neutron/manager.py:155 -#, python-format -msgid "Service %s is supported by the core plugin" -msgstr "" - -#: neutron/manager.py:173 -#, python-format -msgid "Loading Plugin: %s" -msgstr "Cargando complementos: %s" - -#: neutron/policy.py:114 -#, python-format -msgid "" -"Inserting policy:%(new_policy)s in place of deprecated policy:%(old_policy)s" -msgstr "" - -#: neutron/quota.py:215 -msgid "" -"ConfDriver is used as quota_driver because the loaded plugin does not " -"support 'quotas' table." -msgstr "" - -#: neutron/quota.py:220 -#, python-format -msgid "Loaded quota_driver: %s." -msgstr "" - -#: neutron/service.py:178 -#, python-format -msgid "Neutron service started, listening on %(host)s:%(port)s" -msgstr "" - -#: neutron/wsgi.py:779 -#, python-format -msgid "%(method)s %(url)s" -msgstr "%(method)s %(url)s" - -#: neutron/wsgi.py:796 -#, python-format -msgid "HTTP exception thrown: %s" -msgstr "Excepción de HTTP emitida: %s" - -#: neutron/wsgi.py:812 -#, python-format -msgid "%(url)s returned with HTTP %(status)d" -msgstr "Se ha devuelto %(url)s con HTTP %(status)d" - -#: neutron/wsgi.py:815 -#, python-format -msgid "%(url)s returned a fault: %(exception)s" -msgstr "%(url)s ha devuelto un error: %(exception)s" - -#: neutron/agent/securitygroups_rpc.py:82 -msgid "Disabled security-group extension." 
-msgstr "La extensión security-group se ha inhabilitado." - -#: neutron/agent/securitygroups_rpc.py:84 -msgid "Disabled allowed-address-pairs extension." -msgstr "" - -#: neutron/agent/securitygroups_rpc.py:214 -#, python-format -msgid "" -"Skipping method %s as firewall is disabled or configured as " -"NoopFirewallDriver." -msgstr "" - -#: neutron/agent/securitygroups_rpc.py:226 -#, python-format -msgid "Preparing filters for devices %s" -msgstr "Preparando filtros para dispositivos %s" - -#: neutron/agent/securitygroups_rpc.py:256 -#, python-format -msgid "Security group rule updated %r" -msgstr "Se ha actualizado la regla de grupo de seguridad %r" - -#: neutron/agent/securitygroups_rpc.py:263 -#, python-format -msgid "Security group member updated %r" -msgstr "Se ha actualizado el miembro de grupo de seguridad %r" - -#: neutron/agent/securitygroups_rpc.py:285 -msgid "Provider rule updated" -msgstr "Se ha actualizado regla de proveedor" - -#: neutron/agent/securitygroups_rpc.py:297 -#, python-format -msgid "Remove device filter for %r" -msgstr "Eliminar filtro de dispositivo para %r" - -#: neutron/agent/securitygroups_rpc.py:307 -msgid "Refresh firewall rules" -msgstr "Renovar reglas de cortafuegos" - -#: neutron/agent/securitygroups_rpc.py:311 -msgid "No ports here to refresh firewall" -msgstr "" - -#: neutron/agent/common/ovs_lib.py:393 -#, python-format -msgid "Port %(port_id)s not present in bridge %(br_name)s" -msgstr "" - -#: neutron/agent/dhcp/agent.py:93 neutron/agent/dhcp/agent.py:589 -msgid "DHCP agent started" -msgstr "Se ha iniciado al agente DHCP" - -#: neutron/agent/dhcp/agent.py:144 -msgid "Synchronizing state" -msgstr "Sincronizando estado" - -#: neutron/agent/dhcp/agent.py:165 -msgid "Synchronizing state complete" -msgstr "" - -#: neutron/agent/dhcp/agent.py:586 neutron/agent/l3/agent.py:622 -#: neutron/services/metering/agents/metering_agent.py:286 -#, python-format -msgid "agent_updated by server side %s!" 
-msgstr "agent_updated por el lado del servidor %s!" - -#: neutron/agent/l3/agent.py:551 -msgid "L3 agent started" -msgstr "Se ha iniciado al agente L3" - -#: neutron/agent/l3/ha.py:114 -#, python-format -msgid "Router %(router_id)s transitioned to %(state)s" -msgstr "" - -#: neutron/agent/l3/ha.py:124 -#, python-format -msgid "" -"Router %s is not managed by this agent. It was possibly deleted concurrently." -msgstr "" - -#: neutron/agent/linux/daemon.py:102 -#, python-format -msgid "Process runs with uid/gid: %(uid)s/%(gid)s" -msgstr "" - -#: neutron/agent/linux/dhcp.py:656 -#, python-format -msgid "" -"Cannot apply dhcp option %(opt)s because it's ip_version %(version)d is not " -"in port's address IP versions" -msgstr "" - -#: neutron/agent/linux/interface.py:268 neutron/agent/linux/interface.py:319 -#: neutron/agent/linux/interface.py:377 neutron/agent/linux/interface.py:420 -#, python-format -msgid "Device %s already exists" -msgstr "El dispositivo %s ya existe" - -#: neutron/agent/linux/iptables_firewall.py:114 -#, python-format -msgid "Attempted to update port filter which is not filtered %s" -msgstr "Se ha intentado actualizar el filtro de puerto que no está filtrado %s" - -#: neutron/agent/linux/iptables_firewall.py:125 -#, python-format -msgid "Attempted to remove port filter which is not filtered %r" -msgstr "Se ha intentado eliminar el filtro de puerto que no está filtrado %r" - -#: neutron/api/extensions.py:404 -msgid "Initializing extension manager." -msgstr "Inicializando gestor de ampliación." 
- -#: neutron/api/extensions.py:562 -#, python-format -msgid "Loaded extension: %s" -msgstr "Ampliación cargada: %s" - -#: neutron/api/v2/base.py:93 -msgid "" -"Allow sorting is enabled because native pagination requires native sorting" -msgstr "" -"Permitir ordenación está habilitado porque la paginación nativa requiere " -"ordenación nativa" - -#: neutron/api/v2/resource.py:94 -#, python-format -msgid "%(action)s failed (client error): %(exc)s" -msgstr "" - -#: neutron/callbacks/manager.py:135 -#, python-format -msgid "Notify callbacks for %(resource)s, %(event)s" -msgstr "" - -#: neutron/callbacks/manager.py:142 -#, python-format -msgid "Calling callback %s" -msgstr "" - -#: neutron/cmd/ovs_cleanup.py:73 -#, python-format -msgid "Deleting port: %s" -msgstr "" - -#: neutron/cmd/ovs_cleanup.py:103 -#, python-format -msgid "Cleaning bridge: %s" -msgstr "" - -#: neutron/cmd/ovs_cleanup.py:110 -msgid "OVS cleanup completed successfully" -msgstr "La limpieza de OVS se ha completado satisfactoriamente" - -#: neutron/cmd/eventlet/plugins/hyperv_neutron_agent.py:43 -#: neutron/plugins/ibm/agent/sdnve_neutron_agent.py:261 -#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:1017 -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1634 -#: neutron/plugins/sriovnicagent/sriov_nic_agent.py:345 -msgid "Agent initialized successfully, now running... " -msgstr "" -"El agente se ha inicializado satisfactoriamente, ahora se está ejecutando... " - -#: neutron/common/config.py:204 -msgid "Logging enabled!" -msgstr "Registro habilitado." - -#: neutron/common/config.py:205 -#, python-format -msgid "%(prog)s version %(version)s" -msgstr "" - -#: neutron/common/config.py:224 -#, python-format -msgid "Config paste file: %s" -msgstr "Archivo de configuración de pegar: %s" - -#: neutron/common/ipv6_utils.py:63 -msgid "IPv6 is not enabled on this system." 
-msgstr "" - -#: neutron/db/agentschedulers_db.py:161 -msgid "" -"Skipping periodic DHCP agent status check because automatic network " -"rescheduling is disabled." -msgstr "" - -#: neutron/db/agentschedulers_db.py:196 -#, python-format -msgid "Scheduling unhosted network %s" -msgstr "" - -#: neutron/db/agentschedulers_db.py:203 -#, python-format -msgid "" -"Failed to schedule network %s, no eligible agents or it might be already " -"scheduled by another server" -msgstr "" - -#: neutron/db/agentschedulers_db.py:211 -#, python-format -msgid "Adding network %(net)s to agent %(agent)%s on host %(host)s" -msgstr "" - -#: neutron/db/db_base_plugin_v2.py:630 -#, python-format -msgid "" -"Validation for CIDR: %(new_cidr)s failed - overlaps with subnet " -"%(subnet_id)s (CIDR: %(cidr)s)" -msgstr "" -"Se ha encontrado un error en validación para CIDR: %(new_cidr)s; se solapa " -"con la subred %(subnet_id)s (CIDR: %(cidr)s)" - -#: neutron/db/db_base_plugin_v2.py:657 -#, python-format -msgid "Found invalid IP address in pool: %(start)s - %(end)s:" -msgstr "" -"Se ha encontrado una dirección IP no válida en la agrupación: %(start)s - " -"%(end)s:" - -#: neutron/db/db_base_plugin_v2.py:664 -msgid "Specified IP addresses do not match the subnet IP version" -msgstr "" -"Las direcciones IP especificadas no coinciden con la versión de IP de subred " - -#: neutron/db/db_base_plugin_v2.py:668 -#, python-format -msgid "Start IP (%(start)s) is greater than end IP (%(end)s)" -msgstr "" -"La IP de inicio (%(start)s) es mayor que la IP de finalización (%(end)s)" - -#: neutron/db/db_base_plugin_v2.py:673 -#, python-format -msgid "Found pool larger than subnet CIDR:%(start)s - %(end)s" -msgstr "" -"Se ha encontrado una agrupación mayor que el CIDR de subred: %(start)s - " -"%(end)s" - -#: neutron/db/db_base_plugin_v2.py:697 -#, python-format -msgid "Found overlapping ranges: %(l_range)s and %(r_range)s" -msgstr "Rangos de solapamiento encontrados: %(l_range)s y %(r_range)s" - -#: 
neutron/db/db_base_plugin_v2.py:1507 -#, python-format -msgid "Found IP allocation %(alloc)s on subnet %(subnet)s, cannot delete" -msgstr "" - -#: neutron/db/l3_agentschedulers_db.py:78 -msgid "" -"Skipping period L3 agent status check because automatic router rescheduling " -"is disabled." -msgstr "" - -#: neutron/db/l3_db.py:1114 -#, python-format -msgid "Skipping port %s as no IP is configure on it" -msgstr "Saltando el puerto %s, ya que no hay ninguna IP configurada en él" - -#: neutron/db/l3_dvr_db.py:86 -#, python-format -msgid "Centralizing distributed router %s is not supported" -msgstr "" - -#: neutron/db/l3_dvr_db.py:535 -#, python-format -msgid "Agent Gateway port does not exist, so create one: %s" -msgstr "" - -#: neutron/db/l3_dvr_db.py:619 -#, python-format -msgid "SNAT interface port list does not exist, so create one: %s" -msgstr "" - -#: neutron/db/l3_dvrscheduler_db.py:312 -msgid "SNAT already bound to a service node." -msgstr "" - -#: neutron/db/l3_hamode_db.py:188 -#, python-format -msgid "" -"Attempt %(count)s to allocate a VRID in the network %(network)s for the " -"router %(router)s" -msgstr "" - -#: neutron/db/l3_hamode_db.py:271 -#, python-format -msgid "" -"Number of available agents lower than max_l3_agents_per_router. 
L3 agents " -"available: %s" -msgstr "" - -#: neutron/db/migration/alembic_migrations/heal_script.py:221 -#, python-format -msgid "Table %(old_t)r was renamed to %(new_t)r" -msgstr "" - -#: neutron/debug/commands.py:107 -#, python-format -msgid "%d probe(s) deleted" -msgstr "" - -#: neutron/notifiers/nova.py:266 -#, python-format -msgid "Nova event response: %s" -msgstr "" - -#: neutron/openstack/common/eventlet_backdoor.py:146 -#, python-format -msgid "Eventlet backdoor listening on %(port)s for process %(pid)d" -msgstr "Eventlet oculto escuchando en %(port)s para el proceso %(pid)d" - -#: neutron/openstack/common/periodic_task.py:120 -#, python-format -msgid "Skipping periodic task %(task)s because its interval is negative" -msgstr "Omitiendo la tarea periódica %(task)s porque el intervalo es negativo" - -#: neutron/openstack/common/periodic_task.py:125 -#, python-format -msgid "Skipping periodic task %(task)s because it is disabled" -msgstr "Omitiendo la tarea periódica %(task)s porque está inhabilitada" - -#: neutron/openstack/common/service.py:173 -#, python-format -msgid "Caught %s, exiting" -msgstr "Se ha captado %s, saliendo" - -#: neutron/openstack/common/service.py:231 -msgid "Parent process has died unexpectedly, exiting" -msgstr "El proceso padre se ha detenido inesperadamente, saliendo" - -#: neutron/openstack/common/service.py:262 -#, python-format -msgid "Child caught %s, exiting" -msgstr "Hijo captado %s, saliendo" - -#: neutron/openstack/common/service.py:301 -msgid "Forking too fast, sleeping" -msgstr "Bifurcación demasiado rápida, en reposo" - -#: neutron/openstack/common/service.py:320 -#, python-format -msgid "Started child %d" -msgstr "Se ha iniciado el hijo %d" - -#: neutron/openstack/common/service.py:330 -#, python-format -msgid "Starting %d workers" -msgstr "Iniciando %d trabajadores" - -#: neutron/openstack/common/service.py:347 -#, python-format -msgid "Child %(pid)d killed by signal %(sig)d" -msgstr "Hijo %(pid)d matado por señal 
%(sig)d" - -#: neutron/openstack/common/service.py:351 -#, python-format -msgid "Child %(pid)s exited with status %(code)d" -msgstr "El hijo %(pid)s ha salido con el estado %(code)d" - -#: neutron/openstack/common/service.py:390 -#, python-format -msgid "Caught %s, stopping children" -msgstr "Se ha captado %s, deteniendo hijos" - -#: neutron/openstack/common/service.py:399 -msgid "Wait called after thread killed. Cleaning up." -msgstr "" - -#: neutron/openstack/common/service.py:415 -#, python-format -msgid "Waiting on %d children to exit" -msgstr "En espera de %d hijos para salir" - -#: neutron/plugins/brocade/NeutronPlugin.py:307 -#, python-format -msgid "Allocated vlan (%d) from the pool" -msgstr "Vlan asignada (%d) de la agrupación" - -#: neutron/plugins/cisco/models/virt_phy_sw_v2.py:117 -#, python-format -msgid "No %s Plugin loaded" -msgstr "No se ha cargado ningún plug-in de %s" - -#: neutron/plugins/cisco/models/virt_phy_sw_v2.py:118 -#, python-format -msgid "%(plugin_key)s: %(function_name)s with args %(args)s ignored" -msgstr "" -"Se ha ignorado %(plugin_key)s: %(function_name)s con los argumentos %(args)s " - -#: neutron/plugins/embrane/common/utils.py:44 -msgid "No ip allocation set" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api.py:77 -#, python-format -msgid "The IP addr of available SDN-VE controllers: %s" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api.py:80 -#, python-format -msgid "The SDN-VE controller IP address: %s" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api.py:236 -msgid "Bad resource for forming a list request" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api.py:246 -msgid "Bad resource for forming a show request" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api.py:256 -msgid "Bad resource for forming a create request" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api.py:268 -msgid "Bad resource for forming a update request" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api.py:279 -msgid "Bad resource for forming a delete request" -msgstr "" - -#: 
neutron/plugins/ibm/sdnve_api.py:307 -#, python-format -msgid "Non matching tenant and network types: %(ttype)s %(ntype)s" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api_fake.py:32 -msgid "Fake SDNVE controller initialized" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api_fake.py:35 -msgid "Fake SDNVE controller: list" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api_fake.py:39 -msgid "Fake SDNVE controller: show" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api_fake.py:43 -msgid "Fake SDNVE controller: create" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api_fake.py:47 -msgid "Fake SDNVE controller: update" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api_fake.py:51 -msgid "Fake SDNVE controller: delete" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api_fake.py:55 -msgid "Fake SDNVE controller: get tenant by id" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api_fake.py:59 -msgid "Fake SDNVE controller: check and create tenant" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api_fake.py:63 -msgid "Fake SDNVE controller: get controller" -msgstr "" - -#: neutron/plugins/ibm/sdnve_neutron_plugin.py:147 -msgid "Set a new controller if needed." -msgstr "" - -#: neutron/plugins/ibm/sdnve_neutron_plugin.py:153 -#, python-format -msgid "Set the controller to a new controller: %s" -msgstr "" - -#: neutron/plugins/ibm/agent/sdnve_neutron_agent.py:189 -#, python-format -msgid "" -"Mapping physical network %(physical_network)s to interface %(interface)s" -msgstr "" - -#: neutron/plugins/ibm/agent/sdnve_neutron_agent.py:220 -#, python-format -msgid "" -"Loop iteration exceeded interval (%(polling_interval)s vs. %(elapsed)s)!" -msgstr "" -"La iteración de bucle ha superado el intervalo (%(polling_interval)s frente " -"a %(elapsed)s)." 
- -#: neutron/plugins/ibm/agent/sdnve_neutron_agent.py:234 -#, python-format -msgid "Controller IPs: %s" -msgstr "" - -#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:793 -#: neutron/plugins/oneconvergence/agent/nvsd_neutron_agent.py:89 -#: neutron/plugins/sriovnicagent/sriov_nic_agent.py:99 -#, python-format -msgid "RPC agent_id: %s" -msgstr "agent_id de RPC: %s" - -#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:863 -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1155 -#: neutron/plugins/sriovnicagent/sriov_nic_agent.py:209 -#, python-format -msgid "Port %(device)s updated. Details: %(details)s" -msgstr "Se ha actualizado el puerto %(device)s. Detalles: %(details)s" - -#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:896 -#, python-format -msgid "Device %s not defined on plugin" -msgstr "El dispositivo %s no está definido en el plug-in" - -#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:903 -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1210 -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1227 -#, python-format -msgid "Attachment %s removed" -msgstr "Se ha eliminado el adjunto %s" - -#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:915 -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1239 -#: neutron/plugins/sriovnicagent/sriov_nic_agent.py:235 -#, python-format -msgid "Port %s updated." -msgstr "El puerto %s se ha actualizado." - -#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:968 -msgid "LinuxBridge Agent RPC Daemon Started!" -msgstr "Se ha iniciado el daemon RPC de agente de LinuxBridge." - -#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:978 -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1429 -#: neutron/plugins/sriovnicagent/sriov_nic_agent.py:251 -msgid "Agent out of sync with plugin!" -msgstr "El agente está fuera de sincronización con el plug-in." 
- -#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:1012 -#: neutron/plugins/ml2/drivers/mlnx/agent/eswitch_neutron_agent.py:43 -#, python-format -msgid "Interface mappings: %s" -msgstr "Correlaciones de interfaz: %s" - -#: neutron/plugins/ml2/db.py:60 -#, python-format -msgid "" -"Added segment %(id)s of type %(network_type)s for network %(network_id)s" -msgstr "" - -#: neutron/plugins/ml2/managers.py:43 -#, python-format -msgid "Configured type driver names: %s" -msgstr "" - -#: neutron/plugins/ml2/managers.py:48 -#, python-format -msgid "Loaded type driver names: %s" -msgstr "" - -#: neutron/plugins/ml2/managers.py:64 -#, python-format -msgid "Registered types: %s" -msgstr "" - -#: neutron/plugins/ml2/managers.py:75 -#, python-format -msgid "Tenant network_types: %s" -msgstr "" - -#: neutron/plugins/ml2/managers.py:161 -#, python-format -msgid "Initializing driver for type '%s'" -msgstr "" - -#: neutron/plugins/ml2/managers.py:270 -#, python-format -msgid "Configured mechanism driver names: %s" -msgstr "" - -#: neutron/plugins/ml2/managers.py:276 -#, python-format -msgid "Loaded mechanism driver names: %s" -msgstr "" - -#: neutron/plugins/ml2/managers.py:288 -#, python-format -msgid "Registered mechanism drivers: %s" -msgstr "" - -#: neutron/plugins/ml2/managers.py:293 -#, python-format -msgid "Initializing mechanism driver '%s'" -msgstr "" - -#: neutron/plugins/ml2/managers.py:699 -#, python-format -msgid "Configured extension driver names: %s" -msgstr "" - -#: neutron/plugins/ml2/managers.py:705 -#, python-format -msgid "Loaded extension driver names: %s" -msgstr "" - -#: neutron/plugins/ml2/managers.py:716 -#, python-format -msgid "Registered extension drivers: %s" -msgstr "" - -#: neutron/plugins/ml2/managers.py:722 -#, python-format -msgid "Initializing extension driver '%s'" -msgstr "" - -#: neutron/plugins/ml2/managers.py:730 -#, python-format -msgid "Got %(alias)s extension from driver '%(drv)s'" -msgstr "" - -#: 
neutron/plugins/ml2/managers.py:779 -#, python-format -msgid "Extended network dict for driver '%(drv)s'" -msgstr "" - -#: neutron/plugins/ml2/managers.py:786 -#, python-format -msgid "Extended subnet dict for driver '%(drv)s'" -msgstr "" - -#: neutron/plugins/ml2/managers.py:793 -#, python-format -msgid "Extended port dict for driver '%(drv)s'" -msgstr "" - -#: neutron/plugins/ml2/plugin.py:144 -msgid "Modular L2 Plugin initialization complete" -msgstr "" - -#: neutron/plugins/ml2/plugin.py:281 -#, python-format -msgid "Attempt %(count)s to bind port %(port)s" -msgstr "" - -#: neutron/plugins/ml2/plugin.py:678 -#, python-format -msgid "Port %s was deleted concurrently" -msgstr "" - -#: neutron/plugins/ml2/plugin.py:689 -#, python-format -msgid "Subnet %s was deleted concurrently" -msgstr "" - -#: neutron/plugins/ml2/plugin.py:1329 -#, python-format -msgid "" -"Binding info for port %s was not found, it might have been deleted already." -msgstr "" - -#: neutron/plugins/ml2/drivers/type_flat.py:72 -msgid "Arbitrary flat physical_network names allowed" -msgstr "" - -#: neutron/plugins/ml2/drivers/type_flat.py:78 -#, python-format -msgid "Allowable flat physical_network names: %s" -msgstr "" - -#: neutron/plugins/ml2/drivers/type_flat.py:85 -msgid "ML2 FlatTypeDriver initialization complete" -msgstr "" - -#: neutron/plugins/ml2/drivers/type_local.py:37 -msgid "ML2 LocalTypeDriver initialization complete" -msgstr "" - -#: neutron/plugins/ml2/drivers/type_tunnel.py:116 -#, python-format -msgid "%(type)s ID ranges: %(range)s" -msgstr "" - -#: neutron/plugins/ml2/drivers/type_vlan.py:99 -#, python-format -msgid "Network VLAN ranges: %s" -msgstr "Rangos de VLAN de red: %s" - -#: neutron/plugins/ml2/drivers/type_vlan.py:166 -msgid "VlanTypeDriver initialization complete" -msgstr "" - -#: neutron/plugins/ml2/drivers/arista/mechanism_arista.py:104 -#, python-format -msgid "Network %s is not created as it is not found in Arista DB" -msgstr "" - -#: 
neutron/plugins/ml2/drivers/arista/mechanism_arista.py:117 -#, python-format -msgid "Network name changed to %s" -msgstr "" - -#: neutron/plugins/ml2/drivers/arista/mechanism_arista.py:147 -#, python-format -msgid "Network %s is not updated as it is not found in Arista DB" -msgstr "" - -#: neutron/plugins/ml2/drivers/arista/mechanism_arista.py:243 -#, python-format -msgid "VM %s is not created as it is not found in Arista DB" -msgstr "" - -#: neutron/plugins/ml2/drivers/arista/mechanism_arista.py:257 -#, python-format -msgid "Port name changed to %s" -msgstr "" - -#: neutron/plugins/ml2/drivers/arista/mechanism_arista.py:310 -#, python-format -msgid "VM %s is not updated as it is not found in Arista DB" -msgstr "" - -#: neutron/plugins/ml2/drivers/cisco/apic/apic_topology.py:78 -msgid "APIC service agent starting ..." -msgstr "" - -#: neutron/plugins/ml2/drivers/cisco/apic/apic_topology.py:95 -msgid "APIC service agent started" -msgstr "" - -#: neutron/plugins/ml2/drivers/cisco/apic/apic_topology.py:179 -#, python-format -msgid "APIC host agent: agent starting on %s" -msgstr "" - -#: neutron/plugins/ml2/drivers/cisco/apic/apic_topology.py:199 -#, python-format -msgid "APIC host agent: started on %s" -msgstr "" - -#: neutron/plugins/ml2/drivers/freescale/mechanism_fslsdn.py:40 -msgid "Initializing CRD client... " -msgstr "" - -#: neutron/plugins/ml2/drivers/mlnx/agent/eswitch_neutron_agent.py:54 -msgid "Agent initialised successfully, now running... " -msgstr "" - -#: neutron/plugins/ml2/extensions/port_security.py:33 -msgid "PortSecurityExtensionDriver initialization complete" -msgstr "" - -#: neutron/plugins/oneconvergence/agent/nvsd_neutron_agent.py:153 -msgid "NVSD Agent initialized successfully, now running... 
" -msgstr "" - -#: neutron/plugins/openvswitch/agent/ovs_dvr_neutron_agent.py:204 -#, python-format -msgid "L2 Agent operating in DVR Mode with MAC %s" -msgstr "" - -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:518 -#, python-format -msgid "Assigning %(vlan_id)s as local vlan for net-id=%(net_uuid)s" -msgstr "Asignando %(vlan_id)s como vlan local para net-id=%(net_uuid)s" - -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:619 -#, python-format -msgid "Reclaiming vlan = %(vlan_id)s from net-id = %(net_uuid)s" -msgstr "Reclamando vlan = %(vlan_id)s de net-id = %(net_uuid)s" - -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:714 -#, python-format -msgid "port_unbound(): net_uuid %s not in local_vlan_map" -msgstr "" - -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:783 -#, python-format -msgid "Adding %s to list of bridges." -msgstr "" - -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:926 -#, python-format -msgid "Mapping physical network %(physical_network)s to bridge %(bridge)s" -msgstr "" -"Correlacionando la red física %(physical_network)s con el puente %(bridge)s" - -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1035 -#, python-format -msgid "Port '%(port_name)s' has lost its vlan tag '%(vlan_tag)d'!" -msgstr "" - -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1149 -#, python-format -msgid "" -"Port %s was not found on the integration bridge and will therefore not be " -"processed" -msgstr "" - -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1179 -#, python-format -msgid "Configuration for device %s completed." -msgstr "" - -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1198 -#, python-format -msgid "Ancillary Port %s added" -msgstr "" - -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1457 -msgid "Agent tunnel out of sync with plugin!" -msgstr "Túnel de agente fuera de sincronización con el plug-in. 
" - -#: neutron/plugins/sriovnicagent/sriov_nic_agent.py:191 -#, python-format -msgid "No device with MAC %s defined on agent." -msgstr "" - -#: neutron/plugins/sriovnicagent/sriov_nic_agent.py:216 -#, python-format -msgid "Device with MAC %s not defined on plugin" -msgstr "" - -#: neutron/plugins/sriovnicagent/sriov_nic_agent.py:223 -#, python-format -msgid "Removing device with mac_address %s" -msgstr "" - -#: neutron/plugins/sriovnicagent/sriov_nic_agent.py:244 -msgid "SRIOV NIC Agent RPC Daemon Started!" -msgstr "" - -#: neutron/plugins/sriovnicagent/sriov_nic_agent.py:333 -#, python-format -msgid "Physical Devices mappings: %s" -msgstr "" - -#: neutron/plugins/sriovnicagent/sriov_nic_agent.py:334 -#, python-format -msgid "Exclude Devices: %s" -msgstr "" - -#: neutron/scheduler/dhcp_agent_scheduler.py:110 -#, python-format -msgid "Agent %s already present" -msgstr "" - -#: neutron/server/__init__.py:50 -msgid "RPC was already started in parent process by plugin." -msgstr "" - -#: neutron/services/service_base.py:99 -#, python-format -msgid "Default provider is not specified for service type %s" -msgstr "" - -#: neutron/services/l3_router/l3_arista.py:247 -msgid "Syncing Neutron Router DB <-> EOS" -msgstr "" - -#: neutron/services/metering/agents/metering_agent.py:96 -#, python-format -msgid "Loading Metering driver %s" -msgstr "" - -#: neutron/services/metering/drivers/iptables/iptables_driver.py:89 -#, python-format -msgid "Loading interface driver %s" -msgstr "" diff --git a/neutron/locale/fr/LC_MESSAGES/neutron-log-info.po b/neutron/locale/fr/LC_MESSAGES/neutron-log-info.po deleted file mode 100644 index 51ecece6d0f..00000000000 --- a/neutron/locale/fr/LC_MESSAGES/neutron-log-info.po +++ /dev/null @@ -1,954 +0,0 @@ -# Translations template for neutron. -# Copyright (C) 2015 ORGANIZATION -# This file is distributed under the same license as the neutron project. 
-# -# Translators: -# Maxime COQUEREL , 2014 -# Patte D , 2015 -msgid "" -msgstr "" -"Project-Id-Version: Neutron\n" -"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2015-04-03 06:13+0000\n" -"PO-Revision-Date: 2015-04-01 13:11+0000\n" -"Last-Translator: Patte D \n" -"Language-Team: French (http://www.transifex.com/projects/p/neutron/language/" -"fr/)\n" -"Language: fr\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"Generated-By: Babel 1.3\n" -"Plural-Forms: nplurals=2; plural=(n > 1);\n" - -#: neutron/manager.py:115 -#, python-format -msgid "Loading core plugin: %s" -msgstr "Chargement du plugin core: %s" - -#: neutron/manager.py:155 -#, python-format -msgid "Service %s is supported by the core plugin" -msgstr "Le service %s est supporté par le core plugin" - -#: neutron/manager.py:173 -#, python-format -msgid "Loading Plugin: %s" -msgstr "Chargement du plug-in : %s" - -#: neutron/policy.py:114 -#, python-format -msgid "" -"Inserting policy:%(new_policy)s in place of deprecated policy:%(old_policy)s" -msgstr "" - -#: neutron/quota.py:215 -msgid "" -"ConfDriver is used as quota_driver because the loaded plugin does not " -"support 'quotas' table." -msgstr "" - -#: neutron/quota.py:220 -#, python-format -msgid "Loaded quota_driver: %s." -msgstr "Chargement quota_driver: %s." 
- -#: neutron/service.py:178 -#, python-format -msgid "Neutron service started, listening on %(host)s:%(port)s" -msgstr "Service Neutron démarré, en écoute sur %(host)s:%(port)s" - -#: neutron/wsgi.py:779 -#, python-format -msgid "%(method)s %(url)s" -msgstr "%(method)s %(url)s" - -#: neutron/wsgi.py:796 -#, python-format -msgid "HTTP exception thrown: %s" -msgstr "Exception HTTP générée : %s" - -#: neutron/wsgi.py:812 -#, python-format -msgid "%(url)s returned with HTTP %(status)d" -msgstr "%(url)s retourné avec HTTP %(status)d" - -#: neutron/wsgi.py:815 -#, python-format -msgid "%(url)s returned a fault: %(exception)s" -msgstr "%(url)s a retourné une erreur : %(exception)s." - -#: neutron/agent/securitygroups_rpc.py:82 -msgid "Disabled security-group extension." -msgstr "Extension du groupe de sécurité désactivée." - -#: neutron/agent/securitygroups_rpc.py:84 -msgid "Disabled allowed-address-pairs extension." -msgstr "" - -#: neutron/agent/securitygroups_rpc.py:214 -#, python-format -msgid "" -"Skipping method %s as firewall is disabled or configured as " -"NoopFirewallDriver." 
-msgstr "" - -#: neutron/agent/securitygroups_rpc.py:226 -#, python-format -msgid "Preparing filters for devices %s" -msgstr "Préparation des filtres pour les unités %s" - -#: neutron/agent/securitygroups_rpc.py:256 -#, python-format -msgid "Security group rule updated %r" -msgstr "Règle de groupe de sécurité mise à jour %r" - -#: neutron/agent/securitygroups_rpc.py:263 -#, python-format -msgid "Security group member updated %r" -msgstr "Membre de groupe de sécurité mis à jour %r" - -#: neutron/agent/securitygroups_rpc.py:285 -msgid "Provider rule updated" -msgstr "Règle de fournisseur mise à jour" - -#: neutron/agent/securitygroups_rpc.py:297 -#, python-format -msgid "Remove device filter for %r" -msgstr "Suppression du filtre d'unités pour %r" - -#: neutron/agent/securitygroups_rpc.py:307 -msgid "Refresh firewall rules" -msgstr "Régénération des règles de pare-feu" - -#: neutron/agent/securitygroups_rpc.py:311 -msgid "No ports here to refresh firewall" -msgstr "" - -#: neutron/agent/common/ovs_lib.py:393 -#, python-format -msgid "Port %(port_id)s not present in bridge %(br_name)s" -msgstr "Port %(port_id)s n'est pas présent dans le pont %(br_name)s" - -#: neutron/agent/dhcp/agent.py:93 neutron/agent/dhcp/agent.py:589 -msgid "DHCP agent started" -msgstr "Agent DHCP démarré" - -#: neutron/agent/dhcp/agent.py:144 -msgid "Synchronizing state" -msgstr "Etat de synchronisation" - -#: neutron/agent/dhcp/agent.py:165 -msgid "Synchronizing state complete" -msgstr "Etat de synchronisation complet" - -#: neutron/agent/dhcp/agent.py:586 neutron/agent/l3/agent.py:622 -#: neutron/services/metering/agents/metering_agent.py:286 -#, python-format -msgid "agent_updated by server side %s!" -msgstr "agent_updated au niveau du serveur %s !" 
- -#: neutron/agent/l3/agent.py:551 -msgid "L3 agent started" -msgstr "Agent de niveau 3 démarré" - -#: neutron/agent/l3/ha.py:114 -#, python-format -msgid "Router %(router_id)s transitioned to %(state)s" -msgstr "" - -#: neutron/agent/l3/ha.py:124 -#, python-format -msgid "" -"Router %s is not managed by this agent. It was possibly deleted concurrently." -msgstr "" - -#: neutron/agent/linux/daemon.py:102 -#, python-format -msgid "Process runs with uid/gid: %(uid)s/%(gid)s" -msgstr "Le processus est exécuté avec uid/gid: %(uid)s/%(gid)s" - -#: neutron/agent/linux/dhcp.py:656 -#, python-format -msgid "" -"Cannot apply dhcp option %(opt)s because it's ip_version %(version)d is not " -"in port's address IP versions" -msgstr "" - -#: neutron/agent/linux/interface.py:268 neutron/agent/linux/interface.py:319 -#: neutron/agent/linux/interface.py:377 neutron/agent/linux/interface.py:420 -#, python-format -msgid "Device %s already exists" -msgstr "L'unité %s existe déjà" - -#: neutron/agent/linux/iptables_firewall.py:114 -#, python-format -msgid "Attempted to update port filter which is not filtered %s" -msgstr "" -"Tentative effectuée de mise à jour du filtre de ports (sans filtrage %s)" - -#: neutron/agent/linux/iptables_firewall.py:125 -#, python-format -msgid "Attempted to remove port filter which is not filtered %r" -msgstr "" -"Tentative effectuée de suppression du filtre de ports (sans filtrage %r)" - -#: neutron/api/extensions.py:404 -msgid "Initializing extension manager." -msgstr "Initialisation du gestionnaire d'extension." 
- -#: neutron/api/extensions.py:562 -#, python-format -msgid "Loaded extension: %s" -msgstr "Extension chargée : %s" - -#: neutron/api/v2/base.py:93 -msgid "" -"Allow sorting is enabled because native pagination requires native sorting" -msgstr "" -"Autorisation de tri activée car la mise en page native nécessite le tri natif" - -#: neutron/api/v2/resource.py:94 -#, python-format -msgid "%(action)s failed (client error): %(exc)s" -msgstr "Échec %(action)s (Erreur client): %(exc)s" - -#: neutron/callbacks/manager.py:135 -#, python-format -msgid "Notify callbacks for %(resource)s, %(event)s" -msgstr "" - -#: neutron/callbacks/manager.py:142 -#, python-format -msgid "Calling callback %s" -msgstr "" - -#: neutron/cmd/ovs_cleanup.py:73 -#, python-format -msgid "Deleting port: %s" -msgstr "" - -#: neutron/cmd/ovs_cleanup.py:103 -#, python-format -msgid "Cleaning bridge: %s" -msgstr "" - -#: neutron/cmd/ovs_cleanup.py:110 -msgid "OVS cleanup completed successfully" -msgstr "Le nettoyage d'OVS s'est terminé avec succès." - -#: neutron/cmd/eventlet/plugins/hyperv_neutron_agent.py:43 -#: neutron/plugins/ibm/agent/sdnve_neutron_agent.py:261 -#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:1017 -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1634 -#: neutron/plugins/sriovnicagent/sriov_nic_agent.py:345 -msgid "Agent initialized successfully, now running... " -msgstr "Agent initialisé avec succès, en cours d'exécution... " - -#: neutron/common/config.py:204 -msgid "Logging enabled!" -msgstr "Consignation activée !" - -#: neutron/common/config.py:205 -#, python-format -msgid "%(prog)s version %(version)s" -msgstr "" - -#: neutron/common/config.py:224 -#, python-format -msgid "Config paste file: %s" -msgstr "Config du fichier de collage : %s" - -#: neutron/common/ipv6_utils.py:63 -msgid "IPv6 is not enabled on this system." -msgstr "IPv6 n'est pas activé sur le système." 
- -#: neutron/db/agentschedulers_db.py:161 -msgid "" -"Skipping periodic DHCP agent status check because automatic network " -"rescheduling is disabled." -msgstr "" - -#: neutron/db/agentschedulers_db.py:196 -#, python-format -msgid "Scheduling unhosted network %s" -msgstr "" - -#: neutron/db/agentschedulers_db.py:203 -#, python-format -msgid "" -"Failed to schedule network %s, no eligible agents or it might be already " -"scheduled by another server" -msgstr "" - -#: neutron/db/agentschedulers_db.py:211 -#, python-format -msgid "Adding network %(net)s to agent %(agent)%s on host %(host)s" -msgstr "" - -#: neutron/db/db_base_plugin_v2.py:630 -#, python-format -msgid "" -"Validation for CIDR: %(new_cidr)s failed - overlaps with subnet " -"%(subnet_id)s (CIDR: %(cidr)s)" -msgstr "" -"La validation du routage CIDR %(new_cidr)s a échoué : il chevauche le sous-" -"réseau %(subnet_id)s (CIDR : %(cidr)s) " - -#: neutron/db/db_base_plugin_v2.py:657 -#, python-format -msgid "Found invalid IP address in pool: %(start)s - %(end)s:" -msgstr "Adresse IP non valide trouvée dans le pool : %(start)s - %(end)s :" - -#: neutron/db/db_base_plugin_v2.py:664 -msgid "Specified IP addresses do not match the subnet IP version" -msgstr "" -"Les adresses IP spécifiées ne correspondent à la version IP du sous-réseau" - -#: neutron/db/db_base_plugin_v2.py:668 -#, python-format -msgid "Start IP (%(start)s) is greater than end IP (%(end)s)" -msgstr "" -"L'adresse IP de début (%(start)s) est supérieure à l'adresse IP de fin " -"(%(end)s)." - -#: neutron/db/db_base_plugin_v2.py:673 -#, python-format -msgid "Found pool larger than subnet CIDR:%(start)s - %(end)s" -msgstr "" -"Un pool plus volumineux que le routage CIDR de sous-réseau %(start)s - " -"%(end)s a été trouvé." 
- -#: neutron/db/db_base_plugin_v2.py:697 -#, python-format -msgid "Found overlapping ranges: %(l_range)s and %(r_range)s" -msgstr "Chevauchement d'intervalles trouvés : %(l_range)s et %(r_range)s" - -#: neutron/db/db_base_plugin_v2.py:1507 -#, python-format -msgid "Found IP allocation %(alloc)s on subnet %(subnet)s, cannot delete" -msgstr "" - -#: neutron/db/l3_agentschedulers_db.py:78 -msgid "" -"Skipping period L3 agent status check because automatic router rescheduling " -"is disabled." -msgstr "" - -#: neutron/db/l3_db.py:1114 -#, python-format -msgid "Skipping port %s as no IP is configure on it" -msgstr "Ignorer le port %s car aucune adresse IP n'est configurée" - -#: neutron/db/l3_dvr_db.py:86 -#, python-format -msgid "Centralizing distributed router %s is not supported" -msgstr "" - -#: neutron/db/l3_dvr_db.py:535 -#, python-format -msgid "Agent Gateway port does not exist, so create one: %s" -msgstr "" - -#: neutron/db/l3_dvr_db.py:619 -#, python-format -msgid "SNAT interface port list does not exist, so create one: %s" -msgstr "" - -#: neutron/db/l3_dvrscheduler_db.py:312 -msgid "SNAT already bound to a service node." -msgstr "" - -#: neutron/db/l3_hamode_db.py:188 -#, python-format -msgid "" -"Attempt %(count)s to allocate a VRID in the network %(network)s for the " -"router %(router)s" -msgstr "" - -#: neutron/db/l3_hamode_db.py:271 -#, python-format -msgid "" -"Number of available agents lower than max_l3_agents_per_router. 
L3 agents " -"available: %s" -msgstr "" - -#: neutron/db/migration/alembic_migrations/heal_script.py:221 -#, python-format -msgid "Table %(old_t)r was renamed to %(new_t)r" -msgstr "" - -#: neutron/debug/commands.py:107 -#, python-format -msgid "%d probe(s) deleted" -msgstr "" - -#: neutron/notifiers/nova.py:266 -#, python-format -msgid "Nova event response: %s" -msgstr "" - -#: neutron/openstack/common/eventlet_backdoor.py:146 -#, python-format -msgid "Eventlet backdoor listening on %(port)s for process %(pid)d" -msgstr "Eventlet backdoor en écoute sur le port %(port)s for process %(pid)d" - -#: neutron/openstack/common/periodic_task.py:120 -#, python-format -msgid "Skipping periodic task %(task)s because its interval is negative" -msgstr "Tâche périodique %(task)s ignorée car son intervalle est négatif" - -#: neutron/openstack/common/periodic_task.py:125 -#, python-format -msgid "Skipping periodic task %(task)s because it is disabled" -msgstr "Tâche périodique %(task)s car elle est désactivée" - -#: neutron/openstack/common/service.py:173 -#, python-format -msgid "Caught %s, exiting" -msgstr "%s interceptée, sortie" - -#: neutron/openstack/common/service.py:231 -msgid "Parent process has died unexpectedly, exiting" -msgstr "Processus parent arrêté de manière inattendue, sortie" - -#: neutron/openstack/common/service.py:262 -#, python-format -msgid "Child caught %s, exiting" -msgstr "L'enfant a reçu %s, sortie" - -#: neutron/openstack/common/service.py:301 -msgid "Forking too fast, sleeping" -msgstr "Bifurcation trop rapide, pause" - -#: neutron/openstack/common/service.py:320 -#, python-format -msgid "Started child %d" -msgstr "Enfant démarré %d" - -#: neutron/openstack/common/service.py:330 -#, python-format -msgid "Starting %d workers" -msgstr "Démarrage des travailleurs %d" - -#: neutron/openstack/common/service.py:347 -#, python-format -msgid "Child %(pid)d killed by signal %(sig)d" -msgstr "Enfant %(pid)d arrêté par le signal %(sig)d" - -#: 
neutron/openstack/common/service.py:351 -#, python-format -msgid "Child %(pid)s exited with status %(code)d" -msgstr "Processus fils %(pid)s terminé avec le status %(code)d" - -#: neutron/openstack/common/service.py:390 -#, python-format -msgid "Caught %s, stopping children" -msgstr "%s interceptée, arrêt de l'enfant" - -#: neutron/openstack/common/service.py:399 -msgid "Wait called after thread killed. Cleaning up." -msgstr "Pause demandée après suppression de thread. Nettoyage." - -#: neutron/openstack/common/service.py:415 -#, python-format -msgid "Waiting on %d children to exit" -msgstr "En attente %d enfants pour sortie" - -#: neutron/plugins/brocade/NeutronPlugin.py:307 -#, python-format -msgid "Allocated vlan (%d) from the pool" -msgstr "Réseau VLAN alloué (%d) depuis le pool" - -#: neutron/plugins/cisco/models/virt_phy_sw_v2.py:117 -#, python-format -msgid "No %s Plugin loaded" -msgstr "Aucun plug-in %s chargé" - -#: neutron/plugins/cisco/models/virt_phy_sw_v2.py:118 -#, python-format -msgid "%(plugin_key)s: %(function_name)s with args %(args)s ignored" -msgstr "%(plugin_key)s : %(function_name)s avec les arguments %(args)s ignoré" - -#: neutron/plugins/embrane/common/utils.py:44 -msgid "No ip allocation set" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api.py:77 -#, python-format -msgid "The IP addr of available SDN-VE controllers: %s" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api.py:80 -#, python-format -msgid "The SDN-VE controller IP address: %s" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api.py:236 -msgid "Bad resource for forming a list request" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api.py:246 -msgid "Bad resource for forming a show request" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api.py:256 -msgid "Bad resource for forming a create request" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api.py:268 -msgid "Bad resource for forming a update request" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api.py:279 -msgid "Bad resource for forming a delete 
request" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api.py:307 -#, python-format -msgid "Non matching tenant and network types: %(ttype)s %(ntype)s" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api_fake.py:32 -msgid "Fake SDNVE controller initialized" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api_fake.py:35 -msgid "Fake SDNVE controller: list" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api_fake.py:39 -msgid "Fake SDNVE controller: show" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api_fake.py:43 -msgid "Fake SDNVE controller: create" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api_fake.py:47 -msgid "Fake SDNVE controller: update" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api_fake.py:51 -msgid "Fake SDNVE controller: delete" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api_fake.py:55 -msgid "Fake SDNVE controller: get tenant by id" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api_fake.py:59 -msgid "Fake SDNVE controller: check and create tenant" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api_fake.py:63 -msgid "Fake SDNVE controller: get controller" -msgstr "" - -#: neutron/plugins/ibm/sdnve_neutron_plugin.py:147 -msgid "Set a new controller if needed." -msgstr "" - -#: neutron/plugins/ibm/sdnve_neutron_plugin.py:153 -#, python-format -msgid "Set the controller to a new controller: %s" -msgstr "" - -#: neutron/plugins/ibm/agent/sdnve_neutron_agent.py:189 -#, python-format -msgid "" -"Mapping physical network %(physical_network)s to interface %(interface)s" -msgstr "" - -#: neutron/plugins/ibm/agent/sdnve_neutron_agent.py:220 -#, python-format -msgid "" -"Loop iteration exceeded interval (%(polling_interval)s vs. %(elapsed)s)!" -msgstr "" -"L'itération de boucle a dépassé l'intervalle (%(polling_interval)s contre " -"%(elapsed)s) !" 
- -#: neutron/plugins/ibm/agent/sdnve_neutron_agent.py:234 -#, python-format -msgid "Controller IPs: %s" -msgstr "" - -#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:793 -#: neutron/plugins/oneconvergence/agent/nvsd_neutron_agent.py:89 -#: neutron/plugins/sriovnicagent/sriov_nic_agent.py:99 -#, python-format -msgid "RPC agent_id: %s" -msgstr "agent_id RPC : %s" - -#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:863 -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1155 -#: neutron/plugins/sriovnicagent/sriov_nic_agent.py:209 -#, python-format -msgid "Port %(device)s updated. Details: %(details)s" -msgstr "Port %(device)s mis à jour. Détails : %(details)s" - -#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:896 -#, python-format -msgid "Device %s not defined on plugin" -msgstr "Unité %s non définie sur le plug-in" - -#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:903 -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1210 -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1227 -#, python-format -msgid "Attachment %s removed" -msgstr "Connexion %s retirée" - -#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:915 -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1239 -#: neutron/plugins/sriovnicagent/sriov_nic_agent.py:235 -#, python-format -msgid "Port %s updated." -msgstr "Port %s mis à jour." - -#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:968 -msgid "LinuxBridge Agent RPC Daemon Started!" -msgstr "Serveur démon RPC de l'agent LinuxBridge démarré !" - -#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:978 -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1429 -#: neutron/plugins/sriovnicagent/sriov_nic_agent.py:251 -msgid "Agent out of sync with plugin!" -msgstr "Agent non synchronisé avec le plug-in !" 
- -#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:1012 -#: neutron/plugins/ml2/drivers/mlnx/agent/eswitch_neutron_agent.py:43 -#, python-format -msgid "Interface mappings: %s" -msgstr "Mappages d'interface : %s" - -#: neutron/plugins/ml2/db.py:60 -#, python-format -msgid "" -"Added segment %(id)s of type %(network_type)s for network %(network_id)s" -msgstr "" - -#: neutron/plugins/ml2/managers.py:43 -#, python-format -msgid "Configured type driver names: %s" -msgstr "" - -#: neutron/plugins/ml2/managers.py:48 -#, python-format -msgid "Loaded type driver names: %s" -msgstr "" - -#: neutron/plugins/ml2/managers.py:64 -#, python-format -msgid "Registered types: %s" -msgstr "" - -#: neutron/plugins/ml2/managers.py:75 -#, python-format -msgid "Tenant network_types: %s" -msgstr "" - -#: neutron/plugins/ml2/managers.py:161 -#, python-format -msgid "Initializing driver for type '%s'" -msgstr "" - -#: neutron/plugins/ml2/managers.py:270 -#, python-format -msgid "Configured mechanism driver names: %s" -msgstr "" - -#: neutron/plugins/ml2/managers.py:276 -#, python-format -msgid "Loaded mechanism driver names: %s" -msgstr "" - -#: neutron/plugins/ml2/managers.py:288 -#, python-format -msgid "Registered mechanism drivers: %s" -msgstr "" - -#: neutron/plugins/ml2/managers.py:293 -#, python-format -msgid "Initializing mechanism driver '%s'" -msgstr "" - -#: neutron/plugins/ml2/managers.py:699 -#, python-format -msgid "Configured extension driver names: %s" -msgstr "" - -#: neutron/plugins/ml2/managers.py:705 -#, python-format -msgid "Loaded extension driver names: %s" -msgstr "" - -#: neutron/plugins/ml2/managers.py:716 -#, python-format -msgid "Registered extension drivers: %s" -msgstr "" - -#: neutron/plugins/ml2/managers.py:722 -#, python-format -msgid "Initializing extension driver '%s'" -msgstr "" - -#: neutron/plugins/ml2/managers.py:730 -#, python-format -msgid "Got %(alias)s extension from driver '%(drv)s'" -msgstr "" - -#: 
neutron/plugins/ml2/managers.py:779 -#, python-format -msgid "Extended network dict for driver '%(drv)s'" -msgstr "" - -#: neutron/plugins/ml2/managers.py:786 -#, python-format -msgid "Extended subnet dict for driver '%(drv)s'" -msgstr "" - -#: neutron/plugins/ml2/managers.py:793 -#, python-format -msgid "Extended port dict for driver '%(drv)s'" -msgstr "" - -#: neutron/plugins/ml2/plugin.py:144 -msgid "Modular L2 Plugin initialization complete" -msgstr "" - -#: neutron/plugins/ml2/plugin.py:281 -#, python-format -msgid "Attempt %(count)s to bind port %(port)s" -msgstr "Tentative %(count)s de liaison port %(port)s" - -#: neutron/plugins/ml2/plugin.py:678 -#, python-format -msgid "Port %s was deleted concurrently" -msgstr "Le port %s a été effacé en même temps" - -#: neutron/plugins/ml2/plugin.py:689 -#, python-format -msgid "Subnet %s was deleted concurrently" -msgstr "Le sous-réseau %s a été effacé en même temps" - -#: neutron/plugins/ml2/plugin.py:1329 -#, python-format -msgid "" -"Binding info for port %s was not found, it might have been deleted already." -msgstr "" -"L'information de liaison pour le port %s n'a pas été trouvée, elle peut déjà " -"avoir été effacée." 
- -#: neutron/plugins/ml2/drivers/type_flat.py:72 -msgid "Arbitrary flat physical_network names allowed" -msgstr "" - -#: neutron/plugins/ml2/drivers/type_flat.py:78 -#, python-format -msgid "Allowable flat physical_network names: %s" -msgstr "" - -#: neutron/plugins/ml2/drivers/type_flat.py:85 -msgid "ML2 FlatTypeDriver initialization complete" -msgstr "" - -#: neutron/plugins/ml2/drivers/type_local.py:37 -msgid "ML2 LocalTypeDriver initialization complete" -msgstr "" - -#: neutron/plugins/ml2/drivers/type_tunnel.py:116 -#, python-format -msgid "%(type)s ID ranges: %(range)s" -msgstr "" - -#: neutron/plugins/ml2/drivers/type_vlan.py:99 -#, python-format -msgid "Network VLAN ranges: %s" -msgstr "Plages de réseau local virtuel de réseau : %s" - -#: neutron/plugins/ml2/drivers/type_vlan.py:166 -msgid "VlanTypeDriver initialization complete" -msgstr "" - -#: neutron/plugins/ml2/drivers/arista/mechanism_arista.py:104 -#, python-format -msgid "Network %s is not created as it is not found in Arista DB" -msgstr "" - -#: neutron/plugins/ml2/drivers/arista/mechanism_arista.py:117 -#, python-format -msgid "Network name changed to %s" -msgstr "Nom du réseau changé en %s" - -#: neutron/plugins/ml2/drivers/arista/mechanism_arista.py:147 -#, python-format -msgid "Network %s is not updated as it is not found in Arista DB" -msgstr "" - -#: neutron/plugins/ml2/drivers/arista/mechanism_arista.py:243 -#, python-format -msgid "VM %s is not created as it is not found in Arista DB" -msgstr "" - -#: neutron/plugins/ml2/drivers/arista/mechanism_arista.py:257 -#, python-format -msgid "Port name changed to %s" -msgstr "Nom de port changé en %s" - -#: neutron/plugins/ml2/drivers/arista/mechanism_arista.py:310 -#, python-format -msgid "VM %s is not updated as it is not found in Arista DB" -msgstr "" - -#: neutron/plugins/ml2/drivers/cisco/apic/apic_topology.py:78 -msgid "APIC service agent starting ..." 
-msgstr "Démarrage du service de l'agent APIC" - -#: neutron/plugins/ml2/drivers/cisco/apic/apic_topology.py:95 -msgid "APIC service agent started" -msgstr "service de l'agent APIC démarré" - -#: neutron/plugins/ml2/drivers/cisco/apic/apic_topology.py:179 -#, python-format -msgid "APIC host agent: agent starting on %s" -msgstr "" - -#: neutron/plugins/ml2/drivers/cisco/apic/apic_topology.py:199 -#, python-format -msgid "APIC host agent: started on %s" -msgstr "" - -#: neutron/plugins/ml2/drivers/freescale/mechanism_fslsdn.py:40 -msgid "Initializing CRD client... " -msgstr "" - -#: neutron/plugins/ml2/drivers/mlnx/agent/eswitch_neutron_agent.py:54 -msgid "Agent initialised successfully, now running... " -msgstr "Agent initialisé avec succès, en cours d'exécution..." - -#: neutron/plugins/ml2/extensions/port_security.py:33 -msgid "PortSecurityExtensionDriver initialization complete" -msgstr "" - -#: neutron/plugins/oneconvergence/agent/nvsd_neutron_agent.py:153 -msgid "NVSD Agent initialized successfully, now running... " -msgstr "" - -#: neutron/plugins/openvswitch/agent/ovs_dvr_neutron_agent.py:204 -#, python-format -msgid "L2 Agent operating in DVR Mode with MAC %s" -msgstr "" - -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:518 -#, python-format -msgid "Assigning %(vlan_id)s as local vlan for net-id=%(net_uuid)s" -msgstr "" -"Affectation de %(vlan_id)s comme réseau local virtuel pour net-id = " -"%(net_uuid)s" - -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:619 -#, python-format -msgid "Reclaiming vlan = %(vlan_id)s from net-id = %(net_uuid)s" -msgstr "" -"Récupération du réseau local virtuel = %(vlan_id)s à partir de net-id = " -"%(net_uuid)s" - -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:714 -#, python-format -msgid "port_unbound(): net_uuid %s not in local_vlan_map" -msgstr "" - -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:783 -#, python-format -msgid "Adding %s to list of bridges." 
-msgstr "Ajout %s à la liste de ponts." - -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:926 -#, python-format -msgid "Mapping physical network %(physical_network)s to bridge %(bridge)s" -msgstr "Mappage du réseau physique %(physical_network)s sur le pont %(bridge)s" - -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1035 -#, python-format -msgid "Port '%(port_name)s' has lost its vlan tag '%(vlan_tag)d'!" -msgstr "" - -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1149 -#, python-format -msgid "" -"Port %s was not found on the integration bridge and will therefore not be " -"processed" -msgstr "" - -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1179 -#, python-format -msgid "Configuration for device %s completed." -msgstr "Configuration complète de l'équipement %s" - -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1198 -#, python-format -msgid "Ancillary Port %s added" -msgstr "Port auxiliaire %s ajouté" - -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1457 -msgid "Agent tunnel out of sync with plugin!" -msgstr "Tunnel d'agent désynchronisé avec le plug-in !" - -#: neutron/plugins/sriovnicagent/sriov_nic_agent.py:191 -#, python-format -msgid "No device with MAC %s defined on agent." -msgstr "Aucun équipement avec MAC %s défini sur l'agent." - -#: neutron/plugins/sriovnicagent/sriov_nic_agent.py:216 -#, python-format -msgid "Device with MAC %s not defined on plugin" -msgstr "Appareil avec adresse MAC %s non-défini dans le plugin" - -#: neutron/plugins/sriovnicagent/sriov_nic_agent.py:223 -#, python-format -msgid "Removing device with mac_address %s" -msgstr "Retrait de l'appareil ayant pour mac_address %s" - -#: neutron/plugins/sriovnicagent/sriov_nic_agent.py:244 -msgid "SRIOV NIC Agent RPC Daemon Started!" 
-msgstr "" - -#: neutron/plugins/sriovnicagent/sriov_nic_agent.py:333 -#, python-format -msgid "Physical Devices mappings: %s" -msgstr "Mappages d'Équipements Physiques: %s" - -#: neutron/plugins/sriovnicagent/sriov_nic_agent.py:334 -#, python-format -msgid "Exclude Devices: %s" -msgstr "Equipements exclus: %s" - -#: neutron/scheduler/dhcp_agent_scheduler.py:110 -#, python-format -msgid "Agent %s already present" -msgstr "Agent %s déjà présent" - -#: neutron/server/__init__.py:50 -msgid "RPC was already started in parent process by plugin." -msgstr "Le plugin avait déjà lancé les RPC dans le processus parent." - -#: neutron/services/service_base.py:99 -#, python-format -msgid "Default provider is not specified for service type %s" -msgstr "" -"Le fournisseur par défaut n'est pas spécifié pour le type de service %s" - -#: neutron/services/l3_router/l3_arista.py:247 -msgid "Syncing Neutron Router DB <-> EOS" -msgstr "" - -#: neutron/services/metering/agents/metering_agent.py:96 -#, python-format -msgid "Loading Metering driver %s" -msgstr "Chargement du pilote de Mesures %s" - -#: neutron/services/metering/drivers/iptables/iptables_driver.py:89 -#, python-format -msgid "Loading interface driver %s" -msgstr "Chargement de pilote d'interface %s" diff --git a/neutron/locale/it/LC_MESSAGES/neutron-log-info.po b/neutron/locale/it/LC_MESSAGES/neutron-log-info.po deleted file mode 100644 index 1f2fd145644..00000000000 --- a/neutron/locale/it/LC_MESSAGES/neutron-log-info.po +++ /dev/null @@ -1,942 +0,0 @@ -# Translations template for neutron. -# Copyright (C) 2015 ORGANIZATION -# This file is distributed under the same license as the neutron project. 
-# -# Translators: -# PierAlberto , 2014 -msgid "" -msgstr "" -"Project-Id-Version: Neutron\n" -"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2015-04-03 06:13+0000\n" -"PO-Revision-Date: 2015-03-31 22:26+0000\n" -"Last-Translator: openstackjenkins \n" -"Language-Team: Italian (http://www.transifex.com/projects/p/neutron/language/" -"it/)\n" -"Language: it\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"Generated-By: Babel 1.3\n" -"Plural-Forms: nplurals=2; plural=(n != 1);\n" - -#: neutron/manager.py:115 -#, python-format -msgid "Loading core plugin: %s" -msgstr "" - -#: neutron/manager.py:155 -#, python-format -msgid "Service %s is supported by the core plugin" -msgstr "" - -#: neutron/manager.py:173 -#, python-format -msgid "Loading Plugin: %s" -msgstr "Caricamento plugin: %s" - -#: neutron/policy.py:114 -#, python-format -msgid "" -"Inserting policy:%(new_policy)s in place of deprecated policy:%(old_policy)s" -msgstr "" - -#: neutron/quota.py:215 -msgid "" -"ConfDriver is used as quota_driver because the loaded plugin does not " -"support 'quotas' table." -msgstr "" - -#: neutron/quota.py:220 -#, python-format -msgid "Loaded quota_driver: %s." -msgstr "" - -#: neutron/service.py:178 -#, python-format -msgid "Neutron service started, listening on %(host)s:%(port)s" -msgstr "" - -#: neutron/wsgi.py:779 -#, python-format -msgid "%(method)s %(url)s" -msgstr "%(method)s %(url)s" - -#: neutron/wsgi.py:796 -#, python-format -msgid "HTTP exception thrown: %s" -msgstr "Generata eccezione HTTP: %s" - -#: neutron/wsgi.py:812 -#, python-format -msgid "%(url)s returned with HTTP %(status)d" -msgstr "%(url)s restituito con HTTP %(status)d" - -#: neutron/wsgi.py:815 -#, python-format -msgid "%(url)s returned a fault: %(exception)s" -msgstr "%(url)s ha restituito un errore: %(exception)s" - -#: neutron/agent/securitygroups_rpc.py:82 -msgid "Disabled security-group extension." 
-msgstr "Estensione di security-group disabilitata." - -#: neutron/agent/securitygroups_rpc.py:84 -msgid "Disabled allowed-address-pairs extension." -msgstr "" - -#: neutron/agent/securitygroups_rpc.py:214 -#, python-format -msgid "" -"Skipping method %s as firewall is disabled or configured as " -"NoopFirewallDriver." -msgstr "" - -#: neutron/agent/securitygroups_rpc.py:226 -#, python-format -msgid "Preparing filters for devices %s" -msgstr "Preparazione filtri per i dispositivi %s" - -#: neutron/agent/securitygroups_rpc.py:256 -#, python-format -msgid "Security group rule updated %r" -msgstr "Regola gruppo di sicurezza aggiornata %r" - -#: neutron/agent/securitygroups_rpc.py:263 -#, python-format -msgid "Security group member updated %r" -msgstr "Membro gruppo di sicurezza aggiornato %r" - -#: neutron/agent/securitygroups_rpc.py:285 -msgid "Provider rule updated" -msgstr "Provider regola aggiornato" - -#: neutron/agent/securitygroups_rpc.py:297 -#, python-format -msgid "Remove device filter for %r" -msgstr "Rimuovi filtro dispositivo per %r" - -#: neutron/agent/securitygroups_rpc.py:307 -msgid "Refresh firewall rules" -msgstr "Aggiorna regole firewall" - -#: neutron/agent/securitygroups_rpc.py:311 -msgid "No ports here to refresh firewall" -msgstr "" - -#: neutron/agent/common/ovs_lib.py:393 -#, python-format -msgid "Port %(port_id)s not present in bridge %(br_name)s" -msgstr "" - -#: neutron/agent/dhcp/agent.py:93 neutron/agent/dhcp/agent.py:589 -msgid "DHCP agent started" -msgstr "Agent DHCP avviato" - -#: neutron/agent/dhcp/agent.py:144 -msgid "Synchronizing state" -msgstr "Stato sincronizzazione" - -#: neutron/agent/dhcp/agent.py:165 -msgid "Synchronizing state complete" -msgstr "" - -#: neutron/agent/dhcp/agent.py:586 neutron/agent/l3/agent.py:622 -#: neutron/services/metering/agents/metering_agent.py:286 -#, python-format -msgid "agent_updated by server side %s!" -msgstr "agent_updated dal lato server %s!" 
- -#: neutron/agent/l3/agent.py:551 -msgid "L3 agent started" -msgstr "Agent L3 avviato" - -#: neutron/agent/l3/ha.py:114 -#, python-format -msgid "Router %(router_id)s transitioned to %(state)s" -msgstr "" - -#: neutron/agent/l3/ha.py:124 -#, python-format -msgid "" -"Router %s is not managed by this agent. It was possibly deleted concurrently." -msgstr "" - -#: neutron/agent/linux/daemon.py:102 -#, python-format -msgid "Process runs with uid/gid: %(uid)s/%(gid)s" -msgstr "" - -#: neutron/agent/linux/dhcp.py:656 -#, python-format -msgid "" -"Cannot apply dhcp option %(opt)s because it's ip_version %(version)d is not " -"in port's address IP versions" -msgstr "" - -#: neutron/agent/linux/interface.py:268 neutron/agent/linux/interface.py:319 -#: neutron/agent/linux/interface.py:377 neutron/agent/linux/interface.py:420 -#, python-format -msgid "Device %s already exists" -msgstr "L'unità %s già esiste" - -#: neutron/agent/linux/iptables_firewall.py:114 -#, python-format -msgid "Attempted to update port filter which is not filtered %s" -msgstr "Tentativo di aggiornare il filtro della porta che non è filtrata %s" - -#: neutron/agent/linux/iptables_firewall.py:125 -#, python-format -msgid "Attempted to remove port filter which is not filtered %r" -msgstr "Tentativo di rimuovere il filtro della porta che non è filtrata %r" - -#: neutron/api/extensions.py:404 -msgid "Initializing extension manager." -msgstr "Inizializzazione gestore estensioni." 
- -#: neutron/api/extensions.py:562 -#, python-format -msgid "Loaded extension: %s" -msgstr "Estensione caricata: %s" - -#: neutron/api/v2/base.py:93 -msgid "" -"Allow sorting is enabled because native pagination requires native sorting" -msgstr "" -"Consenti ordinamento è abilitato in quanto la paginaziona nativa richiede " -"l'ordinamento nativo" - -#: neutron/api/v2/resource.py:94 -#, python-format -msgid "%(action)s failed (client error): %(exc)s" -msgstr "" - -#: neutron/callbacks/manager.py:135 -#, python-format -msgid "Notify callbacks for %(resource)s, %(event)s" -msgstr "" - -#: neutron/callbacks/manager.py:142 -#, python-format -msgid "Calling callback %s" -msgstr "" - -#: neutron/cmd/ovs_cleanup.py:73 -#, python-format -msgid "Deleting port: %s" -msgstr "" - -#: neutron/cmd/ovs_cleanup.py:103 -#, python-format -msgid "Cleaning bridge: %s" -msgstr "" - -#: neutron/cmd/ovs_cleanup.py:110 -msgid "OVS cleanup completed successfully" -msgstr "Ripulitura di OVS completata correttamente" - -#: neutron/cmd/eventlet/plugins/hyperv_neutron_agent.py:43 -#: neutron/plugins/ibm/agent/sdnve_neutron_agent.py:261 -#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:1017 -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1634 -#: neutron/plugins/sriovnicagent/sriov_nic_agent.py:345 -msgid "Agent initialized successfully, now running... " -msgstr "Agent inizializzato correttamente, ora in esecuzione... " - -#: neutron/common/config.py:204 -msgid "Logging enabled!" -msgstr "Accesso abilitato!" - -#: neutron/common/config.py:205 -#, python-format -msgid "%(prog)s version %(version)s" -msgstr "" - -#: neutron/common/config.py:224 -#, python-format -msgid "Config paste file: %s" -msgstr "Configurazione file paste: %s" - -#: neutron/common/ipv6_utils.py:63 -msgid "IPv6 is not enabled on this system." 
-msgstr "" - -#: neutron/db/agentschedulers_db.py:161 -msgid "" -"Skipping periodic DHCP agent status check because automatic network " -"rescheduling is disabled." -msgstr "" - -#: neutron/db/agentschedulers_db.py:196 -#, python-format -msgid "Scheduling unhosted network %s" -msgstr "" - -#: neutron/db/agentschedulers_db.py:203 -#, python-format -msgid "" -"Failed to schedule network %s, no eligible agents or it might be already " -"scheduled by another server" -msgstr "" - -#: neutron/db/agentschedulers_db.py:211 -#, python-format -msgid "Adding network %(net)s to agent %(agent)%s on host %(host)s" -msgstr "" - -#: neutron/db/db_base_plugin_v2.py:630 -#, python-format -msgid "" -"Validation for CIDR: %(new_cidr)s failed - overlaps with subnet " -"%(subnet_id)s (CIDR: %(cidr)s)" -msgstr "" -"Convalida per CIDR: %(new_cidr)s non riuscita - si sovrappone con la " -"sottorete %(subnet_id)s (CIDR: %(cidr)s)" - -#: neutron/db/db_base_plugin_v2.py:657 -#, python-format -msgid "Found invalid IP address in pool: %(start)s - %(end)s:" -msgstr "Trovato un indirizzo IP invalido nel pool: %(start)s - %(end)s:" - -#: neutron/db/db_base_plugin_v2.py:664 -msgid "Specified IP addresses do not match the subnet IP version" -msgstr "" -"Gli indirizzi IP specificati non corrispondono alla versione IP della " -"sottorete" - -#: neutron/db/db_base_plugin_v2.py:668 -#, python-format -msgid "Start IP (%(start)s) is greater than end IP (%(end)s)" -msgstr "L'IP iniziale (%(start)s) è superiore all'IP finale (%(end)s)" - -#: neutron/db/db_base_plugin_v2.py:673 -#, python-format -msgid "Found pool larger than subnet CIDR:%(start)s - %(end)s" -msgstr "Trovato un pool più grande della sottorete CIDR:%(start)s - %(end)s" - -#: neutron/db/db_base_plugin_v2.py:697 -#, python-format -msgid "Found overlapping ranges: %(l_range)s and %(r_range)s" -msgstr "Trovati gli intervalli di sovrapposizione: %(l_range)s e %(r_range)s" - -#: neutron/db/db_base_plugin_v2.py:1507 -#, python-format -msgid "Found 
IP allocation %(alloc)s on subnet %(subnet)s, cannot delete" -msgstr "" - -#: neutron/db/l3_agentschedulers_db.py:78 -msgid "" -"Skipping period L3 agent status check because automatic router rescheduling " -"is disabled." -msgstr "" - -#: neutron/db/l3_db.py:1114 -#, python-format -msgid "Skipping port %s as no IP is configure on it" -msgstr "La porta %s viene ignorata in quanto non ha nessun IP configurato" - -#: neutron/db/l3_dvr_db.py:86 -#, python-format -msgid "Centralizing distributed router %s is not supported" -msgstr "" - -#: neutron/db/l3_dvr_db.py:535 -#, python-format -msgid "Agent Gateway port does not exist, so create one: %s" -msgstr "" - -#: neutron/db/l3_dvr_db.py:619 -#, python-format -msgid "SNAT interface port list does not exist, so create one: %s" -msgstr "" - -#: neutron/db/l3_dvrscheduler_db.py:312 -msgid "SNAT already bound to a service node." -msgstr "" - -#: neutron/db/l3_hamode_db.py:188 -#, python-format -msgid "" -"Attempt %(count)s to allocate a VRID in the network %(network)s for the " -"router %(router)s" -msgstr "" - -#: neutron/db/l3_hamode_db.py:271 -#, python-format -msgid "" -"Number of available agents lower than max_l3_agents_per_router. 
L3 agents " -"available: %s" -msgstr "" - -#: neutron/db/migration/alembic_migrations/heal_script.py:221 -#, python-format -msgid "Table %(old_t)r was renamed to %(new_t)r" -msgstr "" - -#: neutron/debug/commands.py:107 -#, python-format -msgid "%d probe(s) deleted" -msgstr "" - -#: neutron/notifiers/nova.py:266 -#, python-format -msgid "Nova event response: %s" -msgstr "" - -#: neutron/openstack/common/eventlet_backdoor.py:146 -#, python-format -msgid "Eventlet backdoor listening on %(port)s for process %(pid)d" -msgstr "Ascolto di eventlet backdoor su %(port)s per il processo %(pid)d" - -#: neutron/openstack/common/periodic_task.py:120 -#, python-format -msgid "Skipping periodic task %(task)s because its interval is negative" -msgstr "" -"Abbadono dell'attività periodica %(task)s perché l'intervalo è negativo" - -#: neutron/openstack/common/periodic_task.py:125 -#, python-format -msgid "Skipping periodic task %(task)s because it is disabled" -msgstr "Abbadono dell'attività periodica %(task)s perché è disabilitata" - -#: neutron/openstack/common/service.py:173 -#, python-format -msgid "Caught %s, exiting" -msgstr "Rilevato %s, esistente" - -#: neutron/openstack/common/service.py:231 -msgid "Parent process has died unexpectedly, exiting" -msgstr "Il processo principale è stato interrotto inaspettatamente, uscire" - -#: neutron/openstack/common/service.py:262 -#, python-format -msgid "Child caught %s, exiting" -msgstr "Cogliere Child %s, uscendo" - -#: neutron/openstack/common/service.py:301 -msgid "Forking too fast, sleeping" -msgstr "Sblocco troppo veloce, attendere" - -#: neutron/openstack/common/service.py:320 -#, python-format -msgid "Started child %d" -msgstr "Child avviato %d" - -#: neutron/openstack/common/service.py:330 -#, python-format -msgid "Starting %d workers" -msgstr "Avvio %d operatori" - -#: neutron/openstack/common/service.py:347 -#, python-format -msgid "Child %(pid)d killed by signal %(sig)d" -msgstr "Child %(pid)d interrotto dal segnale 
%(sig)d" - -#: neutron/openstack/common/service.py:351 -#, python-format -msgid "Child %(pid)s exited with status %(code)d" -msgstr "Child %(pid)s terminato con stato %(code)d" - -#: neutron/openstack/common/service.py:390 -#, python-format -msgid "Caught %s, stopping children" -msgstr "Intercettato %s, arresto in corso dei children" - -#: neutron/openstack/common/service.py:399 -msgid "Wait called after thread killed. Cleaning up." -msgstr "" - -#: neutron/openstack/common/service.py:415 -#, python-format -msgid "Waiting on %d children to exit" -msgstr "In attesa %d degli elementi secondari per uscire" - -#: neutron/plugins/brocade/NeutronPlugin.py:307 -#, python-format -msgid "Allocated vlan (%d) from the pool" -msgstr "vlan (%d) allocata dal pool" - -#: neutron/plugins/cisco/models/virt_phy_sw_v2.py:117 -#, python-format -msgid "No %s Plugin loaded" -msgstr "Nessun plugin %s caricato" - -#: neutron/plugins/cisco/models/virt_phy_sw_v2.py:118 -#, python-format -msgid "%(plugin_key)s: %(function_name)s with args %(args)s ignored" -msgstr "%(plugin_key)s: %(function_name)s con argomenti %(args)s ignorato" - -#: neutron/plugins/embrane/common/utils.py:44 -msgid "No ip allocation set" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api.py:77 -#, python-format -msgid "The IP addr of available SDN-VE controllers: %s" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api.py:80 -#, python-format -msgid "The SDN-VE controller IP address: %s" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api.py:236 -msgid "Bad resource for forming a list request" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api.py:246 -msgid "Bad resource for forming a show request" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api.py:256 -msgid "Bad resource for forming a create request" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api.py:268 -msgid "Bad resource for forming a update request" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api.py:279 -msgid "Bad resource for forming a delete request" -msgstr "" - -#: 
neutron/plugins/ibm/sdnve_api.py:307 -#, python-format -msgid "Non matching tenant and network types: %(ttype)s %(ntype)s" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api_fake.py:32 -msgid "Fake SDNVE controller initialized" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api_fake.py:35 -msgid "Fake SDNVE controller: list" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api_fake.py:39 -msgid "Fake SDNVE controller: show" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api_fake.py:43 -msgid "Fake SDNVE controller: create" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api_fake.py:47 -msgid "Fake SDNVE controller: update" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api_fake.py:51 -msgid "Fake SDNVE controller: delete" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api_fake.py:55 -msgid "Fake SDNVE controller: get tenant by id" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api_fake.py:59 -msgid "Fake SDNVE controller: check and create tenant" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api_fake.py:63 -msgid "Fake SDNVE controller: get controller" -msgstr "" - -#: neutron/plugins/ibm/sdnve_neutron_plugin.py:147 -msgid "Set a new controller if needed." -msgstr "" - -#: neutron/plugins/ibm/sdnve_neutron_plugin.py:153 -#, python-format -msgid "Set the controller to a new controller: %s" -msgstr "" - -#: neutron/plugins/ibm/agent/sdnve_neutron_agent.py:189 -#, python-format -msgid "" -"Mapping physical network %(physical_network)s to interface %(interface)s" -msgstr "" - -#: neutron/plugins/ibm/agent/sdnve_neutron_agent.py:220 -#, python-format -msgid "" -"Loop iteration exceeded interval (%(polling_interval)s vs. %(elapsed)s)!" -msgstr "" -"L'iterazione loop supera l'intervallo (%(polling_interval)s vs. %(elapsed)s)!" 
- -#: neutron/plugins/ibm/agent/sdnve_neutron_agent.py:234 -#, python-format -msgid "Controller IPs: %s" -msgstr "" - -#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:793 -#: neutron/plugins/oneconvergence/agent/nvsd_neutron_agent.py:89 -#: neutron/plugins/sriovnicagent/sriov_nic_agent.py:99 -#, python-format -msgid "RPC agent_id: %s" -msgstr "agent-id RPC: %s" - -#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:863 -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1155 -#: neutron/plugins/sriovnicagent/sriov_nic_agent.py:209 -#, python-format -msgid "Port %(device)s updated. Details: %(details)s" -msgstr "Porta %(device)s aggiornata. Dettagli: %(details)s" - -#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:896 -#, python-format -msgid "Device %s not defined on plugin" -msgstr "Unità %s non definita nel plugin" - -#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:903 -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1210 -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1227 -#, python-format -msgid "Attachment %s removed" -msgstr "Collegamento %s rimosso" - -#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:915 -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1239 -#: neutron/plugins/sriovnicagent/sriov_nic_agent.py:235 -#, python-format -msgid "Port %s updated." -msgstr "Porta %s aggiornata." - -#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:968 -msgid "LinuxBridge Agent RPC Daemon Started!" -msgstr "LinuxBridge Agent RPC Daemon avviato!" - -#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:978 -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1429 -#: neutron/plugins/sriovnicagent/sriov_nic_agent.py:251 -msgid "Agent out of sync with plugin!" -msgstr "Agent non sincronizzato con il plugin!" 
- -#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:1012 -#: neutron/plugins/ml2/drivers/mlnx/agent/eswitch_neutron_agent.py:43 -#, python-format -msgid "Interface mappings: %s" -msgstr "Associazioni interfaccia: %s" - -#: neutron/plugins/ml2/db.py:60 -#, python-format -msgid "" -"Added segment %(id)s of type %(network_type)s for network %(network_id)s" -msgstr "" - -#: neutron/plugins/ml2/managers.py:43 -#, python-format -msgid "Configured type driver names: %s" -msgstr "" - -#: neutron/plugins/ml2/managers.py:48 -#, python-format -msgid "Loaded type driver names: %s" -msgstr "" - -#: neutron/plugins/ml2/managers.py:64 -#, python-format -msgid "Registered types: %s" -msgstr "" - -#: neutron/plugins/ml2/managers.py:75 -#, python-format -msgid "Tenant network_types: %s" -msgstr "" - -#: neutron/plugins/ml2/managers.py:161 -#, python-format -msgid "Initializing driver for type '%s'" -msgstr "" - -#: neutron/plugins/ml2/managers.py:270 -#, python-format -msgid "Configured mechanism driver names: %s" -msgstr "" - -#: neutron/plugins/ml2/managers.py:276 -#, python-format -msgid "Loaded mechanism driver names: %s" -msgstr "" - -#: neutron/plugins/ml2/managers.py:288 -#, python-format -msgid "Registered mechanism drivers: %s" -msgstr "" - -#: neutron/plugins/ml2/managers.py:293 -#, python-format -msgid "Initializing mechanism driver '%s'" -msgstr "" - -#: neutron/plugins/ml2/managers.py:699 -#, python-format -msgid "Configured extension driver names: %s" -msgstr "" - -#: neutron/plugins/ml2/managers.py:705 -#, python-format -msgid "Loaded extension driver names: %s" -msgstr "" - -#: neutron/plugins/ml2/managers.py:716 -#, python-format -msgid "Registered extension drivers: %s" -msgstr "" - -#: neutron/plugins/ml2/managers.py:722 -#, python-format -msgid "Initializing extension driver '%s'" -msgstr "" - -#: neutron/plugins/ml2/managers.py:730 -#, python-format -msgid "Got %(alias)s extension from driver '%(drv)s'" -msgstr "" - -#: 
neutron/plugins/ml2/managers.py:779 -#, python-format -msgid "Extended network dict for driver '%(drv)s'" -msgstr "" - -#: neutron/plugins/ml2/managers.py:786 -#, python-format -msgid "Extended subnet dict for driver '%(drv)s'" -msgstr "" - -#: neutron/plugins/ml2/managers.py:793 -#, python-format -msgid "Extended port dict for driver '%(drv)s'" -msgstr "" - -#: neutron/plugins/ml2/plugin.py:144 -msgid "Modular L2 Plugin initialization complete" -msgstr "" - -#: neutron/plugins/ml2/plugin.py:281 -#, python-format -msgid "Attempt %(count)s to bind port %(port)s" -msgstr "" - -#: neutron/plugins/ml2/plugin.py:678 -#, python-format -msgid "Port %s was deleted concurrently" -msgstr "" - -#: neutron/plugins/ml2/plugin.py:689 -#, python-format -msgid "Subnet %s was deleted concurrently" -msgstr "" - -#: neutron/plugins/ml2/plugin.py:1329 -#, python-format -msgid "" -"Binding info for port %s was not found, it might have been deleted already." -msgstr "" - -#: neutron/plugins/ml2/drivers/type_flat.py:72 -msgid "Arbitrary flat physical_network names allowed" -msgstr "" - -#: neutron/plugins/ml2/drivers/type_flat.py:78 -#, python-format -msgid "Allowable flat physical_network names: %s" -msgstr "" - -#: neutron/plugins/ml2/drivers/type_flat.py:85 -msgid "ML2 FlatTypeDriver initialization complete" -msgstr "" - -#: neutron/plugins/ml2/drivers/type_local.py:37 -msgid "ML2 LocalTypeDriver initialization complete" -msgstr "" - -#: neutron/plugins/ml2/drivers/type_tunnel.py:116 -#, python-format -msgid "%(type)s ID ranges: %(range)s" -msgstr "" - -#: neutron/plugins/ml2/drivers/type_vlan.py:99 -#, python-format -msgid "Network VLAN ranges: %s" -msgstr "Intervalli di rete VLAN: %s" - -#: neutron/plugins/ml2/drivers/type_vlan.py:166 -msgid "VlanTypeDriver initialization complete" -msgstr "" - -#: neutron/plugins/ml2/drivers/arista/mechanism_arista.py:104 -#, python-format -msgid "Network %s is not created as it is not found in Arista DB" -msgstr "" - -#: 
neutron/plugins/ml2/drivers/arista/mechanism_arista.py:117 -#, python-format -msgid "Network name changed to %s" -msgstr "" - -#: neutron/plugins/ml2/drivers/arista/mechanism_arista.py:147 -#, python-format -msgid "Network %s is not updated as it is not found in Arista DB" -msgstr "" - -#: neutron/plugins/ml2/drivers/arista/mechanism_arista.py:243 -#, python-format -msgid "VM %s is not created as it is not found in Arista DB" -msgstr "" - -#: neutron/plugins/ml2/drivers/arista/mechanism_arista.py:257 -#, python-format -msgid "Port name changed to %s" -msgstr "" - -#: neutron/plugins/ml2/drivers/arista/mechanism_arista.py:310 -#, python-format -msgid "VM %s is not updated as it is not found in Arista DB" -msgstr "" - -#: neutron/plugins/ml2/drivers/cisco/apic/apic_topology.py:78 -msgid "APIC service agent starting ..." -msgstr "" - -#: neutron/plugins/ml2/drivers/cisco/apic/apic_topology.py:95 -msgid "APIC service agent started" -msgstr "" - -#: neutron/plugins/ml2/drivers/cisco/apic/apic_topology.py:179 -#, python-format -msgid "APIC host agent: agent starting on %s" -msgstr "" - -#: neutron/plugins/ml2/drivers/cisco/apic/apic_topology.py:199 -#, python-format -msgid "APIC host agent: started on %s" -msgstr "" - -#: neutron/plugins/ml2/drivers/freescale/mechanism_fslsdn.py:40 -msgid "Initializing CRD client... " -msgstr "" - -#: neutron/plugins/ml2/drivers/mlnx/agent/eswitch_neutron_agent.py:54 -msgid "Agent initialised successfully, now running... " -msgstr "" - -#: neutron/plugins/ml2/extensions/port_security.py:33 -msgid "PortSecurityExtensionDriver initialization complete" -msgstr "" - -#: neutron/plugins/oneconvergence/agent/nvsd_neutron_agent.py:153 -msgid "NVSD Agent initialized successfully, now running... 
" -msgstr "" - -#: neutron/plugins/openvswitch/agent/ovs_dvr_neutron_agent.py:204 -#, python-format -msgid "L2 Agent operating in DVR Mode with MAC %s" -msgstr "" - -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:518 -#, python-format -msgid "Assigning %(vlan_id)s as local vlan for net-id=%(net_uuid)s" -msgstr "Assegnazione %(vlan_id)s come vlan locale per net-id=%(net_uuid)s" - -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:619 -#, python-format -msgid "Reclaiming vlan = %(vlan_id)s from net-id = %(net_uuid)s" -msgstr "Recupero vlan = %(vlan_id)s da net-id = %(net_uuid)s" - -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:714 -#, python-format -msgid "port_unbound(): net_uuid %s not in local_vlan_map" -msgstr "" - -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:783 -#, python-format -msgid "Adding %s to list of bridges." -msgstr "" - -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:926 -#, python-format -msgid "Mapping physical network %(physical_network)s to bridge %(bridge)s" -msgstr "Associazione rete fisica %(physical_network)s al bridge %(bridge)s" - -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1035 -#, python-format -msgid "Port '%(port_name)s' has lost its vlan tag '%(vlan_tag)d'!" -msgstr "" - -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1149 -#, python-format -msgid "" -"Port %s was not found on the integration bridge and will therefore not be " -"processed" -msgstr "" - -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1179 -#, python-format -msgid "Configuration for device %s completed." -msgstr "" - -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1198 -#, python-format -msgid "Ancillary Port %s added" -msgstr "" - -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1457 -msgid "Agent tunnel out of sync with plugin!" -msgstr "Il tunnel agent non è sincronizzato con il plugin!" 
- -#: neutron/plugins/sriovnicagent/sriov_nic_agent.py:191 -#, python-format -msgid "No device with MAC %s defined on agent." -msgstr "" - -#: neutron/plugins/sriovnicagent/sriov_nic_agent.py:216 -#, python-format -msgid "Device with MAC %s not defined on plugin" -msgstr "" - -#: neutron/plugins/sriovnicagent/sriov_nic_agent.py:223 -#, python-format -msgid "Removing device with mac_address %s" -msgstr "" - -#: neutron/plugins/sriovnicagent/sriov_nic_agent.py:244 -msgid "SRIOV NIC Agent RPC Daemon Started!" -msgstr "" - -#: neutron/plugins/sriovnicagent/sriov_nic_agent.py:333 -#, python-format -msgid "Physical Devices mappings: %s" -msgstr "" - -#: neutron/plugins/sriovnicagent/sriov_nic_agent.py:334 -#, python-format -msgid "Exclude Devices: %s" -msgstr "" - -#: neutron/scheduler/dhcp_agent_scheduler.py:110 -#, python-format -msgid "Agent %s already present" -msgstr "" - -#: neutron/server/__init__.py:50 -msgid "RPC was already started in parent process by plugin." -msgstr "" - -#: neutron/services/service_base.py:99 -#, python-format -msgid "Default provider is not specified for service type %s" -msgstr "" - -#: neutron/services/l3_router/l3_arista.py:247 -msgid "Syncing Neutron Router DB <-> EOS" -msgstr "" - -#: neutron/services/metering/agents/metering_agent.py:96 -#, python-format -msgid "Loading Metering driver %s" -msgstr "" - -#: neutron/services/metering/drivers/iptables/iptables_driver.py:89 -#, python-format -msgid "Loading interface driver %s" -msgstr "" diff --git a/neutron/locale/ja/LC_MESSAGES/neutron-log-info.po b/neutron/locale/ja/LC_MESSAGES/neutron-log-info.po deleted file mode 100644 index dd818e5b59e..00000000000 --- a/neutron/locale/ja/LC_MESSAGES/neutron-log-info.po +++ /dev/null @@ -1,944 +0,0 @@ -# Translations template for neutron. -# Copyright (C) 2015 ORGANIZATION -# This file is distributed under the same license as the neutron project. 
-# -# Translators: -# Sasuke(Kyohei MORIYAMA) <>, 2014 -msgid "" -msgstr "" -"Project-Id-Version: Neutron\n" -"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2015-04-03 06:13+0000\n" -"PO-Revision-Date: 2015-03-31 22:26+0000\n" -"Last-Translator: openstackjenkins \n" -"Language-Team: Japanese (http://www.transifex.com/projects/p/neutron/" -"language/ja/)\n" -"Language: ja\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"Generated-By: Babel 1.3\n" -"Plural-Forms: nplurals=1; plural=0;\n" - -#: neutron/manager.py:115 -#, python-format -msgid "Loading core plugin: %s" -msgstr "" - -#: neutron/manager.py:155 -#, python-format -msgid "Service %s is supported by the core plugin" -msgstr "" - -#: neutron/manager.py:173 -#, python-format -msgid "Loading Plugin: %s" -msgstr "プラグインの読み込み中: %s" - -#: neutron/policy.py:114 -#, python-format -msgid "" -"Inserting policy:%(new_policy)s in place of deprecated policy:%(old_policy)s" -msgstr "" - -#: neutron/quota.py:215 -msgid "" -"ConfDriver is used as quota_driver because the loaded plugin does not " -"support 'quotas' table." -msgstr "" - -#: neutron/quota.py:220 -#, python-format -msgid "Loaded quota_driver: %s." -msgstr "" - -#: neutron/service.py:178 -#, python-format -msgid "Neutron service started, listening on %(host)s:%(port)s" -msgstr "" - -#: neutron/wsgi.py:779 -#, python-format -msgid "%(method)s %(url)s" -msgstr "%(method)s %(url)s" - -#: neutron/wsgi.py:796 -#, python-format -msgid "HTTP exception thrown: %s" -msgstr "HTTP 例外がスローされました: %s" - -#: neutron/wsgi.py:812 -#, python-format -msgid "%(url)s returned with HTTP %(status)d" -msgstr "HTTP %(status)d の %(url)s が返されました" - -#: neutron/wsgi.py:815 -#, python-format -msgid "%(url)s returned a fault: %(exception)s" -msgstr "%(url)s が障害を返しました: %(exception)s" - -#: neutron/agent/securitygroups_rpc.py:82 -msgid "Disabled security-group extension." 
-msgstr "security-group 拡張を無効にしました。" - -#: neutron/agent/securitygroups_rpc.py:84 -msgid "Disabled allowed-address-pairs extension." -msgstr "" - -#: neutron/agent/securitygroups_rpc.py:214 -#, python-format -msgid "" -"Skipping method %s as firewall is disabled or configured as " -"NoopFirewallDriver." -msgstr "" - -#: neutron/agent/securitygroups_rpc.py:226 -#, python-format -msgid "Preparing filters for devices %s" -msgstr "デバイス %s のフィルターを準備中" - -#: neutron/agent/securitygroups_rpc.py:256 -#, python-format -msgid "Security group rule updated %r" -msgstr "セキュリティー・グループ・ルールが %r を更新しました" - -#: neutron/agent/securitygroups_rpc.py:263 -#, python-format -msgid "Security group member updated %r" -msgstr "セキュリティー・グループ・メンバーが %r を更新しました" - -#: neutron/agent/securitygroups_rpc.py:285 -msgid "Provider rule updated" -msgstr "プロバイダー・ルールが更新されました" - -#: neutron/agent/securitygroups_rpc.py:297 -#, python-format -msgid "Remove device filter for %r" -msgstr "%r のデバイス・フィルターを削除" - -#: neutron/agent/securitygroups_rpc.py:307 -msgid "Refresh firewall rules" -msgstr "ファイアウォール・ルールの最新表示" - -#: neutron/agent/securitygroups_rpc.py:311 -msgid "No ports here to refresh firewall" -msgstr "" - -#: neutron/agent/common/ovs_lib.py:393 -#, python-format -msgid "Port %(port_id)s not present in bridge %(br_name)s" -msgstr "" - -#: neutron/agent/dhcp/agent.py:93 neutron/agent/dhcp/agent.py:589 -msgid "DHCP agent started" -msgstr "DHCP エージェントが始動しました" - -#: neutron/agent/dhcp/agent.py:144 -msgid "Synchronizing state" -msgstr "状態の同期中" - -#: neutron/agent/dhcp/agent.py:165 -msgid "Synchronizing state complete" -msgstr "" - -#: neutron/agent/dhcp/agent.py:586 neutron/agent/l3/agent.py:622 -#: neutron/services/metering/agents/metering_agent.py:286 -#, python-format -msgid "agent_updated by server side %s!" -msgstr "サーバー・サイド %s による agent_updated!" 
- -#: neutron/agent/l3/agent.py:551 -msgid "L3 agent started" -msgstr "L3 エージェントが始動しました" - -#: neutron/agent/l3/ha.py:114 -#, python-format -msgid "Router %(router_id)s transitioned to %(state)s" -msgstr "" - -#: neutron/agent/l3/ha.py:124 -#, python-format -msgid "" -"Router %s is not managed by this agent. It was possibly deleted concurrently." -msgstr "" - -#: neutron/agent/linux/daemon.py:102 -#, python-format -msgid "Process runs with uid/gid: %(uid)s/%(gid)s" -msgstr "" - -#: neutron/agent/linux/dhcp.py:656 -#, python-format -msgid "" -"Cannot apply dhcp option %(opt)s because it's ip_version %(version)d is not " -"in port's address IP versions" -msgstr "" - -#: neutron/agent/linux/interface.py:268 neutron/agent/linux/interface.py:319 -#: neutron/agent/linux/interface.py:377 neutron/agent/linux/interface.py:420 -#, python-format -msgid "Device %s already exists" -msgstr "デバイス %s は既に存在します" - -#: neutron/agent/linux/iptables_firewall.py:114 -#, python-format -msgid "Attempted to update port filter which is not filtered %s" -msgstr "フィルター処理されていないポート・フィルター %s を更新しようとしました" - -#: neutron/agent/linux/iptables_firewall.py:125 -#, python-format -msgid "Attempted to remove port filter which is not filtered %r" -msgstr "フィルター処理されていないポート・フィルター %r を削除しようとしました" - -#: neutron/api/extensions.py:404 -msgid "Initializing extension manager." 
-msgstr "拡張マネージャーを初期化しています。" - -#: neutron/api/extensions.py:562 -#, python-format -msgid "Loaded extension: %s" -msgstr "拡張をロードしました: %s" - -#: neutron/api/v2/base.py:93 -msgid "" -"Allow sorting is enabled because native pagination requires native sorting" -msgstr "" -"ネイティブ・ページ編集にはネイティブ・ソートが必要なため、ソートの許可が有効" -"になっています" - -#: neutron/api/v2/resource.py:94 -#, python-format -msgid "%(action)s failed (client error): %(exc)s" -msgstr "" - -#: neutron/callbacks/manager.py:135 -#, python-format -msgid "Notify callbacks for %(resource)s, %(event)s" -msgstr "" - -#: neutron/callbacks/manager.py:142 -#, python-format -msgid "Calling callback %s" -msgstr "" - -#: neutron/cmd/ovs_cleanup.py:73 -#, python-format -msgid "Deleting port: %s" -msgstr "" - -#: neutron/cmd/ovs_cleanup.py:103 -#, python-format -msgid "Cleaning bridge: %s" -msgstr "" - -#: neutron/cmd/ovs_cleanup.py:110 -msgid "OVS cleanup completed successfully" -msgstr "OVS のクリーンアップが正常に完了しました" - -#: neutron/cmd/eventlet/plugins/hyperv_neutron_agent.py:43 -#: neutron/plugins/ibm/agent/sdnve_neutron_agent.py:261 -#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:1017 -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1634 -#: neutron/plugins/sriovnicagent/sriov_nic_agent.py:345 -msgid "Agent initialized successfully, now running... " -msgstr "エージェントが正常に初期化されました。現在実行中です... " - -#: neutron/common/config.py:204 -msgid "Logging enabled!" -msgstr "ロギングは有効です" - -#: neutron/common/config.py:205 -#, python-format -msgid "%(prog)s version %(version)s" -msgstr "" - -#: neutron/common/config.py:224 -#, python-format -msgid "Config paste file: %s" -msgstr "構成貼り付けファイル: %s" - -#: neutron/common/ipv6_utils.py:63 -msgid "IPv6 is not enabled on this system." -msgstr " このシステムでは、 IPv6が有効ではありません。" - -#: neutron/db/agentschedulers_db.py:161 -msgid "" -"Skipping periodic DHCP agent status check because automatic network " -"rescheduling is disabled." 
-msgstr "" - -#: neutron/db/agentschedulers_db.py:196 -#, python-format -msgid "Scheduling unhosted network %s" -msgstr "" - -#: neutron/db/agentschedulers_db.py:203 -#, python-format -msgid "" -"Failed to schedule network %s, no eligible agents or it might be already " -"scheduled by another server" -msgstr "" - -#: neutron/db/agentschedulers_db.py:211 -#, python-format -msgid "Adding network %(net)s to agent %(agent)%s on host %(host)s" -msgstr "" - -#: neutron/db/db_base_plugin_v2.py:630 -#, python-format -msgid "" -"Validation for CIDR: %(new_cidr)s failed - overlaps with subnet " -"%(subnet_id)s (CIDR: %(cidr)s)" -msgstr "" -"CIDR %(new_cidr)s の検証が失敗しました。サブネット %(subnet_id)s (CIDR: " -"%(cidr)s) とオーバーラップしています" - -#: neutron/db/db_base_plugin_v2.py:657 -#, python-format -msgid "Found invalid IP address in pool: %(start)s - %(end)s:" -msgstr "プールで無効な IP アドレスが見つかりました: %(start)s から %(end)s:" - -#: neutron/db/db_base_plugin_v2.py:664 -msgid "Specified IP addresses do not match the subnet IP version" -msgstr "指定された IP アドレスが、サブネット IP バージョンと一致しません" - -#: neutron/db/db_base_plugin_v2.py:668 -#, python-format -msgid "Start IP (%(start)s) is greater than end IP (%(end)s)" -msgstr "開始 IP (%(start)s) が終了 IP (%(end)s) より大きくなっています" - -#: neutron/db/db_base_plugin_v2.py:673 -#, python-format -msgid "Found pool larger than subnet CIDR:%(start)s - %(end)s" -msgstr "" -"サブネット CIDR より大きいプールが見つかりました: %(start)s から %(end)s" - -#: neutron/db/db_base_plugin_v2.py:697 -#, python-format -msgid "Found overlapping ranges: %(l_range)s and %(r_range)s" -msgstr "オーバーラップする範囲が見つかりました: %(l_range)s から %(r_range)s" - -#: neutron/db/db_base_plugin_v2.py:1507 -#, python-format -msgid "Found IP allocation %(alloc)s on subnet %(subnet)s, cannot delete" -msgstr "" - -#: neutron/db/l3_agentschedulers_db.py:78 -msgid "" -"Skipping period L3 agent status check because automatic router rescheduling " -"is disabled." 
-msgstr "" - -#: neutron/db/l3_db.py:1114 -#, python-format -msgid "Skipping port %s as no IP is configure on it" -msgstr "ポート %s には IP が構成されていないため、このポートをスキップします" - -#: neutron/db/l3_dvr_db.py:86 -#, python-format -msgid "Centralizing distributed router %s is not supported" -msgstr "" - -#: neutron/db/l3_dvr_db.py:535 -#, python-format -msgid "Agent Gateway port does not exist, so create one: %s" -msgstr "" - -#: neutron/db/l3_dvr_db.py:619 -#, python-format -msgid "SNAT interface port list does not exist, so create one: %s" -msgstr "" - -#: neutron/db/l3_dvrscheduler_db.py:312 -msgid "SNAT already bound to a service node." -msgstr "" - -#: neutron/db/l3_hamode_db.py:188 -#, python-format -msgid "" -"Attempt %(count)s to allocate a VRID in the network %(network)s for the " -"router %(router)s" -msgstr "" - -#: neutron/db/l3_hamode_db.py:271 -#, python-format -msgid "" -"Number of available agents lower than max_l3_agents_per_router. L3 agents " -"available: %s" -msgstr "" - -#: neutron/db/migration/alembic_migrations/heal_script.py:221 -#, python-format -msgid "Table %(old_t)r was renamed to %(new_t)r" -msgstr "" - -#: neutron/debug/commands.py:107 -#, python-format -msgid "%d probe(s) deleted" -msgstr "" - -#: neutron/notifiers/nova.py:266 -#, python-format -msgid "Nova event response: %s" -msgstr "" - -#: neutron/openstack/common/eventlet_backdoor.py:146 -#, python-format -msgid "Eventlet backdoor listening on %(port)s for process %(pid)d" -msgstr "Eventlet backdoorは、プロセス%(pid)dの%(port)sをリスニングしています。" - -#: neutron/openstack/common/periodic_task.py:120 -#, python-format -msgid "Skipping periodic task %(task)s because its interval is negative" -msgstr "タスクの間隔が負であるため、定期タスク %(task)s をスキップしています" - -#: neutron/openstack/common/periodic_task.py:125 -#, python-format -msgid "Skipping periodic task %(task)s because it is disabled" -msgstr "タスクが使用不可であるため、定期タスク %(task)s をスキップしています" - -#: neutron/openstack/common/service.py:173 -#, python-format -msgid "Caught %s, exiting" 
-msgstr "%s が見つかりました。終了しています" - -#: neutron/openstack/common/service.py:231 -msgid "Parent process has died unexpectedly, exiting" -msgstr "親プロセスが予期せずに停止しました。終了しています" - -#: neutron/openstack/common/service.py:262 -#, python-format -msgid "Child caught %s, exiting" -msgstr "" - -#: neutron/openstack/common/service.py:301 -msgid "Forking too fast, sleeping" -msgstr "fork が早すぎます。スリープ状態にしています" - -#: neutron/openstack/common/service.py:320 -#, python-format -msgid "Started child %d" -msgstr "子 %d を開始しました" - -#: neutron/openstack/common/service.py:330 -#, python-format -msgid "Starting %d workers" -msgstr "%d ワーカーを開始しています" - -#: neutron/openstack/common/service.py:347 -#, python-format -msgid "Child %(pid)d killed by signal %(sig)d" -msgstr "子 %(pid)d がシグナル %(sig)d によって強制終了されました" - -#: neutron/openstack/common/service.py:351 -#, python-format -msgid "Child %(pid)s exited with status %(code)d" -msgstr "子 %(pid)s が状況 %(code)d で終了しました" - -#: neutron/openstack/common/service.py:390 -#, python-format -msgid "Caught %s, stopping children" -msgstr "%s が見つかりました。子を停止しています" - -#: neutron/openstack/common/service.py:399 -msgid "Wait called after thread killed. Cleaning up." 
-msgstr "" - -#: neutron/openstack/common/service.py:415 -#, python-format -msgid "Waiting on %d children to exit" -msgstr "%d 個の子で終了を待機しています" - -#: neutron/plugins/brocade/NeutronPlugin.py:307 -#, python-format -msgid "Allocated vlan (%d) from the pool" -msgstr "プールからの割り振り済み VLAN (%d)" - -#: neutron/plugins/cisco/models/virt_phy_sw_v2.py:117 -#, python-format -msgid "No %s Plugin loaded" -msgstr "%s プラグインはロードされませんでした" - -#: neutron/plugins/cisco/models/virt_phy_sw_v2.py:118 -#, python-format -msgid "%(plugin_key)s: %(function_name)s with args %(args)s ignored" -msgstr "" -"%(plugin_key)s: 引数 %(args)s が指定された %(function_name)s は無視されます" - -#: neutron/plugins/embrane/common/utils.py:44 -msgid "No ip allocation set" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api.py:77 -#, python-format -msgid "The IP addr of available SDN-VE controllers: %s" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api.py:80 -#, python-format -msgid "The SDN-VE controller IP address: %s" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api.py:236 -msgid "Bad resource for forming a list request" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api.py:246 -msgid "Bad resource for forming a show request" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api.py:256 -msgid "Bad resource for forming a create request" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api.py:268 -msgid "Bad resource for forming a update request" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api.py:279 -msgid "Bad resource for forming a delete request" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api.py:307 -#, python-format -msgid "Non matching tenant and network types: %(ttype)s %(ntype)s" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api_fake.py:32 -msgid "Fake SDNVE controller initialized" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api_fake.py:35 -msgid "Fake SDNVE controller: list" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api_fake.py:39 -msgid "Fake SDNVE controller: show" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api_fake.py:43 -msgid "Fake SDNVE 
controller: create" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api_fake.py:47 -msgid "Fake SDNVE controller: update" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api_fake.py:51 -msgid "Fake SDNVE controller: delete" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api_fake.py:55 -msgid "Fake SDNVE controller: get tenant by id" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api_fake.py:59 -msgid "Fake SDNVE controller: check and create tenant" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api_fake.py:63 -msgid "Fake SDNVE controller: get controller" -msgstr "" - -#: neutron/plugins/ibm/sdnve_neutron_plugin.py:147 -msgid "Set a new controller if needed." -msgstr "" - -#: neutron/plugins/ibm/sdnve_neutron_plugin.py:153 -#, python-format -msgid "Set the controller to a new controller: %s" -msgstr "" - -#: neutron/plugins/ibm/agent/sdnve_neutron_agent.py:189 -#, python-format -msgid "" -"Mapping physical network %(physical_network)s to interface %(interface)s" -msgstr "" - -#: neutron/plugins/ibm/agent/sdnve_neutron_agent.py:220 -#, python-format -msgid "" -"Loop iteration exceeded interval (%(polling_interval)s vs. %(elapsed)s)!" -msgstr "" -"ループ反復が間隔を超えました (%(polling_interval)s に対して %(elapsed)s)。" - -#: neutron/plugins/ibm/agent/sdnve_neutron_agent.py:234 -#, python-format -msgid "Controller IPs: %s" -msgstr "" - -#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:793 -#: neutron/plugins/oneconvergence/agent/nvsd_neutron_agent.py:89 -#: neutron/plugins/sriovnicagent/sriov_nic_agent.py:99 -#, python-format -msgid "RPC agent_id: %s" -msgstr "RPC agent_id: %s" - -#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:863 -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1155 -#: neutron/plugins/sriovnicagent/sriov_nic_agent.py:209 -#, python-format -msgid "Port %(device)s updated. 
Details: %(details)s" -msgstr "ポート %(device)s が更新されました。詳細: %(details)s" - -#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:896 -#, python-format -msgid "Device %s not defined on plugin" -msgstr "デバイス %s がプラグインで定義されていません" - -#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:903 -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1210 -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1227 -#, python-format -msgid "Attachment %s removed" -msgstr "接続機構 %s が削除されました" - -#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:915 -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1239 -#: neutron/plugins/sriovnicagent/sriov_nic_agent.py:235 -#, python-format -msgid "Port %s updated." -msgstr "ポート %s が更新されました。" - -#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:968 -msgid "LinuxBridge Agent RPC Daemon Started!" -msgstr "LinuxBridge Agent RPC デーモンが開始しました。" - -#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:978 -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1429 -#: neutron/plugins/sriovnicagent/sriov_nic_agent.py:251 -msgid "Agent out of sync with plugin!" 
-msgstr "エージェントがプラグインと非同期です。" - -#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:1012 -#: neutron/plugins/ml2/drivers/mlnx/agent/eswitch_neutron_agent.py:43 -#, python-format -msgid "Interface mappings: %s" -msgstr "インターフェース・マッピング: %s" - -#: neutron/plugins/ml2/db.py:60 -#, python-format -msgid "" -"Added segment %(id)s of type %(network_type)s for network %(network_id)s" -msgstr "" - -#: neutron/plugins/ml2/managers.py:43 -#, python-format -msgid "Configured type driver names: %s" -msgstr "" - -#: neutron/plugins/ml2/managers.py:48 -#, python-format -msgid "Loaded type driver names: %s" -msgstr "" - -#: neutron/plugins/ml2/managers.py:64 -#, python-format -msgid "Registered types: %s" -msgstr "" - -#: neutron/plugins/ml2/managers.py:75 -#, python-format -msgid "Tenant network_types: %s" -msgstr "" - -#: neutron/plugins/ml2/managers.py:161 -#, python-format -msgid "Initializing driver for type '%s'" -msgstr "" - -#: neutron/plugins/ml2/managers.py:270 -#, python-format -msgid "Configured mechanism driver names: %s" -msgstr "" - -#: neutron/plugins/ml2/managers.py:276 -#, python-format -msgid "Loaded mechanism driver names: %s" -msgstr "" - -#: neutron/plugins/ml2/managers.py:288 -#, python-format -msgid "Registered mechanism drivers: %s" -msgstr "" - -#: neutron/plugins/ml2/managers.py:293 -#, python-format -msgid "Initializing mechanism driver '%s'" -msgstr "" - -#: neutron/plugins/ml2/managers.py:699 -#, python-format -msgid "Configured extension driver names: %s" -msgstr "" - -#: neutron/plugins/ml2/managers.py:705 -#, python-format -msgid "Loaded extension driver names: %s" -msgstr "" - -#: neutron/plugins/ml2/managers.py:716 -#, python-format -msgid "Registered extension drivers: %s" -msgstr "" - -#: neutron/plugins/ml2/managers.py:722 -#, python-format -msgid "Initializing extension driver '%s'" -msgstr "" - -#: neutron/plugins/ml2/managers.py:730 -#, python-format -msgid "Got %(alias)s extension from driver '%(drv)s'" -msgstr "" - -#: 
neutron/plugins/ml2/managers.py:779 -#, python-format -msgid "Extended network dict for driver '%(drv)s'" -msgstr "" - -#: neutron/plugins/ml2/managers.py:786 -#, python-format -msgid "Extended subnet dict for driver '%(drv)s'" -msgstr "" - -#: neutron/plugins/ml2/managers.py:793 -#, python-format -msgid "Extended port dict for driver '%(drv)s'" -msgstr "" - -#: neutron/plugins/ml2/plugin.py:144 -msgid "Modular L2 Plugin initialization complete" -msgstr "" - -#: neutron/plugins/ml2/plugin.py:281 -#, python-format -msgid "Attempt %(count)s to bind port %(port)s" -msgstr "" - -#: neutron/plugins/ml2/plugin.py:678 -#, python-format -msgid "Port %s was deleted concurrently" -msgstr "" - -#: neutron/plugins/ml2/plugin.py:689 -#, python-format -msgid "Subnet %s was deleted concurrently" -msgstr "" - -#: neutron/plugins/ml2/plugin.py:1329 -#, python-format -msgid "" -"Binding info for port %s was not found, it might have been deleted already." -msgstr "" - -#: neutron/plugins/ml2/drivers/type_flat.py:72 -msgid "Arbitrary flat physical_network names allowed" -msgstr "" - -#: neutron/plugins/ml2/drivers/type_flat.py:78 -#, python-format -msgid "Allowable flat physical_network names: %s" -msgstr "" - -#: neutron/plugins/ml2/drivers/type_flat.py:85 -msgid "ML2 FlatTypeDriver initialization complete" -msgstr "" - -#: neutron/plugins/ml2/drivers/type_local.py:37 -msgid "ML2 LocalTypeDriver initialization complete" -msgstr "" - -#: neutron/plugins/ml2/drivers/type_tunnel.py:116 -#, python-format -msgid "%(type)s ID ranges: %(range)s" -msgstr "" - -#: neutron/plugins/ml2/drivers/type_vlan.py:99 -#, python-format -msgid "Network VLAN ranges: %s" -msgstr "ネットワーク VLAN の範囲: %s" - -#: neutron/plugins/ml2/drivers/type_vlan.py:166 -msgid "VlanTypeDriver initialization complete" -msgstr "" - -#: neutron/plugins/ml2/drivers/arista/mechanism_arista.py:104 -#, python-format -msgid "Network %s is not created as it is not found in Arista DB" -msgstr "" - -#: 
neutron/plugins/ml2/drivers/arista/mechanism_arista.py:117 -#, python-format -msgid "Network name changed to %s" -msgstr "" - -#: neutron/plugins/ml2/drivers/arista/mechanism_arista.py:147 -#, python-format -msgid "Network %s is not updated as it is not found in Arista DB" -msgstr "" - -#: neutron/plugins/ml2/drivers/arista/mechanism_arista.py:243 -#, python-format -msgid "VM %s is not created as it is not found in Arista DB" -msgstr "" - -#: neutron/plugins/ml2/drivers/arista/mechanism_arista.py:257 -#, python-format -msgid "Port name changed to %s" -msgstr "" - -#: neutron/plugins/ml2/drivers/arista/mechanism_arista.py:310 -#, python-format -msgid "VM %s is not updated as it is not found in Arista DB" -msgstr "" - -#: neutron/plugins/ml2/drivers/cisco/apic/apic_topology.py:78 -msgid "APIC service agent starting ..." -msgstr "" - -#: neutron/plugins/ml2/drivers/cisco/apic/apic_topology.py:95 -msgid "APIC service agent started" -msgstr "" - -#: neutron/plugins/ml2/drivers/cisco/apic/apic_topology.py:179 -#, python-format -msgid "APIC host agent: agent starting on %s" -msgstr "" - -#: neutron/plugins/ml2/drivers/cisco/apic/apic_topology.py:199 -#, python-format -msgid "APIC host agent: started on %s" -msgstr "" - -#: neutron/plugins/ml2/drivers/freescale/mechanism_fslsdn.py:40 -msgid "Initializing CRD client... " -msgstr "" - -#: neutron/plugins/ml2/drivers/mlnx/agent/eswitch_neutron_agent.py:54 -msgid "Agent initialised successfully, now running... " -msgstr "" - -#: neutron/plugins/ml2/extensions/port_security.py:33 -msgid "PortSecurityExtensionDriver initialization complete" -msgstr "" - -#: neutron/plugins/oneconvergence/agent/nvsd_neutron_agent.py:153 -msgid "NVSD Agent initialized successfully, now running... 
" -msgstr "" - -#: neutron/plugins/openvswitch/agent/ovs_dvr_neutron_agent.py:204 -#, python-format -msgid "L2 Agent operating in DVR Mode with MAC %s" -msgstr "" - -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:518 -#, python-format -msgid "Assigning %(vlan_id)s as local vlan for net-id=%(net_uuid)s" -msgstr "" -"%(vlan_id)s を net-id=%(net_uuid)s のローカル VLAN として割り当てています" - -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:619 -#, python-format -msgid "Reclaiming vlan = %(vlan_id)s from net-id = %(net_uuid)s" -msgstr "VLAN = %(vlan_id)s を net-id = %(net_uuid)s から再利用中" - -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:714 -#, python-format -msgid "port_unbound(): net_uuid %s not in local_vlan_map" -msgstr "" - -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:783 -#, python-format -msgid "Adding %s to list of bridges." -msgstr "" - -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:926 -#, python-format -msgid "Mapping physical network %(physical_network)s to bridge %(bridge)s" -msgstr "" -"物理ネットワーク %(physical_network)s をブリッジ %(bridge)s にマップしていま" -"す" - -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1035 -#, python-format -msgid "Port '%(port_name)s' has lost its vlan tag '%(vlan_tag)d'!" -msgstr "" - -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1149 -#, python-format -msgid "" -"Port %s was not found on the integration bridge and will therefore not be " -"processed" -msgstr "" - -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1179 -#, python-format -msgid "Configuration for device %s completed." -msgstr "" - -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1198 -#, python-format -msgid "Ancillary Port %s added" -msgstr "" - -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1457 -msgid "Agent tunnel out of sync with plugin!" 
-msgstr "エージェント・トンネルがプラグインと非同期です" - -#: neutron/plugins/sriovnicagent/sriov_nic_agent.py:191 -#, python-format -msgid "No device with MAC %s defined on agent." -msgstr "" - -#: neutron/plugins/sriovnicagent/sriov_nic_agent.py:216 -#, python-format -msgid "Device with MAC %s not defined on plugin" -msgstr "" - -#: neutron/plugins/sriovnicagent/sriov_nic_agent.py:223 -#, python-format -msgid "Removing device with mac_address %s" -msgstr "" - -#: neutron/plugins/sriovnicagent/sriov_nic_agent.py:244 -msgid "SRIOV NIC Agent RPC Daemon Started!" -msgstr "" - -#: neutron/plugins/sriovnicagent/sriov_nic_agent.py:333 -#, python-format -msgid "Physical Devices mappings: %s" -msgstr "" - -#: neutron/plugins/sriovnicagent/sriov_nic_agent.py:334 -#, python-format -msgid "Exclude Devices: %s" -msgstr "" - -#: neutron/scheduler/dhcp_agent_scheduler.py:110 -#, python-format -msgid "Agent %s already present" -msgstr "" - -#: neutron/server/__init__.py:50 -msgid "RPC was already started in parent process by plugin." -msgstr "" - -#: neutron/services/service_base.py:99 -#, python-format -msgid "Default provider is not specified for service type %s" -msgstr "" - -#: neutron/services/l3_router/l3_arista.py:247 -msgid "Syncing Neutron Router DB <-> EOS" -msgstr "" - -#: neutron/services/metering/agents/metering_agent.py:96 -#, python-format -msgid "Loading Metering driver %s" -msgstr "" - -#: neutron/services/metering/drivers/iptables/iptables_driver.py:89 -#, python-format -msgid "Loading interface driver %s" -msgstr "" diff --git a/neutron/locale/ko_KR/LC_MESSAGES/neutron-log-info.po b/neutron/locale/ko_KR/LC_MESSAGES/neutron-log-info.po deleted file mode 100644 index ca98207a36c..00000000000 --- a/neutron/locale/ko_KR/LC_MESSAGES/neutron-log-info.po +++ /dev/null @@ -1,937 +0,0 @@ -# Translations template for neutron. -# Copyright (C) 2015 ORGANIZATION -# This file is distributed under the same license as the neutron project. 
-# -# Translators: -msgid "" -msgstr "" -"Project-Id-Version: Neutron\n" -"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2015-04-03 06:13+0000\n" -"PO-Revision-Date: 2015-03-31 22:26+0000\n" -"Last-Translator: openstackjenkins \n" -"Language-Team: Korean (Korea) (http://www.transifex.com/projects/p/neutron/" -"language/ko_KR/)\n" -"Language: ko_KR\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"Generated-By: Babel 1.3\n" -"Plural-Forms: nplurals=1; plural=0;\n" - -#: neutron/manager.py:115 -#, python-format -msgid "Loading core plugin: %s" -msgstr "" - -#: neutron/manager.py:155 -#, python-format -msgid "Service %s is supported by the core plugin" -msgstr "" - -#: neutron/manager.py:173 -#, python-format -msgid "Loading Plugin: %s" -msgstr "로딩 플러그인: %s" - -#: neutron/policy.py:114 -#, python-format -msgid "" -"Inserting policy:%(new_policy)s in place of deprecated policy:%(old_policy)s" -msgstr "" - -#: neutron/quota.py:215 -msgid "" -"ConfDriver is used as quota_driver because the loaded plugin does not " -"support 'quotas' table." -msgstr "" - -#: neutron/quota.py:220 -#, python-format -msgid "Loaded quota_driver: %s." -msgstr "" - -#: neutron/service.py:178 -#, python-format -msgid "Neutron service started, listening on %(host)s:%(port)s" -msgstr "" - -#: neutron/wsgi.py:779 -#, python-format -msgid "%(method)s %(url)s" -msgstr "%(method)s %(url)s" - -#: neutron/wsgi.py:796 -#, python-format -msgid "HTTP exception thrown: %s" -msgstr "HTTP 예외 처리: %s" - -#: neutron/wsgi.py:812 -#, python-format -msgid "%(url)s returned with HTTP %(status)d" -msgstr "%(url)s이(가) HTTP %(status)d(으)로 리턴되었음" - -#: neutron/wsgi.py:815 -#, python-format -msgid "%(url)s returned a fault: %(exception)s" -msgstr "%(url)s이(가) 결함을 리턴함: %(exception)s" - -#: neutron/agent/securitygroups_rpc.py:82 -msgid "Disabled security-group extension." -msgstr "보안 그룹 확장을 사용하지 않습니다. 
" - -#: neutron/agent/securitygroups_rpc.py:84 -msgid "Disabled allowed-address-pairs extension." -msgstr "" - -#: neutron/agent/securitygroups_rpc.py:214 -#, python-format -msgid "" -"Skipping method %s as firewall is disabled or configured as " -"NoopFirewallDriver." -msgstr "" - -#: neutron/agent/securitygroups_rpc.py:226 -#, python-format -msgid "Preparing filters for devices %s" -msgstr "%s 디바이스에 대한 필터 준비" - -#: neutron/agent/securitygroups_rpc.py:256 -#, python-format -msgid "Security group rule updated %r" -msgstr "보안 그룹 규칙이 %r을(를) 업데이트함" - -#: neutron/agent/securitygroups_rpc.py:263 -#, python-format -msgid "Security group member updated %r" -msgstr "보안 그룹 멤버가 %r을(를) 업데이트함" - -#: neutron/agent/securitygroups_rpc.py:285 -msgid "Provider rule updated" -msgstr "제공자 규칙이 업데이트됨" - -#: neutron/agent/securitygroups_rpc.py:297 -#, python-format -msgid "Remove device filter for %r" -msgstr "%r의 디바이스 필터 제거" - -#: neutron/agent/securitygroups_rpc.py:307 -msgid "Refresh firewall rules" -msgstr "방화벽 규칙 새로 고치기" - -#: neutron/agent/securitygroups_rpc.py:311 -msgid "No ports here to refresh firewall" -msgstr "" - -#: neutron/agent/common/ovs_lib.py:393 -#, python-format -msgid "Port %(port_id)s not present in bridge %(br_name)s" -msgstr "" - -#: neutron/agent/dhcp/agent.py:93 neutron/agent/dhcp/agent.py:589 -msgid "DHCP agent started" -msgstr "DHCP 에이전트가 시작됨" - -#: neutron/agent/dhcp/agent.py:144 -msgid "Synchronizing state" -msgstr "상태 동기화 중" - -#: neutron/agent/dhcp/agent.py:165 -msgid "Synchronizing state complete" -msgstr "" - -#: neutron/agent/dhcp/agent.py:586 neutron/agent/l3/agent.py:622 -#: neutron/services/metering/agents/metering_agent.py:286 -#, python-format -msgid "agent_updated by server side %s!" 
-msgstr "서버측 %s!에 의한 agent_updated" - -#: neutron/agent/l3/agent.py:551 -msgid "L3 agent started" -msgstr "L3 에이전트가 시작됨" - -#: neutron/agent/l3/ha.py:114 -#, python-format -msgid "Router %(router_id)s transitioned to %(state)s" -msgstr "" - -#: neutron/agent/l3/ha.py:124 -#, python-format -msgid "" -"Router %s is not managed by this agent. It was possibly deleted concurrently." -msgstr "" - -#: neutron/agent/linux/daemon.py:102 -#, python-format -msgid "Process runs with uid/gid: %(uid)s/%(gid)s" -msgstr "" - -#: neutron/agent/linux/dhcp.py:656 -#, python-format -msgid "" -"Cannot apply dhcp option %(opt)s because it's ip_version %(version)d is not " -"in port's address IP versions" -msgstr "" - -#: neutron/agent/linux/interface.py:268 neutron/agent/linux/interface.py:319 -#: neutron/agent/linux/interface.py:377 neutron/agent/linux/interface.py:420 -#, python-format -msgid "Device %s already exists" -msgstr "%s 디바이스가 이미 존재함" - -#: neutron/agent/linux/iptables_firewall.py:114 -#, python-format -msgid "Attempted to update port filter which is not filtered %s" -msgstr "필터링된 %s이(가) 아닌 포트 필터를 업데이트하려고 시도함" - -#: neutron/agent/linux/iptables_firewall.py:125 -#, python-format -msgid "Attempted to remove port filter which is not filtered %r" -msgstr "필터링된 %r이(가) 아닌 포트 필터를 제거하려고 시도함" - -#: neutron/api/extensions.py:404 -msgid "Initializing extension manager." -msgstr "확장기능 관리자를 초기화 중입니다. 
" - -#: neutron/api/extensions.py:562 -#, python-format -msgid "Loaded extension: %s" -msgstr "로드된 확장: %s" - -#: neutron/api/v2/base.py:93 -msgid "" -"Allow sorting is enabled because native pagination requires native sorting" -msgstr "" -"네이티브 페이지 번호 매기기에 네이티브 정렬이 필요하므로 정렬을 사용할 수 있" -"음" - -#: neutron/api/v2/resource.py:94 -#, python-format -msgid "%(action)s failed (client error): %(exc)s" -msgstr "" - -#: neutron/callbacks/manager.py:135 -#, python-format -msgid "Notify callbacks for %(resource)s, %(event)s" -msgstr "" - -#: neutron/callbacks/manager.py:142 -#, python-format -msgid "Calling callback %s" -msgstr "" - -#: neutron/cmd/ovs_cleanup.py:73 -#, python-format -msgid "Deleting port: %s" -msgstr "" - -#: neutron/cmd/ovs_cleanup.py:103 -#, python-format -msgid "Cleaning bridge: %s" -msgstr "" - -#: neutron/cmd/ovs_cleanup.py:110 -msgid "OVS cleanup completed successfully" -msgstr "OVS 정리가 완료됨" - -#: neutron/cmd/eventlet/plugins/hyperv_neutron_agent.py:43 -#: neutron/plugins/ibm/agent/sdnve_neutron_agent.py:261 -#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:1017 -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1634 -#: neutron/plugins/sriovnicagent/sriov_nic_agent.py:345 -msgid "Agent initialized successfully, now running... " -msgstr "에이전트가 초기화되었으며, 지금 실행 중... " - -#: neutron/common/config.py:204 -msgid "Logging enabled!" -msgstr "로깅 사용!" - -#: neutron/common/config.py:205 -#, python-format -msgid "%(prog)s version %(version)s" -msgstr "" - -#: neutron/common/config.py:224 -#, python-format -msgid "Config paste file: %s" -msgstr "구성 붙여넣기 파일: %s" - -#: neutron/common/ipv6_utils.py:63 -msgid "IPv6 is not enabled on this system." -msgstr "" - -#: neutron/db/agentschedulers_db.py:161 -msgid "" -"Skipping periodic DHCP agent status check because automatic network " -"rescheduling is disabled." 
-msgstr "" - -#: neutron/db/agentschedulers_db.py:196 -#, python-format -msgid "Scheduling unhosted network %s" -msgstr "" - -#: neutron/db/agentschedulers_db.py:203 -#, python-format -msgid "" -"Failed to schedule network %s, no eligible agents or it might be already " -"scheduled by another server" -msgstr "" - -#: neutron/db/agentschedulers_db.py:211 -#, python-format -msgid "Adding network %(net)s to agent %(agent)%s on host %(host)s" -msgstr "" - -#: neutron/db/db_base_plugin_v2.py:630 -#, python-format -msgid "" -"Validation for CIDR: %(new_cidr)s failed - overlaps with subnet " -"%(subnet_id)s (CIDR: %(cidr)s)" -msgstr "" -"CIDR %(new_cidr)s 유효성 검증 실패 - 서브넷 %(subnet_id)s(CIDR: %(cidr)s)과" -"(와) 겹침" - -#: neutron/db/db_base_plugin_v2.py:657 -#, python-format -msgid "Found invalid IP address in pool: %(start)s - %(end)s:" -msgstr "풀에서 올바르지 않은 IP 주소 발견: %(start)s - %(end)s:" - -#: neutron/db/db_base_plugin_v2.py:664 -msgid "Specified IP addresses do not match the subnet IP version" -msgstr "지정된 IP 주소가 서브넷 IP 버전과 일치하지 않음" - -#: neutron/db/db_base_plugin_v2.py:668 -#, python-format -msgid "Start IP (%(start)s) is greater than end IP (%(end)s)" -msgstr "시작 IP(%(start)s)가 끝 IP(%(end)s)보다 큼" - -#: neutron/db/db_base_plugin_v2.py:673 -#, python-format -msgid "Found pool larger than subnet CIDR:%(start)s - %(end)s" -msgstr "서브넷 CIDR보다 큰 풀 발견: %(start)s - %(end)s" - -#: neutron/db/db_base_plugin_v2.py:697 -#, python-format -msgid "Found overlapping ranges: %(l_range)s and %(r_range)s" -msgstr "겹치는 범위 발견: %(l_range)s 및 %(r_range)s" - -#: neutron/db/db_base_plugin_v2.py:1507 -#, python-format -msgid "Found IP allocation %(alloc)s on subnet %(subnet)s, cannot delete" -msgstr "" - -#: neutron/db/l3_agentschedulers_db.py:78 -msgid "" -"Skipping period L3 agent status check because automatic router rescheduling " -"is disabled." 
-msgstr "" - -#: neutron/db/l3_db.py:1114 -#, python-format -msgid "Skipping port %s as no IP is configure on it" -msgstr "구성된 IP가 없어서 포트 %s을(를) 건너뜀" - -#: neutron/db/l3_dvr_db.py:86 -#, python-format -msgid "Centralizing distributed router %s is not supported" -msgstr "" - -#: neutron/db/l3_dvr_db.py:535 -#, python-format -msgid "Agent Gateway port does not exist, so create one: %s" -msgstr "" - -#: neutron/db/l3_dvr_db.py:619 -#, python-format -msgid "SNAT interface port list does not exist, so create one: %s" -msgstr "" - -#: neutron/db/l3_dvrscheduler_db.py:312 -msgid "SNAT already bound to a service node." -msgstr "" - -#: neutron/db/l3_hamode_db.py:188 -#, python-format -msgid "" -"Attempt %(count)s to allocate a VRID in the network %(network)s for the " -"router %(router)s" -msgstr "" - -#: neutron/db/l3_hamode_db.py:271 -#, python-format -msgid "" -"Number of available agents lower than max_l3_agents_per_router. L3 agents " -"available: %s" -msgstr "" - -#: neutron/db/migration/alembic_migrations/heal_script.py:221 -#, python-format -msgid "Table %(old_t)r was renamed to %(new_t)r" -msgstr "" - -#: neutron/debug/commands.py:107 -#, python-format -msgid "%d probe(s) deleted" -msgstr "" - -#: neutron/notifiers/nova.py:266 -#, python-format -msgid "Nova event response: %s" -msgstr "" - -#: neutron/openstack/common/eventlet_backdoor.py:146 -#, python-format -msgid "Eventlet backdoor listening on %(port)s for process %(pid)d" -msgstr "Eventlet 백도어는 프로세스 %(pid)d 일 동안 %(port)s에서 수신" - -#: neutron/openstack/common/periodic_task.py:120 -#, python-format -msgid "Skipping periodic task %(task)s because its interval is negative" -msgstr "간격이 음수이기 때문에 주기적 태스크 %(task)s을(를) 건너뜀" - -#: neutron/openstack/common/periodic_task.py:125 -#, python-format -msgid "Skipping periodic task %(task)s because it is disabled" -msgstr "사용 안하기 때문에 주기적 태스크 %(task)s을(를) 건너뜀" - -#: neutron/openstack/common/service.py:173 -#, python-format -msgid "Caught %s, exiting" -msgstr "%s 발견, 종료 중" - 
-#: neutron/openstack/common/service.py:231 -msgid "Parent process has died unexpectedly, exiting" -msgstr "상위 프로세스가 예기치 않게 정지했습니다. 종료 중" - -#: neutron/openstack/common/service.py:262 -#, python-format -msgid "Child caught %s, exiting" -msgstr "자식으로 된 %s가 존재함." - -#: neutron/openstack/common/service.py:301 -msgid "Forking too fast, sleeping" -msgstr "포크가 너무 빠름. 정지 중" - -#: neutron/openstack/common/service.py:320 -#, python-format -msgid "Started child %d" -msgstr "%d 하위를 시작했음" - -#: neutron/openstack/common/service.py:330 -#, python-format -msgid "Starting %d workers" -msgstr "%d 작업자 시작 중" - -#: neutron/openstack/common/service.py:347 -#, python-format -msgid "Child %(pid)d killed by signal %(sig)d" -msgstr "%(pid)d 하위가 %(sig)d 신호에 의해 강제 종료됨" - -#: neutron/openstack/common/service.py:351 -#, python-format -msgid "Child %(pid)s exited with status %(code)d" -msgstr "%(pid)s 하위가 %(code)d 상태와 함께 종료했음" - -#: neutron/openstack/common/service.py:390 -#, python-format -msgid "Caught %s, stopping children" -msgstr "%s 발견, 하위 중지 중" - -#: neutron/openstack/common/service.py:399 -msgid "Wait called after thread killed. Cleaning up." 
-msgstr "" - -#: neutron/openstack/common/service.py:415 -#, python-format -msgid "Waiting on %d children to exit" -msgstr "%d 하위에서 종료하기를 대기 중임" - -#: neutron/plugins/brocade/NeutronPlugin.py:307 -#, python-format -msgid "Allocated vlan (%d) from the pool" -msgstr "풀에서 할당된 vlan(%d)" - -#: neutron/plugins/cisco/models/virt_phy_sw_v2.py:117 -#, python-format -msgid "No %s Plugin loaded" -msgstr "로드된 %s 플러그인이 없음" - -#: neutron/plugins/cisco/models/virt_phy_sw_v2.py:118 -#, python-format -msgid "%(plugin_key)s: %(function_name)s with args %(args)s ignored" -msgstr "%(plugin_key)s: %(args)s 인수를 갖는 %(function_name)s이(가) 무시됨" - -#: neutron/plugins/embrane/common/utils.py:44 -msgid "No ip allocation set" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api.py:77 -#, python-format -msgid "The IP addr of available SDN-VE controllers: %s" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api.py:80 -#, python-format -msgid "The SDN-VE controller IP address: %s" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api.py:236 -msgid "Bad resource for forming a list request" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api.py:246 -msgid "Bad resource for forming a show request" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api.py:256 -msgid "Bad resource for forming a create request" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api.py:268 -msgid "Bad resource for forming a update request" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api.py:279 -msgid "Bad resource for forming a delete request" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api.py:307 -#, python-format -msgid "Non matching tenant and network types: %(ttype)s %(ntype)s" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api_fake.py:32 -msgid "Fake SDNVE controller initialized" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api_fake.py:35 -msgid "Fake SDNVE controller: list" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api_fake.py:39 -msgid "Fake SDNVE controller: show" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api_fake.py:43 -msgid "Fake SDNVE controller: create" 
-msgstr "" - -#: neutron/plugins/ibm/sdnve_api_fake.py:47 -msgid "Fake SDNVE controller: update" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api_fake.py:51 -msgid "Fake SDNVE controller: delete" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api_fake.py:55 -msgid "Fake SDNVE controller: get tenant by id" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api_fake.py:59 -msgid "Fake SDNVE controller: check and create tenant" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api_fake.py:63 -msgid "Fake SDNVE controller: get controller" -msgstr "" - -#: neutron/plugins/ibm/sdnve_neutron_plugin.py:147 -msgid "Set a new controller if needed." -msgstr "" - -#: neutron/plugins/ibm/sdnve_neutron_plugin.py:153 -#, python-format -msgid "Set the controller to a new controller: %s" -msgstr "" - -#: neutron/plugins/ibm/agent/sdnve_neutron_agent.py:189 -#, python-format -msgid "" -"Mapping physical network %(physical_network)s to interface %(interface)s" -msgstr "" - -#: neutron/plugins/ibm/agent/sdnve_neutron_agent.py:220 -#, python-format -msgid "" -"Loop iteration exceeded interval (%(polling_interval)s vs. %(elapsed)s)!" -msgstr "루프 반복이 간격을 초과했습니다(%(polling_interval)s 대 %(elapsed)s)!" - -#: neutron/plugins/ibm/agent/sdnve_neutron_agent.py:234 -#, python-format -msgid "Controller IPs: %s" -msgstr "" - -#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:793 -#: neutron/plugins/oneconvergence/agent/nvsd_neutron_agent.py:89 -#: neutron/plugins/sriovnicagent/sriov_nic_agent.py:99 -#, python-format -msgid "RPC agent_id: %s" -msgstr "RPC agent_id: %s" - -#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:863 -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1155 -#: neutron/plugins/sriovnicagent/sriov_nic_agent.py:209 -#, python-format -msgid "Port %(device)s updated. Details: %(details)s" -msgstr "%(device)s 포트가 업데이트되었습니다. 
세부사항: %(details)s" - -#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:896 -#, python-format -msgid "Device %s not defined on plugin" -msgstr "%s 디바이스가 플러그인에서 정의되지 않음" - -#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:903 -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1210 -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1227 -#, python-format -msgid "Attachment %s removed" -msgstr "첨부 %s이(가) 제거됨" - -#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:915 -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1239 -#: neutron/plugins/sriovnicagent/sriov_nic_agent.py:235 -#, python-format -msgid "Port %s updated." -msgstr "%s 포트가 업데이트되었습니다. " - -#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:968 -msgid "LinuxBridge Agent RPC Daemon Started!" -msgstr "LinuxBridge 에이전트 RPC 디먼이 시작되었습니다!" - -#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:978 -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1429 -#: neutron/plugins/sriovnicagent/sriov_nic_agent.py:251 -msgid "Agent out of sync with plugin!" -msgstr "에이전트가 플러그인과 동기화되지 않았습니다!" 
- -#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:1012 -#: neutron/plugins/ml2/drivers/mlnx/agent/eswitch_neutron_agent.py:43 -#, python-format -msgid "Interface mappings: %s" -msgstr "인터페이스 맵핑: %s" - -#: neutron/plugins/ml2/db.py:60 -#, python-format -msgid "" -"Added segment %(id)s of type %(network_type)s for network %(network_id)s" -msgstr "" - -#: neutron/plugins/ml2/managers.py:43 -#, python-format -msgid "Configured type driver names: %s" -msgstr "형식 드라이버 이름을 설정했습니다: %s" - -#: neutron/plugins/ml2/managers.py:48 -#, python-format -msgid "Loaded type driver names: %s" -msgstr "형식 드라이버 이름을 불러왔습니다: %s" - -#: neutron/plugins/ml2/managers.py:64 -#, python-format -msgid "Registered types: %s" -msgstr "등록된 형식: %s" - -#: neutron/plugins/ml2/managers.py:75 -#, python-format -msgid "Tenant network_types: %s" -msgstr "network_types를 임대합니다: %s" - -#: neutron/plugins/ml2/managers.py:161 -#, python-format -msgid "Initializing driver for type '%s'" -msgstr "'%s' 형식 드라이버 초기화중" - -#: neutron/plugins/ml2/managers.py:270 -#, python-format -msgid "Configured mechanism driver names: %s" -msgstr "매커니즘 드라이버 이름을 설정했습니다: %s" - -#: neutron/plugins/ml2/managers.py:276 -#, python-format -msgid "Loaded mechanism driver names: %s" -msgstr "매커니즘 드라이버 이름을 불러왔습니다: %s" - -#: neutron/plugins/ml2/managers.py:288 -#, python-format -msgid "Registered mechanism drivers: %s" -msgstr "" - -#: neutron/plugins/ml2/managers.py:293 -#, python-format -msgid "Initializing mechanism driver '%s'" -msgstr "" - -#: neutron/plugins/ml2/managers.py:699 -#, python-format -msgid "Configured extension driver names: %s" -msgstr "" - -#: neutron/plugins/ml2/managers.py:705 -#, python-format -msgid "Loaded extension driver names: %s" -msgstr "" - -#: neutron/plugins/ml2/managers.py:716 -#, python-format -msgid "Registered extension drivers: %s" -msgstr "" - -#: neutron/plugins/ml2/managers.py:722 -#, python-format -msgid "Initializing extension driver '%s'" -msgstr "" - -#: 
neutron/plugins/ml2/managers.py:730 -#, python-format -msgid "Got %(alias)s extension from driver '%(drv)s'" -msgstr "" - -#: neutron/plugins/ml2/managers.py:779 -#, python-format -msgid "Extended network dict for driver '%(drv)s'" -msgstr "" - -#: neutron/plugins/ml2/managers.py:786 -#, python-format -msgid "Extended subnet dict for driver '%(drv)s'" -msgstr "" - -#: neutron/plugins/ml2/managers.py:793 -#, python-format -msgid "Extended port dict for driver '%(drv)s'" -msgstr "" - -#: neutron/plugins/ml2/plugin.py:144 -msgid "Modular L2 Plugin initialization complete" -msgstr "모듈러 L2 플러그인 초기화를 완료했습니다" - -#: neutron/plugins/ml2/plugin.py:281 -#, python-format -msgid "Attempt %(count)s to bind port %(port)s" -msgstr "" - -#: neutron/plugins/ml2/plugin.py:678 -#, python-format -msgid "Port %s was deleted concurrently" -msgstr "" - -#: neutron/plugins/ml2/plugin.py:689 -#, python-format -msgid "Subnet %s was deleted concurrently" -msgstr "" - -#: neutron/plugins/ml2/plugin.py:1329 -#, python-format -msgid "" -"Binding info for port %s was not found, it might have been deleted already." 
-msgstr "" - -#: neutron/plugins/ml2/drivers/type_flat.py:72 -msgid "Arbitrary flat physical_network names allowed" -msgstr "" - -#: neutron/plugins/ml2/drivers/type_flat.py:78 -#, python-format -msgid "Allowable flat physical_network names: %s" -msgstr "" - -#: neutron/plugins/ml2/drivers/type_flat.py:85 -msgid "ML2 FlatTypeDriver initialization complete" -msgstr "" - -#: neutron/plugins/ml2/drivers/type_local.py:37 -msgid "ML2 LocalTypeDriver initialization complete" -msgstr "" - -#: neutron/plugins/ml2/drivers/type_tunnel.py:116 -#, python-format -msgid "%(type)s ID ranges: %(range)s" -msgstr "" - -#: neutron/plugins/ml2/drivers/type_vlan.py:99 -#, python-format -msgid "Network VLAN ranges: %s" -msgstr "네트워크 VLAN 범위: %s" - -#: neutron/plugins/ml2/drivers/type_vlan.py:166 -msgid "VlanTypeDriver initialization complete" -msgstr "" - -#: neutron/plugins/ml2/drivers/arista/mechanism_arista.py:104 -#, python-format -msgid "Network %s is not created as it is not found in Arista DB" -msgstr "" - -#: neutron/plugins/ml2/drivers/arista/mechanism_arista.py:117 -#, python-format -msgid "Network name changed to %s" -msgstr "" - -#: neutron/plugins/ml2/drivers/arista/mechanism_arista.py:147 -#, python-format -msgid "Network %s is not updated as it is not found in Arista DB" -msgstr "" - -#: neutron/plugins/ml2/drivers/arista/mechanism_arista.py:243 -#, python-format -msgid "VM %s is not created as it is not found in Arista DB" -msgstr "" - -#: neutron/plugins/ml2/drivers/arista/mechanism_arista.py:257 -#, python-format -msgid "Port name changed to %s" -msgstr "" - -#: neutron/plugins/ml2/drivers/arista/mechanism_arista.py:310 -#, python-format -msgid "VM %s is not updated as it is not found in Arista DB" -msgstr "" - -#: neutron/plugins/ml2/drivers/cisco/apic/apic_topology.py:78 -msgid "APIC service agent starting ..." 
-msgstr "" - -#: neutron/plugins/ml2/drivers/cisco/apic/apic_topology.py:95 -msgid "APIC service agent started" -msgstr "" - -#: neutron/plugins/ml2/drivers/cisco/apic/apic_topology.py:179 -#, python-format -msgid "APIC host agent: agent starting on %s" -msgstr "" - -#: neutron/plugins/ml2/drivers/cisco/apic/apic_topology.py:199 -#, python-format -msgid "APIC host agent: started on %s" -msgstr "" - -#: neutron/plugins/ml2/drivers/freescale/mechanism_fslsdn.py:40 -msgid "Initializing CRD client... " -msgstr "" - -#: neutron/plugins/ml2/drivers/mlnx/agent/eswitch_neutron_agent.py:54 -msgid "Agent initialised successfully, now running... " -msgstr "" - -#: neutron/plugins/ml2/extensions/port_security.py:33 -msgid "PortSecurityExtensionDriver initialization complete" -msgstr "" - -#: neutron/plugins/oneconvergence/agent/nvsd_neutron_agent.py:153 -msgid "NVSD Agent initialized successfully, now running... " -msgstr "" - -#: neutron/plugins/openvswitch/agent/ovs_dvr_neutron_agent.py:204 -#, python-format -msgid "L2 Agent operating in DVR Mode with MAC %s" -msgstr "" - -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:518 -#, python-format -msgid "Assigning %(vlan_id)s as local vlan for net-id=%(net_uuid)s" -msgstr "%(vlan_id)s을(를) net-id=%(net_uuid)s에 대한 로컬 vlan으로 지정 중" - -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:619 -#, python-format -msgid "Reclaiming vlan = %(vlan_id)s from net-id = %(net_uuid)s" -msgstr "net-id = %(net_uuid)s에서 vlan = %(vlan_id)s 재확보 중" - -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:714 -#, python-format -msgid "port_unbound(): net_uuid %s not in local_vlan_map" -msgstr "" - -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:783 -#, python-format -msgid "Adding %s to list of bridges." 
-msgstr "" - -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:926 -#, python-format -msgid "Mapping physical network %(physical_network)s to bridge %(bridge)s" -msgstr "실제 네트워크 %(physical_network)s을(를) 브릿지 %(bridge)s에 맵핑 중" - -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1035 -#, python-format -msgid "Port '%(port_name)s' has lost its vlan tag '%(vlan_tag)d'!" -msgstr "" - -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1149 -#, python-format -msgid "" -"Port %s was not found on the integration bridge and will therefore not be " -"processed" -msgstr "" - -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1179 -#, python-format -msgid "Configuration for device %s completed." -msgstr "" - -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1198 -#, python-format -msgid "Ancillary Port %s added" -msgstr "" - -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1457 -msgid "Agent tunnel out of sync with plugin!" -msgstr "에이전트 터널이 플러그인과 동기화되지 않았습니다!" - -#: neutron/plugins/sriovnicagent/sriov_nic_agent.py:191 -#, python-format -msgid "No device with MAC %s defined on agent." -msgstr "" - -#: neutron/plugins/sriovnicagent/sriov_nic_agent.py:216 -#, python-format -msgid "Device with MAC %s not defined on plugin" -msgstr "" - -#: neutron/plugins/sriovnicagent/sriov_nic_agent.py:223 -#, python-format -msgid "Removing device with mac_address %s" -msgstr "" - -#: neutron/plugins/sriovnicagent/sriov_nic_agent.py:244 -msgid "SRIOV NIC Agent RPC Daemon Started!" -msgstr "" - -#: neutron/plugins/sriovnicagent/sriov_nic_agent.py:333 -#, python-format -msgid "Physical Devices mappings: %s" -msgstr "" - -#: neutron/plugins/sriovnicagent/sriov_nic_agent.py:334 -#, python-format -msgid "Exclude Devices: %s" -msgstr "" - -#: neutron/scheduler/dhcp_agent_scheduler.py:110 -#, python-format -msgid "Agent %s already present" -msgstr "" - -#: neutron/server/__init__.py:50 -msgid "RPC was already started in parent process by plugin." 
-msgstr "" - -#: neutron/services/service_base.py:99 -#, python-format -msgid "Default provider is not specified for service type %s" -msgstr "" - -#: neutron/services/l3_router/l3_arista.py:247 -msgid "Syncing Neutron Router DB <-> EOS" -msgstr "" - -#: neutron/services/metering/agents/metering_agent.py:96 -#, python-format -msgid "Loading Metering driver %s" -msgstr "" - -#: neutron/services/metering/drivers/iptables/iptables_driver.py:89 -#, python-format -msgid "Loading interface driver %s" -msgstr "" diff --git a/neutron/locale/neutron-log-error.pot b/neutron/locale/neutron-log-error.pot index e3d529db988..fbef98f6c96 100644 --- a/neutron/locale/neutron-log-error.pot +++ b/neutron/locale/neutron-log-error.pot @@ -6,9 +6,9 @@ #, fuzzy msgid "" msgstr "" -"Project-Id-Version: neutron 2015.1.dev240\n" +"Project-Id-Version: neutron 2015.1.dev1.g2add4e5\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2015-04-03 06:13+0000\n" +"POT-Creation-Date: 2015-04-20 11:03+0200\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME \n" "Language-Team: LANGUAGE \n" @@ -89,7 +89,7 @@ msgstr "" msgid "Timed out retrieving ofport on port %(pname)s. Exception: %(exception)s" msgstr "" -#: neutron/agent/common/ovs_lib.py:489 +#: neutron/agent/common/ovs_lib.py:506 #, python-format msgid "OVS flows could not be applied on bridge %s" msgstr "" @@ -113,56 +113,56 @@ msgstr "" msgid "Network %s info call failed." 
msgstr "" -#: neutron/agent/dhcp/agent.py:577 neutron/agent/l3/agent.py:617 -#: neutron/agent/metadata/agent.py:304 +#: neutron/agent/dhcp/agent.py:577 neutron/agent/l3/agent.py:606 +#: neutron/agent/metadata/agent.py:311 #: neutron/plugins/hyperv/agent/l2_agent.py:94 #: neutron/plugins/ibm/agent/sdnve_neutron_agent.py:108 #: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:779 -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:276 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:284 #: neutron/plugins/sriovnicagent/sriov_nic_agent.py:129 #: neutron/services/metering/agents/metering_agent.py:283 msgid "Failed reporting state!" msgstr "" -#: neutron/agent/l3/agent.py:173 neutron/tests/unit/test_l3_agent.py:1914 +#: neutron/agent/l3/agent.py:172 neutron/tests/unit/agent/l3/test_agent.py:2084 #, python-format msgid "Error importing interface driver '%s'" msgstr "" -#: neutron/agent/l3/agent.py:234 neutron/agent/linux/dhcp.py:786 +#: neutron/agent/l3/agent.py:232 neutron/agent/linux/dhcp.py:787 msgid "An interface driver must be specified" msgstr "" -#: neutron/agent/l3/agent.py:239 +#: neutron/agent/l3/agent.py:237 msgid "Router id is required if not using namespaces." msgstr "" -#: neutron/agent/l3/agent.py:246 +#: neutron/agent/l3/agent.py:244 #, python-format msgid "%s used in config as ipv6_gateway is not a valid IPv6 link-local address." 
msgstr "" -#: neutron/agent/l3/agent.py:387 +#: neutron/agent/l3/agent.py:379 #, python-format msgid "The external network bridge '%s' does not exist" msgstr "" -#: neutron/agent/l3/agent.py:447 +#: neutron/agent/l3/agent.py:433 #, python-format msgid "Failed to fetch router information for '%s'" msgstr "" -#: neutron/agent/l3/agent.py:471 +#: neutron/agent/l3/agent.py:457 #, python-format msgid "Removing incompatible router '%s'" msgstr "" -#: neutron/agent/l3/agent.py:475 +#: neutron/agent/l3/agent.py:461 #, python-format msgid "Failed to process compatible router '%s'" msgstr "" -#: neutron/agent/l3/agent.py:524 +#: neutron/agent/l3/agent.py:513 msgid "Failed synchronizing routers due to RPC error" msgstr "" @@ -182,7 +182,7 @@ msgstr "" msgid "DVR: removed snat failed" msgstr "" -#: neutron/agent/l3/dvr_router.py:500 +#: neutron/agent/l3/dvr_router.py:505 msgid "Missing subnet/agent_gateway_port" msgstr "" @@ -239,28 +239,28 @@ msgstr "" msgid "Pidfile %s already exist. Daemon already running?" msgstr "" -#: neutron/agent/linux/dhcp.py:792 +#: neutron/agent/linux/dhcp.py:793 #, python-format msgid "Error importing interface driver '%(driver)s': %(inner)s" msgstr "" -#: neutron/agent/linux/external_process.py:222 +#: neutron/agent/linux/external_process.py:224 #, python-format msgid "" "%(service)s for %(resource_type)s with uuid %(uuid)s not found. 
The " "process should not have died" msgstr "" -#: neutron/agent/linux/external_process.py:242 +#: neutron/agent/linux/external_process.py:244 #, python-format msgid "respawning %(service)s for uuid %(uuid)s" msgstr "" -#: neutron/agent/linux/external_process.py:248 +#: neutron/agent/linux/external_process.py:250 msgid "Exiting agent as programmed in check_child_processes_actions" msgstr "" -#: neutron/agent/linux/external_process.py:259 +#: neutron/agent/linux/external_process.py:261 #, python-format msgid "" "Exiting agent because of a malfunction with the %(service)s process " @@ -283,7 +283,7 @@ msgstr "" msgid "Failed unplugging interface '%s'" msgstr "" -#: neutron/agent/linux/ip_lib.py:673 +#: neutron/agent/linux/ip_lib.py:678 #, python-format msgid "Failed sending gratuitous ARP to %(addr)s on %(iface)s in namespace %(ns)s" msgstr "" @@ -317,17 +317,17 @@ msgstr "" msgid "Error received from ovsdb monitor: %s" msgstr "" -#: neutron/agent/linux/utils.py:219 +#: neutron/agent/linux/utils.py:220 #, python-format msgid "Unable to convert value in %s" msgstr "" -#: neutron/agent/metadata/agent.py:109 +#: neutron/agent/metadata/agent.py:116 #: neutron/agent/metadata/namespace_proxy.py:56 msgid "Unexpected error." msgstr "" -#: neutron/agent/ovsdb/impl_idl.py:103 +#: neutron/agent/ovsdb/impl_idl.py:106 #, python-format msgid "OVSDB Error: %s" msgstr "" @@ -470,11 +470,17 @@ msgstr "" #: neutron/cmd/sanity_check.py:126 msgid "" +"Check for Open vSwitch support of ARP header matching failed. ARP " +"spoofing suppression will not work. A newer version of OVS is required." +msgstr "" + +#: neutron/cmd/sanity_check.py:135 +msgid "" "Check for VF management support failed. Please ensure that the version of" " ip link being used has VF support." msgstr "" -#: neutron/cmd/sanity_check.py:136 +#: neutron/cmd/sanity_check.py:145 msgid "Check for native OVSDB support failed." 
msgstr "" @@ -483,11 +489,11 @@ msgstr "" msgid "Unexpected exception while checking supported feature via command: %s" msgstr "" -#: neutron/cmd/sanity/checks.py:119 +#: neutron/cmd/sanity/checks.py:129 msgid "Unexpected exception while checking supported ip link command" msgstr "" -#: neutron/cmd/sanity/checks.py:165 +#: neutron/cmd/sanity/checks.py:175 #, python-format msgid "" "Failed to import required modules. Ensure that the python-openvswitch " @@ -506,12 +512,12 @@ msgid "" "%(agent)s" msgstr "" -#: neutron/db/db_base_plugin_v2.py:926 neutron/plugins/ml2/plugin.py:552 +#: neutron/db/db_base_plugin_v2.py:931 neutron/plugins/ml2/plugin.py:559 #, python-format msgid "An exception occurred while creating the %(resource)s:%(item)s" msgstr "" -#: neutron/db/db_base_plugin_v2.py:1689 +#: neutron/db/db_base_plugin_v2.py:1699 #, python-format msgid "Unable to generate mac address after %s attempts" msgstr "" @@ -535,6 +541,10 @@ msgstr "" msgid "Exception encountered during router rescheduling." msgstr "" +#: neutron/db/l3_db.py:539 +msgid "Cannot have multiple IPv4 subnets on router port" +msgstr "" + #: neutron/db/metering/metering_rpc.py:47 #, python-format msgid "Unable to find agent %s." @@ -660,7 +670,7 @@ msgid "" "the roll back. of a remove_router_interface operation" msgstr "" -#: neutron/plugins/ibm/sdnve_neutron_plugin.py:661 +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:679 #: neutron/services/l3_router/l3_sdnve.py:203 #, python-format msgid "Delete floatingip failed in SDN-VE: %s" @@ -674,8 +684,8 @@ msgid "" msgstr "" #: neutron/plugins/ibm/agent/sdnve_neutron_agent.py:255 -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1617 -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1629 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1712 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1724 #, python-format msgid "%s Agent terminated!" 
msgstr "" @@ -730,152 +740,152 @@ msgstr "" msgid "Parsing physical_interface_mappings failed: %s. Agent terminated!" msgstr "" -#: neutron/plugins/ml2/db.py:242 neutron/plugins/ml2/db.py:328 -#: neutron/plugins/ml2/plugin.py:1303 +#: neutron/plugins/ml2/db.py:242 neutron/plugins/ml2/db.py:327 +#: neutron/plugins/ml2/plugin.py:1310 #, python-format msgid "Multiple ports have port_id starting with %s" msgstr "" -#: neutron/plugins/ml2/managers.py:56 +#: neutron/plugins/ml2/managers.py:57 #, python-format msgid "" "Type driver '%(new_driver)s' ignored because type driver '%(old_driver)s'" " is already registered for type '%(type)s'" msgstr "" -#: neutron/plugins/ml2/managers.py:72 +#: neutron/plugins/ml2/managers.py:73 #, python-format msgid "No type driver for tenant network_type: %s. Service terminated!" msgstr "" -#: neutron/plugins/ml2/managers.py:144 +#: neutron/plugins/ml2/managers.py:145 #, python-format msgid "Network %s has no segments" msgstr "" -#: neutron/plugins/ml2/managers.py:227 neutron/plugins/ml2/managers.py:254 +#: neutron/plugins/ml2/managers.py:228 neutron/plugins/ml2/managers.py:255 #, python-format msgid "Failed to release segment '%s' because network type is not supported." 
msgstr "" -#: neutron/plugins/ml2/managers.py:326 +#: neutron/plugins/ml2/managers.py:330 #, python-format msgid "Mechanism driver '%(name)s' failed in %(method)s" msgstr "" -#: neutron/plugins/ml2/managers.py:612 neutron/plugins/ml2/managers.py:674 +#: neutron/plugins/ml2/managers.py:616 neutron/plugins/ml2/managers.py:678 #, python-format msgid "Failed to bind port %(port)s on host %(host)s" msgstr "" -#: neutron/plugins/ml2/managers.py:627 +#: neutron/plugins/ml2/managers.py:631 #, python-format msgid "" "Exceeded maximum binding levels attempting to bind port %(port)s on host " "%(host)s" msgstr "" -#: neutron/plugins/ml2/managers.py:670 +#: neutron/plugins/ml2/managers.py:674 #, python-format msgid "Mechanism driver %s failed in bind_port" msgstr "" -#: neutron/plugins/ml2/managers.py:741 +#: neutron/plugins/ml2/managers.py:745 #, python-format msgid "Extension driver '%(name)s' failed in %(method)s" msgstr "" -#: neutron/plugins/ml2/plugin.py:275 +#: neutron/plugins/ml2/plugin.py:282 #, python-format msgid "Failed to commit binding results for %(port)s after %(max)s tries" msgstr "" -#: neutron/plugins/ml2/plugin.py:432 +#: neutron/plugins/ml2/plugin.py:439 #, python-format msgid "Serialized vif_details DB value '%(value)s' for port %(port)s is invalid" msgstr "" -#: neutron/plugins/ml2/plugin.py:443 +#: neutron/plugins/ml2/plugin.py:450 #, python-format msgid "Serialized profile DB value '%(value)s' for port %(port)s is invalid" msgstr "" -#: neutron/plugins/ml2/plugin.py:529 +#: neutron/plugins/ml2/plugin.py:536 #, python-format msgid "Could not find %s to delete." msgstr "" -#: neutron/plugins/ml2/plugin.py:532 +#: neutron/plugins/ml2/plugin.py:539 #, python-format msgid "Could not delete %(res)s %(id)s." msgstr "" -#: neutron/plugins/ml2/plugin.py:565 +#: neutron/plugins/ml2/plugin.py:572 #, python-format msgid "" "mechanism_manager.create_%(res)s_postcommit failed for %(res)s: " "'%(failed_id)s'. 
Deleting %(res)ss %(resource_ids)s" msgstr "" -#: neutron/plugins/ml2/plugin.py:611 +#: neutron/plugins/ml2/plugin.py:618 #, python-format msgid "mechanism_manager.create_network_postcommit failed, deleting network '%s'" msgstr "" -#: neutron/plugins/ml2/plugin.py:681 +#: neutron/plugins/ml2/plugin.py:688 #, python-format msgid "Exception auto-deleting port %s" msgstr "" -#: neutron/plugins/ml2/plugin.py:693 +#: neutron/plugins/ml2/plugin.py:700 #, python-format msgid "Exception auto-deleting subnet %s" msgstr "" -#: neutron/plugins/ml2/plugin.py:775 +#: neutron/plugins/ml2/plugin.py:782 msgid "mechanism_manager.delete_network_postcommit failed" msgstr "" -#: neutron/plugins/ml2/plugin.py:796 +#: neutron/plugins/ml2/plugin.py:803 #, python-format msgid "mechanism_manager.create_subnet_postcommit failed, deleting subnet '%s'" msgstr "" -#: neutron/plugins/ml2/plugin.py:901 +#: neutron/plugins/ml2/plugin.py:908 #, python-format msgid "Exception deleting fixed_ip from port %s" msgstr "" -#: neutron/plugins/ml2/plugin.py:910 +#: neutron/plugins/ml2/plugin.py:917 msgid "mechanism_manager.delete_subnet_postcommit failed" msgstr "" -#: neutron/plugins/ml2/plugin.py:975 +#: neutron/plugins/ml2/plugin.py:982 #, python-format msgid "mechanism_manager.create_port_postcommit failed, deleting port '%s'" msgstr "" -#: neutron/plugins/ml2/plugin.py:987 +#: neutron/plugins/ml2/plugin.py:994 #, python-format msgid "_bind_port_if_needed failed, deleting port '%s'" msgstr "" -#: neutron/plugins/ml2/plugin.py:1017 +#: neutron/plugins/ml2/plugin.py:1024 #, python-format msgid "_bind_port_if_needed failed. 
Deleting all ports from create bulk '%s'" msgstr "" -#: neutron/plugins/ml2/plugin.py:1162 +#: neutron/plugins/ml2/plugin.py:1169 #, python-format msgid "No Host supplied to bind DVR Port %s" msgstr "" -#: neutron/plugins/ml2/plugin.py:1284 +#: neutron/plugins/ml2/plugin.py:1291 #, python-format msgid "mechanism_manager.delete_port_postcommit failed for port %s" msgstr "" -#: neutron/plugins/ml2/plugin.py:1316 +#: neutron/plugins/ml2/plugin.py:1323 #, python-format msgid "Binding info for DVR port %s not found" msgstr "" @@ -1045,102 +1055,102 @@ msgstr "" msgid "a different subnet %s" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:348 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:356 msgid "No tunnel_type specified, cannot create tunnels" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:351 -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:374 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:359 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:382 #, python-format msgid "tunnel_type %s not supported by agent" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:367 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:375 msgid "No tunnel_ip specified, cannot delete tunnels" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:371 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:379 msgid "No tunnel_type specified, cannot delete tunnels" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:509 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:517 #, python-format msgid "No local VLAN available for net-id=%s" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:548 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:556 #, python-format msgid "" "Cannot provision %(network_type)s network for net-id=%(net_uuid)s - " "tunneling disabled" msgstr "" -#: 
neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:567 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:575 #, python-format msgid "" "Cannot provision flat network for net-id=%(net_uuid)s - no bridge for " "physical_network %(physical_network)s" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:595 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:603 #, python-format msgid "" "Cannot provision VLAN network for net-id=%(net_uuid)s - no bridge for " "physical_network %(physical_network)s" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:604 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:612 #, python-format msgid "" "Cannot provision unknown network type %(network_type)s for net-" "id=%(net_uuid)s" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:660 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:668 #, python-format msgid "" "Cannot reclaim unknown network type %(network_type)s for net-" "id=%(net_uuid)s" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:805 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:855 msgid "" "Failed to create OVS patch port. Cannot have tunneling enabled on this " "agent, since this version of OVS does not support tunnels or patch ports." " Agent terminated!" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:932 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:982 #, python-format msgid "" "Bridge %(bridge)s for physical network %(physical_network)s does not " "exist. Agent terminated!" 
msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1084 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1174 #, python-format msgid "Failed to set-up %(type)s tunnel port to %(ip)s" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1286 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1379 #, python-format msgid "" "process_network_ports - iteration:%d - failure while retrieving port " "details from server" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1315 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1408 #, python-format msgid "" "process_ancillary_network_ports - iteration:%d - failure while retrieving" " port details from server" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1461 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1554 msgid "Error while synchronizing tunnels" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1531 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1625 msgid "Error while processing VIF ports" msgstr "" diff --git a/neutron/locale/neutron-log-info.pot b/neutron/locale/neutron-log-info.pot index 6a23a3540be..12dacb5febd 100644 --- a/neutron/locale/neutron-log-info.pot +++ b/neutron/locale/neutron-log-info.pot @@ -6,9 +6,9 @@ #, fuzzy msgid "" msgstr "" -"Project-Id-Version: neutron 2015.1.dev240\n" +"Project-Id-Version: neutron 2015.1.dev1.g2add4e5\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2015-04-03 06:13+0000\n" +"POT-Creation-Date: 2015-04-20 11:03+0200\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME \n" "Language-Team: LANGUAGE \n" @@ -122,7 +122,7 @@ msgstr "" msgid "No ports here to refresh firewall" msgstr "" -#: neutron/agent/common/ovs_lib.py:393 +#: neutron/agent/common/ovs_lib.py:410 #, python-format msgid "Port %(port_id)s not present in bridge %(br_name)s" msgstr "" @@ -139,13 +139,13 @@ msgstr "" msgid "Synchronizing 
state complete" msgstr "" -#: neutron/agent/dhcp/agent.py:586 neutron/agent/l3/agent.py:622 +#: neutron/agent/dhcp/agent.py:586 neutron/agent/l3/agent.py:611 #: neutron/services/metering/agents/metering_agent.py:286 #, python-format msgid "agent_updated by server side %s!" msgstr "" -#: neutron/agent/l3/agent.py:551 +#: neutron/agent/l3/agent.py:540 msgid "L3 agent started" msgstr "" @@ -166,7 +166,7 @@ msgstr "" msgid "Process runs with uid/gid: %(uid)s/%(gid)s" msgstr "" -#: neutron/agent/linux/dhcp.py:656 +#: neutron/agent/linux/dhcp.py:657 #, python-format msgid "" "Cannot apply dhcp option %(opt)s because it's ip_version %(version)d is " @@ -234,7 +234,7 @@ msgstr "" #: neutron/cmd/eventlet/plugins/hyperv_neutron_agent.py:43 #: neutron/plugins/ibm/agent/sdnve_neutron_agent.py:261 #: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:1017 -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1634 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1729 #: neutron/plugins/sriovnicagent/sriov_nic_agent.py:345 msgid "Agent initialized successfully, now running... " msgstr "" @@ -311,7 +311,7 @@ msgstr "" msgid "Found overlapping ranges: %(l_range)s and %(r_range)s" msgstr "" -#: neutron/db/db_base_plugin_v2.py:1507 +#: neutron/db/db_base_plugin_v2.py:1517 #, python-format msgid "Found IP allocation %(alloc)s on subnet %(subnet)s, cannot delete" msgstr "" @@ -322,7 +322,7 @@ msgid "" "rescheduling is disabled." 
msgstr "" -#: neutron/db/l3_db.py:1114 +#: neutron/db/l3_db.py:1178 #, python-format msgid "Skipping port %s as no IP is configure on it" msgstr "" @@ -332,12 +332,12 @@ msgstr "" msgid "Centralizing distributed router %s is not supported" msgstr "" -#: neutron/db/l3_dvr_db.py:535 +#: neutron/db/l3_dvr_db.py:539 #, python-format msgid "Agent Gateway port does not exist, so create one: %s" msgstr "" -#: neutron/db/l3_dvr_db.py:619 +#: neutron/db/l3_dvr_db.py:623 #, python-format msgid "SNAT interface port list does not exist, so create one: %s" msgstr "" @@ -370,6 +370,10 @@ msgstr "" msgid "%d probe(s) deleted" msgstr "" +#: neutron/extensions/vlantransparent.py:45 +msgid "Disabled vlantransparent extension." +msgstr "" + #: neutron/notifiers/nova.py:266 #, python-format msgid "Nova event response: %s" @@ -564,7 +568,7 @@ msgid "RPC agent_id: %s" msgstr "" #: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:863 -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1155 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1245 #: neutron/plugins/sriovnicagent/sriov_nic_agent.py:209 #, python-format msgid "Port %(device)s updated. Details: %(details)s" @@ -576,14 +580,14 @@ msgid "Device %s not defined on plugin" msgstr "" #: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:903 -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1210 -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1227 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1303 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1320 #, python-format msgid "Attachment %s removed" msgstr "" #: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:915 -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1239 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1332 #: neutron/plugins/sriovnicagent/sriov_nic_agent.py:235 #, python-format msgid "Port %s updated." 
@@ -594,7 +598,7 @@ msgid "LinuxBridge Agent RPC Daemon Started!" msgstr "" #: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:978 -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1429 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1522 #: neutron/plugins/sriovnicagent/sriov_nic_agent.py:251 msgid "Agent out of sync with plugin!" msgstr "" @@ -610,111 +614,111 @@ msgstr "" msgid "Added segment %(id)s of type %(network_type)s for network %(network_id)s" msgstr "" -#: neutron/plugins/ml2/managers.py:43 +#: neutron/plugins/ml2/managers.py:44 #, python-format msgid "Configured type driver names: %s" msgstr "" -#: neutron/plugins/ml2/managers.py:48 +#: neutron/plugins/ml2/managers.py:49 #, python-format msgid "Loaded type driver names: %s" msgstr "" -#: neutron/plugins/ml2/managers.py:64 +#: neutron/plugins/ml2/managers.py:65 #, python-format msgid "Registered types: %s" msgstr "" -#: neutron/plugins/ml2/managers.py:75 +#: neutron/plugins/ml2/managers.py:76 #, python-format msgid "Tenant network_types: %s" msgstr "" -#: neutron/plugins/ml2/managers.py:161 +#: neutron/plugins/ml2/managers.py:162 #, python-format msgid "Initializing driver for type '%s'" msgstr "" -#: neutron/plugins/ml2/managers.py:270 +#: neutron/plugins/ml2/managers.py:271 #, python-format msgid "Configured mechanism driver names: %s" msgstr "" -#: neutron/plugins/ml2/managers.py:276 +#: neutron/plugins/ml2/managers.py:277 #, python-format msgid "Loaded mechanism driver names: %s" msgstr "" -#: neutron/plugins/ml2/managers.py:288 +#: neutron/plugins/ml2/managers.py:289 #, python-format msgid "Registered mechanism drivers: %s" msgstr "" -#: neutron/plugins/ml2/managers.py:293 +#: neutron/plugins/ml2/managers.py:294 #, python-format msgid "Initializing mechanism driver '%s'" msgstr "" -#: neutron/plugins/ml2/managers.py:699 +#: neutron/plugins/ml2/managers.py:703 #, python-format msgid "Configured extension driver names: %s" msgstr "" -#: 
neutron/plugins/ml2/managers.py:705 +#: neutron/plugins/ml2/managers.py:709 #, python-format msgid "Loaded extension driver names: %s" msgstr "" -#: neutron/plugins/ml2/managers.py:716 +#: neutron/plugins/ml2/managers.py:720 #, python-format msgid "Registered extension drivers: %s" msgstr "" -#: neutron/plugins/ml2/managers.py:722 +#: neutron/plugins/ml2/managers.py:726 #, python-format msgid "Initializing extension driver '%s'" msgstr "" -#: neutron/plugins/ml2/managers.py:730 +#: neutron/plugins/ml2/managers.py:734 #, python-format msgid "Got %(alias)s extension from driver '%(drv)s'" msgstr "" -#: neutron/plugins/ml2/managers.py:779 +#: neutron/plugins/ml2/managers.py:783 #, python-format msgid "Extended network dict for driver '%(drv)s'" msgstr "" -#: neutron/plugins/ml2/managers.py:786 +#: neutron/plugins/ml2/managers.py:790 #, python-format msgid "Extended subnet dict for driver '%(drv)s'" msgstr "" -#: neutron/plugins/ml2/managers.py:793 +#: neutron/plugins/ml2/managers.py:797 #, python-format msgid "Extended port dict for driver '%(drv)s'" msgstr "" -#: neutron/plugins/ml2/plugin.py:144 +#: neutron/plugins/ml2/plugin.py:151 msgid "Modular L2 Plugin initialization complete" msgstr "" -#: neutron/plugins/ml2/plugin.py:281 +#: neutron/plugins/ml2/plugin.py:288 #, python-format msgid "Attempt %(count)s to bind port %(port)s" msgstr "" -#: neutron/plugins/ml2/plugin.py:678 +#: neutron/plugins/ml2/plugin.py:685 #, python-format msgid "Port %s was deleted concurrently" msgstr "" -#: neutron/plugins/ml2/plugin.py:689 +#: neutron/plugins/ml2/plugin.py:696 #, python-format msgid "Subnet %s was deleted concurrently" msgstr "" -#: neutron/plugins/ml2/plugin.py:1329 +#: neutron/plugins/ml2/plugin.py:1336 #, python-format msgid "" "Binding info for port %s was not found, it might have been deleted " @@ -821,54 +825,61 @@ msgstr "" msgid "L2 Agent operating in DVR Mode with MAC %s" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:518 +#: 
neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:526 #, python-format msgid "Assigning %(vlan_id)s as local vlan for net-id=%(net_uuid)s" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:619 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:627 #, python-format msgid "Reclaiming vlan = %(vlan_id)s from net-id = %(net_uuid)s" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:714 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:717 +#, python-format +msgid "" +"Skipping ARP spoofing rules for port '%s' because it has port security " +"disabled" +msgstr "" + +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:764 #, python-format msgid "port_unbound(): net_uuid %s not in local_vlan_map" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:783 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:833 #, python-format msgid "Adding %s to list of bridges." msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:926 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:976 #, python-format msgid "Mapping physical network %(physical_network)s to bridge %(bridge)s" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1035 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1125 #, python-format msgid "Port '%(port_name)s' has lost its vlan tag '%(vlan_tag)d'!" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1149 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1239 #, python-format msgid "" "Port %s was not found on the integration bridge and will therefore not be" " processed" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1179 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1272 #, python-format msgid "Configuration for device %s completed." 
msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1198 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1291 #, python-format msgid "Ancillary Port %s added" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1457 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1550 msgid "Agent tunnel out of sync with plugin!" msgstr "" diff --git a/neutron/locale/neutron.pot b/neutron/locale/neutron.pot index 1ef546fa12e..724742ef327 100644 --- a/neutron/locale/neutron.pot +++ b/neutron/locale/neutron.pot @@ -6,9 +6,9 @@ #, fuzzy msgid "" msgstr "" -"Project-Id-Version: neutron 2015.1.dev240\n" +"Project-Id-Version: neutron 2015.1.dev1.g2add4e5\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2015-04-03 06:13+0000\n" +"POT-Creation-Date: 2015-04-20 11:03+0200\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME \n" "Language-Team: LANGUAGE \n" @@ -255,17 +255,30 @@ msgstr "" msgid "Add comments to iptables rules." msgstr "" -#: neutron/agent/common/config.py:69 +#: neutron/agent/common/config.py:68 +msgid "" +"Maximum number of elements which can be stored in an IPset. If None is " +"specified, the system default will be used." +msgstr "" + +#: neutron/agent/common/config.py:72 +msgid "" +"Initial hash size for an IPset. Must be a power of 2, else the kernel " +"will round it up automatically. If None is specified, the system default " +"will be used." 
+msgstr "" + +#: neutron/agent/common/config.py:80 msgid "Action to be executed when a child process dies" msgstr "" -#: neutron/agent/common/config.py:71 +#: neutron/agent/common/config.py:82 msgid "" "Interval between checks of child process liveness (seconds), use 0 to " "disable" msgstr "" -#: neutron/agent/common/config.py:137 +#: neutron/agent/common/config.py:152 msgid "Top-level directory for maintaining dhcp state" msgstr "" @@ -273,16 +286,16 @@ msgstr "" msgid "Timeout in seconds for ovs-vsctl commands" msgstr "" -#: neutron/agent/common/ovs_lib.py:411 +#: neutron/agent/common/ovs_lib.py:428 #, python-format msgid "Unable to determine mac address for %s" msgstr "" -#: neutron/agent/common/ovs_lib.py:505 +#: neutron/agent/common/ovs_lib.py:522 msgid "Cannot match priority on flow deletion or modification" msgstr "" -#: neutron/agent/common/ovs_lib.py:510 +#: neutron/agent/common/ovs_lib.py:527 msgid "Must specify one or more actions on flow addition or modification" msgstr "" @@ -341,7 +354,7 @@ msgstr "" msgid "Use broadcast in DHCP replies" msgstr "" -#: neutron/agent/l3/agent.py:278 +#: neutron/agent/l3/agent.py:276 msgid "" "The 'gateway_external_network_id' option must be configured for this " "agent as Neutron has more than one external network." 
@@ -472,7 +485,6 @@ msgid "Group (gid or name) running this process after its initialization" msgstr "" #: neutron/agent/l3/keepalived_state_change.py:122 -#: neutron/agent/metadata/driver.py:43 #: neutron/agent/metadata/namespace_proxy.py:153 #: neutron/tests/functional/agent/l3/test_keepalived_state_change.py:31 msgid "Location of Metadata Proxy UNIX domain socket" @@ -516,17 +528,17 @@ msgstr "" msgid "Unable to unlock pid file" msgstr "" -#: neutron/agent/linux/dhcp.py:239 +#: neutron/agent/linux/dhcp.py:240 #, python-format msgid "Error while reading %s" msgstr "" -#: neutron/agent/linux/dhcp.py:246 +#: neutron/agent/linux/dhcp.py:247 #, python-format msgid "Unable to convert value in %s" msgstr "" -#: neutron/agent/linux/dhcp.py:248 +#: neutron/agent/linux/dhcp.py:249 #, python-format msgid "Unable to access %s" msgstr "" @@ -559,32 +571,32 @@ msgstr "" msgid "Admin username" msgstr "" -#: neutron/agent/linux/interface.py:53 neutron/agent/metadata/config.py:24 +#: neutron/agent/linux/interface.py:53 neutron/agent/metadata/config.py:56 #: neutron/plugins/metaplugin/common/config.py:65 msgid "Admin password" msgstr "" -#: neutron/agent/linux/interface.py:56 neutron/agent/metadata/config.py:27 +#: neutron/agent/linux/interface.py:56 neutron/agent/metadata/config.py:59 #: neutron/plugins/metaplugin/common/config.py:68 msgid "Admin tenant name" msgstr "" -#: neutron/agent/linux/interface.py:58 neutron/agent/metadata/config.py:29 +#: neutron/agent/linux/interface.py:58 neutron/agent/metadata/config.py:61 #: neutron/plugins/metaplugin/common/config.py:70 msgid "Authentication URL" msgstr "" -#: neutron/agent/linux/interface.py:60 neutron/agent/metadata/config.py:31 +#: neutron/agent/linux/interface.py:60 neutron/agent/metadata/config.py:63 #: neutron/common/config.py:49 neutron/plugins/metaplugin/common/config.py:72 msgid "The type of authentication to use" msgstr "" -#: neutron/agent/linux/interface.py:62 neutron/agent/metadata/config.py:33 +#: 
neutron/agent/linux/interface.py:62 neutron/agent/metadata/config.py:65 #: neutron/plugins/metaplugin/common/config.py:74 msgid "Authentication region" msgstr "" -#: neutron/agent/linux/interface.py:65 neutron/agent/metadata/config.py:43 +#: neutron/agent/linux/interface.py:65 neutron/agent/metadata/config.py:75 msgid "Network service endpoint type to pull from the keystone catalog" msgstr "" @@ -630,7 +642,7 @@ msgstr "" msgid "Location to store IPv6 RA config files" msgstr "" -#: neutron/agent/linux/utils.py:118 +#: neutron/agent/linux/utils.py:119 msgid "" "\n" "Command: {cmd}\n" @@ -640,94 +652,45 @@ msgid "" "Stderr: {stderr}" msgstr "" -#: neutron/agent/metadata/agent.py:110 +#: neutron/agent/metadata/agent.py:117 #: neutron/agent/metadata/namespace_proxy.py:57 msgid "An unknown error has occurred. Please try your request again." msgstr "" -#: neutron/agent/metadata/agent.py:187 +#: neutron/agent/metadata/agent.py:194 msgid "" "Either one of parameter network_id or router_id must be passed to " "_get_ports method." msgstr "" -#: neutron/agent/metadata/agent.py:249 +#: neutron/agent/metadata/agent.py:256 #: neutron/agent/metadata/namespace_proxy.py:102 msgid "Remote metadata server experienced an internal server error." msgstr "" -#: neutron/agent/metadata/agent.py:255 +#: neutron/agent/metadata/agent.py:262 #: neutron/agent/metadata/namespace_proxy.py:108 #, python-format msgid "Unexpected response code: %s" msgstr "" -#: neutron/agent/metadata/config.py:22 -#: neutron/plugins/metaplugin/common/config.py:63 -msgid "Admin user" +#: neutron/agent/metadata/config.py:23 +msgid "Location for Metadata Proxy UNIX domain socket." msgstr "" -#: neutron/agent/metadata/config.py:36 -msgid "Turn off verification of the certificate for ssl" -msgstr "" - -#: neutron/agent/metadata/config.py:39 -msgid "Certificate Authority public key (CA cert) file for ssl" -msgstr "" - -#: neutron/agent/metadata/config.py:46 -msgid "IP address used by Nova metadata server." 
-msgstr "" - -#: neutron/agent/metadata/config.py:49 -msgid "TCP Port used by Nova metadata server." -msgstr "" - -#: neutron/agent/metadata/config.py:52 -msgid "Shared secret to sign instance-id request" -msgstr "" - -#: neutron/agent/metadata/config.py:57 -msgid "Protocol to access nova metadata, http or https" -msgstr "" - -#: neutron/agent/metadata/config.py:59 -msgid "Allow to perform insecure SSL (https) requests to nova metadata" -msgstr "" - -#: neutron/agent/metadata/config.py:63 -msgid "Client certificate for nova metadata api server." -msgstr "" - -#: neutron/agent/metadata/config.py:66 -msgid "Private key of client certificate." -msgstr "" - -#: neutron/agent/metadata/config.py:73 -msgid "Location for Metadata Proxy UNIX domain socket" -msgstr "" - -#: neutron/agent/metadata/config.py:76 -msgid "Number of separate worker processes for metadata server" -msgstr "" - -#: neutron/agent/metadata/config.py:80 -msgid "Number of backlog requests to configure the metadata server socket with" -msgstr "" - -#: neutron/agent/metadata/driver.py:47 +#: neutron/agent/metadata/config.py:26 msgid "" "User (uid or name) running metadata proxy after its initialization (if " -"empty: agent effective user)" +"empty: agent effective user)." msgstr "" -#: neutron/agent/metadata/driver.py:52 +#: neutron/agent/metadata/config.py:31 msgid "" "Group (gid or name) running metadata proxy after its initialization (if " -"empty: agent effective group)" +"empty: agent effective group)." msgstr "" -#: neutron/agent/metadata/driver.py:57 +#: neutron/agent/metadata/config.py:40 msgid "" "Enable/Disable log watch by metadata proxy. It should be disabled when " "metadata_proxy_user/group is not allowed to read/write its log file and " @@ -737,6 +700,65 @@ msgid "" " effective user id/name." 
msgstr "" +#: neutron/agent/metadata/config.py:54 +#: neutron/plugins/metaplugin/common/config.py:63 +msgid "Admin user" +msgstr "" + +#: neutron/agent/metadata/config.py:68 +msgid "Turn off verification of the certificate for ssl" +msgstr "" + +#: neutron/agent/metadata/config.py:71 +msgid "Certificate Authority public key (CA cert) file for ssl" +msgstr "" + +#: neutron/agent/metadata/config.py:78 +msgid "IP address used by Nova metadata server." +msgstr "" + +#: neutron/agent/metadata/config.py:81 +msgid "TCP Port used by Nova metadata server." +msgstr "" + +#: neutron/agent/metadata/config.py:84 +msgid "Shared secret to sign instance-id request" +msgstr "" + +#: neutron/agent/metadata/config.py:89 +msgid "Protocol to access nova metadata, http or https" +msgstr "" + +#: neutron/agent/metadata/config.py:91 +msgid "Allow to perform insecure SSL (https) requests to nova metadata" +msgstr "" + +#: neutron/agent/metadata/config.py:95 +msgid "Client certificate for nova metadata api server." +msgstr "" + +#: neutron/agent/metadata/config.py:98 +msgid "Private key of client certificate." +msgstr "" + +#: neutron/agent/metadata/config.py:112 +msgid "" +"Metadata Proxy UNIX domain socket mode, 3 values allowed: 'deduce': " +"deduce mode from metadata_proxy_user/group values, 'user': set metadata " +"proxy socket mode to 0o644, to use when metadata_proxy_user is agent " +"effective user or root, 'group': set metadata proxy socket mode to 0o664," +" to use when metadata_proxy_group is agent effective group or root, " +"'all': set metadata proxy socket mode to 0o666, to use otherwise." +msgstr "" + +#: neutron/agent/metadata/config.py:126 +msgid "Number of separate worker processes for metadata server" +msgstr "" + +#: neutron/agent/metadata/config.py:130 +msgid "Number of backlog requests to configure the metadata server socket with" +msgstr "" + #: neutron/agent/metadata/namespace_proxy.py:137 msgid "Network that will have instance metadata proxied." 
msgstr "" @@ -959,9 +981,10 @@ msgstr "" msgid "Duplicate hostroute '%s'" msgstr "" -#: neutron/api/v2/attributes.py:334 neutron/tests/unit/test_attributes.py:515 -#: neutron/tests/unit/test_attributes.py:529 -#: neutron/tests/unit/test_attributes.py:537 +#: neutron/api/v2/attributes.py:334 +#: neutron/tests/unit/api/v2/test_attributes.py:515 +#: neutron/tests/unit/api/v2/test_attributes.py:529 +#: neutron/tests/unit/api/v2/test_attributes.py:537 #, python-format msgid "'%(data)s' isn't a recognized IP subnet cidr, '%(cidr)s' is recommended" msgstr "" @@ -1117,7 +1140,7 @@ msgid "Tenant %(tenant_id)s not allowed to create %(resource)s on this network" msgstr "" #: neutron/api/v2/resource.py:127 -#: neutron/tests/unit/test_api_v2_resource.py:248 +#: neutron/tests/unit/api/v2/test_resource.py:248 msgid "Request Failed: internal server error while processing your request." msgstr "" @@ -1136,39 +1159,43 @@ msgid "" "ports created by Neutron on integration and external network bridges." msgstr "" -#: neutron/cmd/sanity_check.py:143 +#: neutron/cmd/sanity_check.py:152 msgid "Check for OVS vxlan support" msgstr "" -#: neutron/cmd/sanity_check.py:145 +#: neutron/cmd/sanity_check.py:154 msgid "Check for iproute2 vxlan support" msgstr "" -#: neutron/cmd/sanity_check.py:147 +#: neutron/cmd/sanity_check.py:156 msgid "Check for patch port support" msgstr "" -#: neutron/cmd/sanity_check.py:149 +#: neutron/cmd/sanity_check.py:158 msgid "Check for nova notification support" msgstr "" -#: neutron/cmd/sanity_check.py:151 +#: neutron/cmd/sanity_check.py:160 msgid "Check for ARP responder support" msgstr "" -#: neutron/cmd/sanity_check.py:153 +#: neutron/cmd/sanity_check.py:162 +msgid "Check for ARP header match support" +msgstr "" + +#: neutron/cmd/sanity_check.py:164 msgid "Check for VF management support" msgstr "" -#: neutron/cmd/sanity_check.py:155 +#: neutron/cmd/sanity_check.py:166 msgid "Check netns permission settings" msgstr "" -#: neutron/cmd/sanity_check.py:157 +#: 
neutron/cmd/sanity_check.py:168 msgid "Check minimal dnsmasq version" msgstr "" -#: neutron/cmd/sanity_check.py:159 +#: neutron/cmd/sanity_check.py:170 msgid "Check ovsdb native interface support" msgstr "" @@ -1946,58 +1973,58 @@ msgstr "" msgid "Cannot disable enable_dhcp with ipv6 attributes set" msgstr "" -#: neutron/db/db_base_plugin_v2.py:1020 +#: neutron/db/db_base_plugin_v2.py:1030 #, python-format msgid "%(name)s '%(addr)s' does not match the ip_version '%(ip_version)s'" msgstr "" -#: neutron/db/db_base_plugin_v2.py:1044 +#: neutron/db/db_base_plugin_v2.py:1054 msgid "Gateway is not valid on subnet" msgstr "" -#: neutron/db/db_base_plugin_v2.py:1064 neutron/db/db_base_plugin_v2.py:1078 +#: neutron/db/db_base_plugin_v2.py:1074 neutron/db/db_base_plugin_v2.py:1088 #: neutron/plugins/opencontrail/contrail_plugin.py:312 msgid "new subnet" msgstr "" -#: neutron/db/db_base_plugin_v2.py:1071 +#: neutron/db/db_base_plugin_v2.py:1081 #, python-format msgid "Error parsing dns address %s" msgstr "" -#: neutron/db/db_base_plugin_v2.py:1087 +#: neutron/db/db_base_plugin_v2.py:1097 msgid "ipv6_ra_mode is not valid when ip_version is 4" msgstr "" -#: neutron/db/db_base_plugin_v2.py:1091 +#: neutron/db/db_base_plugin_v2.py:1101 msgid "ipv6_address_mode is not valid when ip_version is 4" msgstr "" -#: neutron/db/db_base_plugin_v2.py:1224 +#: neutron/db/db_base_plugin_v2.py:1234 msgid "allocation_pools allowed only for specific subnet requests." 
msgstr "" -#: neutron/db/db_base_plugin_v2.py:1304 +#: neutron/db/db_base_plugin_v2.py:1314 msgid "ip_version must be specified in the absence of cidr and subnetpool_id" msgstr "" -#: neutron/db/db_base_plugin_v2.py:1321 +#: neutron/db/db_base_plugin_v2.py:1331 msgid "cidr and prefixlen must not be supplied together" msgstr "" -#: neutron/db/db_base_plugin_v2.py:1332 +#: neutron/db/db_base_plugin_v2.py:1342 msgid "A cidr must be specified in the absence of a subnet pool" msgstr "" -#: neutron/db/db_base_plugin_v2.py:1583 +#: neutron/db/db_base_plugin_v2.py:1593 msgid "Existing prefixes must be a subset of the new prefixes" msgstr "" -#: neutron/db/db_base_plugin_v2.py:1650 +#: neutron/db/db_base_plugin_v2.py:1660 msgid "Subnet pool has existing allocations" msgstr "" -#: neutron/db/db_base_plugin_v2.py:1657 +#: neutron/db/db_base_plugin_v2.py:1667 msgid "mac address update" msgstr "" @@ -2066,67 +2093,71 @@ msgstr "" msgid "Cannot specify both subnet-id and port-id" msgstr "" -#: neutron/db/l3_db.py:514 -msgid "Router port must have exactly one fixed IP" +#: neutron/db/l3_db.py:518 +#, python-format +msgid "" +"Cannot have multiple router ports with the same network id if both " +"contain IPv6 subnets. Existing port %(p)s has IPv6 subnet(s) and network " +"id %(nid)s" msgstr "" -#: neutron/db/l3_db.py:528 +#: neutron/db/l3_db.py:560 msgid "Subnet for router interface must have a gateway IP" msgstr "" -#: neutron/db/l3_db.py:532 +#: neutron/db/l3_db.py:564 #, python-format msgid "" "IPv6 subnet %s configured to receive RAs from an external router cannot " "be added to Neutron Router." msgstr "" -#: neutron/db/l3_db.py:712 +#: neutron/db/l3_db.py:776 #, python-format msgid "Cannot add floating IP to port on subnet %s which has no gateway_ip" msgstr "" -#: neutron/db/l3_db.py:753 +#: neutron/db/l3_db.py:817 #, python-format msgid "" "Port %(port_id)s is associated with a different tenant than Floating IP " "%(floatingip_id)s and therefore cannot be bound." 
msgstr "" -#: neutron/db/l3_db.py:757 +#: neutron/db/l3_db.py:821 #, python-format msgid "" "Cannot create floating IP and bind it to Port %s, since that port is " "owned by a different tenant." msgstr "" -#: neutron/db/l3_db.py:769 +#: neutron/db/l3_db.py:833 #, python-format msgid "Port %(id)s does not have fixed ip %(address)s" msgstr "" -#: neutron/db/l3_db.py:776 +#: neutron/db/l3_db.py:840 #, python-format msgid "Cannot add floating IP to port %s that has no fixed IP addresses" msgstr "" -#: neutron/db/l3_db.py:780 +#: neutron/db/l3_db.py:844 #, python-format msgid "" "Port %s has multiple fixed IPs. Must provide a specific IP when " "assigning a floating IP" msgstr "" -#: neutron/db/l3_db.py:809 +#: neutron/db/l3_db.py:873 msgid "fixed_ip_address cannot be specified without a port_id" msgstr "" -#: neutron/db/l3_db.py:849 +#: neutron/db/l3_db.py:913 #, python-format msgid "Network %s is not a valid external network" msgstr "" -#: neutron/db/l3_db.py:993 +#: neutron/db/l3_db.py:1057 #, python-format msgid "has device owner %s" msgstr "" @@ -2137,11 +2168,11 @@ msgid "" " Only admin can override." msgstr "" -#: neutron/db/l3_dvr_db.py:551 +#: neutron/db/l3_dvr_db.py:555 msgid "Unable to create the Agent Gateway Port" msgstr "" -#: neutron/db/l3_dvr_db.py:584 +#: neutron/db/l3_dvr_db.py:588 msgid "Unable to create the SNAT Interface Port" msgstr "" @@ -2793,6 +2824,10 @@ msgstr "" msgid "API for retrieving service providers for Neutron advanced services" msgstr "" +#: neutron/extensions/vlantransparent.py:27 +msgid "Backend does not support VLAN Transparency." 
+msgstr "" + #: neutron/ipam/subnet_alloc.py:106 #, python-format msgid "Insufficient prefix space to allocate subnet size /%s" @@ -3676,27 +3711,34 @@ msgstr "" msgid "Update router-add-interface failed in SDN-VE: %s" msgstr "" -#: neutron/plugins/ibm/sdnve_neutron_plugin.py:556 +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:562 #: neutron/services/l3_router/l3_sdnve.py:128 msgid "No port ID" msgstr "" -#: neutron/plugins/ibm/sdnve_neutron_plugin.py:562 +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:568 #: neutron/services/l3_router/l3_sdnve.py:134 msgid "No fixed IP" msgstr "" -#: neutron/plugins/ibm/sdnve_neutron_plugin.py:590 +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:588 +#, python-format +msgid "" +"Update router-remove-interface failed SDN-VE: subnet %(sid) is not " +"associated with any ports on router %(rid)" +msgstr "" + +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:603 #, python-format msgid "Update router-remove-interface failed SDN-VE: %s" msgstr "" -#: neutron/plugins/ibm/sdnve_neutron_plugin.py:622 +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:640 #, python-format msgid "Creating floating ip operation failed in SDN-VE controller: %s" msgstr "" -#: neutron/plugins/ibm/sdnve_neutron_plugin.py:650 +#: neutron/plugins/ibm/sdnve_neutron_plugin.py:668 #, python-format msgid "Update floating ip failed in SDN-VE: %s" msgstr "" @@ -3927,16 +3969,16 @@ msgid "" " network MTU value that differs from the default segment_mtu value." 
msgstr "" -#: neutron/plugins/ml2/managers.py:89 +#: neutron/plugins/ml2/managers.py:90 msgid "network_type required" msgstr "" -#: neutron/plugins/ml2/managers.py:192 neutron/plugins/ml2/managers.py:201 +#: neutron/plugins/ml2/managers.py:193 neutron/plugins/ml2/managers.py:202 #, python-format msgid "network_type value '%s' not supported" msgstr "" -#: neutron/plugins/ml2/plugin.py:219 +#: neutron/plugins/ml2/plugin.py:226 msgid "binding:profile value too large" msgstr "" @@ -3945,10 +3987,6 @@ msgstr "" msgid "%(method)s failed." msgstr "" -#: neutron/plugins/ml2/common/exceptions.py:28 -msgid "Backend does not support VLAN Transparency." -msgstr "" - #: neutron/plugins/ml2/drivers/type_flat.py:33 msgid "" "List of physical_network names with which flat networks can be created. " @@ -4568,23 +4606,23 @@ msgid "" "error: %(error)s" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1559 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1653 msgid "" "DVR deployments for VXLAN/GRE underlays require L2-pop to be enabled, in " "both the Agent and Server side." msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1577 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1671 #, python-format msgid "Parsing bridge_mappings failed: %s." msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1598 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1693 #, python-format msgid "Invalid tunnel type specified: %s" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1601 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1696 msgid "Tunneling cannot be enabled without a valid local_ip." msgstr "" @@ -4648,15 +4686,26 @@ msgstr "" #: neutron/plugins/openvswitch/common/config.py:78 msgid "" +"Enable suppression of ARP responses that don't match an IP address that " +"belongs to the port from which they originate. 
Note: This prevents the " +"VMs attached to this agent from spoofing, it doesn't protect them from " +"other devices which have the capability to spoof (e.g. bare metal or VMs " +"attached to agents without this flag set to True). Spoofing rules will " +"not be added to any ports that have port security disabled. This requires" +" a version of OVS that supports matching ARP headers." +msgstr "" + +#: neutron/plugins/openvswitch/common/config.py:89 +msgid "" "Set or un-set the don't fragment (DF) bit on outgoing IP packet carrying " "GRE/VXLAN tunnel." msgstr "" -#: neutron/plugins/openvswitch/common/config.py:81 +#: neutron/plugins/openvswitch/common/config.py:92 msgid "Make the l2 agent run in DVR mode." msgstr "" -#: neutron/plugins/openvswitch/common/config.py:83 +#: neutron/plugins/openvswitch/common/config.py:94 msgid "" "Set new timeout in seconds for new rpc calls after agent receives " "SIGTERM. If value is set to 0, rpc timeout won't be changed" @@ -4978,19 +5027,8 @@ msgstr "" msgid "Keepalived didn't respawn" msgstr "" -#: neutron/tests/unit/test_api_v2_resource.py:157 -#: neutron/tests/unit/test_api_v2_resource.py:202 -msgid "Unmapped error" -msgstr "" - -#: neutron/tests/unit/test_api_v2_resource.py:261 -msgid "" -"The server has either erred or is incapable of performing the requested " -"operation." 
-msgstr "" - -#: neutron/tests/unit/test_iptables_manager.py:842 -#: neutron/tests/unit/test_iptables_manager.py:876 +#: neutron/tests/unit/agent/linux/test_iptables_manager.py:842 +#: neutron/tests/unit/agent/linux/test_iptables_manager.py:876 #, python-format msgid "" "IPTablesManager.apply failed to apply the following set of iptables " @@ -4998,31 +5036,42 @@ msgid "" "%s" msgstr "" -#: neutron/tests/unit/ml2/test_ml2_plugin.py:309 +#: neutron/tests/unit/api/v2/test_resource.py:157 +#: neutron/tests/unit/api/v2/test_resource.py:202 +msgid "Unmapped error" +msgstr "" + +#: neutron/tests/unit/api/v2/test_resource.py:261 +msgid "" +"The server has either erred or is incapable of performing the requested " +"operation." +msgstr "" + +#: neutron/tests/unit/plugins/ml2/test_plugin.py:310 #, python-format msgid "Deleting port %s" msgstr "" -#: neutron/tests/unit/ml2/test_ml2_plugin.py:310 +#: neutron/tests/unit/plugins/ml2/test_plugin.py:311 #, python-format msgid "The port '%s' was deleted" msgstr "" -#: neutron/tests/unit/ml2/drivers/mechanism_logger.py:33 +#: neutron/tests/unit/plugins/ml2/drivers/mechanism_logger.py:33 #, python-format msgid "" "%(method)s called with network settings %(current)s (original settings " "%(original)s) and network segments %(segments)s" msgstr "" -#: neutron/tests/unit/ml2/drivers/mechanism_logger.py:60 +#: neutron/tests/unit/plugins/ml2/drivers/mechanism_logger.py:60 #, python-format msgid "" "%(method)s called with subnet settings %(current)s (original settings " "%(original)s)" msgstr "" -#: neutron/tests/unit/ml2/drivers/mechanism_logger.py:86 +#: neutron/tests/unit/plugins/ml2/drivers/mechanism_logger.py:86 #, python-format msgid "" "%(method)s called with port settings %(current)s (original settings " @@ -5031,12 +5080,12 @@ msgid "" "%(segments_to_bind)s" msgstr "" -#: neutron/tests/unit/ml2/extensions/test_extension.py:54 +#: neutron/tests/unit/plugins/ml2/extensions/fake_extension.py:54 msgid "Adds test attributes to core 
resources." msgstr "" -#: neutron/tests/unit/openvswitch/test_ovs_neutron_agent.py:955 -#: neutron/tests/unit/openvswitch/test_ovs_neutron_agent.py:972 +#: neutron/tests/unit/plugins/openvswitch/agent/test_ovs_neutron_agent.py:961 +#: neutron/tests/unit/plugins/openvswitch/agent/test_ovs_neutron_agent.py:978 #, python-format msgid "Failed to set-up %(type)s tunnel port to %(ip)s" msgstr "" diff --git a/neutron/locale/pt_BR/LC_MESSAGES/neutron-log-info.po b/neutron/locale/pt_BR/LC_MESSAGES/neutron-log-info.po deleted file mode 100644 index 321c8452d31..00000000000 --- a/neutron/locale/pt_BR/LC_MESSAGES/neutron-log-info.po +++ /dev/null @@ -1,943 +0,0 @@ -# Translations template for neutron. -# Copyright (C) 2015 ORGANIZATION -# This file is distributed under the same license as the neutron project. -# -# Translators: -msgid "" -msgstr "" -"Project-Id-Version: Neutron\n" -"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2015-04-03 06:13+0000\n" -"PO-Revision-Date: 2015-03-31 22:26+0000\n" -"Last-Translator: openstackjenkins \n" -"Language-Team: Portuguese (Brazil) (http://www.transifex.com/projects/p/" -"neutron/language/pt_BR/)\n" -"Language: pt_BR\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"Generated-By: Babel 1.3\n" -"Plural-Forms: nplurals=2; plural=(n > 1);\n" - -#: neutron/manager.py:115 -#, python-format -msgid "Loading core plugin: %s" -msgstr "" - -#: neutron/manager.py:155 -#, python-format -msgid "Service %s is supported by the core plugin" -msgstr "Serviço %s é suportado pelo plugin núcleo" - -#: neutron/manager.py:173 -#, python-format -msgid "Loading Plugin: %s" -msgstr "Carregando Plug-in: %s" - -#: neutron/policy.py:114 -#, python-format -msgid "" -"Inserting policy:%(new_policy)s in place of deprecated policy:%(old_policy)s" -msgstr "" -"Inserindo política: %(new_policy)s no lugar de política deprecada: " -"%(old_policy)s" - -#: neutron/quota.py:215 -msgid "" -"ConfDriver 
is used as quota_driver because the loaded plugin does not " -"support 'quotas' table." -msgstr "" - -#: neutron/quota.py:220 -#, python-format -msgid "Loaded quota_driver: %s." -msgstr "" - -#: neutron/service.py:178 -#, python-format -msgid "Neutron service started, listening on %(host)s:%(port)s" -msgstr "Serviço Neutron iniciado, escutando em %(host)s:%(port)s" - -#: neutron/wsgi.py:779 -#, python-format -msgid "%(method)s %(url)s" -msgstr "%(method)s %(url)s" - -#: neutron/wsgi.py:796 -#, python-format -msgid "HTTP exception thrown: %s" -msgstr "Exceção de HTTP lançada: %s" - -#: neutron/wsgi.py:812 -#, python-format -msgid "%(url)s returned with HTTP %(status)d" -msgstr "%(url)s retornado com HTTP %(status)d" - -#: neutron/wsgi.py:815 -#, python-format -msgid "%(url)s returned a fault: %(exception)s" -msgstr "%(url)s retornou uma falha: %(exception)s" - -#: neutron/agent/securitygroups_rpc.py:82 -msgid "Disabled security-group extension." -msgstr "Extensão de grupo de segurança desativada." - -#: neutron/agent/securitygroups_rpc.py:84 -msgid "Disabled allowed-address-pairs extension." -msgstr "" - -#: neutron/agent/securitygroups_rpc.py:214 -#, python-format -msgid "" -"Skipping method %s as firewall is disabled or configured as " -"NoopFirewallDriver." 
-msgstr "" - -#: neutron/agent/securitygroups_rpc.py:226 -#, python-format -msgid "Preparing filters for devices %s" -msgstr "Preparando filtros para dispositivos %s" - -#: neutron/agent/securitygroups_rpc.py:256 -#, python-format -msgid "Security group rule updated %r" -msgstr "Regra do grupo de segurança atualizada %r" - -#: neutron/agent/securitygroups_rpc.py:263 -#, python-format -msgid "Security group member updated %r" -msgstr "Membro do grupo de segurança atualizado %r" - -#: neutron/agent/securitygroups_rpc.py:285 -msgid "Provider rule updated" -msgstr "Regra do provedor atualizada" - -#: neutron/agent/securitygroups_rpc.py:297 -#, python-format -msgid "Remove device filter for %r" -msgstr "Remover filtro de dispositivo para %r" - -#: neutron/agent/securitygroups_rpc.py:307 -msgid "Refresh firewall rules" -msgstr "Atualizar regras de firewall" - -#: neutron/agent/securitygroups_rpc.py:311 -msgid "No ports here to refresh firewall" -msgstr "Nenhuma porta aqui para atualizar firewall" - -#: neutron/agent/common/ovs_lib.py:393 -#, python-format -msgid "Port %(port_id)s not present in bridge %(br_name)s" -msgstr "" - -#: neutron/agent/dhcp/agent.py:93 neutron/agent/dhcp/agent.py:589 -msgid "DHCP agent started" -msgstr "Agente DHCP iniciado" - -#: neutron/agent/dhcp/agent.py:144 -msgid "Synchronizing state" -msgstr "Sincronizando estado" - -#: neutron/agent/dhcp/agent.py:165 -msgid "Synchronizing state complete" -msgstr "" - -#: neutron/agent/dhcp/agent.py:586 neutron/agent/l3/agent.py:622 -#: neutron/services/metering/agents/metering_agent.py:286 -#, python-format -msgid "agent_updated by server side %s!" -msgstr "agent_updated por lado do servidor %s!" - -#: neutron/agent/l3/agent.py:551 -msgid "L3 agent started" -msgstr "Agente L3 iniciado" - -#: neutron/agent/l3/ha.py:114 -#, python-format -msgid "Router %(router_id)s transitioned to %(state)s" -msgstr "" - -#: neutron/agent/l3/ha.py:124 -#, python-format -msgid "" -"Router %s is not managed by this agent. 
It was possibly deleted concurrently." -msgstr "" - -#: neutron/agent/linux/daemon.py:102 -#, python-format -msgid "Process runs with uid/gid: %(uid)s/%(gid)s" -msgstr "" - -#: neutron/agent/linux/dhcp.py:656 -#, python-format -msgid "" -"Cannot apply dhcp option %(opt)s because it's ip_version %(version)d is not " -"in port's address IP versions" -msgstr "" - -#: neutron/agent/linux/interface.py:268 neutron/agent/linux/interface.py:319 -#: neutron/agent/linux/interface.py:377 neutron/agent/linux/interface.py:420 -#, python-format -msgid "Device %s already exists" -msgstr "O dispositivo %s já existe" - -#: neutron/agent/linux/iptables_firewall.py:114 -#, python-format -msgid "Attempted to update port filter which is not filtered %s" -msgstr "Tentou atualizar o filtro de porta que não foi filtrado %s" - -#: neutron/agent/linux/iptables_firewall.py:125 -#, python-format -msgid "Attempted to remove port filter which is not filtered %r" -msgstr "Tentou remover o filtro de porta que não foi filtrado %r" - -#: neutron/api/extensions.py:404 -msgid "Initializing extension manager." -msgstr "Inicializando o Extension Manager." 
- -#: neutron/api/extensions.py:562 -#, python-format -msgid "Loaded extension: %s" -msgstr "Extensão carregada: %s" - -#: neutron/api/v2/base.py:93 -msgid "" -"Allow sorting is enabled because native pagination requires native sorting" -msgstr "" -"Permitir que a classificação seja ativada porque a paginação nativa requer " -"classificação nativa" - -#: neutron/api/v2/resource.py:94 -#, python-format -msgid "%(action)s failed (client error): %(exc)s" -msgstr "" - -#: neutron/callbacks/manager.py:135 -#, python-format -msgid "Notify callbacks for %(resource)s, %(event)s" -msgstr "" - -#: neutron/callbacks/manager.py:142 -#, python-format -msgid "Calling callback %s" -msgstr "" - -#: neutron/cmd/ovs_cleanup.py:73 -#, python-format -msgid "Deleting port: %s" -msgstr "" - -#: neutron/cmd/ovs_cleanup.py:103 -#, python-format -msgid "Cleaning bridge: %s" -msgstr "" - -#: neutron/cmd/ovs_cleanup.py:110 -msgid "OVS cleanup completed successfully" -msgstr "Limpeza de OVS concluída com êxito" - -#: neutron/cmd/eventlet/plugins/hyperv_neutron_agent.py:43 -#: neutron/plugins/ibm/agent/sdnve_neutron_agent.py:261 -#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:1017 -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1634 -#: neutron/plugins/sriovnicagent/sriov_nic_agent.py:345 -msgid "Agent initialized successfully, now running... " -msgstr "Agente inicializado com êxito; em execução agora... " - -#: neutron/common/config.py:204 -msgid "Logging enabled!" -msgstr "Criação de log ativada!" - -#: neutron/common/config.py:205 -#, python-format -msgid "%(prog)s version %(version)s" -msgstr "" - -#: neutron/common/config.py:224 -#, python-format -msgid "Config paste file: %s" -msgstr "Arquivo de colagem configurado: %s" - -#: neutron/common/ipv6_utils.py:63 -msgid "IPv6 is not enabled on this system." 
-msgstr "" - -#: neutron/db/agentschedulers_db.py:161 -msgid "" -"Skipping periodic DHCP agent status check because automatic network " -"rescheduling is disabled." -msgstr "" - -#: neutron/db/agentschedulers_db.py:196 -#, python-format -msgid "Scheduling unhosted network %s" -msgstr "" - -#: neutron/db/agentschedulers_db.py:203 -#, python-format -msgid "" -"Failed to schedule network %s, no eligible agents or it might be already " -"scheduled by another server" -msgstr "" - -#: neutron/db/agentschedulers_db.py:211 -#, python-format -msgid "Adding network %(net)s to agent %(agent)%s on host %(host)s" -msgstr "" - -#: neutron/db/db_base_plugin_v2.py:630 -#, python-format -msgid "" -"Validation for CIDR: %(new_cidr)s failed - overlaps with subnet " -"%(subnet_id)s (CIDR: %(cidr)s)" -msgstr "" -"A validação para CIDR: %(new_cidr)s falhou - se sobrepõe com a sub-rede " -"%(subnet_id)s (CIDR: %(cidr)s)" - -#: neutron/db/db_base_plugin_v2.py:657 -#, python-format -msgid "Found invalid IP address in pool: %(start)s - %(end)s:" -msgstr "Localizado endereço IP inválido no pool: %(start)s - %(end)s:" - -#: neutron/db/db_base_plugin_v2.py:664 -msgid "Specified IP addresses do not match the subnet IP version" -msgstr "Endereços IP especificado não correspondem à versão do IP da sub-rede" - -#: neutron/db/db_base_plugin_v2.py:668 -#, python-format -msgid "Start IP (%(start)s) is greater than end IP (%(end)s)" -msgstr "IP inicial (%(start)s) é maior que IP final (%(end)s)" - -#: neutron/db/db_base_plugin_v2.py:673 -#, python-format -msgid "Found pool larger than subnet CIDR:%(start)s - %(end)s" -msgstr "Localizado pool maior que a sub-rede CIDR:%(start)s - %(end)s" - -#: neutron/db/db_base_plugin_v2.py:697 -#, python-format -msgid "Found overlapping ranges: %(l_range)s and %(r_range)s" -msgstr "Localizados intervalos de sobreposição: %(l_range)s e %(r_range)s" - -#: neutron/db/db_base_plugin_v2.py:1507 -#, python-format -msgid "Found IP allocation %(alloc)s on subnet %(subnet)s, 
cannot delete" -msgstr "" - -#: neutron/db/l3_agentschedulers_db.py:78 -msgid "" -"Skipping period L3 agent status check because automatic router rescheduling " -"is disabled." -msgstr "" - -#: neutron/db/l3_db.py:1114 -#, python-format -msgid "Skipping port %s as no IP is configure on it" -msgstr "Ignorando a porta %s porque nenhum IP está configurado nela" - -#: neutron/db/l3_dvr_db.py:86 -#, python-format -msgid "Centralizing distributed router %s is not supported" -msgstr "" - -#: neutron/db/l3_dvr_db.py:535 -#, python-format -msgid "Agent Gateway port does not exist, so create one: %s" -msgstr "" - -#: neutron/db/l3_dvr_db.py:619 -#, python-format -msgid "SNAT interface port list does not exist, so create one: %s" -msgstr "" - -#: neutron/db/l3_dvrscheduler_db.py:312 -msgid "SNAT already bound to a service node." -msgstr "" - -#: neutron/db/l3_hamode_db.py:188 -#, python-format -msgid "" -"Attempt %(count)s to allocate a VRID in the network %(network)s for the " -"router %(router)s" -msgstr "" - -#: neutron/db/l3_hamode_db.py:271 -#, python-format -msgid "" -"Number of available agents lower than max_l3_agents_per_router. 
L3 agents " -"available: %s" -msgstr "" - -#: neutron/db/migration/alembic_migrations/heal_script.py:221 -#, python-format -msgid "Table %(old_t)r was renamed to %(new_t)r" -msgstr "" - -#: neutron/debug/commands.py:107 -#, python-format -msgid "%d probe(s) deleted" -msgstr "" - -#: neutron/notifiers/nova.py:266 -#, python-format -msgid "Nova event response: %s" -msgstr "" - -#: neutron/openstack/common/eventlet_backdoor.py:146 -#, python-format -msgid "Eventlet backdoor listening on %(port)s for process %(pid)d" -msgstr "Backdoor de Eventlet escutando na porta %(port)s pelo processo %(pid)d" - -#: neutron/openstack/common/periodic_task.py:120 -#, python-format -msgid "Skipping periodic task %(task)s because its interval is negative" -msgstr "Ignorando tarefa periódica %(task)s porque seu intervalo é negativo" - -#: neutron/openstack/common/periodic_task.py:125 -#, python-format -msgid "Skipping periodic task %(task)s because it is disabled" -msgstr "Ignorando tarefa periódica %(task)s porque ela está desativada" - -#: neutron/openstack/common/service.py:173 -#, python-format -msgid "Caught %s, exiting" -msgstr "%s capturadas, saindo" - -#: neutron/openstack/common/service.py:231 -msgid "Parent process has died unexpectedly, exiting" -msgstr "Processo pai saiu inesperadamente, saindo" - -#: neutron/openstack/common/service.py:262 -#, python-format -msgid "Child caught %s, exiting" -msgstr "Filho capturado %s, terminando" - -#: neutron/openstack/common/service.py:301 -msgid "Forking too fast, sleeping" -msgstr "Bifurcação muito rápida, suspendendo" - -#: neutron/openstack/common/service.py:320 -#, python-format -msgid "Started child %d" -msgstr "Filho %d iniciado" - -#: neutron/openstack/common/service.py:330 -#, python-format -msgid "Starting %d workers" -msgstr "Iniciando %d trabalhadores" - -#: neutron/openstack/common/service.py:347 -#, python-format -msgid "Child %(pid)d killed by signal %(sig)d" -msgstr "%(pid)d filho eliminado pelo sinal %(sig)d" - -#: 
neutron/openstack/common/service.py:351 -#, python-format -msgid "Child %(pid)s exited with status %(code)d" -msgstr "Filho %(pid)s encerrando com status %(code)d" - -#: neutron/openstack/common/service.py:390 -#, python-format -msgid "Caught %s, stopping children" -msgstr "%s capturado, parando filhos" - -#: neutron/openstack/common/service.py:399 -msgid "Wait called after thread killed. Cleaning up." -msgstr "" - -#: neutron/openstack/common/service.py:415 -#, python-format -msgid "Waiting on %d children to exit" -msgstr "Aguardando em %d filhos para sair" - -#: neutron/plugins/brocade/NeutronPlugin.py:307 -#, python-format -msgid "Allocated vlan (%d) from the pool" -msgstr "alocada VLAN (%d) do pool" - -#: neutron/plugins/cisco/models/virt_phy_sw_v2.py:117 -#, python-format -msgid "No %s Plugin loaded" -msgstr "Nenhum %s Plug-in carregado" - -#: neutron/plugins/cisco/models/virt_phy_sw_v2.py:118 -#, python-format -msgid "%(plugin_key)s: %(function_name)s with args %(args)s ignored" -msgstr "%(plugin_key)s: %(function_name)s com args %(args)s ignorado" - -#: neutron/plugins/embrane/common/utils.py:44 -msgid "No ip allocation set" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api.py:77 -#, python-format -msgid "The IP addr of available SDN-VE controllers: %s" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api.py:80 -#, python-format -msgid "The SDN-VE controller IP address: %s" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api.py:236 -msgid "Bad resource for forming a list request" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api.py:246 -msgid "Bad resource for forming a show request" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api.py:256 -msgid "Bad resource for forming a create request" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api.py:268 -msgid "Bad resource for forming a update request" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api.py:279 -msgid "Bad resource for forming a delete request" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api.py:307 -#, python-format -msgid "Non 
matching tenant and network types: %(ttype)s %(ntype)s" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api_fake.py:32 -msgid "Fake SDNVE controller initialized" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api_fake.py:35 -msgid "Fake SDNVE controller: list" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api_fake.py:39 -msgid "Fake SDNVE controller: show" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api_fake.py:43 -msgid "Fake SDNVE controller: create" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api_fake.py:47 -msgid "Fake SDNVE controller: update" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api_fake.py:51 -msgid "Fake SDNVE controller: delete" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api_fake.py:55 -msgid "Fake SDNVE controller: get tenant by id" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api_fake.py:59 -msgid "Fake SDNVE controller: check and create tenant" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api_fake.py:63 -msgid "Fake SDNVE controller: get controller" -msgstr "" - -#: neutron/plugins/ibm/sdnve_neutron_plugin.py:147 -msgid "Set a new controller if needed." -msgstr "" - -#: neutron/plugins/ibm/sdnve_neutron_plugin.py:153 -#, python-format -msgid "Set the controller to a new controller: %s" -msgstr "" - -#: neutron/plugins/ibm/agent/sdnve_neutron_agent.py:189 -#, python-format -msgid "" -"Mapping physical network %(physical_network)s to interface %(interface)s" -msgstr "" - -#: neutron/plugins/ibm/agent/sdnve_neutron_agent.py:220 -#, python-format -msgid "" -"Loop iteration exceeded interval (%(polling_interval)s vs. %(elapsed)s)!" -msgstr "" -"A iteração do loop excedeu o intervalo (%(polling_interval)s vs. " -"%(elapsed)s)!" 
- -#: neutron/plugins/ibm/agent/sdnve_neutron_agent.py:234 -#, python-format -msgid "Controller IPs: %s" -msgstr "" - -#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:793 -#: neutron/plugins/oneconvergence/agent/nvsd_neutron_agent.py:89 -#: neutron/plugins/sriovnicagent/sriov_nic_agent.py:99 -#, python-format -msgid "RPC agent_id: %s" -msgstr "agent_id de RPC: %s" - -#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:863 -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1155 -#: neutron/plugins/sriovnicagent/sriov_nic_agent.py:209 -#, python-format -msgid "Port %(device)s updated. Details: %(details)s" -msgstr "Porta %(device)s atualizada. Detalhes: %(details)s" - -#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:896 -#, python-format -msgid "Device %s not defined on plugin" -msgstr "Dispositivo %s não definido no plug-in" - -#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:903 -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1210 -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1227 -#, python-format -msgid "Attachment %s removed" -msgstr "Anexo %s removido" - -#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:915 -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1239 -#: neutron/plugins/sriovnicagent/sriov_nic_agent.py:235 -#, python-format -msgid "Port %s updated." -msgstr "Porta %s atualizada." - -#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:968 -msgid "LinuxBridge Agent RPC Daemon Started!" -msgstr "Daemon RPC do Agente LinuxBridge Iniciado!" - -#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:978 -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1429 -#: neutron/plugins/sriovnicagent/sriov_nic_agent.py:251 -msgid "Agent out of sync with plugin!" -msgstr "Agente fora de sincronização com o plug-in!" 
- -#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:1012 -#: neutron/plugins/ml2/drivers/mlnx/agent/eswitch_neutron_agent.py:43 -#, python-format -msgid "Interface mappings: %s" -msgstr "Mapeamentos da interface: %s" - -#: neutron/plugins/ml2/db.py:60 -#, python-format -msgid "" -"Added segment %(id)s of type %(network_type)s for network %(network_id)s" -msgstr "" -"Adicionado segmento %(id)s de tipo %(network_type)s para a rede " -"%(network_id)s" - -#: neutron/plugins/ml2/managers.py:43 -#, python-format -msgid "Configured type driver names: %s" -msgstr "Configurado nomes para o driver de tipo: %s" - -#: neutron/plugins/ml2/managers.py:48 -#, python-format -msgid "Loaded type driver names: %s" -msgstr "Carregados nomes do driver de tipo: %s" - -#: neutron/plugins/ml2/managers.py:64 -#, python-format -msgid "Registered types: %s" -msgstr "Tipos registrados: %s" - -#: neutron/plugins/ml2/managers.py:75 -#, python-format -msgid "Tenant network_types: %s" -msgstr "Tipos de network_types: %s" - -#: neutron/plugins/ml2/managers.py:161 -#, python-format -msgid "Initializing driver for type '%s'" -msgstr "inicializando driver para o tipo '%s'" - -#: neutron/plugins/ml2/managers.py:270 -#, python-format -msgid "Configured mechanism driver names: %s" -msgstr "Configurados nomes para o driver de mecanismo: %s" - -#: neutron/plugins/ml2/managers.py:276 -#, python-format -msgid "Loaded mechanism driver names: %s" -msgstr "Carregados nomes do driver de mecanismo: %s" - -#: neutron/plugins/ml2/managers.py:288 -#, python-format -msgid "Registered mechanism drivers: %s" -msgstr "Registrados drivers de mecanismo : %s" - -#: neutron/plugins/ml2/managers.py:293 -#, python-format -msgid "Initializing mechanism driver '%s'" -msgstr "Inicializando driver de mecanismo '%s'" - -#: neutron/plugins/ml2/managers.py:699 -#, python-format -msgid "Configured extension driver names: %s" -msgstr "" - -#: neutron/plugins/ml2/managers.py:705 -#, python-format -msgid "Loaded 
extension driver names: %s" -msgstr "" - -#: neutron/plugins/ml2/managers.py:716 -#, python-format -msgid "Registered extension drivers: %s" -msgstr "" - -#: neutron/plugins/ml2/managers.py:722 -#, python-format -msgid "Initializing extension driver '%s'" -msgstr "" - -#: neutron/plugins/ml2/managers.py:730 -#, python-format -msgid "Got %(alias)s extension from driver '%(drv)s'" -msgstr "" - -#: neutron/plugins/ml2/managers.py:779 -#, python-format -msgid "Extended network dict for driver '%(drv)s'" -msgstr "" - -#: neutron/plugins/ml2/managers.py:786 -#, python-format -msgid "Extended subnet dict for driver '%(drv)s'" -msgstr "" - -#: neutron/plugins/ml2/managers.py:793 -#, python-format -msgid "Extended port dict for driver '%(drv)s'" -msgstr "" - -#: neutron/plugins/ml2/plugin.py:144 -msgid "Modular L2 Plugin initialization complete" -msgstr "Inicialização de plug-in L2 modular concluída" - -#: neutron/plugins/ml2/plugin.py:281 -#, python-format -msgid "Attempt %(count)s to bind port %(port)s" -msgstr "" - -#: neutron/plugins/ml2/plugin.py:678 -#, python-format -msgid "Port %s was deleted concurrently" -msgstr "" - -#: neutron/plugins/ml2/plugin.py:689 -#, python-format -msgid "Subnet %s was deleted concurrently" -msgstr "" - -#: neutron/plugins/ml2/plugin.py:1329 -#, python-format -msgid "" -"Binding info for port %s was not found, it might have been deleted already." 
-msgstr "" - -#: neutron/plugins/ml2/drivers/type_flat.py:72 -msgid "Arbitrary flat physical_network names allowed" -msgstr "Nomes arbitrários de rede flat physical_network permitidos" - -#: neutron/plugins/ml2/drivers/type_flat.py:78 -#, python-format -msgid "Allowable flat physical_network names: %s" -msgstr "Nomes permitidos de rede flat physical_network : %s" - -#: neutron/plugins/ml2/drivers/type_flat.py:85 -msgid "ML2 FlatTypeDriver initialization complete" -msgstr "Inicialização do ML2 FlatTypeDriver concluída" - -#: neutron/plugins/ml2/drivers/type_local.py:37 -msgid "ML2 LocalTypeDriver initialization complete" -msgstr "Inicialização do ML2 LocalTypeDriver concluída" - -#: neutron/plugins/ml2/drivers/type_tunnel.py:116 -#, python-format -msgid "%(type)s ID ranges: %(range)s" -msgstr "%(type)s faixas de ID: %(range)s" - -#: neutron/plugins/ml2/drivers/type_vlan.py:99 -#, python-format -msgid "Network VLAN ranges: %s" -msgstr "Intervalos de VLAN de rede: %s" - -#: neutron/plugins/ml2/drivers/type_vlan.py:166 -msgid "VlanTypeDriver initialization complete" -msgstr "Inicialização do VlanTypeDriver concluída" - -#: neutron/plugins/ml2/drivers/arista/mechanism_arista.py:104 -#, python-format -msgid "Network %s is not created as it is not found in Arista DB" -msgstr "" - -#: neutron/plugins/ml2/drivers/arista/mechanism_arista.py:117 -#, python-format -msgid "Network name changed to %s" -msgstr "Nome da rede alterado para %s" - -#: neutron/plugins/ml2/drivers/arista/mechanism_arista.py:147 -#, python-format -msgid "Network %s is not updated as it is not found in Arista DB" -msgstr "" - -#: neutron/plugins/ml2/drivers/arista/mechanism_arista.py:243 -#, python-format -msgid "VM %s is not created as it is not found in Arista DB" -msgstr "" - -#: neutron/plugins/ml2/drivers/arista/mechanism_arista.py:257 -#, python-format -msgid "Port name changed to %s" -msgstr "" - -#: neutron/plugins/ml2/drivers/arista/mechanism_arista.py:310 -#, python-format -msgid "VM %s is not 
updated as it is not found in Arista DB" -msgstr "" - -#: neutron/plugins/ml2/drivers/cisco/apic/apic_topology.py:78 -msgid "APIC service agent starting ..." -msgstr "" - -#: neutron/plugins/ml2/drivers/cisco/apic/apic_topology.py:95 -msgid "APIC service agent started" -msgstr "" - -#: neutron/plugins/ml2/drivers/cisco/apic/apic_topology.py:179 -#, python-format -msgid "APIC host agent: agent starting on %s" -msgstr "" - -#: neutron/plugins/ml2/drivers/cisco/apic/apic_topology.py:199 -#, python-format -msgid "APIC host agent: started on %s" -msgstr "" - -#: neutron/plugins/ml2/drivers/freescale/mechanism_fslsdn.py:40 -msgid "Initializing CRD client... " -msgstr "" - -#: neutron/plugins/ml2/drivers/mlnx/agent/eswitch_neutron_agent.py:54 -msgid "Agent initialised successfully, now running... " -msgstr "" - -#: neutron/plugins/ml2/extensions/port_security.py:33 -msgid "PortSecurityExtensionDriver initialization complete" -msgstr "" - -#: neutron/plugins/oneconvergence/agent/nvsd_neutron_agent.py:153 -msgid "NVSD Agent initialized successfully, now running... " -msgstr "" - -#: neutron/plugins/openvswitch/agent/ovs_dvr_neutron_agent.py:204 -#, python-format -msgid "L2 Agent operating in DVR Mode with MAC %s" -msgstr "" - -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:518 -#, python-format -msgid "Assigning %(vlan_id)s as local vlan for net-id=%(net_uuid)s" -msgstr "Designando %(vlan_id)s como vlan local para net-id=%(net_uuid)s" - -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:619 -#, python-format -msgid "Reclaiming vlan = %(vlan_id)s from net-id = %(net_uuid)s" -msgstr "Recuperando vlan = %(vlan_id)s a partir de net-id = %(net_uuid)s" - -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:714 -#, python-format -msgid "port_unbound(): net_uuid %s not in local_vlan_map" -msgstr "" - -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:783 -#, python-format -msgid "Adding %s to list of bridges." 
-msgstr "Adicionando %s na lista de pontes." - -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:926 -#, python-format -msgid "Mapping physical network %(physical_network)s to bridge %(bridge)s" -msgstr "Mapeamento de rede física %(physical_network)s para a ponte %(bridge)s" - -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1035 -#, python-format -msgid "Port '%(port_name)s' has lost its vlan tag '%(vlan_tag)d'!" -msgstr "" - -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1149 -#, python-format -msgid "" -"Port %s was not found on the integration bridge and will therefore not be " -"processed" -msgstr "" - -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1179 -#, python-format -msgid "Configuration for device %s completed." -msgstr "" - -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1198 -#, python-format -msgid "Ancillary Port %s added" -msgstr "Porta auxiliar %s adicionada" - -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1457 -msgid "Agent tunnel out of sync with plugin!" -msgstr "Túnel do agente fora de sincronização com o plug-in!" - -#: neutron/plugins/sriovnicagent/sriov_nic_agent.py:191 -#, python-format -msgid "No device with MAC %s defined on agent." -msgstr "" - -#: neutron/plugins/sriovnicagent/sriov_nic_agent.py:216 -#, python-format -msgid "Device with MAC %s not defined on plugin" -msgstr "" - -#: neutron/plugins/sriovnicagent/sriov_nic_agent.py:223 -#, python-format -msgid "Removing device with mac_address %s" -msgstr "" - -#: neutron/plugins/sriovnicagent/sriov_nic_agent.py:244 -msgid "SRIOV NIC Agent RPC Daemon Started!" 
-msgstr "" - -#: neutron/plugins/sriovnicagent/sriov_nic_agent.py:333 -#, python-format -msgid "Physical Devices mappings: %s" -msgstr "" - -#: neutron/plugins/sriovnicagent/sriov_nic_agent.py:334 -#, python-format -msgid "Exclude Devices: %s" -msgstr "" - -#: neutron/scheduler/dhcp_agent_scheduler.py:110 -#, python-format -msgid "Agent %s already present" -msgstr "" - -#: neutron/server/__init__.py:50 -msgid "RPC was already started in parent process by plugin." -msgstr "" - -#: neutron/services/service_base.py:99 -#, python-format -msgid "Default provider is not specified for service type %s" -msgstr "Provedor padrão não foi especificado para o tipo de serviço %s" - -#: neutron/services/l3_router/l3_arista.py:247 -msgid "Syncing Neutron Router DB <-> EOS" -msgstr "" - -#: neutron/services/metering/agents/metering_agent.py:96 -#, python-format -msgid "Loading Metering driver %s" -msgstr "Carregando driver de medição %s" - -#: neutron/services/metering/drivers/iptables/iptables_driver.py:89 -#, python-format -msgid "Loading interface driver %s" -msgstr "" diff --git a/neutron/locale/zh_CN/LC_MESSAGES/neutron-log-info.po b/neutron/locale/zh_CN/LC_MESSAGES/neutron-log-info.po deleted file mode 100644 index 0b950ac7dc2..00000000000 --- a/neutron/locale/zh_CN/LC_MESSAGES/neutron-log-info.po +++ /dev/null @@ -1,936 +0,0 @@ -# Translations template for neutron. -# Copyright (C) 2015 ORGANIZATION -# This file is distributed under the same license as the neutron project. 
-# -# Translators: -# 汪军 , 2015 -msgid "" -msgstr "" -"Project-Id-Version: Neutron\n" -"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2015-04-03 06:13+0000\n" -"PO-Revision-Date: 2015-03-31 22:26+0000\n" -"Last-Translator: openstackjenkins \n" -"Language-Team: Chinese (China) (http://www.transifex.com/projects/p/neutron/" -"language/zh_CN/)\n" -"Language: zh_CN\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"Generated-By: Babel 1.3\n" -"Plural-Forms: nplurals=1; plural=0;\n" - -#: neutron/manager.py:115 -#, python-format -msgid "Loading core plugin: %s" -msgstr "加载核心插件: %s" - -#: neutron/manager.py:155 -#, python-format -msgid "Service %s is supported by the core plugin" -msgstr "服务%s由核心插件支持" - -#: neutron/manager.py:173 -#, python-format -msgid "Loading Plugin: %s" -msgstr "正在装入插件:%s" - -#: neutron/policy.py:114 -#, python-format -msgid "" -"Inserting policy:%(new_policy)s in place of deprecated policy:%(old_policy)s" -msgstr "在被废弃的策略:%(old_policy)s位置上插入策略:%(new_policy)s " - -#: neutron/quota.py:215 -msgid "" -"ConfDriver is used as quota_driver because the loaded plugin does not " -"support 'quotas' table." -msgstr "" - -#: neutron/quota.py:220 -#, python-format -msgid "Loaded quota_driver: %s." 
-msgstr "" - -#: neutron/service.py:178 -#, python-format -msgid "Neutron service started, listening on %(host)s:%(port)s" -msgstr "Neutron服务启动,正在%(host)s:%(port)s上监听" - -#: neutron/wsgi.py:779 -#, python-format -msgid "%(method)s %(url)s" -msgstr "%(method)s %(url)s" - -#: neutron/wsgi.py:796 -#, python-format -msgid "HTTP exception thrown: %s" -msgstr "HTTP 异常抛出:%s" - -#: neutron/wsgi.py:812 -#, python-format -msgid "%(url)s returned with HTTP %(status)d" -msgstr "%(url)s 随HTTP %(status)d返回" - -#: neutron/wsgi.py:815 -#, python-format -msgid "%(url)s returned a fault: %(exception)s" -msgstr "%(url)s 返回了故障:%(exception)s" - -#: neutron/agent/securitygroups_rpc.py:82 -msgid "Disabled security-group extension." -msgstr "已禁用安全组扩展。" - -#: neutron/agent/securitygroups_rpc.py:84 -msgid "Disabled allowed-address-pairs extension." -msgstr "" - -#: neutron/agent/securitygroups_rpc.py:214 -#, python-format -msgid "" -"Skipping method %s as firewall is disabled or configured as " -"NoopFirewallDriver." 
-msgstr "" - -#: neutron/agent/securitygroups_rpc.py:226 -#, python-format -msgid "Preparing filters for devices %s" -msgstr "正在为设备 %s 准备过滤器" - -#: neutron/agent/securitygroups_rpc.py:256 -#, python-format -msgid "Security group rule updated %r" -msgstr "已更新安全组规则 %r" - -#: neutron/agent/securitygroups_rpc.py:263 -#, python-format -msgid "Security group member updated %r" -msgstr "已更新安全组成员 %r" - -#: neutron/agent/securitygroups_rpc.py:285 -msgid "Provider rule updated" -msgstr "已更新提供程序规则" - -#: neutron/agent/securitygroups_rpc.py:297 -#, python-format -msgid "Remove device filter for %r" -msgstr "请为 %r 除去设备过滤器" - -#: neutron/agent/securitygroups_rpc.py:307 -msgid "Refresh firewall rules" -msgstr "请刷新防火墙规则" - -#: neutron/agent/securitygroups_rpc.py:311 -msgid "No ports here to refresh firewall" -msgstr "" - -#: neutron/agent/common/ovs_lib.py:393 -#, python-format -msgid "Port %(port_id)s not present in bridge %(br_name)s" -msgstr "" - -#: neutron/agent/dhcp/agent.py:93 neutron/agent/dhcp/agent.py:589 -msgid "DHCP agent started" -msgstr "已启动 DHCP 代理" - -#: neutron/agent/dhcp/agent.py:144 -msgid "Synchronizing state" -msgstr "正在使状态同步" - -#: neutron/agent/dhcp/agent.py:165 -msgid "Synchronizing state complete" -msgstr "同步状态完成" - -#: neutron/agent/dhcp/agent.py:586 neutron/agent/l3/agent.py:622 -#: neutron/services/metering/agents/metering_agent.py:286 -#, python-format -msgid "agent_updated by server side %s!" -msgstr "服务器端 %s 已更新代理!" - -#: neutron/agent/l3/agent.py:551 -msgid "L3 agent started" -msgstr "已启动 L3 代理" - -#: neutron/agent/l3/ha.py:114 -#, python-format -msgid "Router %(router_id)s transitioned to %(state)s" -msgstr "路由器%(router_id)s 转换为%(state)s" - -#: neutron/agent/l3/ha.py:124 -#, python-format -msgid "" -"Router %s is not managed by this agent. It was possibly deleted concurrently." 
-msgstr "" - -#: neutron/agent/linux/daemon.py:102 -#, python-format -msgid "Process runs with uid/gid: %(uid)s/%(gid)s" -msgstr "进程运行uid/gid: %(uid)s/%(gid)s" - -#: neutron/agent/linux/dhcp.py:656 -#, python-format -msgid "" -"Cannot apply dhcp option %(opt)s because it's ip_version %(version)d is not " -"in port's address IP versions" -msgstr "" - -#: neutron/agent/linux/interface.py:268 neutron/agent/linux/interface.py:319 -#: neutron/agent/linux/interface.py:377 neutron/agent/linux/interface.py:420 -#, python-format -msgid "Device %s already exists" -msgstr "设备 %s 已存在" - -#: neutron/agent/linux/iptables_firewall.py:114 -#, python-format -msgid "Attempted to update port filter which is not filtered %s" -msgstr "已尝试更新未过滤的端口过滤器 %s" - -#: neutron/agent/linux/iptables_firewall.py:125 -#, python-format -msgid "Attempted to remove port filter which is not filtered %r" -msgstr "已尝试除去未过滤的端口过滤器 %r" - -#: neutron/api/extensions.py:404 -msgid "Initializing extension manager." -msgstr "正在初始化扩展管理员。" - -#: neutron/api/extensions.py:562 -#, python-format -msgid "Loaded extension: %s" -msgstr "加载的扩展:%s" - -#: neutron/api/v2/base.py:93 -msgid "" -"Allow sorting is enabled because native pagination requires native sorting" -msgstr "已启用允许排序,因为本机分页需要本机排序" - -#: neutron/api/v2/resource.py:94 -#, python-format -msgid "%(action)s failed (client error): %(exc)s" -msgstr "%(action)s 失败 (客户端错误): %(exc)s" - -#: neutron/callbacks/manager.py:135 -#, python-format -msgid "Notify callbacks for %(resource)s, %(event)s" -msgstr "" - -#: neutron/callbacks/manager.py:142 -#, python-format -msgid "Calling callback %s" -msgstr "" - -#: neutron/cmd/ovs_cleanup.py:73 -#, python-format -msgid "Deleting port: %s" -msgstr "正在删除端口: %s" - -#: neutron/cmd/ovs_cleanup.py:103 -#, python-format -msgid "Cleaning bridge: %s" -msgstr "" - -#: neutron/cmd/ovs_cleanup.py:110 -msgid "OVS cleanup completed successfully" -msgstr "OVS 清除已成功完成" - -#: neutron/cmd/eventlet/plugins/hyperv_neutron_agent.py:43 -#: 
neutron/plugins/ibm/agent/sdnve_neutron_agent.py:261 -#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:1017 -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1634 -#: neutron/plugins/sriovnicagent/sriov_nic_agent.py:345 -msgid "Agent initialized successfully, now running... " -msgstr "代理已成功初始化,现在正在运行..." - -#: neutron/common/config.py:204 -msgid "Logging enabled!" -msgstr "已启用日志记录!" - -#: neutron/common/config.py:205 -#, python-format -msgid "%(prog)s version %(version)s" -msgstr "%(prog)s 版本 %(version)s" - -#: neutron/common/config.py:224 -#, python-format -msgid "Config paste file: %s" -msgstr "配置粘贴文件:%s" - -#: neutron/common/ipv6_utils.py:63 -msgid "IPv6 is not enabled on this system." -msgstr "IPv6在本系统上未使能。" - -#: neutron/db/agentschedulers_db.py:161 -msgid "" -"Skipping periodic DHCP agent status check because automatic network " -"rescheduling is disabled." -msgstr "" - -#: neutron/db/agentschedulers_db.py:196 -#, python-format -msgid "Scheduling unhosted network %s" -msgstr "" - -#: neutron/db/agentschedulers_db.py:203 -#, python-format -msgid "" -"Failed to schedule network %s, no eligible agents or it might be already " -"scheduled by another server" -msgstr "" - -#: neutron/db/agentschedulers_db.py:211 -#, python-format -msgid "Adding network %(net)s to agent %(agent)%s on host %(host)s" -msgstr "在主机 %(host)s上添加网络%(net)s到代理%(agent)%s" - -#: neutron/db/db_base_plugin_v2.py:630 -#, python-format -msgid "" -"Validation for CIDR: %(new_cidr)s failed - overlaps with subnet " -"%(subnet_id)s (CIDR: %(cidr)s)" -msgstr "" -"针对 CIDR %(new_cidr)s 的验证失败 - 与子网 %(subnet_id)s(CIDR 为 %(cidr)s)" -"重叠" - -#: neutron/db/db_base_plugin_v2.py:657 -#, python-format -msgid "Found invalid IP address in pool: %(start)s - %(end)s:" -msgstr "在池中找到无效 IP 地址:%(start)s - %(end)s:" - -#: neutron/db/db_base_plugin_v2.py:664 -msgid "Specified IP addresses do not match the subnet IP version" -msgstr "指定的 IP 地址与子网 IP 版本不匹配" - -#: 
neutron/db/db_base_plugin_v2.py:668 -#, python-format -msgid "Start IP (%(start)s) is greater than end IP (%(end)s)" -msgstr "起始 IP (%(start)s) 大于结束 IP (%(end)s)" - -#: neutron/db/db_base_plugin_v2.py:673 -#, python-format -msgid "Found pool larger than subnet CIDR:%(start)s - %(end)s" -msgstr "找到超过子网 CIDR (%(start)s - %(end)s) 的池" - -#: neutron/db/db_base_plugin_v2.py:697 -#, python-format -msgid "Found overlapping ranges: %(l_range)s and %(r_range)s" -msgstr "找到重叠范围:%(l_range)s 和 %(r_range)s" - -#: neutron/db/db_base_plugin_v2.py:1507 -#, python-format -msgid "Found IP allocation %(alloc)s on subnet %(subnet)s, cannot delete" -msgstr "" - -#: neutron/db/l3_agentschedulers_db.py:78 -msgid "" -"Skipping period L3 agent status check because automatic router rescheduling " -"is disabled." -msgstr "" - -#: neutron/db/l3_db.py:1114 -#, python-format -msgid "Skipping port %s as no IP is configure on it" -msgstr "正在跳过端口 %s,因为没有在该端口上配置任何 IP" - -#: neutron/db/l3_dvr_db.py:86 -#, python-format -msgid "Centralizing distributed router %s is not supported" -msgstr "" - -#: neutron/db/l3_dvr_db.py:535 -#, python-format -msgid "Agent Gateway port does not exist, so create one: %s" -msgstr "" - -#: neutron/db/l3_dvr_db.py:619 -#, python-format -msgid "SNAT interface port list does not exist, so create one: %s" -msgstr "" - -#: neutron/db/l3_dvrscheduler_db.py:312 -msgid "SNAT already bound to a service node." -msgstr "SNAT 已经绑定到服务节点。" - -#: neutron/db/l3_hamode_db.py:188 -#, python-format -msgid "" -"Attempt %(count)s to allocate a VRID in the network %(network)s for the " -"router %(router)s" -msgstr "" - -#: neutron/db/l3_hamode_db.py:271 -#, python-format -msgid "" -"Number of available agents lower than max_l3_agents_per_router. 
L3 agents " -"available: %s" -msgstr "" - -#: neutron/db/migration/alembic_migrations/heal_script.py:221 -#, python-format -msgid "Table %(old_t)r was renamed to %(new_t)r" -msgstr "表 %(old_t)r 已经更名为 %(new_t)r" - -#: neutron/debug/commands.py:107 -#, python-format -msgid "%d probe(s) deleted" -msgstr "" - -#: neutron/notifiers/nova.py:266 -#, python-format -msgid "Nova event response: %s" -msgstr "" - -#: neutron/openstack/common/eventlet_backdoor.py:146 -#, python-format -msgid "Eventlet backdoor listening on %(port)s for process %(pid)d" -msgstr "Eventlet为进程 %(pid)d 在后台监听 %(port)s " - -#: neutron/openstack/common/periodic_task.py:120 -#, python-format -msgid "Skipping periodic task %(task)s because its interval is negative" -msgstr "正在跳过周期性任务 %(task)s,因为其时间间隔为负" - -#: neutron/openstack/common/periodic_task.py:125 -#, python-format -msgid "Skipping periodic task %(task)s because it is disabled" -msgstr "正在跳过周期性任务 %(task)s,因为它已禁用" - -#: neutron/openstack/common/service.py:173 -#, python-format -msgid "Caught %s, exiting" -msgstr "捕获到 %s,正在退出" - -#: neutron/openstack/common/service.py:231 -msgid "Parent process has died unexpectedly, exiting" -msgstr "父进程已意外终止,正在退出" - -#: neutron/openstack/common/service.py:262 -#, python-format -msgid "Child caught %s, exiting" -msgstr "子代捕获 %s,正在退出" - -#: neutron/openstack/common/service.py:301 -msgid "Forking too fast, sleeping" -msgstr "派生速度太快,正在休眠" - -#: neutron/openstack/common/service.py:320 -#, python-format -msgid "Started child %d" -msgstr "已启动子代 %d" - -#: neutron/openstack/common/service.py:330 -#, python-format -msgid "Starting %d workers" -msgstr "正在启动 %d 工作程序" - -#: neutron/openstack/common/service.py:347 -#, python-format -msgid "Child %(pid)d killed by signal %(sig)d" -msgstr "信号 %(sig)d 已终止子代 %(pid)d" - -#: neutron/openstack/common/service.py:351 -#, python-format -msgid "Child %(pid)s exited with status %(code)d" -msgstr "子代 %(pid)s 已退出,状态为 %(code)d" - -#: neutron/openstack/common/service.py:390 -#, python-format 
-msgid "Caught %s, stopping children" -msgstr "捕获到 %s,正在停止子代" - -#: neutron/openstack/common/service.py:399 -msgid "Wait called after thread killed. Cleaning up." -msgstr "线程结束,正在清理" - -#: neutron/openstack/common/service.py:415 -#, python-format -msgid "Waiting on %d children to exit" -msgstr "正在等待 %d 个子代退出" - -#: neutron/plugins/brocade/NeutronPlugin.py:307 -#, python-format -msgid "Allocated vlan (%d) from the pool" -msgstr "已从池分配 vlan (%d)" - -#: neutron/plugins/cisco/models/virt_phy_sw_v2.py:117 -#, python-format -msgid "No %s Plugin loaded" -msgstr "未装入任何 %s 插件" - -#: neutron/plugins/cisco/models/virt_phy_sw_v2.py:118 -#, python-format -msgid "%(plugin_key)s: %(function_name)s with args %(args)s ignored" -msgstr "%(plugin_key)s:已忽略具有自变量 %(args)s 的 %(function_name)s" - -#: neutron/plugins/embrane/common/utils.py:44 -msgid "No ip allocation set" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api.py:77 -#, python-format -msgid "The IP addr of available SDN-VE controllers: %s" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api.py:80 -#, python-format -msgid "The SDN-VE controller IP address: %s" -msgstr "SDN-VE 控制器 IP 地址: %s" - -#: neutron/plugins/ibm/sdnve_api.py:236 -msgid "Bad resource for forming a list request" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api.py:246 -msgid "Bad resource for forming a show request" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api.py:256 -msgid "Bad resource for forming a create request" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api.py:268 -msgid "Bad resource for forming a update request" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api.py:279 -msgid "Bad resource for forming a delete request" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api.py:307 -#, python-format -msgid "Non matching tenant and network types: %(ttype)s %(ntype)s" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api_fake.py:32 -msgid "Fake SDNVE controller initialized" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api_fake.py:35 -msgid "Fake SDNVE controller: list" -msgstr "" - 
-#: neutron/plugins/ibm/sdnve_api_fake.py:39 -msgid "Fake SDNVE controller: show" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api_fake.py:43 -msgid "Fake SDNVE controller: create" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api_fake.py:47 -msgid "Fake SDNVE controller: update" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api_fake.py:51 -msgid "Fake SDNVE controller: delete" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api_fake.py:55 -msgid "Fake SDNVE controller: get tenant by id" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api_fake.py:59 -msgid "Fake SDNVE controller: check and create tenant" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api_fake.py:63 -msgid "Fake SDNVE controller: get controller" -msgstr "" - -#: neutron/plugins/ibm/sdnve_neutron_plugin.py:147 -msgid "Set a new controller if needed." -msgstr "" - -#: neutron/plugins/ibm/sdnve_neutron_plugin.py:153 -#, python-format -msgid "Set the controller to a new controller: %s" -msgstr "" - -#: neutron/plugins/ibm/agent/sdnve_neutron_agent.py:189 -#, python-format -msgid "" -"Mapping physical network %(physical_network)s to interface %(interface)s" -msgstr "" - -#: neutron/plugins/ibm/agent/sdnve_neutron_agent.py:220 -#, python-format -msgid "" -"Loop iteration exceeded interval (%(polling_interval)s vs. %(elapsed)s)!" -msgstr "循环迭代超过时间间隔(%(polling_interval)s 对 %(elapsed)s)!" - -#: neutron/plugins/ibm/agent/sdnve_neutron_agent.py:234 -#, python-format -msgid "Controller IPs: %s" -msgstr "" - -#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:793 -#: neutron/plugins/oneconvergence/agent/nvsd_neutron_agent.py:89 -#: neutron/plugins/sriovnicagent/sriov_nic_agent.py:99 -#, python-format -msgid "RPC agent_id: %s" -msgstr "RPC agent_id:%s" - -#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:863 -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1155 -#: neutron/plugins/sriovnicagent/sriov_nic_agent.py:209 -#, python-format -msgid "Port %(device)s updated. 
Details: %(details)s" -msgstr "端口 %(device)s 已更新。详细信息:%(details)s" - -#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:896 -#, python-format -msgid "Device %s not defined on plugin" -msgstr "未在插件上定义设备 %s" - -#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:903 -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1210 -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1227 -#, python-format -msgid "Attachment %s removed" -msgstr "已除去附件 %s" - -#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:915 -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1239 -#: neutron/plugins/sriovnicagent/sriov_nic_agent.py:235 -#, python-format -msgid "Port %s updated." -msgstr "端口 %s 已更新。" - -#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:968 -msgid "LinuxBridge Agent RPC Daemon Started!" -msgstr "LinuxBridge 代理 RPC 守护程序已启动!" - -#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:978 -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1429 -#: neutron/plugins/sriovnicagent/sriov_nic_agent.py:251 -msgid "Agent out of sync with plugin!" -msgstr "代理与插件不同步!" 
- -#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:1012 -#: neutron/plugins/ml2/drivers/mlnx/agent/eswitch_neutron_agent.py:43 -#, python-format -msgid "Interface mappings: %s" -msgstr "接口映射:%s" - -#: neutron/plugins/ml2/db.py:60 -#, python-format -msgid "" -"Added segment %(id)s of type %(network_type)s for network %(network_id)s" -msgstr "增添segment%(id)s种类%(network_type)s在网络%(network_id)s" - -#: neutron/plugins/ml2/managers.py:43 -#, python-format -msgid "Configured type driver names: %s" -msgstr "配置类型驱动名字: %s" - -#: neutron/plugins/ml2/managers.py:48 -#, python-format -msgid "Loaded type driver names: %s" -msgstr "已加载驱动程序: %s" - -#: neutron/plugins/ml2/managers.py:64 -#, python-format -msgid "Registered types: %s" -msgstr "已注册类型: %s" - -#: neutron/plugins/ml2/managers.py:75 -#, python-format -msgid "Tenant network_types: %s" -msgstr "项目网络类型: %s" - -#: neutron/plugins/ml2/managers.py:161 -#, python-format -msgid "Initializing driver for type '%s'" -msgstr "为类型 '%s'初始化驱动" - -#: neutron/plugins/ml2/managers.py:270 -#, python-format -msgid "Configured mechanism driver names: %s" -msgstr "配置装置驱动名称: %s" - -#: neutron/plugins/ml2/managers.py:276 -#, python-format -msgid "Loaded mechanism driver names: %s" -msgstr "已加载的装置驱动名称: %s" - -#: neutron/plugins/ml2/managers.py:288 -#, python-format -msgid "Registered mechanism drivers: %s" -msgstr "" - -#: neutron/plugins/ml2/managers.py:293 -#, python-format -msgid "Initializing mechanism driver '%s'" -msgstr "" - -#: neutron/plugins/ml2/managers.py:699 -#, python-format -msgid "Configured extension driver names: %s" -msgstr "" - -#: neutron/plugins/ml2/managers.py:705 -#, python-format -msgid "Loaded extension driver names: %s" -msgstr "" - -#: neutron/plugins/ml2/managers.py:716 -#, python-format -msgid "Registered extension drivers: %s" -msgstr "" - -#: neutron/plugins/ml2/managers.py:722 -#, python-format -msgid "Initializing extension driver '%s'" -msgstr "初始化扩展驱动 '%s'" - -#: 
neutron/plugins/ml2/managers.py:730 -#, python-format -msgid "Got %(alias)s extension from driver '%(drv)s'" -msgstr "" - -#: neutron/plugins/ml2/managers.py:779 -#, python-format -msgid "Extended network dict for driver '%(drv)s'" -msgstr "" - -#: neutron/plugins/ml2/managers.py:786 -#, python-format -msgid "Extended subnet dict for driver '%(drv)s'" -msgstr "" - -#: neutron/plugins/ml2/managers.py:793 -#, python-format -msgid "Extended port dict for driver '%(drv)s'" -msgstr "" - -#: neutron/plugins/ml2/plugin.py:144 -msgid "Modular L2 Plugin initialization complete" -msgstr "L2插件模块初始化完成" - -#: neutron/plugins/ml2/plugin.py:281 -#, python-format -msgid "Attempt %(count)s to bind port %(port)s" -msgstr "" - -#: neutron/plugins/ml2/plugin.py:678 -#, python-format -msgid "Port %s was deleted concurrently" -msgstr "端口 %s 被同时删除" - -#: neutron/plugins/ml2/plugin.py:689 -#, python-format -msgid "Subnet %s was deleted concurrently" -msgstr "子网 %s 同时被删除 " - -#: neutron/plugins/ml2/plugin.py:1329 -#, python-format -msgid "" -"Binding info for port %s was not found, it might have been deleted already." 
-msgstr "" - -#: neutron/plugins/ml2/drivers/type_flat.py:72 -msgid "Arbitrary flat physical_network names allowed" -msgstr "允许平面物理网络使用任意名字" - -#: neutron/plugins/ml2/drivers/type_flat.py:78 -#, python-format -msgid "Allowable flat physical_network names: %s" -msgstr "可以使用的平面物理网络名字: %s" - -#: neutron/plugins/ml2/drivers/type_flat.py:85 -msgid "ML2 FlatTypeDriver initialization complete" -msgstr "完成ML2 FlatTypeDriver的初始化" - -#: neutron/plugins/ml2/drivers/type_local.py:37 -msgid "ML2 LocalTypeDriver initialization complete" -msgstr "完成L2插件模块初始化" - -#: neutron/plugins/ml2/drivers/type_tunnel.py:116 -#, python-format -msgid "%(type)s ID ranges: %(range)s" -msgstr "%(type)s ID 范围: %(range)s" - -#: neutron/plugins/ml2/drivers/type_vlan.py:99 -#, python-format -msgid "Network VLAN ranges: %s" -msgstr "网络 VLAN 范围:%s" - -#: neutron/plugins/ml2/drivers/type_vlan.py:166 -msgid "VlanTypeDriver initialization complete" -msgstr "Vlan类型驱动初始化完成" - -#: neutron/plugins/ml2/drivers/arista/mechanism_arista.py:104 -#, python-format -msgid "Network %s is not created as it is not found in Arista DB" -msgstr "" - -#: neutron/plugins/ml2/drivers/arista/mechanism_arista.py:117 -#, python-format -msgid "Network name changed to %s" -msgstr "网络名改变为 %s" - -#: neutron/plugins/ml2/drivers/arista/mechanism_arista.py:147 -#, python-format -msgid "Network %s is not updated as it is not found in Arista DB" -msgstr "" - -#: neutron/plugins/ml2/drivers/arista/mechanism_arista.py:243 -#, python-format -msgid "VM %s is not created as it is not found in Arista DB" -msgstr "" - -#: neutron/plugins/ml2/drivers/arista/mechanism_arista.py:257 -#, python-format -msgid "Port name changed to %s" -msgstr "端口名改变为 %s" - -#: neutron/plugins/ml2/drivers/arista/mechanism_arista.py:310 -#, python-format -msgid "VM %s is not updated as it is not found in Arista DB" -msgstr "" - -#: neutron/plugins/ml2/drivers/cisco/apic/apic_topology.py:78 -msgid "APIC service agent starting ..." -msgstr "APIC 服务代理启动中 ..." 
- -#: neutron/plugins/ml2/drivers/cisco/apic/apic_topology.py:95 -msgid "APIC service agent started" -msgstr "APIC 服务代理已启动" - -#: neutron/plugins/ml2/drivers/cisco/apic/apic_topology.py:179 -#, python-format -msgid "APIC host agent: agent starting on %s" -msgstr "APIC 主机代理: 代理正启动在 %s" - -#: neutron/plugins/ml2/drivers/cisco/apic/apic_topology.py:199 -#, python-format -msgid "APIC host agent: started on %s" -msgstr "APIC 主机代理: 已启动在 %s" - -#: neutron/plugins/ml2/drivers/freescale/mechanism_fslsdn.py:40 -msgid "Initializing CRD client... " -msgstr "正在初始化CRD客户端 ..." - -#: neutron/plugins/ml2/drivers/mlnx/agent/eswitch_neutron_agent.py:54 -msgid "Agent initialised successfully, now running... " -msgstr "" - -#: neutron/plugins/ml2/extensions/port_security.py:33 -msgid "PortSecurityExtensionDriver initialization complete" -msgstr "" - -#: neutron/plugins/oneconvergence/agent/nvsd_neutron_agent.py:153 -msgid "NVSD Agent initialized successfully, now running... " -msgstr "" - -#: neutron/plugins/openvswitch/agent/ovs_dvr_neutron_agent.py:204 -#, python-format -msgid "L2 Agent operating in DVR Mode with MAC %s" -msgstr "" - -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:518 -#, python-format -msgid "Assigning %(vlan_id)s as local vlan for net-id=%(net_uuid)s" -msgstr "对于网络标识 %(net_uuid)s,正在将 %(vlan_id)s 分配为本地 vlan" - -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:619 -#, python-format -msgid "Reclaiming vlan = %(vlan_id)s from net-id = %(net_uuid)s" -msgstr "正在从网络标识 %(net_uuid)s 恢复 vlan %(vlan_id)s" - -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:714 -#, python-format -msgid "port_unbound(): net_uuid %s not in local_vlan_map" -msgstr "" - -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:783 -#, python-format -msgid "Adding %s to list of bridges." 
-msgstr "" - -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:926 -#, python-format -msgid "Mapping physical network %(physical_network)s to bridge %(bridge)s" -msgstr "正在将物理网络 %(physical_network)s 映射至网桥 %(bridge)s" - -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1035 -#, python-format -msgid "Port '%(port_name)s' has lost its vlan tag '%(vlan_tag)d'!" -msgstr "" - -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1149 -#, python-format -msgid "" -"Port %s was not found on the integration bridge and will therefore not be " -"processed" -msgstr "" - -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1179 -#, python-format -msgid "Configuration for device %s completed." -msgstr "设备 %s 的配置已完成。" - -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1198 -#, python-format -msgid "Ancillary Port %s added" -msgstr "" - -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1457 -msgid "Agent tunnel out of sync with plugin!" -msgstr "代理隧道与插件不同步!" - -#: neutron/plugins/sriovnicagent/sriov_nic_agent.py:191 -#, python-format -msgid "No device with MAC %s defined on agent." -msgstr "" - -#: neutron/plugins/sriovnicagent/sriov_nic_agent.py:216 -#, python-format -msgid "Device with MAC %s not defined on plugin" -msgstr "" - -#: neutron/plugins/sriovnicagent/sriov_nic_agent.py:223 -#, python-format -msgid "Removing device with mac_address %s" -msgstr "" - -#: neutron/plugins/sriovnicagent/sriov_nic_agent.py:244 -msgid "SRIOV NIC Agent RPC Daemon Started!" -msgstr "" - -#: neutron/plugins/sriovnicagent/sriov_nic_agent.py:333 -#, python-format -msgid "Physical Devices mappings: %s" -msgstr "物理设备映射:%s" - -#: neutron/plugins/sriovnicagent/sriov_nic_agent.py:334 -#, python-format -msgid "Exclude Devices: %s" -msgstr "" - -#: neutron/scheduler/dhcp_agent_scheduler.py:110 -#, python-format -msgid "Agent %s already present" -msgstr "" - -#: neutron/server/__init__.py:50 -msgid "RPC was already started in parent process by plugin." 
-msgstr "" - -#: neutron/services/service_base.py:99 -#, python-format -msgid "Default provider is not specified for service type %s" -msgstr "" - -#: neutron/services/l3_router/l3_arista.py:247 -msgid "Syncing Neutron Router DB <-> EOS" -msgstr "" - -#: neutron/services/metering/agents/metering_agent.py:96 -#, python-format -msgid "Loading Metering driver %s" -msgstr "" - -#: neutron/services/metering/drivers/iptables/iptables_driver.py:89 -#, python-format -msgid "Loading interface driver %s" -msgstr "正在加载接口驱动 %s" diff --git a/neutron/locale/zh_TW/LC_MESSAGES/neutron-log-info.po b/neutron/locale/zh_TW/LC_MESSAGES/neutron-log-info.po deleted file mode 100644 index a42231de67d..00000000000 --- a/neutron/locale/zh_TW/LC_MESSAGES/neutron-log-info.po +++ /dev/null @@ -1,934 +0,0 @@ -# Translations template for neutron. -# Copyright (C) 2015 ORGANIZATION -# This file is distributed under the same license as the neutron project. -# -# Translators: -msgid "" -msgstr "" -"Project-Id-Version: Neutron\n" -"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2015-04-03 06:13+0000\n" -"PO-Revision-Date: 2015-03-31 22:26+0000\n" -"Last-Translator: openstackjenkins \n" -"Language-Team: Chinese (Taiwan) (http://www.transifex.com/projects/p/neutron/" -"language/zh_TW/)\n" -"Language: zh_TW\n" -"MIME-Version: 1.0\n" -"Content-Type: text/plain; charset=UTF-8\n" -"Content-Transfer-Encoding: 8bit\n" -"Generated-By: Babel 1.3\n" -"Plural-Forms: nplurals=1; plural=0;\n" - -#: neutron/manager.py:115 -#, python-format -msgid "Loading core plugin: %s" -msgstr "" - -#: neutron/manager.py:155 -#, python-format -msgid "Service %s is supported by the core plugin" -msgstr "" - -#: neutron/manager.py:173 -#, python-format -msgid "Loading Plugin: %s" -msgstr "正在載入外掛程式:%s" - -#: neutron/policy.py:114 -#, python-format -msgid "" -"Inserting policy:%(new_policy)s in place of deprecated policy:%(old_policy)s" -msgstr "" - -#: neutron/quota.py:215 -msgid "" -"ConfDriver is used as 
quota_driver because the loaded plugin does not " -"support 'quotas' table." -msgstr "" - -#: neutron/quota.py:220 -#, python-format -msgid "Loaded quota_driver: %s." -msgstr "" - -#: neutron/service.py:178 -#, python-format -msgid "Neutron service started, listening on %(host)s:%(port)s" -msgstr "" - -#: neutron/wsgi.py:779 -#, python-format -msgid "%(method)s %(url)s" -msgstr "%(method)s %(url)s" - -#: neutron/wsgi.py:796 -#, python-format -msgid "HTTP exception thrown: %s" -msgstr "已擲出 HTTP 異常狀況:%s" - -#: neutron/wsgi.py:812 -#, python-format -msgid "%(url)s returned with HTTP %(status)d" -msgstr "%(url)s 傳回了 HTTP %(status)d" - -#: neutron/wsgi.py:815 -#, python-format -msgid "%(url)s returned a fault: %(exception)s" -msgstr "%(url)s 傳回了錯誤:%(exception)s" - -#: neutron/agent/securitygroups_rpc.py:82 -msgid "Disabled security-group extension." -msgstr "已停用安全群組延伸。" - -#: neutron/agent/securitygroups_rpc.py:84 -msgid "Disabled allowed-address-pairs extension." -msgstr "" - -#: neutron/agent/securitygroups_rpc.py:214 -#, python-format -msgid "" -"Skipping method %s as firewall is disabled or configured as " -"NoopFirewallDriver." 
-msgstr "" - -#: neutron/agent/securitygroups_rpc.py:226 -#, python-format -msgid "Preparing filters for devices %s" -msgstr "正在準備裝置 %s 的過濾器" - -#: neutron/agent/securitygroups_rpc.py:256 -#, python-format -msgid "Security group rule updated %r" -msgstr "安全群組規則已更新 %r" - -#: neutron/agent/securitygroups_rpc.py:263 -#, python-format -msgid "Security group member updated %r" -msgstr "安全群組成員已更新 %r" - -#: neutron/agent/securitygroups_rpc.py:285 -msgid "Provider rule updated" -msgstr "已更新提供者規則" - -#: neutron/agent/securitygroups_rpc.py:297 -#, python-format -msgid "Remove device filter for %r" -msgstr "移除 %r 的裝置過濾器" - -#: neutron/agent/securitygroups_rpc.py:307 -msgid "Refresh firewall rules" -msgstr "重新整理防火牆規則" - -#: neutron/agent/securitygroups_rpc.py:311 -msgid "No ports here to refresh firewall" -msgstr "" - -#: neutron/agent/common/ovs_lib.py:393 -#, python-format -msgid "Port %(port_id)s not present in bridge %(br_name)s" -msgstr "" - -#: neutron/agent/dhcp/agent.py:93 neutron/agent/dhcp/agent.py:589 -msgid "DHCP agent started" -msgstr "已啟動 DHCP 代理程式" - -#: neutron/agent/dhcp/agent.py:144 -msgid "Synchronizing state" -msgstr "正在同步化狀態" - -#: neutron/agent/dhcp/agent.py:165 -msgid "Synchronizing state complete" -msgstr "" - -#: neutron/agent/dhcp/agent.py:586 neutron/agent/l3/agent.py:622 -#: neutron/services/metering/agents/metering_agent.py:286 -#, python-format -msgid "agent_updated by server side %s!" -msgstr "agent_updated 是由伺服器端 %s 執行!" - -#: neutron/agent/l3/agent.py:551 -msgid "L3 agent started" -msgstr "已啟動 L3 代理程式" - -#: neutron/agent/l3/ha.py:114 -#, python-format -msgid "Router %(router_id)s transitioned to %(state)s" -msgstr "" - -#: neutron/agent/l3/ha.py:124 -#, python-format -msgid "" -"Router %s is not managed by this agent. It was possibly deleted concurrently." 
-msgstr "" - -#: neutron/agent/linux/daemon.py:102 -#, python-format -msgid "Process runs with uid/gid: %(uid)s/%(gid)s" -msgstr "" - -#: neutron/agent/linux/dhcp.py:656 -#, python-format -msgid "" -"Cannot apply dhcp option %(opt)s because it's ip_version %(version)d is not " -"in port's address IP versions" -msgstr "" - -#: neutron/agent/linux/interface.py:268 neutron/agent/linux/interface.py:319 -#: neutron/agent/linux/interface.py:377 neutron/agent/linux/interface.py:420 -#, python-format -msgid "Device %s already exists" -msgstr "裝置 %s 已存在" - -#: neutron/agent/linux/iptables_firewall.py:114 -#, python-format -msgid "Attempted to update port filter which is not filtered %s" -msgstr "已嘗試更新未過濾的埠過濾器 %s" - -#: neutron/agent/linux/iptables_firewall.py:125 -#, python-format -msgid "Attempted to remove port filter which is not filtered %r" -msgstr "已嘗試移除未過濾的埠過濾器 %r" - -#: neutron/api/extensions.py:404 -msgid "Initializing extension manager." -msgstr "正在起始設定延伸管理程式。" - -#: neutron/api/extensions.py:562 -#, python-format -msgid "Loaded extension: %s" -msgstr "已載入延伸:%s" - -#: neutron/api/v2/base.py:93 -msgid "" -"Allow sorting is enabled because native pagination requires native sorting" -msgstr "已啟用容許排序,因為原生分頁需要原生排序" - -#: neutron/api/v2/resource.py:94 -#, python-format -msgid "%(action)s failed (client error): %(exc)s" -msgstr "" - -#: neutron/callbacks/manager.py:135 -#, python-format -msgid "Notify callbacks for %(resource)s, %(event)s" -msgstr "" - -#: neutron/callbacks/manager.py:142 -#, python-format -msgid "Calling callback %s" -msgstr "" - -#: neutron/cmd/ovs_cleanup.py:73 -#, python-format -msgid "Deleting port: %s" -msgstr "" - -#: neutron/cmd/ovs_cleanup.py:103 -#, python-format -msgid "Cleaning bridge: %s" -msgstr "" - -#: neutron/cmd/ovs_cleanup.py:110 -msgid "OVS cleanup completed successfully" -msgstr "已順利完成 OVS 清理" - -#: neutron/cmd/eventlet/plugins/hyperv_neutron_agent.py:43 -#: neutron/plugins/ibm/agent/sdnve_neutron_agent.py:261 -#: 
neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:1017 -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1634 -#: neutron/plugins/sriovnicagent/sriov_nic_agent.py:345 -msgid "Agent initialized successfully, now running... " -msgstr "已順利地起始設定代理程式,現正在執行中..." - -#: neutron/common/config.py:204 -msgid "Logging enabled!" -msgstr "已啟用記載!" - -#: neutron/common/config.py:205 -#, python-format -msgid "%(prog)s version %(version)s" -msgstr "" - -#: neutron/common/config.py:224 -#, python-format -msgid "Config paste file: %s" -msgstr "配置貼上檔案:%s" - -#: neutron/common/ipv6_utils.py:63 -msgid "IPv6 is not enabled on this system." -msgstr "" - -#: neutron/db/agentschedulers_db.py:161 -msgid "" -"Skipping periodic DHCP agent status check because automatic network " -"rescheduling is disabled." -msgstr "" - -#: neutron/db/agentschedulers_db.py:196 -#, python-format -msgid "Scheduling unhosted network %s" -msgstr "" - -#: neutron/db/agentschedulers_db.py:203 -#, python-format -msgid "" -"Failed to schedule network %s, no eligible agents or it might be already " -"scheduled by another server" -msgstr "" - -#: neutron/db/agentschedulers_db.py:211 -#, python-format -msgid "Adding network %(net)s to agent %(agent)%s on host %(host)s" -msgstr "" - -#: neutron/db/db_base_plugin_v2.py:630 -#, python-format -msgid "" -"Validation for CIDR: %(new_cidr)s failed - overlaps with subnet " -"%(subnet_id)s (CIDR: %(cidr)s)" -msgstr "" -"驗證 CIDR %(new_cidr)s 失敗 - 與子網路 %(subnet_id)s (CIDR %(cidr)s) 重疊" - -#: neutron/db/db_base_plugin_v2.py:657 -#, python-format -msgid "Found invalid IP address in pool: %(start)s - %(end)s:" -msgstr "在儲存區中發現無效的 IP 位址:%(start)s - %(end)s:" - -#: neutron/db/db_base_plugin_v2.py:664 -msgid "Specified IP addresses do not match the subnet IP version" -msgstr "指定的 IP 位址與子網路 IP 版本不符" - -#: neutron/db/db_base_plugin_v2.py:668 -#, python-format -msgid "Start IP (%(start)s) is greater than end IP (%(end)s)" -msgstr "起始 IP (%(start)s) 大於結尾 IP (%(end)s)" - 
-#: neutron/db/db_base_plugin_v2.py:673 -#, python-format -msgid "Found pool larger than subnet CIDR:%(start)s - %(end)s" -msgstr "找到的儲存區大於子網路 CIDR:%(start)s - %(end)s" - -#: neutron/db/db_base_plugin_v2.py:697 -#, python-format -msgid "Found overlapping ranges: %(l_range)s and %(r_range)s" -msgstr "發現重疊的範圍:%(l_range)s 及 %(r_range)s" - -#: neutron/db/db_base_plugin_v2.py:1507 -#, python-format -msgid "Found IP allocation %(alloc)s on subnet %(subnet)s, cannot delete" -msgstr "" - -#: neutron/db/l3_agentschedulers_db.py:78 -msgid "" -"Skipping period L3 agent status check because automatic router rescheduling " -"is disabled." -msgstr "" - -#: neutron/db/l3_db.py:1114 -#, python-format -msgid "Skipping port %s as no IP is configure on it" -msgstr "正在跳過埠 %s,因為其上沒有配置 IP" - -#: neutron/db/l3_dvr_db.py:86 -#, python-format -msgid "Centralizing distributed router %s is not supported" -msgstr "" - -#: neutron/db/l3_dvr_db.py:535 -#, python-format -msgid "Agent Gateway port does not exist, so create one: %s" -msgstr "" - -#: neutron/db/l3_dvr_db.py:619 -#, python-format -msgid "SNAT interface port list does not exist, so create one: %s" -msgstr "" - -#: neutron/db/l3_dvrscheduler_db.py:312 -msgid "SNAT already bound to a service node." -msgstr "" - -#: neutron/db/l3_hamode_db.py:188 -#, python-format -msgid "" -"Attempt %(count)s to allocate a VRID in the network %(network)s for the " -"router %(router)s" -msgstr "" - -#: neutron/db/l3_hamode_db.py:271 -#, python-format -msgid "" -"Number of available agents lower than max_l3_agents_per_router. 
L3 agents " -"available: %s" -msgstr "" - -#: neutron/db/migration/alembic_migrations/heal_script.py:221 -#, python-format -msgid "Table %(old_t)r was renamed to %(new_t)r" -msgstr "" - -#: neutron/debug/commands.py:107 -#, python-format -msgid "%d probe(s) deleted" -msgstr "" - -#: neutron/notifiers/nova.py:266 -#, python-format -msgid "Nova event response: %s" -msgstr "" - -#: neutron/openstack/common/eventlet_backdoor.py:146 -#, python-format -msgid "Eventlet backdoor listening on %(port)s for process %(pid)d" -msgstr "" - -#: neutron/openstack/common/periodic_task.py:120 -#, python-format -msgid "Skipping periodic task %(task)s because its interval is negative" -msgstr "正在跳過定期作業 %(task)s,因為其間隔為負數" - -#: neutron/openstack/common/periodic_task.py:125 -#, python-format -msgid "Skipping periodic task %(task)s because it is disabled" -msgstr "正在跳過定期作業 %(task)s,因為它已停用" - -#: neutron/openstack/common/service.py:173 -#, python-format -msgid "Caught %s, exiting" -msgstr "已捕捉到 %s,正在結束" - -#: neutron/openstack/common/service.py:231 -msgid "Parent process has died unexpectedly, exiting" -msgstr "母程序已非預期地當掉,正在結束" - -#: neutron/openstack/common/service.py:262 -#, python-format -msgid "Child caught %s, exiting" -msgstr "" - -#: neutron/openstack/common/service.py:301 -msgid "Forking too fast, sleeping" -msgstr "分岔太快,正在休眠" - -#: neutron/openstack/common/service.py:320 -#, python-format -msgid "Started child %d" -msgstr "已開始子行程 %d" - -#: neutron/openstack/common/service.py:330 -#, python-format -msgid "Starting %d workers" -msgstr "正在啟動 %d 個工作程式" - -#: neutron/openstack/common/service.py:347 -#, python-format -msgid "Child %(pid)d killed by signal %(sig)d" -msgstr "信號 %(sig)d 結束了子項 %(pid)d" - -#: neutron/openstack/common/service.py:351 -#, python-format -msgid "Child %(pid)s exited with status %(code)d" -msgstr "子項 %(pid)s 已結束,狀態為 %(code)d" - -#: neutron/openstack/common/service.py:390 -#, python-format -msgid "Caught %s, stopping children" -msgstr "已捕捉到 %s,正在停止子項" - -#: 
neutron/openstack/common/service.py:399 -msgid "Wait called after thread killed. Cleaning up." -msgstr "" - -#: neutron/openstack/common/service.py:415 -#, python-format -msgid "Waiting on %d children to exit" -msgstr "正在等待 %d 個子項結束" - -#: neutron/plugins/brocade/NeutronPlugin.py:307 -#, python-format -msgid "Allocated vlan (%d) from the pool" -msgstr "已從儲存區配置 VLAN (%d)" - -#: neutron/plugins/cisco/models/virt_phy_sw_v2.py:117 -#, python-format -msgid "No %s Plugin loaded" -msgstr "未載入 %s 外掛程式" - -#: neutron/plugins/cisco/models/virt_phy_sw_v2.py:118 -#, python-format -msgid "%(plugin_key)s: %(function_name)s with args %(args)s ignored" -msgstr "%(plugin_key)s:已忽略帶有引數 %(args)s 的 %(function_name)s" - -#: neutron/plugins/embrane/common/utils.py:44 -msgid "No ip allocation set" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api.py:77 -#, python-format -msgid "The IP addr of available SDN-VE controllers: %s" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api.py:80 -#, python-format -msgid "The SDN-VE controller IP address: %s" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api.py:236 -msgid "Bad resource for forming a list request" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api.py:246 -msgid "Bad resource for forming a show request" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api.py:256 -msgid "Bad resource for forming a create request" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api.py:268 -msgid "Bad resource for forming a update request" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api.py:279 -msgid "Bad resource for forming a delete request" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api.py:307 -#, python-format -msgid "Non matching tenant and network types: %(ttype)s %(ntype)s" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api_fake.py:32 -msgid "Fake SDNVE controller initialized" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api_fake.py:35 -msgid "Fake SDNVE controller: list" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api_fake.py:39 -msgid "Fake SDNVE controller: show" -msgstr "" - -#: 
neutron/plugins/ibm/sdnve_api_fake.py:43 -msgid "Fake SDNVE controller: create" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api_fake.py:47 -msgid "Fake SDNVE controller: update" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api_fake.py:51 -msgid "Fake SDNVE controller: delete" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api_fake.py:55 -msgid "Fake SDNVE controller: get tenant by id" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api_fake.py:59 -msgid "Fake SDNVE controller: check and create tenant" -msgstr "" - -#: neutron/plugins/ibm/sdnve_api_fake.py:63 -msgid "Fake SDNVE controller: get controller" -msgstr "" - -#: neutron/plugins/ibm/sdnve_neutron_plugin.py:147 -msgid "Set a new controller if needed." -msgstr "" - -#: neutron/plugins/ibm/sdnve_neutron_plugin.py:153 -#, python-format -msgid "Set the controller to a new controller: %s" -msgstr "" - -#: neutron/plugins/ibm/agent/sdnve_neutron_agent.py:189 -#, python-format -msgid "" -"Mapping physical network %(physical_network)s to interface %(interface)s" -msgstr "" - -#: neutron/plugins/ibm/agent/sdnve_neutron_agent.py:220 -#, python-format -msgid "" -"Loop iteration exceeded interval (%(polling_interval)s vs. %(elapsed)s)!" -msgstr "迴圈反覆運算已超出間隔(%(polling_interval)s 與 %(elapsed)s)!" - -#: neutron/plugins/ibm/agent/sdnve_neutron_agent.py:234 -#, python-format -msgid "Controller IPs: %s" -msgstr "" - -#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:793 -#: neutron/plugins/oneconvergence/agent/nvsd_neutron_agent.py:89 -#: neutron/plugins/sriovnicagent/sriov_nic_agent.py:99 -#, python-format -msgid "RPC agent_id: %s" -msgstr "RPC agent_id:%s" - -#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:863 -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1155 -#: neutron/plugins/sriovnicagent/sriov_nic_agent.py:209 -#, python-format -msgid "Port %(device)s updated. 
Details: %(details)s" -msgstr "已更新埠 %(device)s。詳細資料:%(details)s" - -#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:896 -#, python-format -msgid "Device %s not defined on plugin" -msgstr "外掛程式上未定義裝置 %s" - -#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:903 -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1210 -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1227 -#, python-format -msgid "Attachment %s removed" -msgstr "已移除連接裝置 %s" - -#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:915 -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1239 -#: neutron/plugins/sriovnicagent/sriov_nic_agent.py:235 -#, python-format -msgid "Port %s updated." -msgstr "已更新埠 %s。" - -#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:968 -msgid "LinuxBridge Agent RPC Daemon Started!" -msgstr "已啟動「LinuxBridge 代理程式 RPC 常駐程式」!" - -#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:978 -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1429 -#: neutron/plugins/sriovnicagent/sriov_nic_agent.py:251 -msgid "Agent out of sync with plugin!" -msgstr "代理程式與外掛程式不同步!" 
- -#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:1012 -#: neutron/plugins/ml2/drivers/mlnx/agent/eswitch_neutron_agent.py:43 -#, python-format -msgid "Interface mappings: %s" -msgstr "介面對映:%s" - -#: neutron/plugins/ml2/db.py:60 -#, python-format -msgid "" -"Added segment %(id)s of type %(network_type)s for network %(network_id)s" -msgstr "" - -#: neutron/plugins/ml2/managers.py:43 -#, python-format -msgid "Configured type driver names: %s" -msgstr "" - -#: neutron/plugins/ml2/managers.py:48 -#, python-format -msgid "Loaded type driver names: %s" -msgstr "" - -#: neutron/plugins/ml2/managers.py:64 -#, python-format -msgid "Registered types: %s" -msgstr "" - -#: neutron/plugins/ml2/managers.py:75 -#, python-format -msgid "Tenant network_types: %s" -msgstr "" - -#: neutron/plugins/ml2/managers.py:161 -#, python-format -msgid "Initializing driver for type '%s'" -msgstr "" - -#: neutron/plugins/ml2/managers.py:270 -#, python-format -msgid "Configured mechanism driver names: %s" -msgstr "" - -#: neutron/plugins/ml2/managers.py:276 -#, python-format -msgid "Loaded mechanism driver names: %s" -msgstr "" - -#: neutron/plugins/ml2/managers.py:288 -#, python-format -msgid "Registered mechanism drivers: %s" -msgstr "" - -#: neutron/plugins/ml2/managers.py:293 -#, python-format -msgid "Initializing mechanism driver '%s'" -msgstr "" - -#: neutron/plugins/ml2/managers.py:699 -#, python-format -msgid "Configured extension driver names: %s" -msgstr "" - -#: neutron/plugins/ml2/managers.py:705 -#, python-format -msgid "Loaded extension driver names: %s" -msgstr "" - -#: neutron/plugins/ml2/managers.py:716 -#, python-format -msgid "Registered extension drivers: %s" -msgstr "" - -#: neutron/plugins/ml2/managers.py:722 -#, python-format -msgid "Initializing extension driver '%s'" -msgstr "" - -#: neutron/plugins/ml2/managers.py:730 -#, python-format -msgid "Got %(alias)s extension from driver '%(drv)s'" -msgstr "" - -#: neutron/plugins/ml2/managers.py:779 -#, 
python-format -msgid "Extended network dict for driver '%(drv)s'" -msgstr "" - -#: neutron/plugins/ml2/managers.py:786 -#, python-format -msgid "Extended subnet dict for driver '%(drv)s'" -msgstr "" - -#: neutron/plugins/ml2/managers.py:793 -#, python-format -msgid "Extended port dict for driver '%(drv)s'" -msgstr "" - -#: neutron/plugins/ml2/plugin.py:144 -msgid "Modular L2 Plugin initialization complete" -msgstr "" - -#: neutron/plugins/ml2/plugin.py:281 -#, python-format -msgid "Attempt %(count)s to bind port %(port)s" -msgstr "" - -#: neutron/plugins/ml2/plugin.py:678 -#, python-format -msgid "Port %s was deleted concurrently" -msgstr "" - -#: neutron/plugins/ml2/plugin.py:689 -#, python-format -msgid "Subnet %s was deleted concurrently" -msgstr "" - -#: neutron/plugins/ml2/plugin.py:1329 -#, python-format -msgid "" -"Binding info for port %s was not found, it might have been deleted already." -msgstr "" - -#: neutron/plugins/ml2/drivers/type_flat.py:72 -msgid "Arbitrary flat physical_network names allowed" -msgstr "" - -#: neutron/plugins/ml2/drivers/type_flat.py:78 -#, python-format -msgid "Allowable flat physical_network names: %s" -msgstr "" - -#: neutron/plugins/ml2/drivers/type_flat.py:85 -msgid "ML2 FlatTypeDriver initialization complete" -msgstr "" - -#: neutron/plugins/ml2/drivers/type_local.py:37 -msgid "ML2 LocalTypeDriver initialization complete" -msgstr "" - -#: neutron/plugins/ml2/drivers/type_tunnel.py:116 -#, python-format -msgid "%(type)s ID ranges: %(range)s" -msgstr "" - -#: neutron/plugins/ml2/drivers/type_vlan.py:99 -#, python-format -msgid "Network VLAN ranges: %s" -msgstr "網路 VLAN 範圍:%s" - -#: neutron/plugins/ml2/drivers/type_vlan.py:166 -msgid "VlanTypeDriver initialization complete" -msgstr "" - -#: neutron/plugins/ml2/drivers/arista/mechanism_arista.py:104 -#, python-format -msgid "Network %s is not created as it is not found in Arista DB" -msgstr "" - -#: neutron/plugins/ml2/drivers/arista/mechanism_arista.py:117 -#, python-format 
-msgid "Network name changed to %s" -msgstr "" - -#: neutron/plugins/ml2/drivers/arista/mechanism_arista.py:147 -#, python-format -msgid "Network %s is not updated as it is not found in Arista DB" -msgstr "" - -#: neutron/plugins/ml2/drivers/arista/mechanism_arista.py:243 -#, python-format -msgid "VM %s is not created as it is not found in Arista DB" -msgstr "" - -#: neutron/plugins/ml2/drivers/arista/mechanism_arista.py:257 -#, python-format -msgid "Port name changed to %s" -msgstr "" - -#: neutron/plugins/ml2/drivers/arista/mechanism_arista.py:310 -#, python-format -msgid "VM %s is not updated as it is not found in Arista DB" -msgstr "" - -#: neutron/plugins/ml2/drivers/cisco/apic/apic_topology.py:78 -msgid "APIC service agent starting ..." -msgstr "" - -#: neutron/plugins/ml2/drivers/cisco/apic/apic_topology.py:95 -msgid "APIC service agent started" -msgstr "" - -#: neutron/plugins/ml2/drivers/cisco/apic/apic_topology.py:179 -#, python-format -msgid "APIC host agent: agent starting on %s" -msgstr "" - -#: neutron/plugins/ml2/drivers/cisco/apic/apic_topology.py:199 -#, python-format -msgid "APIC host agent: started on %s" -msgstr "" - -#: neutron/plugins/ml2/drivers/freescale/mechanism_fslsdn.py:40 -msgid "Initializing CRD client... " -msgstr "" - -#: neutron/plugins/ml2/drivers/mlnx/agent/eswitch_neutron_agent.py:54 -msgid "Agent initialised successfully, now running... " -msgstr "" - -#: neutron/plugins/ml2/extensions/port_security.py:33 -msgid "PortSecurityExtensionDriver initialization complete" -msgstr "" - -#: neutron/plugins/oneconvergence/agent/nvsd_neutron_agent.py:153 -msgid "NVSD Agent initialized successfully, now running... 
" -msgstr "" - -#: neutron/plugins/openvswitch/agent/ovs_dvr_neutron_agent.py:204 -#, python-format -msgid "L2 Agent operating in DVR Mode with MAC %s" -msgstr "" - -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:518 -#, python-format -msgid "Assigning %(vlan_id)s as local vlan for net-id=%(net_uuid)s" -msgstr "正在將 %(vlan_id)s 指派為 net-id = %(net_uuid)s 的本端 VLAN" - -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:619 -#, python-format -msgid "Reclaiming vlan = %(vlan_id)s from net-id = %(net_uuid)s" -msgstr "正在從 net-id = %(net_uuid)s 收回 VLAN = %(vlan_id)s" - -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:714 -#, python-format -msgid "port_unbound(): net_uuid %s not in local_vlan_map" -msgstr "" - -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:783 -#, python-format -msgid "Adding %s to list of bridges." -msgstr "" - -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:926 -#, python-format -msgid "Mapping physical network %(physical_network)s to bridge %(bridge)s" -msgstr "正在將實體網路 %(physical_network)s 對映到橋接器 %(bridge)s" - -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1035 -#, python-format -msgid "Port '%(port_name)s' has lost its vlan tag '%(vlan_tag)d'!" -msgstr "" - -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1149 -#, python-format -msgid "" -"Port %s was not found on the integration bridge and will therefore not be " -"processed" -msgstr "" - -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1179 -#, python-format -msgid "Configuration for device %s completed." -msgstr "" - -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1198 -#, python-format -msgid "Ancillary Port %s added" -msgstr "" - -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1457 -msgid "Agent tunnel out of sync with plugin!" -msgstr "代理程式通道與外掛程式不同步!" - -#: neutron/plugins/sriovnicagent/sriov_nic_agent.py:191 -#, python-format -msgid "No device with MAC %s defined on agent." 
-msgstr "" - -#: neutron/plugins/sriovnicagent/sriov_nic_agent.py:216 -#, python-format -msgid "Device with MAC %s not defined on plugin" -msgstr "" - -#: neutron/plugins/sriovnicagent/sriov_nic_agent.py:223 -#, python-format -msgid "Removing device with mac_address %s" -msgstr "" - -#: neutron/plugins/sriovnicagent/sriov_nic_agent.py:244 -msgid "SRIOV NIC Agent RPC Daemon Started!" -msgstr "" - -#: neutron/plugins/sriovnicagent/sriov_nic_agent.py:333 -#, python-format -msgid "Physical Devices mappings: %s" -msgstr "" - -#: neutron/plugins/sriovnicagent/sriov_nic_agent.py:334 -#, python-format -msgid "Exclude Devices: %s" -msgstr "" - -#: neutron/scheduler/dhcp_agent_scheduler.py:110 -#, python-format -msgid "Agent %s already present" -msgstr "" - -#: neutron/server/__init__.py:50 -msgid "RPC was already started in parent process by plugin." -msgstr "" - -#: neutron/services/service_base.py:99 -#, python-format -msgid "Default provider is not specified for service type %s" -msgstr "" - -#: neutron/services/l3_router/l3_arista.py:247 -msgid "Syncing Neutron Router DB <-> EOS" -msgstr "" - -#: neutron/services/metering/agents/metering_agent.py:96 -#, python-format -msgid "Loading Metering driver %s" -msgstr "" - -#: neutron/services/metering/drivers/iptables/iptables_driver.py:89 -#, python-format -msgid "Loading interface driver %s" -msgstr "" From b339391bcb223c0f03d30f36dea47d13adb12c71 Mon Sep 17 00:00:00 2001 From: mathieu-rohon Date: Sat, 7 Mar 2015 13:30:49 +0100 Subject: [PATCH 012/292] ML2: Change port status only when it's bound to the host Currently, nothing prevents the port status to be changed to BUILD state when get_device_details() is sent by a host that doesn't own the port. In some cases the port might stay in BUILD state. This could happen during a live-migration, or for multi-hosted ports such as HA ports. This commit allows the port status modification only if the port is bound to the host that is asking for it. 
Closes-Bug: #1439857 Closes-Bug: #1438040 Closes-Bug: #1416933 Change-Id: I9b3673f453abbafaaa4f78542fcfebe8dc93f2bb (cherry picked from commit 9b53b82ce7dad551ebc0f02ff667d5345fb7e139) --- neutron/plugins/ml2/rpc.py | 15 ++++++++------- neutron/tests/unit/plugins/ml2/test_rpc.py | 22 ++++++++++++++++++++-- 2 files changed, 28 insertions(+), 9 deletions(-) diff --git a/neutron/plugins/ml2/rpc.py b/neutron/plugins/ml2/rpc.py index af4b6747e5a..1764cdf367a 100644 --- a/neutron/plugins/ml2/rpc.py +++ b/neutron/plugins/ml2/rpc.py @@ -96,13 +96,14 @@ class RpcCallbacks(type_tunnel.TunnelRpcCallbackMixin): 'vif_type': port[portbindings.VIF_TYPE]}) return {'device': device} - new_status = (q_const.PORT_STATUS_BUILD if port['admin_state_up'] - else q_const.PORT_STATUS_DOWN) - if port['status'] != new_status: - plugin.update_port_status(rpc_context, - port_id, - new_status, - host) + if (not host or host == port_context.host): + new_status = (q_const.PORT_STATUS_BUILD if port['admin_state_up'] + else q_const.PORT_STATUS_DOWN) + if port['status'] != new_status: + plugin.update_port_status(rpc_context, + port_id, + new_status, + host) entry = {'device': device, 'network_id': port['network_id'], diff --git a/neutron/tests/unit/plugins/ml2/test_rpc.py b/neutron/tests/unit/plugins/ml2/test_rpc.py index facc9f9b56a..56cbdbcb979 100644 --- a/neutron/tests/unit/plugins/ml2/test_rpc.py +++ b/neutron/tests/unit/plugins/ml2/test_rpc.py @@ -88,6 +88,7 @@ class RpcCallbacksTestCase(base.BaseTestCase): def test_get_device_details_port_status_equal_new_status(self): port = collections.defaultdict(lambda: 'fake') self.plugin.get_bound_port_context().current = port + self.plugin.port_bound_to_host = mock.MagicMock(return_value=True) for admin_state_up in (True, False): new_status = (constants.PORT_STATUS_BUILD if admin_state_up else constants.PORT_STATUS_DOWN) @@ -98,8 +99,7 @@ class RpcCallbacksTestCase(base.BaseTestCase): port['admin_state_up'] = admin_state_up port['status'] = status 
self.plugin.update_port_status.reset_mock() - self.callbacks.get_device_details('fake_context', - host='fake_host') + self.callbacks.get_device_details('fake_context') self.assertEqual(status == new_status, not self.plugin.update_port_status.called) @@ -113,6 +113,24 @@ class RpcCallbacksTestCase(base.BaseTestCase): cached_networks=cached_networks) self.assertTrue('fake_port' in cached_networks) + def test_get_device_details_wrong_host(self): + port = collections.defaultdict(lambda: 'fake') + port_context = self.plugin.get_bound_port_context() + port_context.current = port + port_context.host = 'fake' + self.plugin.update_port_status.reset_mock() + self.callbacks.get_device_details('fake_context', + host='fake_host') + self.assertFalse(self.plugin.update_port_status.called) + + def test_get_device_details_port_no_host(self): + port = collections.defaultdict(lambda: 'fake') + port_context = self.plugin.get_bound_port_context() + port_context.current = port + self.plugin.update_port_status.reset_mock() + self.callbacks.get_device_details('fake_context') + self.assertTrue(self.plugin.update_port_status.called) + def test_get_devices_details_list(self): devices = [1, 2, 3, 4, 5] kwargs = {'host': 'fake_host', 'agent_id': 'fake_agent_id'} From 407be289360ec6dabbbe14d9b18dae7c9fa2db79 Mon Sep 17 00:00:00 2001 From: Kawaguchi Date: Tue, 21 Apr 2015 13:27:52 +0900 Subject: [PATCH 013/292] Fix typo acomplished => accomplished Change-Id: I73722e9984917a5a8c4e74207cf14d4040a7cf2f Related-Bug: #1390035 --- neutron/tests/functional/agent/l3/test_namespace_manager.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/neutron/tests/functional/agent/l3/test_namespace_manager.py b/neutron/tests/functional/agent/l3/test_namespace_manager.py index 51922e6ff17..74941bb790a 100755 --- a/neutron/tests/functional/agent/l3/test_namespace_manager.py +++ b/neutron/tests/functional/agent/l3/test_namespace_manager.py @@ -46,7 +46,7 @@ class 
NamespaceManagerTestFramework(base.BaseSudoTestCase): namespace.delete() except RuntimeError as e: # If the namespace didn't exist when delete was attempted, mission - # acomplished. Otherwise, re-raise the exception + # accomplished. Otherwise, re-raise the exception if 'No such file or directory' not in e.message: raise e From d506b50badda7af30d1154561acd45caef44ca11 Mon Sep 17 00:00:00 2001 From: Andy Hill Date: Mon, 20 Apr 2015 13:00:28 -0400 Subject: [PATCH 014/292] Add use_slave DB api support The example configuration refers to slave_connection[1] but slaves aren't used in the codebase. This change will enable plugin authors to use slave_connection by passing use_slave=True to the SQLAlchemy session[2][3]. [1] http://docs.openstack.org/juno/config-reference/content/section_neutron.conf.html [2] http://docs.openstack.org/developer/oslo.db/api/sqlalchemy/session.html#oslo_db.sqlalchemy.session.EngineFacade.get_session [3] https://wiki.openstack.org/wiki/Slave_usage Change-Id: I6f46c11fad5c58577654a4011cf82d19f6d3e1e3 --- neutron/db/api.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/neutron/db/api.py b/neutron/db/api.py index 4418bbc2825..3a2752ae1bc 100644 --- a/neutron/db/api.py +++ b/neutron/db/api.py @@ -46,11 +46,12 @@ def dispose(): get_engine().pool.dispose() -def get_session(autocommit=True, expire_on_commit=False): +def get_session(autocommit=True, expire_on_commit=False, use_slave=False): """Helper method to grab session.""" facade = _create_facade_lazily() return facade.get_session(autocommit=autocommit, - expire_on_commit=expire_on_commit) + expire_on_commit=expire_on_commit, + use_slave=use_slave) @contextlib.contextmanager From aa57c364defec8519bbed06fbce5aae4b23f147f Mon Sep 17 00:00:00 2001 From: Jakub Libosvar Date: Wed, 22 Apr 2015 16:47:33 +0200 Subject: [PATCH 015/292] Create bridges in ovsdb monitor functional tests The test waits inside for output from 'ovsdb-client monitor Bridge' command but the command makes 
output only if there is a bridge present in ovsdb. This patch adds an OVSBridgeFixture to setup so every test case has a bridge for sure. Change-Id: I40e1c99ec956c92fa1c7763eb98f7f2ce6fcd226 Closes-Bug: 1447191 --- neutron/tests/functional/agent/linux/test_ovsdb_monitor.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/neutron/tests/functional/agent/linux/test_ovsdb_monitor.py b/neutron/tests/functional/agent/linux/test_ovsdb_monitor.py index d1de2068f9d..450a26192e1 100644 --- a/neutron/tests/functional/agent/linux/test_ovsdb_monitor.py +++ b/neutron/tests/functional/agent/linux/test_ovsdb_monitor.py @@ -48,6 +48,8 @@ class BaseMonitorTest(linux_base.BaseOVSLinuxTestCase): root_helper=" ".join([functional_base.SUDO_CMD] * 2)) self._check_test_requirements() + # ovsdb-client monitor needs to have a bridge to make any output + self.useFixture(net_helpers.OVSBridgeFixture()) def _check_test_requirements(self): self.check_command(['ovsdb-client', 'list-dbs'], From 9bc812e92fb27b297ccfe960267dcab173aea6c9 Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Thu, 23 Apr 2015 02:15:06 +0000 Subject: [PATCH 016/292] Updated from global requirements Change-Id: I514c65fac38ef0e534e7401a5f3643b1906adea7 --- requirements.txt | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/requirements.txt b/requirements.txt index 47172ff25a9..de23f381f59 100644 --- a/requirements.txt +++ b/requirements.txt @@ -12,13 +12,13 @@ httplib2>=0.7.5 requests>=2.2.0,!=2.4.0 jsonrpclib Jinja2>=2.6 # BSD License (3 clause) -keystonemiddleware>=1.5.0 +keystonemiddleware>=1.5.0,<1.6.0 netaddr>=0.7.12 -python-neutronclient>=2.3.11,<3 +python-neutronclient>=2.3.11,<2.5.0 retrying>=1.2.3,!=1.3.0 # Apache-2.0 SQLAlchemy>=0.9.7,<=0.9.99 WebOb>=1.2.3 -python-keystoneclient>=1.1.0 +python-keystoneclient>=1.1.0,<1.4.0 alembic>=0.7.2 six>=1.9.0 stevedore>=1.3.0,<1.4.0 # Apache-2.0 @@ -34,4 +34,4 @@ oslo.rootwrap>=1.6.0,<1.7.0 # Apache-2.0 oslo.serialization>=1.4.0,<1.5.0 # 
Apache-2.0 oslo.utils>=1.4.0,<1.5.0 # Apache-2.0 -python-novaclient>=2.22.0 +python-novaclient>=2.22.0,<2.24.0 From cbfb3e487d97998ec49d7faa751bc26202da7d0e Mon Sep 17 00:00:00 2001 From: Kevin Benton Date: Mon, 20 Apr 2015 22:26:22 -0700 Subject: [PATCH 017/292] Only update MTU in update code for MTU The ML2 create_network_db was re-passing in the entire network with extensions like vlan_transparency present that was causing issues in the base update function it was calling. This corrects the behavior by having it only update the MTU, which is the only thing it was intending to update in the first place. Change-Id: I723c5c138e0830de98f6024c7635ec65065e9346 Closes-Bug: #1446784 (cherry picked from commit f85de393c469d1e649a1c1e5ee1b683246442351) --- neutron/plugins/ml2/plugin.py | 2 +- neutron/tests/unit/plugins/ml2/test_plugin.py | 22 +++++++++++++++++++ 2 files changed, 23 insertions(+), 1 deletion(-) diff --git a/neutron/plugins/ml2/plugin.py b/neutron/plugins/ml2/plugin.py index 3f734d9e315..a1caff03576 100644 --- a/neutron/plugins/ml2/plugin.py +++ b/neutron/plugins/ml2/plugin.py @@ -598,7 +598,7 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2, if net_data.get(api.MTU, 0) > 0: res = super(Ml2Plugin, self).update_network(context, - result['id'], network) + result['id'], {'network': {api.MTU: net_data[api.MTU]}}) result[api.MTU] = res.get(api.MTU, 0) return result, mech_context diff --git a/neutron/tests/unit/plugins/ml2/test_plugin.py b/neutron/tests/unit/plugins/ml2/test_plugin.py index e9cd744064d..96817683af6 100644 --- a/neutron/tests/unit/plugins/ml2/test_plugin.py +++ b/neutron/tests/unit/plugins/ml2/test_plugin.py @@ -266,6 +266,28 @@ class TestMl2NetworksV2(test_plugin.TestNetworksV2, self.assertEqual(db_api.MAX_RETRIES + 1, f.call_count) +class TestMl2NetworksWithVlanTransparencyAndMTU(TestMl2NetworksV2): + def setUp(self, plugin=None): + config.cfg.CONF.set_override('path_mtu', 1000, group='ml2') + config.cfg.CONF.set_override('segment_mtu', 
1000, group='ml2') + config.cfg.CONF.set_override('advertise_mtu', True) + config.cfg.CONF.set_override('vlan_transparent', True) + super(TestMl2NetworksWithVlanTransparencyAndMTU, self).setUp(plugin) + + def test_create_network_vlan_transparent_and_mtu(self): + data = {'network': {'name': 'net1', + mpnet.SEGMENTS: + [{pnet.NETWORK_TYPE: 'vlan', + pnet.PHYSICAL_NETWORK: 'physnet1'}], + 'tenant_id': 'tenant_one'}} + network_req = self.new_create_request('networks', data) + res = network_req.get_response(self.api) + self.assertEqual(201, res.status_int) + network = self.deserialize(self.fmt, res)['network'] + self.assertEqual(network['mtu'], 1000) + self.assertIn('vlan_transparent', network) + + class TestMl2SubnetsV2(test_plugin.TestSubnetsV2, Ml2PluginV2TestCase): pass From 0536ec113bc438265ba547bb8a8006aa96e646e3 Mon Sep 17 00:00:00 2001 From: "watanabe.isao" Date: Wed, 15 Apr 2015 15:48:08 +0900 Subject: [PATCH 018/292] Restrict subnet create/update to avoid DHCP resync As we know, IPs in subnet CIDR are used for 1) Broadcast port 2) Gateway port 3) DHCP port if enable_dhcp is True, or update to True 4) Others go into allocation_pools Above 1) to 3) are created by default, which means if CIDR doesn't have that much of IPs, subnet create/update will cause a DHCP resync. This fix is to add some restricts to the issue: A) When subnet create, if enable_dhcp is True, /31 and /32 cidrs are forbidden for IPv4 subnets while /127 and /128 cidrs are forbidden for IPv6 subnets. B) When subnet update, if enable_dhcp is changing to True and there are no more IPs in allocation_pools, the request should be denied. 
Change-Id: I2e4a4d5841b9ad908f02b7d0795cba07596c023d Co-authored-by: Andrew Boik Closes-Bug: #1443798 (cherry picked from commit 0c1f96ad5a6606c1205bd50ea944c3a383892cde) --- neutron/db/db_base_plugin_v2.py | 24 +++++++++ .../tests/unit/db/test_db_base_plugin_v2.py | 52 ++++++++++++++++++- 2 files changed, 74 insertions(+), 2 deletions(-) diff --git a/neutron/db/db_base_plugin_v2.py b/neutron/db/db_base_plugin_v2.py index dcf7adc6f6f..24c251340b7 100644 --- a/neutron/db/db_base_plugin_v2.py +++ b/neutron/db/db_base_plugin_v2.py @@ -1052,6 +1052,30 @@ class NeutronDbPluginV2(neutron_plugin_base_v2.NeutronPluginBaseV2, if attributes.is_attr_set(s.get('cidr')): self._validate_ip_version(ip_ver, s['cidr'], 'cidr') + # TODO(watanabe.isao): After we found a way to avoid the re-sync + # from the agent side, this restriction could be removed. + if cur_subnet: + dhcp_was_enabled = cur_subnet.enable_dhcp + else: + dhcp_was_enabled = False + if s.get('enable_dhcp') and not dhcp_was_enabled: + subnet_prefixlen = netaddr.IPNetwork(s['cidr']).prefixlen + error_message = _("Subnet has a prefix length that is " + "incompatible with DHCP service enabled.") + if ((ip_ver == 4 and subnet_prefixlen > 30) or + (ip_ver == 6 and subnet_prefixlen > 126)): + raise n_exc.InvalidInput(error_message=error_message) + else: + # NOTE(watanabe.isao): The following restriction is necessary + # only when updating subnet. + if cur_subnet: + range_qry = context.session.query(models_v2. 
+ IPAvailabilityRange).join(models_v2.IPAllocationPool) + ip_range = range_qry.filter_by(subnet_id=s['id']).first() + if not ip_range: + raise n_exc.IpAddressGenerationFailure( + net_id=cur_subnet.network_id) + if attributes.is_attr_set(s.get('gateway_ip')): self._validate_ip_version(ip_ver, s['gateway_ip'], 'gateway_ip') if (cfg.CONF.force_gateway_on_subnet and diff --git a/neutron/tests/unit/db/test_db_base_plugin_v2.py b/neutron/tests/unit/db/test_db_base_plugin_v2.py index e8ef97e8fdb..0804e9047d9 100644 --- a/neutron/tests/unit/db/test_db_base_plugin_v2.py +++ b/neutron/tests/unit/db/test_db_base_plugin_v2.py @@ -1339,7 +1339,7 @@ fixed_ips=ip_address%%3D%s&fixed_ips=ip_address%%3D%s&fixed_ips=subnet_id%%3D%s data['port']['fixed_ips']) def test_no_more_port_exception(self): - with self.subnet(cidr='10.0.0.0/32', gateway_ip=None) as subnet: + with self.subnet(cidr='10.0.0.0/32', enable_dhcp=False) as subnet: id = subnet['subnet']['network_id'] res = self._create_port(self.fmt, id) data = self.deserialize(self.fmt, res) @@ -3223,7 +3223,7 @@ class TestSubnetsV2(NeutronDbPluginV2TestCase): self.subnet(cidr='14.129.122.5/22'), self.subnet(cidr='15.129.122.5/24'), self.subnet(cidr='16.129.122.5/28'), - self.subnet(cidr='17.129.122.5/32', gateway_ip=None) + self.subnet(cidr='17.129.122.5/32', enable_dhcp=False) ) as subs: # the API should accept and correct these for users self.assertEqual(subs[0]['subnet']['cidr'], '10.0.0.0/8') @@ -3235,6 +3235,24 @@ class TestSubnetsV2(NeutronDbPluginV2TestCase): self.assertEqual(subs[6]['subnet']['cidr'], '16.129.122.0/28') self.assertEqual(subs[7]['subnet']['cidr'], '17.129.122.5/32') + def _test_create_subnet_with_invalid_netmask_returns_400(self, *args): + with self.network() as network: + for cidr in args: + ip_version = netaddr.IPNetwork(cidr).version + self._create_subnet(self.fmt, + network['network']['id'], + cidr, + webob.exc.HTTPClientError.code, + ip_version=ip_version) + + def 
test_create_subnet_with_invalid_netmask_returns_400_ipv4(self): + self._test_create_subnet_with_invalid_netmask_returns_400( + '10.0.0.0/31', '10.0.0.0/32') + + def test_create_subnet_with_invalid_netmask_returns_400_ipv6(self): + self._test_create_subnet_with_invalid_netmask_returns_400( + 'cafe:cafe::/127', 'cafe:cafe::/128') + def test_create_subnet_bad_ip_version(self): with self.network() as network: # Check bad IP version @@ -4153,6 +4171,36 @@ class TestSubnetsV2(NeutronDbPluginV2TestCase): self.assertEqual(res.status_int, webob.exc.HTTPClientError.code) + def _test_subnet_update_enable_dhcp_no_ip_available_returns_409( + self, allocation_pools, cidr): + ip_version = netaddr.IPNetwork(cidr).version + with self.network() as network: + with self.subnet(network=network, + allocation_pools=allocation_pools, + enable_dhcp=False, + cidr=cidr, + ip_version=ip_version) as subnet: + id = subnet['subnet']['network_id'] + self._create_port(self.fmt, id) + data = {'subnet': {'enable_dhcp': True}} + req = self.new_update_request('subnets', data, + subnet['subnet']['id']) + res = req.get_response(self.api) + self.assertEqual(res.status_int, + webob.exc.HTTPConflict.code) + + def test_subnet_update_enable_dhcp_no_ip_available_returns_409_ipv4(self): + allocation_pools = [{'start': '10.0.0.2', 'end': '10.0.0.2'}] + cidr = '10.0.0.0/30' + self._test_subnet_update_enable_dhcp_no_ip_available_returns_409( + allocation_pools, cidr) + + def test_subnet_update_enable_dhcp_no_ip_available_returns_409_ipv6(self): + allocation_pools = [{'start': '2001:db8::2', 'end': '2001:db8::2'}] + cidr = '2001:db8::/126' + self._test_subnet_update_enable_dhcp_no_ip_available_returns_409( + allocation_pools, cidr) + def test_show_subnet(self): with self.network() as network: with self.subnet(network=network) as subnet: From 53b3e751f3c7b32bed48c14742d3dd3a1178d00d Mon Sep 17 00:00:00 2001 From: Maru Newby Date: Thu, 9 Apr 2015 17:00:57 +0000 Subject: [PATCH 019/292] Double functional testing 
timeout to 180s The increase in ovs testing is resulting in job failure due to timeouts in test_killed_monitor_respawns. Giving the test more time to complete should reduce the failure rate. Change-Id: I2ba9b1eb388bfbbebbd6b0f3edb6d5a5ae0bfead Closes-Bug: #1442272 (cherry picked from commit 81098620c298394e1a98127ceeba7f297db2d906) --- tox.ini | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tox.ini b/tox.ini index 32cecacf8f1..6a9cc8ffb8f 100644 --- a/tox.ini +++ b/tox.ini @@ -32,7 +32,7 @@ setenv = OS_TEST_PATH=./neutron/tests/api [testenv:functional] setenv = OS_TEST_PATH=./neutron/tests/functional - OS_TEST_TIMEOUT=90 + OS_TEST_TIMEOUT=180 deps = {[testenv]deps} -r{toxinidir}/neutron/tests/functional/requirements.txt @@ -43,14 +43,14 @@ setenv = OS_TEST_PATH=./neutron/tests/functional OS_ROOTWRAP_CMD=sudo {envbindir}/neutron-rootwrap {envdir}/etc/neutron/rootwrap.conf OS_ROOTWRAP_DAEMON_CMD=sudo {envbindir}/neutron-rootwrap-daemon {envdir}/etc/neutron/rootwrap.conf OS_FAIL_ON_MISSING_DEPS=1 - OS_TEST_TIMEOUT=90 + OS_TEST_TIMEOUT=180 sitepackages=True deps = {[testenv:functional]deps} [testenv:fullstack] setenv = OS_TEST_PATH=./neutron/tests/fullstack - OS_TEST_TIMEOUT=90 + OS_TEST_TIMEOUT=180 deps = {[testenv]deps} -r{toxinidir}/neutron/tests/functional/requirements.txt @@ -61,7 +61,7 @@ setenv = OS_TEST_PATH=./neutron/tests/fullstack OS_ROOTWRAP_CMD=sudo {envbindir}/neutron-rootwrap {envdir}/etc/neutron/rootwrap.conf OS_ROOTWRAP_DAEMON_CMD=sudo {envbindir}/neutron-rootwrap-daemon {envdir}/etc/neutron/rootwrap.conf OS_FAIL_ON_MISSING_DEPS=1 - OS_TEST_TIMEOUT=90 + OS_TEST_TIMEOUT=180 sitepackages=True deps = {[testenv:functional]deps} From 38211ae67cb76ade85b08c028b6e88bfc867afc9 Mon Sep 17 00:00:00 2001 From: Ihar Hrachyshka Date: Mon, 20 Apr 2015 17:06:38 +0200 Subject: [PATCH 020/292] tests: confirm that _output_hosts_file does not log too often I3ad7864eeb2f959549ed356a1e34fa18804395cc didn't include any regression unit tests to 
validate that the method won't ever log too often again, reintroducing performance drop in later patches. It didn't play well with stable backports of the fix, where context was lost when doing the backport, that left the bug unfixed in stable/juno even though the patch was merged there [1]. The patch adds an explicit note in the code that suggests not to add new log messages inside the loop to avoid regression, and a unit test was added to capture it. Once the test is merged in master, it will be proposed for stable/juno inclusion, with additional changes that would fix the regression again. Related-Bug: #1414218 Change-Id: I5d43021932d6a994638c348eda277dd8337cf041 (cherry picked from commit 3b74095a935f6d2027e6bf04cc4aa21f8a1b46f2) --- neutron/agent/linux/dhcp.py | 2 ++ neutron/tests/unit/agent/linux/test_dhcp.py | 10 ++++++++++ 2 files changed, 12 insertions(+) diff --git a/neutron/agent/linux/dhcp.py b/neutron/agent/linux/dhcp.py index 55509cb84b2..f594b775c0f 100644 --- a/neutron/agent/linux/dhcp.py +++ b/neutron/agent/linux/dhcp.py @@ -483,6 +483,8 @@ class Dnsmasq(DhcpLocalProcess): LOG.debug('Building host file: %s', filename) dhcp_enabled_subnet_ids = [s.id for s in self.network.subnets if s.enable_dhcp] + # NOTE(ihrachyshka): the loop should not log anything inside it, to + # avoid potential performance drop when lots of hosts are dumped for (port, alloc, hostname, name) in self._iter_hosts(): if not alloc: if getattr(port, 'extra_dhcp_opts', False): diff --git a/neutron/tests/unit/agent/linux/test_dhcp.py b/neutron/tests/unit/agent/linux/test_dhcp.py index 09a91a7caa5..fca35c1bb8b 100644 --- a/neutron/tests/unit/agent/linux/test_dhcp.py +++ b/neutron/tests/unit/agent/linux/test_dhcp.py @@ -1403,6 +1403,16 @@ class TestDnsmasq(TestBase): 'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb'], sorted(result)) + def test__output_hosts_file_log_only_twice(self): + dm = self._get_dnsmasq(FakeDualStackNetworkSingleDHCP()) + with mock.patch.object(dhcp.LOG, 'process') as 
process: + process.return_value = ('fake_message', {}) + dm._output_hosts_file() + # The method logs twice, at the start of and the end. There should be + # no other logs, no matter how many hosts there are to dump in the + # file. + self.assertEqual(2, process.call_count) + def test_only_populates_dhcp_enabled_subnets(self): exp_host_name = '/dhcp/eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee/host' exp_host_data = ('00:00:80:aa:bb:cc,host-192-168-0-2.openstacklocal,' From d37e566dcadf8a540eb5f84b668847fa192393a1 Mon Sep 17 00:00:00 2001 From: Kevin Benton Date: Fri, 24 Apr 2015 00:35:31 -0700 Subject: [PATCH 021/292] Don't resync on DHCP agent setup failure There are various cases where the DHCP agent will try to create a DHCP port for a network and there will be a failure. This has primarily been caused by a lack of available IP addresses in the allocation pool. Trying to fix all availability corner cases on the server side will be very difficult due to race conditions between multiple ports being created, the dhcp_agents_per_network parameter, etc. This patch just stops the resync attempt on the agent side if a failure is caused by an IP address generation problem. Future updates to the subnet will cause another attempt so if the tenant does fix the issue they will get DHCP service. 
Change-Id: I0896730126d6dca13fe9284b4d812cfb081b6218 Closes-Bug: #1447883 (cherry picked from commit db9ac7e0110a0c2ef1b65213317ee8b7f1053ddc) --- neutron/agent/dhcp/agent.py | 7 ++++++- neutron/tests/unit/agent/dhcp/test_agent.py | 5 +++++ 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/neutron/agent/dhcp/agent.py b/neutron/agent/dhcp/agent.py index 214bfdff14d..c11c1f24ee5 100644 --- a/neutron/agent/dhcp/agent.py +++ b/neutron/agent/dhcp/agent.py @@ -120,7 +120,12 @@ class DhcpAgent(manager.Manager): 'still exist.'), {'net_id': network.id, 'action': action}) except Exception as e: - self.schedule_resync(e, network.id) + if getattr(e, 'exc_type', '') != 'IpAddressGenerationFailure': + # Don't resync if port could not be created because of an IP + # allocation failure. When the subnet is updated with a new + # allocation pool or a port is deleted to free up an IP, this + # will automatically be retried on the notification + self.schedule_resync(e, network.id) if (isinstance(e, oslo_messaging.RemoteError) and e.exc_type == 'NetworkNotFound' or isinstance(e, exceptions.NetworkNotFound)): diff --git a/neutron/tests/unit/agent/dhcp/test_agent.py b/neutron/tests/unit/agent/dhcp/test_agent.py index c618658f7c5..5aca05ffca4 100644 --- a/neutron/tests/unit/agent/dhcp/test_agent.py +++ b/neutron/tests/unit/agent/dhcp/test_agent.py @@ -300,6 +300,11 @@ class TestDhcpAgent(base.BaseTestCase): self.assertEqual(log.call_count, 1) self.assertEqual(expected_sync, schedule_resync.called) + def test_call_driver_ip_address_generation_failure(self): + error = oslo_messaging.RemoteError( + exc_type='IpAddressGenerationFailure') + self._test_call_driver_failure(exc=error, expected_sync=False) + def test_call_driver_failure(self): self._test_call_driver_failure() From 4e71c48bbfd5e8b8c59f0c45ade52ba8eddc8b63 Mon Sep 17 00:00:00 2001 From: rossella Date: Thu, 15 Jan 2015 16:15:23 +0100 Subject: [PATCH 022/292] Add devices to update in RPC call 
security_groups_provider_updated When a security_groups_provider_updated is received then a global refresh of the firewall is performed. This can be avoided if the plugins pass as parameter of the call the devices that belongs to the network updated. Partially-Implements: blueprint restructure-l2-agent Change-Id: I1e78f3a5ec7e5c5bcba338a0097566422411ef7e --- neutron/agent/securitygroups_rpc.py | 12 +++++------ .../api/rpc/handlers/securitygroups_rpc.py | 11 ++++++---- neutron/db/securitygroups_rpc_base.py | 17 +++++++++++----- .../agent/linuxbridge_neutron_agent.py | 3 ++- .../openvswitch/agent/ovs_neutron_agent.py | 3 ++- .../unit/agent/test_securitygroups_rpc.py | 20 +++++++++++++------ .../rpc/handlers/test_securitygroups_rpc.py | 2 +- neutron/tests/unit/plugins/ml2/test_plugin.py | 17 ++++++++++++---- 8 files changed, 57 insertions(+), 28 deletions(-) diff --git a/neutron/agent/securitygroups_rpc.py b/neutron/agent/securitygroups_rpc.py index 5b24dbe3af2..a7dc73123b9 100644 --- a/neutron/agent/securitygroups_rpc.py +++ b/neutron/agent/securitygroups_rpc.py @@ -202,15 +202,15 @@ class SecurityGroupAgentRpc(object): else: self.refresh_firewall(devices) - def security_groups_provider_updated(self): + def security_groups_provider_updated(self, devices_to_update): LOG.info(_LI("Provider rule updated")) if self.defer_refresh_firewall: - # NOTE(salv-orlando): A 'global refresh' might not be - # necessary if the subnet for which the provider rules - # were updated is known - self.global_refresh_firewall = True + if devices_to_update is None: + self.global_refresh_firewall = True + else: + self.devices_to_refilter |= set(devices_to_update) else: - self.refresh_firewall() + self.refresh_firewall(devices_to_update) def remove_devices_filter(self, device_ids): if not device_ids: diff --git a/neutron/api/rpc/handlers/securitygroups_rpc.py b/neutron/api/rpc/handlers/securitygroups_rpc.py index 58d9c7d3dcd..24e268065ce 100644 --- 
a/neutron/api/rpc/handlers/securitygroups_rpc.py +++ b/neutron/api/rpc/handlers/securitygroups_rpc.py @@ -153,12 +153,14 @@ class SecurityGroupAgentRpcApiMixin(object): cctxt.cast(context, 'security_groups_member_updated', security_groups=security_groups) - def security_groups_provider_updated(self, context): + def security_groups_provider_updated(self, context, + devices_to_update=None): """Notify provider updated security groups.""" - cctxt = self.client.prepare(version=self.SG_RPC_VERSION, + cctxt = self.client.prepare(version=1.3, topic=self._get_security_group_topic(), fanout=True) - cctxt.cast(context, 'security_groups_provider_updated') + cctxt.cast(context, 'security_groups_provider_updated', + devices_to_update=devices_to_update) class SecurityGroupAgentRpcCallbackMixin(object): @@ -205,6 +207,7 @@ class SecurityGroupAgentRpcCallbackMixin(object): def security_groups_provider_updated(self, context, **kwargs): """Callback for security group provider update.""" LOG.debug("Provider rule updated") + devices_to_update = kwargs.get('devices_to_update') if not self.sg_agent: return self._security_groups_agent_not_set() - self.sg_agent.security_groups_provider_updated() + self.sg_agent.security_groups_provider_updated(devices_to_update) diff --git a/neutron/db/securitygroups_rpc_base.py b/neutron/db/securitygroups_rpc_base.py index c47493599e1..9d448a9b96d 100644 --- a/neutron/db/securitygroups_rpc_base.py +++ b/neutron/db/securitygroups_rpc_base.py @@ -147,22 +147,29 @@ class SecurityGroupServerRpcMixin(sg_db.SecurityGroupDbMixin): occurs and the plugin agent fetches the update provider rule in the other RPC call (security_group_rules_for_devices). 
""" - security_groups_provider_updated = False + sg_provider_updated_networks = set() sec_groups = set() for port in ports: if port['device_owner'] == q_const.DEVICE_OWNER_DHCP: - security_groups_provider_updated = True + sg_provider_updated_networks.add( + port['network_id']) # For IPv6, provider rule need to be updated in case router # interface is created or updated after VM port is created. elif port['device_owner'] == q_const.DEVICE_OWNER_ROUTER_INTF: if any(netaddr.IPAddress(fixed_ip['ip_address']).version == 6 for fixed_ip in port['fixed_ips']): - security_groups_provider_updated = True + sg_provider_updated_networks.add( + port['network_id']) else: sec_groups |= set(port.get(ext_sg.SECURITYGROUPS)) - if security_groups_provider_updated: - self.notifier.security_groups_provider_updated(context) + if sg_provider_updated_networks: + ports_query = context.session.query(models_v2.Port.id).filter( + models_v2.Port.network_id.in_( + sg_provider_updated_networks)).all() + ports_to_update = [p.id for p in ports_query] + self.notifier.security_groups_provider_updated( + context, ports_to_update) if sec_groups: self.notifier.security_groups_member_updated( context, list(sec_groups)) diff --git a/neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py b/neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py index 7b1f2861500..24f94d035e1 100644 --- a/neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py +++ b/neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py @@ -637,7 +637,8 @@ class LinuxBridgeRpcCallbacks(sg_rpc.SecurityGroupAgentRpcCallbackMixin, # Set RPC API version to 1.0 by default. 
# history # 1.1 Support Security Group RPC - target = oslo_messaging.Target(version='1.1') + # 1.3 Added param devices_to_update to security_groups_provider_updated + target = oslo_messaging.Target(version='1.3') def __init__(self, context, agent, sg_agent): super(LinuxBridgeRpcCallbacks, self).__init__() diff --git a/neutron/plugins/openvswitch/agent/ovs_neutron_agent.py b/neutron/plugins/openvswitch/agent/ovs_neutron_agent.py index 9e510c70b36..8185d4d88de 100644 --- a/neutron/plugins/openvswitch/agent/ovs_neutron_agent.py +++ b/neutron/plugins/openvswitch/agent/ovs_neutron_agent.py @@ -117,7 +117,8 @@ class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin, # 1.0 Initial version # 1.1 Support Security Group RPC # 1.2 Support DVR (Distributed Virtual Router) RPC - target = oslo_messaging.Target(version='1.2') + # 1.3 Added param devices_to_update to security_groups_provider_updated + target = oslo_messaging.Target(version='1.3') def __init__(self, integ_br, tun_br, local_ip, bridge_mappings, polling_interval, tunnel_types=None, diff --git a/neutron/tests/unit/agent/test_securitygroups_rpc.py b/neutron/tests/unit/agent/test_securitygroups_rpc.py index 8dd6c90b0a6..0493540789f 100644 --- a/neutron/tests/unit/agent/test_securitygroups_rpc.py +++ b/neutron/tests/unit/agent/test_securitygroups_rpc.py @@ -1186,9 +1186,9 @@ class SecurityGroupAgentRpcTestCase(BaseSecurityGroupAgentRpcTestCase): def test_security_groups_provider_updated(self): self.agent.refresh_firewall = mock.Mock() - self.agent.security_groups_provider_updated() + self.agent.security_groups_provider_updated(None) self.agent.refresh_firewall.assert_has_calls( - [mock.call.refresh_firewall()]) + [mock.call.refresh_firewall(None)]) def test_refresh_firewall(self): self.agent.prepare_devices_filter(['fake_port_id']) @@ -1304,9 +1304,9 @@ class SecurityGroupAgentEnhancedRpcTestCase( def test_security_groups_provider_updated_enhanced_rpc(self): self.agent.refresh_firewall = mock.Mock() - 
self.agent.security_groups_provider_updated() + self.agent.security_groups_provider_updated(None) self.agent.refresh_firewall.assert_has_calls( - [mock.call.refresh_firewall()]) + [mock.call.refresh_firewall(None)]) def test_refresh_firewall_enhanced_rpc(self): self.agent.prepare_devices_filter(['fake_port_id']) @@ -1438,9 +1438,16 @@ class SecurityGroupAgentRpcWithDeferredRefreshTestCase( self.assertIn('fake_device_2', self.agent.devices_to_refilter) def test_security_groups_provider_updated(self): - self.agent.security_groups_provider_updated() + self.agent.security_groups_provider_updated(None) self.assertTrue(self.agent.global_refresh_firewall) + def test_security_groups_provider_updated_devices_specified(self): + self.agent.security_groups_provider_updated( + ['fake_device_1', 'fake_device_2']) + self.assertFalse(self.agent.global_refresh_firewall) + self.assertIn('fake_device_1', self.agent.devices_to_refilter) + self.assertIn('fake_device_2', self.agent.devices_to_refilter) + def test_setup_port_filters_new_ports_only(self): self.agent.prepare_devices_filter = mock.Mock() self.agent.refresh_firewall = mock.Mock() @@ -1593,7 +1600,8 @@ class SecurityGroupAgentRpcApiTestCase(base.BaseTestCase): def test_security_groups_provider_updated(self): self.notifier.security_groups_provider_updated(None) self.mock_cast.assert_has_calls( - [mock.call(None, 'security_groups_provider_updated')]) + [mock.call(None, 'security_groups_provider_updated', + devices_to_update=None)]) def test_security_groups_rule_updated(self): self.notifier.security_groups_rule_updated( diff --git a/neutron/tests/unit/api/rpc/handlers/test_securitygroups_rpc.py b/neutron/tests/unit/api/rpc/handlers/test_securitygroups_rpc.py index 7c8b79f67d1..a3bc79cf1d2 100644 --- a/neutron/tests/unit/api/rpc/handlers/test_securitygroups_rpc.py +++ b/neutron/tests/unit/api/rpc/handlers/test_securitygroups_rpc.py @@ -61,4 +61,4 @@ class SGAgentRpcCallBackMixinTestCase(base.BaseTestCase): def 
test_security_groups_provider_updated(self): self.rpc.security_groups_provider_updated(None) self.rpc.sg_agent.assert_has_calls( - [mock.call.security_groups_provider_updated()]) + [mock.call.security_groups_provider_updated(None)]) diff --git a/neutron/tests/unit/plugins/ml2/test_plugin.py b/neutron/tests/unit/plugins/ml2/test_plugin.py index cc51029fdfa..f20a2202462 100644 --- a/neutron/tests/unit/plugins/ml2/test_plugin.py +++ b/neutron/tests/unit/plugins/ml2/test_plugin.py @@ -468,6 +468,15 @@ class TestMl2PortsV2(test_plugin.TestPortsV2, Ml2PluginV2TestCase): m_upd.assert_called_once_with(ctx, used_sg) self.assertFalse(p_upd.called) + def _check_security_groups_provider_updated_args(self, p_upd_mock, net_id): + query_params = "network_id=%s" % net_id + network_ports = self._list('ports', query_params=query_params) + network_ports_ids = [port['id'] for port in network_ports['ports']] + self.assertTrue(p_upd_mock.called) + p_upd_args = p_upd_mock.call_args + ports_ids = p_upd_args[0][1] + self.assertEqual(sorted(network_ports_ids), sorted(ports_ids)) + def test_create_ports_bulk_with_sec_grp_member_provider_update(self): ctx = context.get_admin_context() plugin = manager.NeutronManager.get_plugin() @@ -496,15 +505,14 @@ class TestMl2PortsV2(test_plugin.TestPortsV2, Ml2PluginV2TestCase): ports = self.deserialize(self.fmt, res) used_sg = ports['ports'][0]['security_groups'] m_upd.assert_called_once_with(ctx, used_sg) - p_upd.assert_called_once_with(ctx) - + self._check_security_groups_provider_updated_args(p_upd, net_id) m_upd.reset_mock() p_upd.reset_mock() data[0]['device_owner'] = constants.DEVICE_OWNER_DHCP self._create_bulk_from_list(self.fmt, 'port', data, context=ctx) self.assertFalse(m_upd.called) - p_upd.assert_called_once_with(ctx) + self._check_security_groups_provider_updated_args(p_upd, net_id) def test_create_ports_bulk_with_sec_grp_provider_update_ipv6(self): ctx = context.get_admin_context() @@ -532,7 +540,8 @@ class 
TestMl2PortsV2(test_plugin.TestPortsV2, Ml2PluginV2TestCase): self._create_bulk_from_list(self.fmt, 'port', data, context=ctx) self.assertFalse(m_upd.called) - p_upd.assert_called_once_with(ctx) + self._check_security_groups_provider_updated_args( + p_upd, net_id) def test_delete_port_no_notify_in_disassociate_floatingips(self): ctx = context.get_admin_context() From 7260e0e3fc2ea479e80e0962624aca7fd38a1f60 Mon Sep 17 00:00:00 2001 From: Henry Gessau Date: Mon, 27 Apr 2015 09:59:21 -0400 Subject: [PATCH 023/292] Run radvd as root During the refactoring of external process management radvd lost its root privileges. Closes-bug: 1448813 Change-Id: I84883fe81684afafac9b024282a03f447c8f825a (cherry picked from commit a5e54338770fc074e01fa88dbf909ee1af1b66b2) --- neutron/agent/linux/ra.py | 3 ++- neutron/tests/unit/agent/l3/test_agent.py | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/neutron/agent/linux/ra.py b/neutron/agent/linux/ra.py index 7f800c26961..d9eca8d6475 100644 --- a/neutron/agent/linux/ra.py +++ b/neutron/agent/linux/ra.py @@ -103,7 +103,8 @@ class DaemonMonitor(object): default_cmd_callback=callback, namespace=self._router_ns, service=RADVD_SERVICE_NAME, - conf=cfg.CONF) + conf=cfg.CONF, + run_as_root=True) def _spawn_radvd(self, radvd_conf): def callback(pid_file): diff --git a/neutron/tests/unit/agent/l3/test_agent.py b/neutron/tests/unit/agent/l3/test_agent.py index 4c6682bd8aa..a5016f815f9 100644 --- a/neutron/tests/unit/agent/l3/test_agent.py +++ b/neutron/tests/unit/agent/l3/test_agent.py @@ -1399,7 +1399,8 @@ class TestBasicRouterOperations(BasicRouterOperationsFramework): service=process, default_cmd_callback=mock.ANY, namespace=ri.ns_name, - conf=mock.ANY)] + conf=mock.ANY, + run_as_root=True)] def _process_router_ipv6_subnet_added( self, router, ipv6_subnet_modes=None): From a377b4fc95f58749178b881d0104ecfaa0b26714 Mon Sep 17 00:00:00 2001 From: Yalei Wang Date: Tue, 21 Apr 2015 15:17:13 +0800 Subject: [PATCH 024/292] Fix 
port creation verification of the port-security extension When port is created, we should check the content of the security-group and address-pairs like we do when port updated. This patch also updates address-pairs testing unskipping some port-security-related tests. Change-Id: Ia27881a34ff99cad34c84764d2bf8a6cdf77af9c Closes-Bug: #1446087 --- neutron/plugins/ml2/plugin.py | 5 ++--- .../tests/unit/db/test_allowedaddresspairs_db.py | 12 ++++++++++++ .../unit/plugins/ml2/test_ext_portsecurity.py | 16 ++++++++++++++++ neutron/tests/unit/plugins/ml2/test_plugin.py | 5 +++++ 4 files changed, 35 insertions(+), 3 deletions(-) diff --git a/neutron/plugins/ml2/plugin.py b/neutron/plugins/ml2/plugin.py index c668454ac8e..726fcdf8e9c 100644 --- a/neutron/plugins/ml2/plugin.py +++ b/neutron/plugins/ml2/plugin.py @@ -63,7 +63,6 @@ from neutron.extensions import extra_dhcp_opt as edo_ext from neutron.extensions import portbindings from neutron.extensions import portsecurity as psec from neutron.extensions import providernet as provider -from neutron.extensions import securitygroup as ext_sg from neutron.extensions import vlantransparent from neutron.i18n import _LE, _LI, _LW from neutron import manager @@ -946,7 +945,7 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2, port_data[psec.PORTSECURITY]) # allowed address pair checks - if attributes.is_attr_set(attrs.get(addr_pair.ADDRESS_PAIRS)): + if self._check_update_has_allowed_address_pairs(port): if not port_security: raise addr_pair.AddressPairAndPortSecurityRequired() else: @@ -955,7 +954,7 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2, if port_security: self._ensure_default_security_group_on_port(context, port) - elif attributes.is_attr_set(attrs.get(ext_sg.SECURITYGROUPS)): + elif self._check_update_has_security_groups(port): raise psec.PortSecurityAndIPRequiredForSecurityGroups() def _create_port_db(self, context, port): diff --git a/neutron/tests/unit/db/test_allowedaddresspairs_db.py 
b/neutron/tests/unit/db/test_allowedaddresspairs_db.py index 6185d2e1880..2af639f7089 100644 --- a/neutron/tests/unit/db/test_allowedaddresspairs_db.py +++ b/neutron/tests/unit/db/test_allowedaddresspairs_db.py @@ -142,6 +142,18 @@ class TestAllowedAddressPairs(AllowedAddressPairDBTestCase): self.deserialize(self.fmt, res) self.assertEqual(res.status_int, 409) + address_pairs = [] + res = self._create_port(self.fmt, net['network']['id'], + arg_list=('port_security_enabled', + addr_pair.ADDRESS_PAIRS,), + port_security_enabled=False, + allowed_address_pairs=address_pairs) + port = self.deserialize(self.fmt, res) + self.assertFalse(port['port'][psec.PORTSECURITY]) + self.assertEqual(port['port'][addr_pair.ADDRESS_PAIRS], + address_pairs) + self._delete('ports', port['port']['id']) + def test_create_port_bad_mac(self): address_pairs = [{'mac_address': 'invalid_mac', 'ip_address': '10.0.0.1'}] diff --git a/neutron/tests/unit/plugins/ml2/test_ext_portsecurity.py b/neutron/tests/unit/plugins/ml2/test_ext_portsecurity.py index 6180ff10e86..0def93842e3 100644 --- a/neutron/tests/unit/plugins/ml2/test_ext_portsecurity.py +++ b/neutron/tests/unit/plugins/ml2/test_ext_portsecurity.py @@ -13,6 +13,7 @@ # License for the specific language governing permissions and limitations # under the License. 
+from neutron.extensions import portsecurity as psec from neutron.plugins.ml2 import config from neutron.tests.unit.extensions import test_portsecurity as test_psec from neutron.tests.unit.plugins.ml2 import test_plugin @@ -27,3 +28,18 @@ class PSExtDriverTestCase(test_plugin.Ml2PluginV2TestCase, self._extension_drivers, group='ml2') super(PSExtDriverTestCase, self).setUp() + + def test_create_port_with_secgroup_none_and_port_security_false(self): + if self._skip_security_group: + self.skipTest("Plugin does not support security groups") + with self.network() as net: + with self.subnet(network=net): + res = self._create_port('json', net['network']['id'], + arg_list=('security_groups', + 'port_security_enabled'), + security_groups=[], + port_security_enabled=False) + self.assertEqual(res.status_int, 201) + port = self.deserialize('json', res) + self.assertFalse(port['port'][psec.PORTSECURITY]) + self.assertEqual(port['port']['security_groups'], []) diff --git a/neutron/tests/unit/plugins/ml2/test_plugin.py b/neutron/tests/unit/plugins/ml2/test_plugin.py index cc51029fdfa..aa6bde45849 100644 --- a/neutron/tests/unit/plugins/ml2/test_plugin.py +++ b/neutron/tests/unit/plugins/ml2/test_plugin.py @@ -1133,7 +1133,12 @@ class TestMultiSegmentNetworks(Ml2PluginV2TestCase): class TestMl2AllowedAddressPairs(Ml2PluginV2TestCase, test_pair.TestAllowedAddressPairs): + _extension_drivers = ['port_security'] + def setUp(self, plugin=None): + config.cfg.CONF.set_override('extension_drivers', + self._extension_drivers, + group='ml2') super(test_pair.TestAllowedAddressPairs, self).setUp( plugin=PLUGIN_NAME) From b85cfa96118d73ad87b150e488f295cbf9a2c140 Mon Sep 17 00:00:00 2001 From: Darragh O'Reilly Date: Fri, 27 Feb 2015 08:23:24 +0000 Subject: [PATCH 025/292] Fix dhcp _test_sync_state_helper asserting calls wrong It was using a non-existing method that did nothing and that masked other problems with the tests that used it. 
Changed to use assert_has_calls() and fixed the problems that fell out. Change-Id: I4a64c3365f9958b14c2384932a31da2ce191e2e2 Closes-Bug: 1426265 --- neutron/hacking/checks.py | 10 +++++- neutron/tests/unit/agent/dhcp/test_agent.py | 36 +++++++-------------- neutron/tests/unit/hacking/test_checks.py | 16 +++++++++ 3 files changed, 37 insertions(+), 25 deletions(-) diff --git a/neutron/hacking/checks.py b/neutron/hacking/checks.py index 392f09d1526..0c925cc39e1 100644 --- a/neutron/hacking/checks.py +++ b/neutron/hacking/checks.py @@ -107,14 +107,22 @@ def check_assert_called_once_with(logical_line, filename): # Try to detect unintended calls of nonexistent mock methods like: # assert_called_once # assertCalledOnceWith + # assert_has_called if 'neutron/tests/' in filename: if '.assert_called_once_with(' in logical_line: return - if '.assertcalledonce' in logical_line.lower().replace('_', ''): + uncased_line = logical_line.lower().replace('_', '') + + if '.assertcalledonce' in uncased_line: msg = ("N322: Possible use of no-op mock method. " "please use assert_called_once_with.") yield (0, msg) + if '.asserthascalled' in uncased_line: + msg = ("N322: Possible use of no-op mock method. 
" + "please use assert_has_calls.") + yield (0, msg) + def check_oslo_namespace_imports(logical_line): if re.match(oslo_namespace_imports_from_dot, logical_line): diff --git a/neutron/tests/unit/agent/dhcp/test_agent.py b/neutron/tests/unit/agent/dhcp/test_agent.py index 24c0d10564c..7a8c138b023 100644 --- a/neutron/tests/unit/agent/dhcp/test_agent.py +++ b/neutron/tests/unit/agent/dhcp/test_agent.py @@ -330,7 +330,9 @@ class TestDhcpAgent(base.BaseTestCase): trace_level='warning', expected_sync=False) - def _test_sync_state_helper(self, known_networks, active_networks): + def _test_sync_state_helper(self, known_net_ids, active_net_ids): + active_networks = set(mock.Mock(id=netid) for netid in active_net_ids) + with mock.patch(DHCP_PLUGIN) as plug: mock_plugin = mock.Mock() mock_plugin.get_active_networks_info.return_value = active_networks @@ -338,23 +340,18 @@ class TestDhcpAgent(base.BaseTestCase): dhcp = dhcp_agent.DhcpAgent(HOSTNAME) - attrs_to_mock = dict( - [(a, mock.DEFAULT) for a in - ['refresh_dhcp_helper', 'disable_dhcp_helper', 'cache']]) + attrs_to_mock = dict([(a, mock.DEFAULT) + for a in ['disable_dhcp_helper', 'cache', + 'safe_configure_dhcp_for_network']]) with mock.patch.multiple(dhcp, **attrs_to_mock) as mocks: - mocks['cache'].get_network_ids.return_value = known_networks + mocks['cache'].get_network_ids.return_value = known_net_ids dhcp.sync_state() - exp_refresh = [ - mock.call(net_id) for net_id in active_networks] - - diff = set(known_networks) - set(active_networks) + diff = set(known_net_ids) - set(active_net_ids) exp_disable = [mock.call(net_id) for net_id in diff] - mocks['cache'].assert_has_calls([mock.call.get_network_ids()]) - mocks['refresh_dhcp_helper'].assert_has_called(exp_refresh) - mocks['disable_dhcp_helper'].assert_has_called(exp_disable) + mocks['disable_dhcp_helper'].assert_has_calls(exp_disable) def test_sync_state_initial(self): self._test_sync_state_helper([], ['a']) @@ -366,19 +363,10 @@ class 
TestDhcpAgent(base.BaseTestCase): self._test_sync_state_helper(['b'], ['a']) def test_sync_state_waitall(self): - class mockNetwork(object): - id = '0' - admin_state_up = True - subnets = [] - - def __init__(self, id): - self.id = id with mock.patch.object(dhcp_agent.eventlet.GreenPool, 'waitall') as w: - active_networks = [mockNetwork('1'), mockNetwork('2'), - mockNetwork('3'), mockNetwork('4'), - mockNetwork('5')] - known_networks = ['1', '2', '3', '4', '5'] - self._test_sync_state_helper(known_networks, active_networks) + active_net_ids = ['1', '2', '3', '4', '5'] + known_net_ids = ['1', '2', '3', '4', '5'] + self._test_sync_state_helper(known_net_ids, active_net_ids) w.assert_called_once_with() def test_sync_state_plugin_error(self): diff --git a/neutron/tests/unit/hacking/test_checks.py b/neutron/tests/unit/hacking/test_checks.py index b87ad18bcb6..4ebb4d81525 100644 --- a/neutron/tests/unit/hacking/test_checks.py +++ b/neutron/tests/unit/hacking/test_checks.py @@ -92,11 +92,21 @@ class HackingTestCase(base.BaseTestCase): mock.method(1, 2, 3, test='wow') mock.method.assertCalledOnceWith() """ + fail_code3 = """ + mock = Mock() + mock.method(1, 2, 3, test='wow') + mock.method.assert_has_called() + """ pass_code = """ mock = Mock() mock.method(1, 2, 3, test='wow') mock.method.assert_called_once_with() """ + pass_code2 = """ + mock = Mock() + mock.method(1, 2, 3, test='wow') + mock.method.assert_has_calls() + """ self.assertEqual( 1, len(list(checks.check_assert_called_once_with(fail_code1, "neutron/tests/test_assert.py")))) @@ -106,6 +116,12 @@ class HackingTestCase(base.BaseTestCase): self.assertEqual( 0, len(list(checks.check_assert_called_once_with(pass_code, "neutron/tests/test_assert.py")))) + self.assertEqual( + 1, len(list(checks.check_assert_called_once_with(fail_code3, + "neutron/tests/test_assert.py")))) + self.assertEqual( + 0, len(list(checks.check_assert_called_once_with(pass_code2, + "neutron/tests/test_assert.py")))) def 
test_check_oslo_namespace_imports(self): def check(s, fail=True): From 5509839e0af89467eb14ee178807e2898202101b Mon Sep 17 00:00:00 2001 From: Yalei Wang Date: Thu, 26 Mar 2015 15:49:13 +0800 Subject: [PATCH 026/292] Add port-security extension API test cases Netron ml2 plugin introduces a new extension port-security from Kilo cycle, this patch add the API test cases for it. It verifies the default value of the attribute, for network and port. And It also verifies the confict between setting port-security and sec-group/address-pairs. Change-Id: Ie0ec090e8fdce7dbdbce14ef47f38e8e57f262d4 Partially Implements: blueprint ml2-ovs-portsecurity Depends-On: I3035317c83d22804855517434bd8578719ce0436 --- .../test_extension_driver_port_security.py | 98 +++++++++++++++++++ 1 file changed, 98 insertions(+) create mode 100644 neutron/tests/api/test_extension_driver_port_security.py diff --git a/neutron/tests/api/test_extension_driver_port_security.py b/neutron/tests/api/test_extension_driver_port_security.py new file mode 100644 index 00000000000..10ccb224dbb --- /dev/null +++ b/neutron/tests/api/test_extension_driver_port_security.py @@ -0,0 +1,98 @@ +# Copyright 2015 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from tempest_lib.common.utils import data_utils +from tempest_lib import exceptions as lib_exc + +from neutron.tests.api import base_security_groups as base +from neutron.tests.tempest import config +from neutron.tests.tempest import test + + +CONF = config.CONF +FAKE_IP = '10.0.0.1' +FAKE_MAC = '00:25:64:e8:19:dd' + + +class PortSecTest(base.BaseSecGroupTest): + + @classmethod + def resource_setup(cls): + super(PortSecTest, cls).resource_setup() + + def _create_network(self, network_name=None, port_security_enabled=True): + """Wrapper utility that returns a test network.""" + network_name = network_name or data_utils.rand_name('test-network') + + body = self.client.create_network( + name=network_name, port_security_enabled=port_security_enabled) + network = body['network'] + self.networks.append(network) + return network + + @test.attr(type='smoke') + @test.idempotent_id('7c338ddf-e64e-4118-bd33-e49a1f2f1495') + @test.requires_ext(extension='port-security', service='network') + def test_port_sec_default_value(self): + # Default port-sec value is True, and the attr of the port will inherit + # from the port-sec of the network when it not be specified in API + network = self.create_network() + self.create_subnet(network) + self.assertTrue(network['port_security_enabled']) + port = self.create_port(network) + self.assertTrue(port['port_security_enabled']) + + @test.attr(type='smoke') + @test.idempotent_id('e60eafd2-31de-4c38-8106-55447d033b57') + @test.requires_ext(extension='port-security', service='network') + def test_port_sec_specific_value(self): + network = self.create_network() + + self.assertTrue(network['port_security_enabled']) + self.create_subnet(network) + port = self.create_port(network, port_security_enabled=False) + self.assertFalse(port['port_security_enabled']) + + # Create a network with port-sec set to False + network = self._create_network(port_security_enabled=False) + + self.assertFalse(network['port_security_enabled']) + 
self.create_subnet(network) + port = self.create_port(network, port_security_enabled=True) + self.assertTrue(port['port_security_enabled']) + + @test.attr(type=['negative', 'smoke']) + @test.idempotent_id('05642059-1bfc-4581-9bc9-aaa5db08dd60') + @test.requires_ext(extension='port-security', service='network') + def test_port_sec_update_port_failed(self): + network = self.create_network() + self.create_subnet(network) + port = self.create_port(network) + + # Exception when set port-sec to False with sec-group defined + self.assertRaises(lib_exc.Conflict, + self.update_port, port, port_security_enabled=False) + + updated_port = self.update_port( + port, security_groups=[], port_security_enabled=False) + self.assertFalse(updated_port['port_security_enabled']) + + allowed_address_pairs = [{'ip_address': FAKE_IP, + 'mac_address': FAKE_MAC}] + + # Exception when set address-pairs with port-sec is False + self.assertRaises(lib_exc.Conflict, + self.update_port, port, + allowed_address_pairs=allowed_address_pairs) From 8e24c7ef566fac7a41b40a12d3480fbdd0ab252b Mon Sep 17 00:00:00 2001 From: Miguel Angel Ajo Date: Thu, 16 Apr 2015 06:28:38 +0000 Subject: [PATCH 027/292] Reduce prefix and suffix length in ipsets The new NET prefix introduced by I8177699b157cd3eac46e2f481f47b5d966c49b07 increases collision chances by trimming the sg_id by 3 more chars. This patch reduces the prefix to 1 single char and also reduces the swap suffix to reduce the chances of collision. 
Change-Id: I8a1559e173a05b2297c5cd2efa9fee7627b88a4f Related-Bug: #1439817 Related-Bug: #1444397 --- neutron/agent/linux/ipset_manager.py | 5 +++-- neutron/tests/unit/agent/linux/test_ipset_manager.py | 4 ++-- neutron/tests/unit/agent/test_securitygroups_rpc.py | 10 +++++----- 3 files changed, 10 insertions(+), 9 deletions(-) diff --git a/neutron/agent/linux/ipset_manager.py b/neutron/agent/linux/ipset_manager.py index f80cba979b2..f0dc7de4ad0 100644 --- a/neutron/agent/linux/ipset_manager.py +++ b/neutron/agent/linux/ipset_manager.py @@ -17,7 +17,8 @@ from neutron.agent.linux import utils as linux_utils from neutron.common import utils IPSET_ADD_BULK_THRESHOLD = 5 -SWAP_SUFFIX = '-new' +NET_PREFIX = 'N' +SWAP_SUFFIX = '-n' IPSET_NAME_MAX_LENGTH = 31 - len(SWAP_SUFFIX) @@ -38,7 +39,7 @@ class IpsetManager(object): """Returns the given ipset name for an id+ethertype pair. This reference can be used from iptables. """ - name = 'NET' + ethertype + id + name = NET_PREFIX + ethertype + id return name[:IPSET_NAME_MAX_LENGTH] def set_exists(self, id, ethertype): diff --git a/neutron/tests/unit/agent/linux/test_ipset_manager.py b/neutron/tests/unit/agent/linux/test_ipset_manager.py index 44840086f60..1e22c304221 100644 --- a/neutron/tests/unit/agent/linux/test_ipset_manager.py +++ b/neutron/tests/unit/agent/linux/test_ipset_manager.py @@ -36,8 +36,8 @@ class BaseIpsetManagerTest(base.BaseTestCase): self.execute.assert_has_calls(self.expected_calls, any_order=False) def expect_set(self, addresses): - temp_input = ['create NETIPv4fake_sgid-new hash:net family inet'] - temp_input.extend('add NETIPv4fake_sgid-new %s' % ip + temp_input = ['create %s hash:net family inet' % TEST_SET_NAME_NEW] + temp_input.extend('add %s %s' % (TEST_SET_NAME_NEW, ip) for ip in addresses) input = '\n'.join(temp_input) self.expected_calls.extend([ diff --git a/neutron/tests/unit/agent/test_securitygroups_rpc.py b/neutron/tests/unit/agent/test_securitygroups_rpc.py index 783c08c5c51..fe6474ba90a 
100644 --- a/neutron/tests/unit/agent/test_securitygroups_rpc.py +++ b/neutron/tests/unit/agent/test_securitygroups_rpc.py @@ -1775,7 +1775,7 @@ IPSET_FILTER_1 = """# Generated by iptables_manager [0:0] -A %(bn)s-i_port1 -s 10.0.0.2/32 -p udp -m udp --sport 67 --dport 68 \ -j RETURN [0:0] -A %(bn)s-i_port1 -p tcp -m tcp --dport 22 -j RETURN -[0:0] -A %(bn)s-i_port1 -m set --match-set NETIPv4security_group1 src -j \ +[0:0] -A %(bn)s-i_port1 -m set --match-set NIPv4security_group1 src -j \ RETURN [0:0] -A %(bn)s-i_port1 -j %(bn)s-sg-fallback [0:0] -A %(bn)s-FORWARD %(physdev_mod)s --physdev-EGRESS tap_port1 \ @@ -1934,7 +1934,7 @@ IPSET_FILTER_2 = """# Generated by iptables_manager [0:0] -A %(bn)s-i_%(port1)s -s 10.0.0.2/32 -p udp -m udp --sport 67 \ --dport 68 -j RETURN [0:0] -A %(bn)s-i_%(port1)s -p tcp -m tcp --dport 22 -j RETURN -[0:0] -A %(bn)s-i_%(port1)s -m set --match-set NETIPv4security_group1 src -j \ +[0:0] -A %(bn)s-i_%(port1)s -m set --match-set NIPv4security_group1 src -j \ RETURN [0:0] -A %(bn)s-i_%(port1)s -j %(bn)s-sg-fallback [0:0] -A %(bn)s-FORWARD %(physdev_mod)s --physdev-EGRESS tap_%(port1)s \ @@ -1962,7 +1962,7 @@ RETURN [0:0] -A %(bn)s-i_%(port2)s -s 10.0.0.2/32 -p udp -m udp --sport 67 \ --dport 68 -j RETURN [0:0] -A %(bn)s-i_%(port2)s -p tcp -m tcp --dport 22 -j RETURN -[0:0] -A %(bn)s-i_%(port2)s -m set --match-set NETIPv4security_group1 src -j \ +[0:0] -A %(bn)s-i_%(port2)s -m set --match-set NIPv4security_group1 src -j \ RETURN [0:0] -A %(bn)s-i_%(port2)s -j %(bn)s-sg-fallback [0:0] -A %(bn)s-FORWARD %(physdev_mod)s --physdev-EGRESS tap_%(port2)s \ @@ -2017,7 +2017,7 @@ IPSET_FILTER_2_3 = """# Generated by iptables_manager [0:0] -A %(bn)s-i_%(port1)s -s 10.0.0.2/32 -p udp -m udp --sport 67 \ --dport 68 -j RETURN [0:0] -A %(bn)s-i_%(port1)s -p tcp -m tcp --dport 22 -j RETURN -[0:0] -A %(bn)s-i_%(port1)s -m set --match-set NETIPv4security_group1 src -j \ +[0:0] -A %(bn)s-i_%(port1)s -m set --match-set NIPv4security_group1 src -j \ RETURN 
[0:0] -A %(bn)s-i_%(port1)s -p icmp -j RETURN [0:0] -A %(bn)s-i_%(port1)s -j %(bn)s-sg-fallback @@ -2046,7 +2046,7 @@ RETURN [0:0] -A %(bn)s-i_%(port2)s -s 10.0.0.2/32 -p udp -m udp --sport 67 \ --dport 68 -j RETURN [0:0] -A %(bn)s-i_%(port2)s -p tcp -m tcp --dport 22 -j RETURN -[0:0] -A %(bn)s-i_%(port2)s -m set --match-set NETIPv4security_group1 src -j \ +[0:0] -A %(bn)s-i_%(port2)s -m set --match-set NIPv4security_group1 src -j \ RETURN [0:0] -A %(bn)s-i_%(port2)s -p icmp -j RETURN [0:0] -A %(bn)s-i_%(port2)s -j %(bn)s-sg-fallback From 5099c884c6b09ad0979a560648e8806eaa4642f7 Mon Sep 17 00:00:00 2001 From: Gary Kotton Date: Wed, 6 May 2015 22:49:17 -0700 Subject: [PATCH 028/292] VMware: update supported plugins Add all of the supported plugins to the VMware plugin.py file. The supported plugins are: - NSX MH - NSXv - Simple DVS (this does not support security groups and any layer functionality). Useful for testing of Neutron with VC. Change-Id: I9e5c779127ff44674cb60f3c19d4b5c8bdae6101 --- neutron/plugins/vmware/plugin.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/neutron/plugins/vmware/plugin.py b/neutron/plugins/vmware/plugin.py index c4efeb22001..c841d15594a 100644 --- a/neutron/plugins/vmware/plugin.py +++ b/neutron/plugins/vmware/plugin.py @@ -16,8 +16,12 @@ # from vmware_nsx.neutron.plugins.vmware.plugins import base as nsx_mh +from vmware_nsx.neutron.plugins.vmware.plugins import dvs +from vmware_nsx.neutron.plugins.vmware.plugins import nsx_v NsxMhPlugin = nsx_mh.NsxPluginV2 # The 'NsxPlugin' name will be deprecated in Liberty # and replaced by the 'NsxMhPlugin' name NsxPlugin = NsxMhPlugin +NsxVPlugin = nsx_v.NsxVPluginV2 +NsxDvsPlugin = dvs.NsxDvsV2 From 6d15bf48ee27ceab64e88f81ba6433058313759a Mon Sep 17 00:00:00 2001 From: Cedric Brandily Date: Sat, 9 May 2015 00:52:29 +0200 Subject: [PATCH 029/292] Remove from BridgeDevice homemade execute in namespace Currently BridgeDevice[1] defines homemade execute with namespace support but could use 
IPWrapper. This change replaces homemade implementation with IPWrapper use to respect DRY principle. [1] neutron.agent.linux.bridge_lib Change-Id: I12d4d40432e57ce8b6960276c41321c1efd98705 --- neutron/agent/linux/bridge_lib.py | 9 +++----- .../tests/unit/agent/linux/test_bridge_lib.py | 21 ++++++++----------- 2 files changed, 12 insertions(+), 18 deletions(-) diff --git a/neutron/agent/linux/bridge_lib.py b/neutron/agent/linux/bridge_lib.py index 5db2e5b5bc0..2bbc9f2cefa 100644 --- a/neutron/agent/linux/bridge_lib.py +++ b/neutron/agent/linux/bridge_lib.py @@ -17,16 +17,13 @@ # under the License. from neutron.agent.linux import ip_lib -from neutron.agent.linux import utils class BridgeDevice(ip_lib.IPDevice): - def _brctl(self, cmd, log_fail_as_error=True): + def _brctl(self, cmd): cmd = ['brctl'] + cmd - if self.namespace: - cmd = ['ip', 'netns', 'exec', self.namespace] + cmd - return utils.execute(cmd, run_as_root=True, - log_fail_as_error=log_fail_as_error) + ip_wrapper = ip_lib.IPWrapper(self.namespace) + return ip_wrapper.netns.execute(cmd, run_as_root=True) @classmethod def addbr(cls, name, namespace=None): diff --git a/neutron/tests/unit/agent/linux/test_bridge_lib.py b/neutron/tests/unit/agent/linux/test_bridge_lib.py index c85539b1cb3..768c276b298 100644 --- a/neutron/tests/unit/agent/linux/test_bridge_lib.py +++ b/neutron/tests/unit/agent/linux/test_bridge_lib.py @@ -18,7 +18,6 @@ import mock from neutron.agent.linux import bridge_lib -from neutron.agent.linux import utils from neutron.tests import base @@ -30,30 +29,28 @@ class BridgeLibTest(base.BaseTestCase): def setUp(self): super(BridgeLibTest, self).setUp() - self.execute = mock.patch.object( - utils, "execute", spec=utils.execute).start() + ip_wrapper = mock.patch('neutron.agent.linux.ip_lib.IPWrapper').start() + self.execute = ip_wrapper.return_value.netns.execute - def _verify_bridge_mock(self, cmd, namespace=None): - if namespace is not None: - cmd = ['ip', 'netns', 'exec', namespace] + cmd - 
self.execute.assert_called_once_with(cmd, run_as_root=True, - log_fail_as_error=True) + def _verify_bridge_mock(self, cmd): + self.execute.assert_called_once_with(cmd, run_as_root=True) self.execute.reset_mock() def _test_br(self, namespace=None): br = bridge_lib.BridgeDevice.addbr(self._BR_NAME, namespace) - self._verify_bridge_mock(['brctl', 'addbr', self._BR_NAME], namespace) + self.assertEqual(namespace, br.namespace) + self._verify_bridge_mock(['brctl', 'addbr', self._BR_NAME]) br.addif(self._IF_NAME) self._verify_bridge_mock( - ['brctl', 'addif', self._BR_NAME, self._IF_NAME], namespace) + ['brctl', 'addif', self._BR_NAME, self._IF_NAME]) br.delif(self._IF_NAME) self._verify_bridge_mock( - ['brctl', 'delif', self._BR_NAME, self._IF_NAME], namespace) + ['brctl', 'delif', self._BR_NAME, self._IF_NAME]) br.delbr() - self._verify_bridge_mock(['brctl', 'delbr', self._BR_NAME], namespace) + self._verify_bridge_mock(['brctl', 'delbr', self._BR_NAME]) def test_addbr_with_namespace(self): self._test_br(self._NAMESPACE) From 0ace88fd4a75ff213dc36fd16c1f8e7080ab7d6d Mon Sep 17 00:00:00 2001 From: Robert Li Date: Fri, 8 May 2015 11:08:45 -0400 Subject: [PATCH 030/292] Add VIF_DELETED notification event to Nova It's possible to delete a neutron port that is currently associated with an instance. When it happens, neutron should notify nova of the port deletion event so that Nova can take proper actions. Refer to I998b6bb80cc0a81d665b61b8c4a424d7219c666f for the nova patch that handles the event. 
Change-Id: Iff88cd12ae18017ef3e776821bcf3ecf3b4f052f Related-Bug: #1333365 Related-Bug: #1448148 --- neutron/notifiers/nova.py | 13 +++++++++++-- neutron/tests/unit/notifiers/test_nova.py | 15 +++++++++++++++ 2 files changed, 26 insertions(+), 2 deletions(-) diff --git a/neutron/notifiers/nova.py b/neutron/notifiers/nova.py index 4bad6dcbadd..86e4a74088c 100644 --- a/neutron/notifiers/nova.py +++ b/neutron/notifiers/nova.py @@ -35,6 +35,7 @@ LOG = logging.getLogger(__name__) VIF_UNPLUGGED = 'network-vif-unplugged' VIF_PLUGGED = 'network-vif-plugged' +VIF_DELETED = 'network-vif-deleted' NEUTRON_NOVA_EVENT_STATUS_MAP = {constants.PORT_STATUS_ACTIVE: 'completed', constants.PORT_STATUS_ERROR: 'failed', constants.PORT_STATUS_DOWN: 'completed'} @@ -121,6 +122,11 @@ class Notifier(object): return {'name': 'network-changed', 'server_uuid': device_id} + def _get_port_delete_event(self, port): + return {'server_uuid': port['device_id'], + 'name': VIF_DELETED, + 'tag': port['id']} + @property def _plugin(self): # NOTE(arosen): this cannot be set in __init__ currently since @@ -160,7 +166,7 @@ class Notifier(object): def create_port_changed_event(self, action, original_obj, returned_obj): port = None - if action == 'update_port': + if action in ['update_port', 'delete_port']: port = returned_obj['port'] elif action in ['update_floatingip', 'create_floatingip', @@ -178,7 +184,10 @@ class Notifier(object): port = self._plugin.get_port(ctx, port_id) if port and self._is_compute_port(port): - return self._get_network_changed_event(port['device_id']) + if action == 'delete_port': + return self._get_port_delete_event(port) + else: + return self._get_network_changed_event(port['device_id']) def record_port_status_changed(self, port, current_port_status, previous_port_status, initiator): diff --git a/neutron/tests/unit/notifiers/test_nova.py b/neutron/tests/unit/notifiers/test_nova.py index 49ccb975ae7..b04e2625781 100644 --- a/neutron/tests/unit/notifiers/test_nova.py +++ 
b/neutron/tests/unit/notifiers/test_nova.py @@ -290,3 +290,18 @@ class TestNovaNotify(base.BaseTestCase): self.nova_notifier.batch_notifier.pending_events[0], event_dis) self.assertEqual( self.nova_notifier.batch_notifier.pending_events[1], event_assoc) + + def test_delete_port_notify(self): + device_id = '32102d7b-1cf4-404d-b50a-97aae1f55f87' + port_id = 'bee50827-bcee-4cc8-91c1-a27b0ce54222' + returned_obj = {'port': + {'device_owner': 'compute:dfd', + 'id': port_id, + 'device_id': device_id}} + + expected_event = {'server_uuid': device_id, + 'name': nova.VIF_DELETED, + 'tag': port_id} + event = self.nova_notifier.create_port_changed_event('delete_port', + {}, returned_obj) + self.assertEqual(expected_event, event) From 2acdbf3bac7f4a967e2ef8f98b2ac14fa0f7f861 Mon Sep 17 00:00:00 2001 From: "watanabe.isao" Date: Fri, 20 Feb 2015 17:38:16 +0900 Subject: [PATCH 031/292] When disabling dhcp, delete fixed ip properly When setting enable_dhcp parameter of subnet to False, the fixed ip of dhcp port of this subnet is not been removed. Also a resync will be triggered. 
Change-Id: Iebd2c7922978bec0ef154866f24319e899e3b88e Closes-Bug: 1417379 --- neutron/agent/linux/dhcp.py | 10 +++++++--- neutron/tests/unit/agent/dhcp/test_agent.py | 9 +++++++++ 2 files changed, 16 insertions(+), 3 deletions(-) diff --git a/neutron/agent/linux/dhcp.py b/neutron/agent/linux/dhcp.py index f594b775c0f..70453d265f7 100644 --- a/neutron/agent/linux/dhcp.py +++ b/neutron/agent/linux/dhcp.py @@ -861,15 +861,19 @@ class DeviceManager(object): port_device_id = getattr(port, 'device_id', None) if port_device_id == device_id: port_fixed_ips = [] + ips_needs_removal = False for fixed_ip in port.fixed_ips: - port_fixed_ips.append({'subnet_id': fixed_ip.subnet_id, - 'ip_address': fixed_ip.ip_address}) if fixed_ip.subnet_id in dhcp_enabled_subnet_ids: + port_fixed_ips.append( + {'subnet_id': fixed_ip.subnet_id, + 'ip_address': fixed_ip.ip_address}) dhcp_enabled_subnet_ids.remove(fixed_ip.subnet_id) + else: + ips_needs_removal = True # If there are dhcp_enabled_subnet_ids here that means that # we need to add those to the port and call update. 
- if dhcp_enabled_subnet_ids: + if dhcp_enabled_subnet_ids or ips_needs_removal: port_fixed_ips.extend( [dict(subnet_id=s) for s in dhcp_enabled_subnet_ids]) dhcp_port = self.plugin.update_dhcp_port( diff --git a/neutron/tests/unit/agent/dhcp/test_agent.py b/neutron/tests/unit/agent/dhcp/test_agent.py index 24c0d10564c..69c71a32caa 100644 --- a/neutron/tests/unit/agent/dhcp/test_agent.py +++ b/neutron/tests/unit/agent/dhcp/test_agent.py @@ -1340,6 +1340,15 @@ class TestDeviceManager(base.BaseTestCase): self.assertFalse(plugin.setup_dhcp_port.called) self.assertFalse(plugin.update_dhcp_port.called) + def test_setup_dhcp_port_with_non_enable_dhcp_subnet(self): + plugin = mock.Mock() + dh = dhcp.DeviceManager(cfg.CONF, plugin) + fake_network_copy = copy.deepcopy(fake_network) + fake_network_copy.ports[0].device_id = dh.get_device_id(fake_network) + plugin.update_dhcp_port.return_value = fake_port1 + self.assertEqual(fake_subnet1.id, + dh.setup_dhcp_port(fake_network_copy).fixed_ips[0].subnet_id) + def test_destroy(self): fake_net = dhcp.NetModel( True, dict(id=FAKE_NETWORK_UUID, From dca3cdfae936ae20fcbac392cfde45c7f4d87e45 Mon Sep 17 00:00:00 2001 From: YAMAMOTO Takashi Date: Fri, 24 Apr 2015 13:49:02 +0900 Subject: [PATCH 032/292] OVS-DVR: Improve an error log about csnat port Use a single LOG.error per message rather than per lines. Also, print both of old and new subnets. 
Change-Id: I162d3d178fec8b84b66fdfd5a037c2d858c47e30 --- .../plugins/openvswitch/agent/ovs_dvr_neutron_agent.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/neutron/plugins/openvswitch/agent/ovs_dvr_neutron_agent.py b/neutron/plugins/openvswitch/agent/ovs_dvr_neutron_agent.py index 80a7d92c852..43f4ac5e7e0 100644 --- a/neutron/plugins/openvswitch/agent/ovs_dvr_neutron_agent.py +++ b/neutron/plugins/openvswitch/agent/ovs_dvr_neutron_agent.py @@ -600,9 +600,13 @@ class OVSDVRNeutronAgent(object): # dvr routed subnet ovsport = self.local_ports[port.vif_id] subs = list(ovsport.get_subnets()) - LOG.error(_LE("Centralized-SNAT port %s already seen on "), - port.vif_id) - LOG.error(_LE("a different subnet %s"), subs[0]) + LOG.error(_LE("Centralized-SNAT port %(port)s on subnet " + "%(port_subnet)s already seen on a different " + "subnet %(orig_subnet)s"), { + "port": port.vif_id, + "port_subnet": fixed_ips[0]['subnet_id'], + "orig_subnet": subs[0], + }) return # since centralized-SNAT (CSNAT) port must have only one fixed # IP, directly use fixed_ips[0] From 8dbacebf6752e7654afbf9451388b42d4d6355a9 Mon Sep 17 00:00:00 2001 From: YAMAMOTO Takashi Date: Fri, 15 May 2015 11:33:51 +0900 Subject: [PATCH 033/292] OVS-DVR: Suppress a confusing error log about csnat port Complain only when the port was seen on a different subnet. 
Change-Id: If4a310da06f9b0076a9f62926a16b574a8c109ce --- neutron/plugins/openvswitch/agent/ovs_dvr_neutron_agent.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/neutron/plugins/openvswitch/agent/ovs_dvr_neutron_agent.py b/neutron/plugins/openvswitch/agent/ovs_dvr_neutron_agent.py index 43f4ac5e7e0..db7a41cd4dc 100644 --- a/neutron/plugins/openvswitch/agent/ovs_dvr_neutron_agent.py +++ b/neutron/plugins/openvswitch/agent/ovs_dvr_neutron_agent.py @@ -600,6 +600,8 @@ class OVSDVRNeutronAgent(object): # dvr routed subnet ovsport = self.local_ports[port.vif_id] subs = list(ovsport.get_subnets()) + if subs[0] == fixed_ips[0]['subnet_id']: + return LOG.error(_LE("Centralized-SNAT port %(port)s on subnet " "%(port_subnet)s already seen on a different " "subnet %(orig_subnet)s"), { From cc1efd085524cd30d6a238a584df193133d27015 Mon Sep 17 00:00:00 2001 From: John Schwarz Date: Thu, 7 May 2015 15:22:41 +0300 Subject: [PATCH 034/292] Support for concurrent full-stack tests This patch introduces the last step of complete test separation for full-stack tests - separate rabbitmq queues - in the form of rabbitmq virtual hosts. * This patch also renames EnvironmentFixture to FullstackFixture, for clarity. 
Change-Id: I24776e3970a73fdd3271023da7967f2c7261621b Closes-bug: #1452737 --- neutron/tests/fullstack/config_fixtures.py | 7 ++-- neutron/tests/fullstack/fullstack_fixtures.py | 36 ++++++++++++++++--- neutron/tests/fullstack/test_l3_agent.py | 2 +- 3 files changed, 36 insertions(+), 9 deletions(-) diff --git a/neutron/tests/fullstack/config_fixtures.py b/neutron/tests/fullstack/config_fixtures.py index 1666bd5d421..77756878fc9 100644 --- a/neutron/tests/fullstack/config_fixtures.py +++ b/neutron/tests/fullstack/config_fixtures.py @@ -97,7 +97,7 @@ class ConfigFixture(fixtures.Fixture): class NeutronConfigFixture(ConfigFixture): - def __init__(self, temp_dir, connection): + def __init__(self, temp_dir, connection, rabbitmq_environment): super(NeutronConfigFixture, self).__init__( temp_dir, base_filename='neutron.conf') @@ -112,9 +112,10 @@ class NeutronConfigFixture(ConfigFixture): 'core_plugin': 'neutron.plugins.ml2.plugin.Ml2Plugin', 'service_plugins': ('neutron.services.l3_router.' 'l3_router_plugin.L3RouterPlugin'), - 'rabbit_userid': 'stackrabbit', - 'rabbit_password': '127.0.0.1', + 'rabbit_userid': rabbitmq_environment.user, + 'rabbit_password': rabbitmq_environment.password, 'rabbit_hosts': '127.0.0.1', + 'rabbit_virtual_host': rabbitmq_environment.vhost, 'auth_strategy': 'noauth', 'verbose': 'True', 'debug': 'True', diff --git a/neutron/tests/fullstack/fullstack_fixtures.py b/neutron/tests/fullstack/fullstack_fixtures.py index 526cffaccec..c06d450cb3d 100644 --- a/neutron/tests/fullstack/fullstack_fixtures.py +++ b/neutron/tests/fullstack/fullstack_fixtures.py @@ -64,15 +64,39 @@ class ProcessFixture(fixtures.Fixture): super(ProcessFixture, self).cleanUp(*args, **kwargs) -class EnvironmentFixture(fixtures.Fixture): +class RabbitmqEnvironmentFixture(fixtures.Fixture): + def setUp(self): + super(RabbitmqEnvironmentFixture, self).setUp() + + self.user = base.get_rand_name(prefix='user') + self.password = base.get_rand_name(prefix='pass') + self.vhost = 
base.get_rand_name(prefix='vhost') + + self._execute('add_user', self.user, self.password) + self.addCleanup(self._execute, 'delete_user', self.user) + + self._execute('add_vhost', self.vhost) + self.addCleanup(self._execute, 'delete_vhost', self.vhost) + + self._execute('set_permissions', '-p', self.vhost, self.user, + '.*', '.*', '.*') + + def _execute(self, *args): + cmd = ['rabbitmqctl'] + cmd.extend(args) + utils.execute(cmd, run_as_root=True) + + +class FullstackFixture(fixtures.Fixture): def setUp(self): - super(EnvironmentFixture, self).setUp() + super(FullstackFixture, self).setUp() self.temp_dir = self.useFixture(fixtures.TempDir()).path + rabbitmq_environment = self.useFixture(RabbitmqEnvironmentFixture()) self.neutron_server = self.useFixture( - NeutronServerFixture(self.temp_dir)) + NeutronServerFixture(self.temp_dir, rabbitmq_environment)) def wait_until_env_is_up(self, agents_count=0): utils.wait_until_true( @@ -92,14 +116,16 @@ class NeutronServerFixture(fixtures.Fixture): NEUTRON_SERVER = "neutron-server" - def __init__(self, temp_dir): + def __init__(self, temp_dir, rabbitmq_environment): self.temp_dir = temp_dir + self.rabbitmq_environment = rabbitmq_environment def setUp(self): super(NeutronServerFixture, self).setUp() self.neutron_cfg_fixture = config_fixtures.NeutronConfigFixture( - self.temp_dir, cfg.CONF.database.connection) + self.temp_dir, cfg.CONF.database.connection, + self.rabbitmq_environment) self.plugin_cfg_fixture = config_fixtures.ML2ConfigFixture( self.temp_dir) diff --git a/neutron/tests/fullstack/test_l3_agent.py b/neutron/tests/fullstack/test_l3_agent.py index 29ae03f67fd..9b5e2476552 100644 --- a/neutron/tests/fullstack/test_l3_agent.py +++ b/neutron/tests/fullstack/test_l3_agent.py @@ -21,7 +21,7 @@ from neutron.tests.fullstack import base from neutron.tests.fullstack import fullstack_fixtures as f_fixtures -class SingleNodeEnvironment(f_fixtures.EnvironmentFixture): +class SingleNodeEnvironment(f_fixtures.FullstackFixture): 
def setUp(self): super(SingleNodeEnvironment, self).setUp() From b91c4e3f874407de07feef338aa2f9a0faafdadd Mon Sep 17 00:00:00 2001 From: Cedric Brandily Date: Fri, 15 May 2015 18:10:09 +0200 Subject: [PATCH 035/292] Use namespace names in NetcatTester Currently NetcatTester requests namespace IPWrapper instances as client/server_namespace arguments but functional tests commonly use namespace names as arguments not IPWrapper instances (because IPWrapper is cheap to instantiate) and NetcatTester needs only namespace names. This change requests names as NetcatTester client/server_namespace arguments in order to simplify NetcatTester and its use. Change-Id: Ic4a297efdeaef00e70892d1c871ce1c9174055b8 --- neutron/tests/functional/agent/linux/helpers.py | 4 ++-- neutron/tests/functional/agent/linux/test_iptables.py | 4 +--- neutron/tests/functional/agent/test_l3_agent.py | 5 ++--- 3 files changed, 5 insertions(+), 8 deletions(-) diff --git a/neutron/tests/functional/agent/linux/helpers.py b/neutron/tests/functional/agent/linux/helpers.py index 6e003b515a0..ed4d1b0eade 100644 --- a/neutron/tests/functional/agent/linux/helpers.py +++ b/neutron/tests/functional/agent/linux/helpers.py @@ -171,7 +171,7 @@ class NetcatTester(object): if not self._server_process: self._spawn_server_process() self._client_process = self._spawn_nc_in_namespace( - self.client_namespace.namespace, + self.client_namespace, address=self.client_address) return self._client_process @@ -183,7 +183,7 @@ class NetcatTester(object): def _spawn_server_process(self): self._server_process = self._spawn_nc_in_namespace( - self.server_namespace.namespace, + self.server_namespace, address=self.server_address, listen=True) diff --git a/neutron/tests/functional/agent/linux/test_iptables.py b/neutron/tests/functional/agent/linux/test_iptables.py index 43a8dc7b4f4..7b2b0a722d4 100644 --- a/neutron/tests/functional/agent/linux/test_iptables.py +++ b/neutron/tests/functional/agent/linux/test_iptables.py @@ -16,7 +16,6 
@@ import os.path import testtools -from neutron.agent.linux import ip_lib from neutron.agent.linux import iptables_manager from neutron.agent.linux import utils from neutron.tests import base @@ -80,8 +79,7 @@ class IptablesManagerTestCase(functional_base.BaseSudoTestCase): def _test_with_nc(self, fw_manager, direction, port, udp): netcat = helpers.NetcatTester( - ip_lib.IPWrapper(self.client.namespace), - ip_lib.IPWrapper(self.server.namespace), + self.client.namespace, self.server.namespace, self.server.ip, self.port, run_as_root=True, udp=udp) self.addCleanup(netcat.stop_processes) protocol = 'tcp' diff --git a/neutron/tests/functional/agent/test_l3_agent.py b/neutron/tests/functional/agent/test_l3_agent.py index cda969ced4f..98e5d661e11 100644 --- a/neutron/tests/functional/agent/test_l3_agent.py +++ b/neutron/tests/functional/agent/test_l3_agent.py @@ -409,7 +409,7 @@ class L3AgentTestCase(L3AgentTestFramework): router.process(self.agent) router_ns = ip_lib.IPWrapper(namespace=router.ns_name) - netcat = helpers.NetcatTester(router_ns, router_ns, + netcat = helpers.NetcatTester(router.ns_name, router.ns_name, server_address, port, client_address=client_address, run_as_root=True, @@ -708,8 +708,7 @@ class L3AgentTestCase(L3AgentTestFramework): protocol_port = helpers.get_free_namespace_port(dst_machine.namespace) # client sends to fip netcat = helpers.NetcatTester( - ip_lib.IPWrapper(src_machine.namespace), - ip_lib.IPWrapper(dst_machine.namespace), + src_machine.namespace, dst_machine.namespace, dst_machine.ip, protocol_port, client_address=dst_fip, run_as_root=True, udp=False) self.addCleanup(netcat.stop_processes) From 676db821ebaf3cce5ce89f4d5d55fcbd772c079b Mon Sep 17 00:00:00 2001 From: Darragh O'Reilly Date: Mon, 18 May 2015 20:49:05 +0000 Subject: [PATCH 036/292] ovs-agent: prevent ARP requests with faked IP addresses This patch extends the existing ARP protection to ensure that ARP requests also have valid IP addresses. 
Closes-Bug: 1456333 Change-Id: I0b2ba21611c9fd9e304bce8cfb00259db1dceaa2 --- .../openvswitch/agent/ovs_neutron_agent.py | 17 +++++++---------- .../tests/functional/agent/test_ovs_flows.py | 17 +++++++++++++++++ .../openvswitch/agent/test_ovs_neutron_agent.py | 9 +++------ 3 files changed, 27 insertions(+), 16 deletions(-) diff --git a/neutron/plugins/openvswitch/agent/ovs_neutron_agent.py b/neutron/plugins/openvswitch/agent/ovs_neutron_agent.py index 182a8fd8c59..0ffc5bba4c2 100644 --- a/neutron/plugins/openvswitch/agent/ovs_neutron_agent.py +++ b/neutron/plugins/openvswitch/agent/ovs_neutron_agent.py @@ -783,20 +783,18 @@ class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin, addresses += [p['ip_address'] for p in port_details['allowed_address_pairs']] - # allow ARP replies as long as they match addresses that actually + # allow ARPs as long as they match addresses that actually # belong to the port. for ip in addresses: if netaddr.IPNetwork(ip).version != 4: continue - bridge.add_flow( - table=constants.ARP_SPOOF_TABLE, priority=2, - proto='arp', arp_op=constants.ARP_REPLY, arp_spa=ip, - in_port=vif.ofport, actions="NORMAL") + bridge.add_flow(table=constants.ARP_SPOOF_TABLE, priority=2, + proto='arp', arp_spa=ip, in_port=vif.ofport, + actions="NORMAL") - # drop any ARP replies in this table that aren't explicitly allowed - bridge.add_flow( - table=constants.ARP_SPOOF_TABLE, priority=1, proto='arp', - arp_op=constants.ARP_REPLY, actions="DROP") + # drop any ARPs in this table that aren't explicitly allowed + bridge.add_flow(table=constants.ARP_SPOOF_TABLE, priority=1, + proto='arp', actions="DROP") # Now that the rules are ready, direct ARP traffic from the port into # the anti-spoof table. @@ -804,7 +802,6 @@ class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin, # on ARP headers will just process traffic normally. 
bridge.add_flow(table=constants.LOCAL_SWITCHING, priority=10, proto='arp', in_port=vif.ofport, - arp_op=constants.ARP_REPLY, actions=("resubmit(,%s)" % constants.ARP_SPOOF_TABLE)) def port_unbound(self, vif_id, net_uuid=None): diff --git a/neutron/tests/functional/agent/test_ovs_flows.py b/neutron/tests/functional/agent/test_ovs_flows.py index 410cfe186a8..73719d1c448 100644 --- a/neutron/tests/functional/agent/test_ovs_flows.py +++ b/neutron/tests/functional/agent/test_ovs_flows.py @@ -13,12 +13,14 @@ # License for the specific language governing permissions and limitations # under the License. +from neutron.agent.linux import ip_lib from neutron.cmd.sanity import checks from neutron.plugins.openvswitch.agent import ovs_neutron_agent as ovsagt from neutron.tests.common import machine_fixtures from neutron.tests.common import net_helpers from neutron.tests.functional.agent import test_ovs_lib from neutron.tests.functional import base +from neutron.tests import tools class ARPSpoofTestCase(test_ovs_lib.OVSBridgeTestBase, @@ -73,6 +75,21 @@ class ARPSpoofTestCase(test_ovs_lib.OVSBridgeTestBase, self.dst_p.addr.add('%s/24' % self.dst_addr) self.pinger.assert_no_ping(self.dst_addr) + def test_arp_spoof_blocks_request(self): + # this will prevent the source from sending an ARP + # request with its own address + self._setup_arp_spoof_for_port(self.src_p.name, ['192.168.0.3']) + self.src_p.addr.add('%s/24' % self.src_addr) + self.dst_p.addr.add('%s/24' % self.dst_addr) + ns_ip_wrapper = ip_lib.IPWrapper(self.src_namespace) + try: + ns_ip_wrapper.netns.execute(['arping', '-I', self.src_p.name, + '-c1', self.dst_addr]) + tools.fail("arping should have failed. 
The arp request should " + "have been blocked.") + except RuntimeError: + pass + def test_arp_spoof_allowed_address_pairs(self): self._setup_arp_spoof_for_port(self.dst_p.name, ['192.168.0.3', self.dst_addr]) diff --git a/neutron/tests/unit/plugins/openvswitch/agent/test_ovs_neutron_agent.py b/neutron/tests/unit/plugins/openvswitch/agent/test_ovs_neutron_agent.py index 6e585c82c20..c21d7da93d9 100644 --- a/neutron/tests/unit/plugins/openvswitch/agent/test_ovs_neutron_agent.py +++ b/neutron/tests/unit/plugins/openvswitch/agent/test_ovs_neutron_agent.py @@ -1143,13 +1143,11 @@ class TestOvsNeutronAgent(base.BaseTestCase): # make sure redirect into spoof table is installed int_br.add_flow.assert_any_call( table=constants.LOCAL_SWITCHING, in_port=vif.ofport, - arp_op=constants.ARP_REPLY, proto='arp', actions=mock.ANY, - priority=10) + proto='arp', actions=mock.ANY, priority=10) # make sure drop rule for replies is installed int_br.add_flow.assert_any_call( table=constants.ARP_SPOOF_TABLE, - proto='arp', arp_op=constants.ARP_REPLY, actions='DROP', - priority=mock.ANY) + proto='arp', actions='DROP', priority=mock.ANY) def test_arp_spoofing_fixed_and_allowed_addresses(self): vif = FakeVif() @@ -1167,8 +1165,7 @@ class TestOvsNeutronAgent(base.BaseTestCase): '192.168.44.103/32'): int_br.add_flow.assert_any_call( table=constants.ARP_SPOOF_TABLE, in_port=vif.ofport, - proto='arp', arp_op=constants.ARP_REPLY, actions='NORMAL', - arp_spa=addr, priority=mock.ANY) + proto='arp', actions='NORMAL', arp_spa=addr, priority=mock.ANY) def test__get_ofport_moves(self): previous = {'port1': 1, 'port2': 2} From b892df11264de5737e1247862b093ede5d2f5954 Mon Sep 17 00:00:00 2001 From: Salvatore Orlando Date: Fri, 16 Jan 2015 10:00:42 -0800 Subject: [PATCH 037/292] IPAM reference driver An alternate pluggable IPAM implementation from the built-in one in db_base_plugin_v2. 
Modifies IPAM interface to allow passing context to driver and introduces new interface method 'associate_neutron_subnet'. Implements blueprint reference-ipam-driver Change-Id: I2e1e9fc7994bf1157bcd34b7ea500eb30c61d9ab --- neutron/common/ipv6_utils.py | 8 + neutron/db/db_base_plugin_v2.py | 32 +- .../versions/599c6a226151_neutrodb_ipam.py | 72 +++ .../alembic_migrations/versions/HEAD | 2 +- neutron/db/migration/models/head.py | 1 + neutron/ipam/__init__.py | 66 ++- neutron/ipam/driver.py | 30 +- neutron/ipam/drivers/__init__.py | 0 .../ipam/drivers/neutrondb_ipam/__init__.py | 0 neutron/ipam/drivers/neutrondb_ipam/db_api.py | 218 +++++++++ .../ipam/drivers/neutrondb_ipam/db_models.py | 111 +++++ neutron/ipam/drivers/neutrondb_ipam/driver.py | 438 +++++++++++++++++ neutron/ipam/exceptions.py | 62 +++ neutron/ipam/subnet_alloc.py | 77 +-- neutron/ipam/utils.py | 48 ++ neutron/tests/unit/common/test_ipv6_utils.py | 26 ++ neutron/tests/unit/ipam/__init__.py | 0 neutron/tests/unit/ipam/drivers/__init__.py | 0 .../ipam/drivers/neutrondb_ipam/__init__.py | 0 .../drivers/neutrondb_ipam/test_db_api.py | 170 +++++++ .../drivers/neutrondb_ipam/test_driver.py | 442 ++++++++++++++++++ neutron/tests/unit/ipam/test_subnet_alloc.py | 38 +- neutron/tests/unit/test_ipam.py | 40 +- 23 files changed, 1781 insertions(+), 100 deletions(-) create mode 100644 neutron/db/migration/alembic_migrations/versions/599c6a226151_neutrodb_ipam.py create mode 100644 neutron/ipam/drivers/__init__.py create mode 100644 neutron/ipam/drivers/neutrondb_ipam/__init__.py create mode 100644 neutron/ipam/drivers/neutrondb_ipam/db_api.py create mode 100644 neutron/ipam/drivers/neutrondb_ipam/db_models.py create mode 100644 neutron/ipam/drivers/neutrondb_ipam/driver.py create mode 100644 neutron/ipam/exceptions.py create mode 100644 neutron/ipam/utils.py create mode 100644 neutron/tests/unit/ipam/__init__.py create mode 100644 neutron/tests/unit/ipam/drivers/__init__.py create mode 100644 
neutron/tests/unit/ipam/drivers/neutrondb_ipam/__init__.py create mode 100644 neutron/tests/unit/ipam/drivers/neutrondb_ipam/test_db_api.py create mode 100644 neutron/tests/unit/ipam/drivers/neutrondb_ipam/test_driver.py diff --git a/neutron/common/ipv6_utils.py b/neutron/common/ipv6_utils.py index ad531baf387..96d0153f161 100644 --- a/neutron/common/ipv6_utils.py +++ b/neutron/common/ipv6_utils.py @@ -69,3 +69,11 @@ def is_auto_address_subnet(subnet): modes = [constants.IPV6_SLAAC, constants.DHCPV6_STATELESS] return (subnet['ipv6_address_mode'] in modes or subnet['ipv6_ra_mode'] in modes) + + +def is_eui64_address(ip_address): + """Check if ip address is EUI64.""" + ip = netaddr.IPAddress(ip_address) + # '0xfffe' addition is used to build EUI-64 from MAC (RFC4291) + # Look for it in the middle of the EUI-64 part of address + return ip.version == 6 and not ((ip & 0xffff000000) ^ 0xfffe000000) diff --git a/neutron/db/db_base_plugin_v2.py b/neutron/db/db_base_plugin_v2.py index 74dabca7ffa..e577a8fec12 100644 --- a/neutron/db/db_base_plugin_v2.py +++ b/neutron/db/db_base_plugin_v2.py @@ -38,6 +38,7 @@ from neutron.extensions import l3 from neutron.i18n import _LE, _LI from neutron import ipam from neutron.ipam import subnet_alloc +from neutron.ipam import utils as ipam_utils from neutron import manager from neutron import neutron_plugin_base_v2 from neutron.openstack.common import uuidutils @@ -332,22 +333,9 @@ class NeutronDbPluginV2(neutron_plugin_base_v2.NeutronPluginBaseV2, """Validate that the gateway is on the subnet.""" ip = netaddr.IPAddress(gateway) if ip.version == 4 or (ip.version == 6 and not ip.is_link_local()): - return cls._check_subnet_ip(cidr, gateway) + return ipam_utils.check_subnet_ip(cidr, gateway) return True - @classmethod - def _check_subnet_ip(cls, cidr, ip_address): - """Validate that the IP address is on the subnet.""" - ip = netaddr.IPAddress(ip_address) - net = netaddr.IPNetwork(cidr) - # Check that the IP is valid on subnet. 
This cannot be the - # network or the broadcast address - if (ip != net.network and - ip != net.broadcast and - net.netmask & ip == net.network): - return True - return False - @staticmethod def _check_ip_in_allocation_pool(context, subnet_id, gateway_ip, ip_address): @@ -395,8 +383,8 @@ class NeutronDbPluginV2(neutron_plugin_base_v2.NeutronPluginBaseV2, filter = {'network_id': [network_id]} subnets = self.get_subnets(context, filters=filter) for subnet in subnets: - if self._check_subnet_ip(subnet['cidr'], - fixed['ip_address']): + if ipam_utils.check_subnet_ip(subnet['cidr'], + fixed['ip_address']): found = True subnet_id = subnet['id'] break @@ -425,8 +413,8 @@ class NeutronDbPluginV2(neutron_plugin_base_v2.NeutronPluginBaseV2, # Ensure that the IP is valid on the subnet if (not found and - not self._check_subnet_ip(subnet['cidr'], - fixed['ip_address'])): + not ipam_utils.check_subnet_ip(subnet['cidr'], + fixed['ip_address'])): raise n_exc.InvalidIpForSubnet( ip_address=fixed['ip_address']) if (is_auto_addr_subnet and @@ -1228,10 +1216,10 @@ class NeutronDbPluginV2(neutron_plugin_base_v2.NeutronPluginBaseV2, 'name': subnet['name'], 'network_id': subnet['network_id'], 'ip_version': subnet['ip_version'], - 'cidr': str(detail.subnet.cidr), + 'cidr': str(detail.subnet_cidr), 'subnetpool_id': subnetpool_id, 'enable_dhcp': subnet['enable_dhcp'], - 'gateway_ip': self._gateway_ip_str(subnet, detail.subnet), + 'gateway_ip': self._gateway_ip_str(subnet, detail.subnet_cidr), 'shared': shared} if subnet['ip_version'] == 6 and subnet['enable_dhcp']: if attributes.is_attr_set(subnet['ipv6_ra_mode']): @@ -1290,10 +1278,10 @@ class NeutronDbPluginV2(neutron_plugin_base_v2.NeutronPluginBaseV2, raise n_exc.BadRequest(resource='subnets', msg=reason) network = self._get_network(context, s["network_id"]) - allocator = subnet_alloc.SubnetAllocator(subnetpool) + allocator = subnet_alloc.SubnetAllocator(subnetpool, context) req = self._make_subnet_request(tenant_id, s, subnetpool) - 
ipam_subnet = allocator.allocate_subnet(context.session, req) + ipam_subnet = allocator.allocate_subnet(req) detail = ipam_subnet.get_details() subnet = self._save_subnet(context, network, diff --git a/neutron/db/migration/alembic_migrations/versions/599c6a226151_neutrodb_ipam.py b/neutron/db/migration/alembic_migrations/versions/599c6a226151_neutrodb_ipam.py new file mode 100644 index 00000000000..cea591d8298 --- /dev/null +++ b/neutron/db/migration/alembic_migrations/versions/599c6a226151_neutrodb_ipam.py @@ -0,0 +1,72 @@ +# Copyright 2015 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""neutrodb_ipam + +Revision ID: 599c6a226151 +Revises: 354db87e3225 +Create Date: 2015-03-08 18:12:08.962378 + +""" + +# revision identifiers, used by Alembic. 
+revision = '599c6a226151' +down_revision = '354db87e3225' + +from alembic import op +import sqlalchemy as sa + + +def upgrade(): + op.create_table( + 'ipamsubnets', + sa.Column('id', sa.String(length=36), nullable=False), + sa.Column('neutron_subnet_id', sa.String(length=36), nullable=True), + sa.PrimaryKeyConstraint('id')) + + op.create_table( + 'ipamallocations', + sa.Column('ip_address', sa.String(length=64), nullable=False), + sa.Column('status', sa.String(length=36), nullable=True), + sa.Column('ipam_subnet_id', sa.String(length=36), nullable=False), + sa.ForeignKeyConstraint(['ipam_subnet_id'], + ['ipamsubnets.id'], + ondelete='CASCADE'), + sa.PrimaryKeyConstraint('ip_address', 'ipam_subnet_id')) + + op.create_table( + 'ipamallocationpools', + sa.Column('id', sa.String(length=36), nullable=False), + sa.Column('ipam_subnet_id', sa.String(length=36), nullable=False), + sa.Column('first_ip', sa.String(length=64), nullable=False), + sa.Column('last_ip', sa.String(length=64), nullable=False), + sa.ForeignKeyConstraint(['ipam_subnet_id'], + ['ipamsubnets.id'], + ondelete='CASCADE'), + sa.PrimaryKeyConstraint('id')) + + op.create_table( + 'ipamavailabilityranges', + sa.Column('allocation_pool_id', sa.String(length=36), nullable=False), + sa.Column('first_ip', sa.String(length=64), nullable=False), + sa.Column('last_ip', sa.String(length=64), nullable=False), + sa.ForeignKeyConstraint(['allocation_pool_id'], + ['ipamallocationpools.id'], + ondelete='CASCADE'), + sa.PrimaryKeyConstraint('allocation_pool_id', 'first_ip', 'last_ip'), + sa.Index('ix_ipamavailabilityranges_first_ip_allocation_pool_id', + 'first_ip', 'allocation_pool_id'), + sa.Index('ix_ipamavailabilityranges_last_ip_allocation_pool_id', + 'last_ip', 'allocation_pool_id')) diff --git a/neutron/db/migration/alembic_migrations/versions/HEAD b/neutron/db/migration/alembic_migrations/versions/HEAD index 821a57093ce..054926f3afd 100644 --- a/neutron/db/migration/alembic_migrations/versions/HEAD +++ 
b/neutron/db/migration/alembic_migrations/versions/HEAD @@ -1 +1 @@ -354db87e3225 +599c6a226151 diff --git a/neutron/db/migration/models/head.py b/neutron/db/migration/models/head.py index 56de64239a7..a2649a12237 100644 --- a/neutron/db/migration/models/head.py +++ b/neutron/db/migration/models/head.py @@ -42,6 +42,7 @@ from neutron.db import portsecurity_db # noqa from neutron.db import quota_db # noqa from neutron.db import securitygroups_db # noqa from neutron.db import servicetype_db # noqa +from neutron.ipam.drivers.neutrondb_ipam import db_models # noqa from neutron.plugins.bigswitch.db import consistency_db # noqa from neutron.plugins.bigswitch import routerrule_db # noqa from neutron.plugins.brocade.db import models as brocade_models # noqa diff --git a/neutron/ipam/__init__.py b/neutron/ipam/__init__.py index 4f7d216ccc7..4a8e6d1c3c2 100644 --- a/neutron/ipam/__init__.py +++ b/neutron/ipam/__init__.py @@ -13,9 +13,12 @@ import abc import netaddr +from oslo_config import cfg import six from neutron.common import constants +from neutron.common import ipv6_utils +from neutron.ipam import exceptions as ipam_exc @six.add_metaclass(abc.ABCMeta) @@ -37,8 +40,8 @@ class SubnetRequest(object): :param tenant_id: The tenant id who will own the subnet :type tenant_id: str uuid - :param subnet_id: Neutron's subnet id - :type subnet_id: str uuid + :param subnet_id: Neutron's subnet ID + :type subnet_id: str uuid + :param gateway_ip: An IP to reserve for the subnet gateway. 
:type gateway_ip: None or convertible to netaddr.IPAddress :param allocation_pools: The pool from which IPAM should allocate @@ -96,16 +99,19 @@ class SubnetRequest(object): def allocation_pools(self): return self._allocation_pools - def _validate_with_subnet(self, subnet): - if self.gateway_ip: - if self.gateway_ip not in subnet: - raise ValueError("gateway_ip is not in the subnet") + def _validate_with_subnet(self, subnet_cidr): + if self.gateway_ip and cfg.CONF.force_gateway_on_subnet: + gw_ip = netaddr.IPAddress(self.gateway_ip) + if (gw_ip.version == 4 or (gw_ip.version == 6 + and not gw_ip.is_link_local())): + if self.gateway_ip not in subnet_cidr: + raise ValueError("gateway_ip is not in the subnet") if self.allocation_pools: - if subnet.version != self.allocation_pools[0].version: + if subnet_cidr.version != self.allocation_pools[0].version: raise ValueError("allocation_pools use the wrong ip version") for pool in self.allocation_pools: - if pool not in subnet: + if pool not in subnet_cidr: raise ValueError("allocation_pools are not in the subnet") @@ -151,7 +157,7 @@ class SpecificSubnetRequest(SubnetRequest): allocation, even overlapping ones. This can be expanded on by future blueprints. """ - def __init__(self, tenant_id, subnet_id, subnet, + def __init__(self, tenant_id, subnet_id, subnet_cidr, gateway_ip=None, allocation_pools=None): """ :param subnet: The subnet requested. Can be IPv4 or IPv6. 
However, @@ -165,16 +171,16 @@ class SpecificSubnetRequest(SubnetRequest): gateway_ip=gateway_ip, allocation_pools=allocation_pools) - self._subnet = netaddr.IPNetwork(subnet) - self._validate_with_subnet(self._subnet) + self._subnet_cidr = netaddr.IPNetwork(subnet_cidr) + self._validate_with_subnet(self._subnet_cidr) @property - def subnet(self): - return self._subnet + def subnet_cidr(self): + return self._subnet_cidr @property def prefixlen(self): - return self._subnet.prefixlen + return self._subnet_cidr.prefixlen @six.add_metaclass(abc.ABCMeta) @@ -201,5 +207,37 @@ class AnyAddressRequest(AddressRequest): """Used to request any available address from the pool.""" +class AutomaticAddressRequest(SpecificAddressRequest): + """Used to create auto generated addresses, such as EUI64""" + EUI64 = 'eui64' + + def _generate_eui64_address(self, **kwargs): + if set(kwargs) != set(['prefix', 'mac']): + raise ipam_exc.AddressCalculationFailure( + address_type='eui-64', + reason='must provide exactly 2 arguments - cidr and MAC') + prefix = kwargs['prefix'] + mac_address = kwargs['mac'] + return ipv6_utils.get_ipv6_addr_by_EUI64(prefix, mac_address) + + _address_generators = {EUI64: _generate_eui64_address} + + def __init__(self, address_type=EUI64, **kwargs): + """ + This constructor builds an automatic IP address. Parameter needed for + generating it can be passed as optional keyword arguments. + + :param address_type: the type of address to generate. + It could be a eui-64 address, a random IPv6 address, or + a ipv4 link-local address. + For the Kilo release only eui-64 addresses will be supported. 
+ """ + address_generator = self._address_generators.get(address_type) + if not address_generator: + raise ipam_exc.InvalidAddressType(address_type=address_type) + address = address_generator(self, **kwargs) + super(AutomaticAddressRequest, self).__init__(address) + + class RouterGatewayAddressRequest(AddressRequest): """Used to request allocating the special router gateway address.""" diff --git a/neutron/ipam/driver.py b/neutron/ipam/driver.py index 6968d313395..ed40b5eee8d 100644 --- a/neutron/ipam/driver.py +++ b/neutron/ipam/driver.py @@ -14,6 +14,10 @@ import abc import six +from oslo_log import log + +LOG = log.getLogger(__name__) + @six.add_metaclass(abc.ABCMeta) class Pool(object): @@ -22,20 +26,21 @@ class Pool(object): There should be an instance of the driver for every subnet pool. """ - def __init__(self, subnet_pool_id): + def __init__(self, subnetpool, context): """Initialize pool - :param subnet_pool_id: SubnetPool ID of the address space to use. - :type subnet_pool_id: str uuid + :param subnetpool: SubnetPool of the address space to use. + :type subnetpool: dict """ - self._subnet_pool_id = subnet_pool_id + self._subnetpool = subnetpool + self._context = context @classmethod - def get_instance(cls, subnet_pool_id): + def get_instance(cls, subnet_pool, context): """Returns an instance of the configured IPAM driver - :param subnet_pool_id: Subnet pool ID of the address space to use. - :type subnet_pool_id: str uuid + :param subnet_pool: Subnet pool of the address space to use. + :type subnet_pool: dict :returns: An instance of Driver for the given subnet pool """ raise NotImplementedError @@ -121,3 +126,14 @@ class Subnet(object): :returns: An instance of SpecificSubnetRequest with the subnet detail. """ + + @abc.abstractmethod + def associate_neutron_subnet(self, subnet_id): + """Associate the IPAM subnet with a neutron subnet. + + This operation should be performed to attach a neutron subnet to the + current subnet instance. 
In some cases IPAM subnets may be created + independently of neutron subnets and associated at a later stage. + + :param subnet_id: neutron subnet identifier. + """ diff --git a/neutron/ipam/drivers/__init__.py b/neutron/ipam/drivers/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/neutron/ipam/drivers/neutrondb_ipam/__init__.py b/neutron/ipam/drivers/neutrondb_ipam/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/neutron/ipam/drivers/neutrondb_ipam/db_api.py b/neutron/ipam/drivers/neutrondb_ipam/db_api.py new file mode 100644 index 00000000000..0f8cf1f57c0 --- /dev/null +++ b/neutron/ipam/drivers/neutrondb_ipam/db_api.py @@ -0,0 +1,218 @@ +# Copyright 2015 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from oslo_log import log + +from neutron.ipam.drivers.neutrondb_ipam import db_models +from neutron.openstack.common import uuidutils + +LOG = log.getLogger(__name__) +# Database operations for Neutron's DB-backed IPAM driver + + +class IpamSubnetManager(object): + + @classmethod + def load_by_neutron_subnet_id(cls, session, neutron_subnet_id): + return session.query(db_models.IpamSubnet).filter_by( + neutron_subnet_id=neutron_subnet_id).first() + + def __init__(self, ipam_subnet_id, neutron_subnet_id): + self._ipam_subnet_id = ipam_subnet_id + self._neutron_subnet_id = neutron_subnet_id + + @property + def neutron_id(self): + return self._neutron_subnet_id + + def create(self, session): + """Create database models for an IPAM subnet. + + This method creates a subnet resource for the IPAM driver and + associates it with its neutron identifier, if specified. + + :param session: database session. + :returns: the identifier of created IPAM subnet + """ + if not self._ipam_subnet_id: + self._ipam_subnet_id = uuidutils.generate_uuid() + ipam_subnet = db_models.IpamSubnet( + id=self._ipam_subnet_id, + neutron_subnet_id=self._neutron_subnet_id) + session.add(ipam_subnet) + return self._ipam_subnet_id + + def associate_neutron_id(self, session, neutron_subnet_id): + session.query(db_models.IpamSubnet).filter_by( + id=self._ipam_subnet_id).update( + {'neutron_subnet_id': neutron_subnet_id}) + self._neutron_subnet_id = neutron_subnet_id + + def create_pool(self, session, pool_start, pool_end): + """Create an allocation pool and availability ranges for the subnet. + + This method does not perform any validation on parameters; it simply + persists data on the database. + + :param pool_start: string expressing the start of the pool + :param pool_end: string expressing the end of the pool + :return: the newly created pool object. 
+ """ + ip_pool = db_models.IpamAllocationPool( + ipam_subnet_id=self._ipam_subnet_id, + first_ip=pool_start, + last_ip=pool_end) + session.add(ip_pool) + ip_range = db_models.IpamAvailabilityRange( + allocation_pool=ip_pool, + first_ip=pool_start, + last_ip=pool_end) + session.add(ip_range) + return ip_pool + + def delete_allocation_pools(self, session): + """Remove all allocation pools for the current subnet. + + :param session: database session + """ + session.query(db_models.IpamAllocationPool).filter_by( + ipam_subnet_id=self._ipam_subnet_id).delete() + + def list_pools(self, session): + """Return pools for the current subnet.""" + return session.query( + db_models.IpamAllocationPool).filter_by( + ipam_subnet_id=self._ipam_subnet_id) + + def _range_query(self, session, locking): + range_qry = session.query( + db_models.IpamAvailabilityRange).join( + db_models.IpamAllocationPool).filter_by( + ipam_subnet_id=self._ipam_subnet_id) + if locking: + range_qry = range_qry.with_lockmode('update') + return range_qry + + def get_first_range(self, session, locking=False): + """Return the first availability range for the subnet + + :param session: database session + :param locking: specifies whether a write-intent lock should be + performed on the database operation + :return: first available range as instance of + neutron.ipam.drivers.neutrondb_ipam.db_models.IpamAvailabilityRange + """ + return self._range_query(session, locking).first() + + def list_ranges_by_subnet_id(self, session, locking=False): + """Return availability ranges for a given ipam subnet + + :param session: database session + :param locking: specifies whether a write-intent lock should be + acquired with this database operation. 
+ :return: list of availability ranges as instances of + neutron.ipam.drivers.neutrondb_ipam.db_models.IpamAvailabilityRange + """ + return self._range_query(session, locking) + + def list_ranges_by_allocation_pool(self, session, allocation_pool_id, + locking=False): + """Return availability ranges for a given pool. + + :param session: database session + :param allocation_pool_id: allocation pool identifier + :param locking: specifies whether a write-intent lock should be + acquired with this database operation. + :return: list of availability ranges as instances of + neutron.ipam.drivers.neutrondb_ipam.db_models.IpamAvailabilityRange + """ + return session.query( + db_models.IpamAvailabilityRange).join( + db_models.IpamAllocationPool).filter_by( + id=allocation_pool_id) + + def create_range(self, session, allocation_pool_id, + range_start, range_end): + """Create an availability range for a given pool. + + This method does not perform any validation on parameters; it simply + persists data on the database. + + :param session: database session + :param allocation_pool_id: allocation pool identifier + :param range_start: first ip address in the range + :param range_end: last ip address in the range + :return: the newly created availability range as an instance of + neutron.ipam.drivers.neutrondb_ipam.db_models.IpamAvailabilityRange + """ + new_ip_range = db_models.IpamAvailabilityRange( + allocation_pool_id=allocation_pool_id, + first_ip=range_start, + last_ip=range_end) + session.add(new_ip_range) + return new_ip_range + + def check_unique_allocation(self, session, ip_address): + """Validate that the IP address on the subnet is not in use.""" + iprequest = session.query(db_models.IpamAllocation).filter_by( + ipam_subnet_id=self._ipam_subnet_id, status='ALLOCATED', + ip_address=ip_address).first() + if iprequest: + return False + return True + + def list_allocations(self, session, status='ALLOCATED', locking=False): + """Return current allocations for the subnet. 
+ + :param session: database session + :param status: IP allocation status + :param locking: specifies whether a write-intent lock should be + performed on the database operation + :returns: a list of IP allocation as instance of + neutron.ipam.drivers.neutrondb_ipam.db_models.IpamAllocation + """ + ip_qry = session.query( + db_models.IpamAllocation).filter_by( + ipam_subnet_id=self._ipam_subnet_id, + status=status) + if locking: + ip_qry = ip_qry.with_lockmode('update') + return ip_qry + + def create_allocation(self, session, ip_address, + status='ALLOCATED'): + """Create an IP allocation entry. + + :param session: database session + :param ip_address: the IP address to allocate + :param status: IP allocation status + """ + ip_request = db_models.IpamAllocation( + ip_address=ip_address, + status=status, + ipam_subnet_id=self._ipam_subnet_id) + session.add(ip_request) + + def delete_allocation(self, session, ip_address): + """Remove an IP allocation for this subnet. + + :param session: database session + :param ip_address: IP address for which the allocation entry should + be removed. + """ + return session.query(db_models.IpamAllocation).filter_by( + ip_address=ip_address, + ipam_subnet_id=self._ipam_subnet_id).delete( + synchronize_session=False) diff --git a/neutron/ipam/drivers/neutrondb_ipam/db_models.py b/neutron/ipam/drivers/neutrondb_ipam/db_models.py new file mode 100644 index 00000000000..819cf653ea4 --- /dev/null +++ b/neutron/ipam/drivers/neutrondb_ipam/db_models.py @@ -0,0 +1,111 @@ +# Copyright 2015 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +import sqlalchemy as sa +from sqlalchemy import orm as sa_orm + +from neutron.db import model_base +from neutron.db import models_v2 + +# Database models used by the neutron DB IPAM driver + + +# NOTE(salv-orlando): This is meant to replace the class +# neutron.db.models_v2.IPAvailabilityRange. +class IpamAvailabilityRange(model_base.BASEV2): + """Internal representation of available IPs for Neutron subnets. + + Allocation - first entry from the range will be allocated. + If the first entry is equal to the last entry then this row + will be deleted. + Recycling ips involves reading the IPAllocationPool and IPAllocation tables + and inserting ranges representing available ips. This happens after the + final allocation is pulled from this table and a new ip allocation is + requested. Any contiguous ranges of available ips will be inserted as a + single range. 
+ """ + + allocation_pool_id = sa.Column(sa.String(36), + sa.ForeignKey('ipamallocationpools.id', + ondelete="CASCADE"), + nullable=False, + primary_key=True) + first_ip = sa.Column(sa.String(64), nullable=False, primary_key=True) + last_ip = sa.Column(sa.String(64), nullable=False, primary_key=True) + __table_args__ = ( + sa.Index('ix_ipamavailabilityranges_first_ip_allocation_pool_id', + 'first_ip', 'allocation_pool_id'), + sa.Index('ix_ipamavailabilityranges_last_ip_allocation_pool_id', + 'last_ip', 'allocation_pool_id'), + model_base.BASEV2.__table_args__ + ) + + def __repr__(self): + return "%s - %s" % (self.first_ip, self.last_ip) + + +# NOTE(salv-orlando): The following data model creates redundancy with +# models_v2.IPAllocationPool. This level of data redundancy could be tolerated +# considering that the following model is specific to the IPAM driver logic. +# It therefore represents an internal representation of a subnet allocation +# pool and can therefore change in the future, where as +# models_v2.IPAllocationPool is the representation of IP allocation pools in +# the management layer and therefore its evolution is subject to APIs backward +# compatibility policies +class IpamAllocationPool(model_base.BASEV2, models_v2.HasId): + """Representation of an allocation pool in a Neutron subnet.""" + + ipam_subnet_id = sa.Column(sa.String(36), + sa.ForeignKey('ipamsubnets.id', + ondelete="CASCADE"), + nullable=False) + first_ip = sa.Column(sa.String(64), nullable=False) + last_ip = sa.Column(sa.String(64), nullable=False) + available_ranges = sa_orm.relationship(IpamAvailabilityRange, + backref='allocation_pool', + lazy="joined", + cascade='all, delete-orphan') + + def __repr__(self): + return "%s - %s" % (self.first_ip, self.last_ip) + + +class IpamSubnet(model_base.BASEV2, models_v2.HasId): + """Association between IPAM entities and neutron subnets. 
+ + For subnet data persistency - such as cidr and gateway IP, the IPAM + driver relies on Neutron's subnet model as source of truth to limit + data redundancy. + """ + neutron_subnet_id = sa.Column(sa.String(36), + nullable=True) + allocation_pools = sa_orm.relationship(IpamAllocationPool, + backref='subnet', + lazy="joined", + cascade='delete') + + +class IpamAllocation(model_base.BASEV2): + """Model class for IP Allocation requests. """ + ip_address = sa.Column(sa.String(64), nullable=False, primary_key=True) + status = sa.Column(sa.String(36)) + # The subnet identifier is redundant but come handy for looking up + # IP addresses to remove. + ipam_subnet_id = sa.Column(sa.String(36), + sa.ForeignKey('ipamsubnets.id', + ondelete="CASCADE"), + primary_key=True, + nullable=False) diff --git a/neutron/ipam/drivers/neutrondb_ipam/driver.py b/neutron/ipam/drivers/neutrondb_ipam/driver.py new file mode 100644 index 00000000000..d1002827c57 --- /dev/null +++ b/neutron/ipam/drivers/neutrondb_ipam/driver.py @@ -0,0 +1,438 @@ +# Copyright 2015 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import netaddr +from oslo_log import log + +from neutron.common import exceptions as n_exc +from neutron.common import ipv6_utils +from neutron.db import api as db_api +from neutron.i18n import _LE +from neutron import ipam +from neutron.ipam import driver as ipam_base +from neutron.ipam.drivers.neutrondb_ipam import db_api as ipam_db_api +from neutron.ipam import exceptions as ipam_exc +from neutron.ipam import subnet_alloc +from neutron.ipam import utils as ipam_utils +from neutron import manager +from neutron.openstack.common import uuidutils + + +LOG = log.getLogger(__name__) + + +class NeutronDbSubnet(ipam_base.Subnet): + """Manage IP addresses for Neutron DB IPAM driver. + + This class implements the strategy for IP address allocation and + deallocation for the Neutron DB IPAM driver. + Allocation for IP addresses is based on the concept of availability + ranges, which were already used in Neutron's DB base class for handling + IPAM operations. + """ + + @classmethod + def create_allocation_pools(cls, subnet_manager, session, pools): + for pool in pools: + subnet_manager.create_pool( + session, + netaddr.IPAddress(pool.first).format(), + netaddr.IPAddress(pool.last).format()) + + @classmethod + def create_from_subnet_request(cls, subnet_request, ctx): + ipam_subnet_id = uuidutils.generate_uuid() + subnet_manager = ipam_db_api.IpamSubnetManager( + ipam_subnet_id, + None) + # Create subnet resource + session = ctx.session + subnet_manager.create(session) + # If allocation pools are not specified, define them around + # the subnet's gateway IP + if not subnet_request.allocation_pools: + pools = ipam_utils.generate_pools(subnet_request.subnet_cidr, + subnet_request.gateway_ip) + else: + pools = subnet_request.allocation_pools + # Create IPAM allocation pools and availability ranges + cls.create_allocation_pools(subnet_manager, session, pools) + + return cls(ipam_subnet_id, + ctx, + cidr=subnet_request.subnet_cidr, + allocation_pools=pools, + 
gateway_ip=subnet_request.gateway_ip, + tenant_id=subnet_request.tenant_id, + subnet_id=subnet_request.subnet_id, + subnet_id_not_set=True) + + @classmethod + def load(cls, neutron_subnet_id, ctx): + """Load an IPAM subnet from the database given its neutron ID. + + :param neutron_subnet_id: neutron subnet identifier. + """ + ipam_subnet = ipam_db_api.IpamSubnetManager.load_by_neutron_subnet_id( + ctx.session, neutron_subnet_id) + if not ipam_subnet: + LOG.error(_LE("Unable to retrieve IPAM subnet as the referenced " + "Neutron subnet %s does not exist"), + neutron_subnet_id) + raise n_exc.SubnetNotFound(subnet_id=neutron_subnet_id) + pools = [] + for pool in ipam_subnet.allocation_pools: + pools.append(netaddr.IPRange(pool['first_ip'], pool['last_ip'])) + + neutron_subnet = cls._fetch_subnet(ctx, neutron_subnet_id) + + return cls(ipam_subnet['id'], + ctx, + cidr=neutron_subnet['cidr'], + allocation_pools=pools, + gateway_ip=neutron_subnet['gateway_ip'], + tenant_id=neutron_subnet['tenant_id'], + subnet_id=neutron_subnet_id) + + @classmethod + def _fetch_subnet(cls, context, id): + plugin = manager.NeutronManager.get_plugin() + return plugin._get_subnet(context, id) + + def __init__(self, internal_id, ctx, cidr=None, + allocation_pools=None, gateway_ip=None, tenant_id=None, + subnet_id=None, subnet_id_not_set=False): + # NOTE: In theory it could have been possible to grant the IPAM + # driver direct access to the database. While this is possible, + # it would have led to duplicate code and/or non-trivial + # refactorings in neutron.db.db_base_plugin_v2. + # This is because in the Neutron V2 plugin logic DB management is + # encapsulated within the plugin. 
+ self._cidr = cidr
+ self._pools = allocation_pools
+ self._gateway_ip = gateway_ip
+ self._tenant_id = tenant_id
+ self._subnet_id = None if subnet_id_not_set else subnet_id
+ self.subnet_manager = ipam_db_api.IpamSubnetManager(internal_id,
+ self._subnet_id)
+ self._context = ctx
+
+ def _verify_ip(self, session, ip_address):
+ """Verify whether IP address can be allocated on subnet.
+
+ :param session: database session
+ :param ip_address: String representing the IP address to verify
+ :raises: InvalidInput, IpAddressAlreadyAllocated
+ """
+ # Ensure that the IP's are unique
+ if not self.subnet_manager.check_unique_allocation(session,
+ ip_address):
+ raise ipam_exc.IpAddressAlreadyAllocated(
+ subnet_id=self.subnet_manager.neutron_id,
+ ip=ip_address)
+
+ # Ensure that the IP is valid on the subnet
+ if not ipam_utils.check_subnet_ip(self._cidr, ip_address):
+ raise ipam_exc.InvalidIpForSubnet(
+ subnet_id=self.subnet_manager.neutron_id,
+ ip=ip_address)
+
+ def _allocate_specific_ip(self, session, ip_address,
+ allocation_pool_id=None):
+ """Remove an IP address from subnet's availability ranges.
+
+ This method is supposed to be called from within a database
+ transaction, otherwise atomicity and integrity might not be
+ enforced and the operation might result in inconsistent availability
+ ranges for the subnet.
+
+ :param session: database session
+ :param ip_address: ip address to mark as allocated
+ :param allocation_pool_id: identifier of the allocation pool from
+ which the ip address has been extracted. If not specified this
+ routine will scan all allocation pools.
+ :returns: list of IP ranges as instances of IPAvailabilityRange
+ """
+ # Return immediately for EUI-64 addresses.
For this + # class of subnets availability ranges do not apply + if ipv6_utils.is_eui64_address(ip_address): + return + + LOG.debug("Removing %(ip_address)s from availability ranges for " + "subnet id:%(subnet_id)s", + {'ip_address': ip_address, + 'subnet_id': self.subnet_manager.neutron_id}) + # Netaddr's IPRange and IPSet objects work very well even with very + # large subnets, including IPv6 ones. + final_ranges = [] + if allocation_pool_id: + av_ranges = self.subnet_manager.list_ranges_by_allocation_pool( + session, allocation_pool_id, locking=True) + else: + av_ranges = self.subnet_manager.list_ranges_by_subnet_id( + session, locking=True) + for db_range in av_ranges: + initial_ip_set = netaddr.IPSet(netaddr.IPRange( + db_range['first_ip'], db_range['last_ip'])) + final_ip_set = initial_ip_set - netaddr.IPSet([ip_address]) + if not final_ip_set: + # Range exhausted - bye bye + session.delete(db_range) + continue + if initial_ip_set == final_ip_set: + # IP address does not fall within the current range, move + # to the next one + final_ranges.append(db_range) + continue + for new_range in final_ip_set.iter_ipranges(): + # store new range in database + # use netaddr.IPAddress format() method which is equivalent + # to str(...) but also enables us to use different + # representation formats (if needed) for IPv6. 
+ first_ip = netaddr.IPAddress(new_range.first) + last_ip = netaddr.IPAddress(new_range.last) + if (db_range['first_ip'] == first_ip.format() or + db_range['last_ip'] == last_ip.format()): + db_range['first_ip'] = first_ip.format() + db_range['last_ip'] = last_ip.format() + LOG.debug("Adjusted availability range for pool %s", + db_range['allocation_pool_id']) + final_ranges.append(db_range) + else: + new_ip_range = self.subnet_manager.create_range( + session, + db_range['allocation_pool_id'], + first_ip.format(), + last_ip.format()) + LOG.debug("Created availability range for pool %s", + new_ip_range['allocation_pool_id']) + final_ranges.append(new_ip_range) + # Most callers might ignore this return value, which is however + # useful for testing purposes + LOG.debug("Availability ranges for subnet id %(subnet_id)s " + "modified: %(new_ranges)s", + {'subnet_id': self.subnet_manager.neutron_id, + 'new_ranges': ", ".join(["[%s; %s]" % + (r['first_ip'], r['last_ip']) for + r in final_ranges])}) + return final_ranges + + def _rebuild_availability_ranges(self, session): + """Rebuild availability ranges. + + This method should be called only when the availability ranges are + exhausted or when the subnet's allocation pools are updated, + which may trigger a deletion of the availability ranges. + + For this operation to complete successfully, this method uses a + locking query to ensure that no IP is allocated while the regeneration + of availability ranges is in progress. + + :param session: database session + """ + # List all currently allocated addresses, and prevent further + # allocations with a write-intent lock. + # NOTE: because of this driver's logic the write intent lock is + # probably unnecessary as this routine is called when the availability + # ranges for a subnet are exhausted and no further address can be + # allocated. 
+ # TODO(salv-orlando): devise, if possible, a more efficient solution
+ # for building the IPSet to ensure decent performances even with very
+ # large subnets.
+ allocations = netaddr.IPSet(
+ [netaddr.IPAddress(allocation['ip_address']) for
+ allocation in self.subnet_manager.list_allocations(
+ session, locking=True)])
+
+ # NOTE(salv-orlando):
+ # There should be no need to set a write intent lock on the allocation
+ # pool table. Indeed it is not important for the correctness of this
+ # operation if the allocation pools are updated by another operation,
+ # which will result in the generation of new availability ranges.
+ # NOTE: it might be argued that an allocation pool update should in
+ # theory preempt rebuilding the availability range. This is an option
+ # to consider for future developments.
+ LOG.debug("Rebuilding availability ranges for subnet %s",
+ self.subnet_manager.neutron_id)
+
+ for pool in self.subnet_manager.list_pools(session):
+ # Create a set of all addresses in the pool
+ poolset = netaddr.IPSet(netaddr.IPRange(pool['first_ip'],
+ pool['last_ip']))
+ # Use set difference to find free addresses in the pool
+ available = poolset - allocations
+ # Write the ranges to the db
+ for ip_range in available.iter_ipranges():
+ av_range = self.subnet_manager.create_range(
+ session,
+ pool['id'],
+ netaddr.IPAddress(ip_range.first).format(),
+ netaddr.IPAddress(ip_range.last).format())
+ session.add(av_range)
+
+ def _generate_ip(self, session):
+ try:
+ return self._try_generate_ip(session)
+ except ipam_exc.IpAddressGenerationFailure:
+ self._rebuild_availability_ranges(session)
+
+ return self._try_generate_ip(session)
+
+ def _try_generate_ip(self, session):
+ """Generate an IP address from availability ranges."""
+ ip_range = self.subnet_manager.get_first_range(session, locking=True)
+ if not ip_range:
+ LOG.debug("All IPs from subnet %(subnet_id)s allocated",
+ {'subnet_id': self.subnet_manager.neutron_id})
+ raise ipam_exc.IpAddressGenerationFailure(
+ subnet_id=self.subnet_manager.neutron_id) + # A suitable range was found. Return IP address. + ip_address = ip_range['first_ip'] + LOG.debug("Allocated IP - %(ip_address)s from range " + "[%(first_ip)s; %(last_ip)s]", + {'ip_address': ip_address, + 'first_ip': ip_address, + 'last_ip': ip_range['last_ip']}) + return ip_address, ip_range['allocation_pool_id'] + + def allocate(self, address_request): + # NOTE(salv-orlando): Creating a new db session might be a rather + # dangerous thing to do, if executed from within another database + # transaction. Therefore the IPAM driver should never be + # called from within a database transaction, which is also good + # practice since in the general case these drivers may interact + # with remote backends + session = self._context.session + all_pool_id = None + # NOTE(salv-orlando): It would probably better to have a simpler + # model for address requests and just check whether there is a + # specific IP address specified in address_request + if isinstance(address_request, ipam.SpecificAddressRequest): + # This handles both specific and automatic address requests + # Check availability of requested IP + ip_address = str(address_request.address) + self._verify_ip(session, ip_address) + else: + ip_address, all_pool_id = self._generate_ip(session) + self._allocate_specific_ip(session, ip_address, all_pool_id) + # Create IP allocation request object + # The only defined status at this stage is 'ALLOCATED'. + # More states will be available in the future - e.g.: RECYCLABLE + self.subnet_manager.create_allocation(session, ip_address) + return ip_address + + def deallocate(self, address): + # This is almost a no-op because the Neutron DB IPAM driver does not + # delete IPAllocation objects, neither rebuilds availability ranges + # at every deallocation. The only operation it performs is to delete + # an IPRequest entry. 
+ session = self._context.session + + count = self.subnet_manager.delete_allocation( + session, address) + # count can hardly be greater than 1, but it can be 0... + if not count: + raise ipam_exc.IpAddressAllocationNotFound( + subnet_id=self.subnet_manager.neutron_id, + ip_address=address) + + def update_allocation_pools(self, pools): + # Pools have already been validated in the subnet request object which + # was sent to the subnet pool driver. Further validation should not be + # required. + session = db_api.get_session() + self.subnet_manager.delete_allocation_pools(session) + self.create_allocation_pools(self.subnet_manager, session, pools) + self._pools = pools + + def get_details(self): + """Return subnet data as a SpecificSubnetRequest""" + return ipam.SpecificSubnetRequest( + self._tenant_id, self.subnet_manager.neutron_id, + self._cidr, self._gateway_ip, self._pools) + + def associate_neutron_subnet(self, subnet_id): + """Set neutron identifier for this subnet""" + session = self._context.session + if self._subnet_id: + raise + # IPAMSubnet does not have foreign key to Subnet, + # so need verify subnet existence. + NeutronDbSubnet._fetch_subnet(self._context, subnet_id) + self.subnet_manager.associate_neutron_id(session, subnet_id) + self._subnet_id = subnet_id + + +class NeutronDbPool(subnet_alloc.SubnetAllocator): + """Subnet pools backed by Neutron Database. + + As this driver does not implement yet the subnet pool concept, most + operations are either trivial or no-ops. + """ + + def get_subnet(self, subnet_id): + """Retrieve an IPAM subnet. + + :param subnet_id: Neutron subnet identifier + :returns: a NeutronDbSubnet instance + """ + return NeutronDbSubnet.load(subnet_id, self._context) + + def allocate_subnet(self, subnet_request): + """Create an IPAMSubnet object for the provided cidr. + + This method does not actually do any operation in the driver, given + its simplified nature. 
+
+ :param cidr: subnet's CIDR
+ :returns: a NeutronDbSubnet instance
+ """
+ if self._subnetpool:
+ subnet = super(NeutronDbPool, self).allocate_subnet(subnet_request)
+ subnet_request = subnet.get_details()
+
+ # SubnetRequest must be an instance of SpecificSubnetRequest
+ if not isinstance(subnet_request, ipam.SpecificSubnetRequest):
+ raise ipam_exc.InvalidSubnetRequestType(
+ subnet_type=type(subnet_request))
+ return NeutronDbSubnet.create_from_subnet_request(subnet_request,
+ self._context)
+
+ def update_subnet(self, subnet_request):
+ """Update subnet info in the IPAM driver.
+
+ The only subnet information the driver needs to be aware of
+ on update is the allocation pools.
+ """
+ if not subnet_request.subnet_id:
+ raise ipam_exc.InvalidSubnetRequest(
+ reason=("An identifier must be specified when updating "
+ "a subnet"))
+ if not subnet_request.allocation_pools:
+ LOG.debug("Update subnet request for subnet %s did not specify "
+ "new allocation pools, there is nothing to do",
+ subnet_request.subnet_id)
+ return
+ subnet = NeutronDbSubnet.load(subnet_request.subnet_id, self._context)
+ subnet.update_allocation_pools(subnet_request.allocation_pools)
+ return subnet
+
+ def remove_subnet(self, subnet):
+ """Remove data structures for a given subnet.
+
+ All the IPAM-related data are cleared when a subnet is deleted thanks
+ to cascaded foreign key relationships.
+ """
+ pass
diff --git a/neutron/ipam/exceptions.py b/neutron/ipam/exceptions.py
new file mode 100644
index 00000000000..4400e557e3c
--- /dev/null
+++ b/neutron/ipam/exceptions.py
@@ -0,0 +1,62 @@
+# Copyright 2015 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from neutron.common import exceptions + + +class InvalidSubnetRequestType(exceptions.BadRequest): + message = _("Cannot handle subnet of type %(subnet_type)s") + + +class AddressCalculationFailure(exceptions.NeutronException): + message = _("Unable to calculate %(address_type)s address because of:" + "%(reason)s") + + +class InvalidAddressType(exceptions.NeutronException): + message = _("Unknown address type %(address_type)s") + + +class IpAddressAllocationNotFound(exceptions.NeutronException): + message = _("Unable to find IP address %(ip_address)s on subnet " + "%(subnet_id)s") + + +class IpAddressAlreadyAllocated(exceptions.Conflict): + message = _("IP address %(ip)s already allocated in subnet %(subnet_id)s") + + +class InvalidIpForSubnet(exceptions.BadRequest): + message = _("IP address %(ip)s does not belong to subnet %(subnet_id)s") + + +class InvalidAddressRequest(exceptions.BadRequest): + message = _("The address allocation request could not be satisfied " + "because: %(reason)s") + + +class InvalidSubnetRequest(exceptions.BadRequest): + message = _("The subnet request could not be satisfied because: " + "%(reason)s") + + +class AllocationOnAutoAddressSubnet(exceptions.NeutronException): + message = (_("IPv6 address %(ip)s cannot be directly " + "assigned to a port on subnet %(subnet_id)s as the " + "subnet is configured for automatic addresses")) + + +class IpAddressGenerationFailure(exceptions.Conflict): + message = _("No more IP addresses available for subnet %(subnet_id)s.") diff --git a/neutron/ipam/subnet_alloc.py 
b/neutron/ipam/subnet_alloc.py index d4b93321066..49b6eda2ab5 100644 --- a/neutron/ipam/subnet_alloc.py +++ b/neutron/ipam/subnet_alloc.py @@ -23,6 +23,7 @@ from neutron.common import exceptions as n_exc from neutron.db import models_v2 import neutron.ipam as ipam from neutron.ipam import driver +from neutron.ipam import utils as ipam_utils from neutron.openstack.common import uuidutils @@ -33,19 +34,19 @@ class SubnetAllocator(driver.Pool): make merging into IPAM framework easier in future cycles. """ - def __init__(self, subnetpool): - self._subnetpool = subnetpool + def __init__(self, subnetpool, context): + super(SubnetAllocator, self).__init__(subnetpool, context) self._sp_helper = SubnetPoolHelper() - def _get_allocated_cidrs(self, session): - query = session.query( + def _get_allocated_cidrs(self): + query = self._context.session.query( models_v2.Subnet).with_lockmode('update') subnets = query.filter_by(subnetpool_id=self._subnetpool['id']) return (x.cidr for x in subnets) - def _get_available_prefix_list(self, session): + def _get_available_prefix_list(self): prefixes = (x.cidr for x in self._subnetpool.prefixes) - allocations = self._get_allocated_cidrs(session) + allocations = self._get_allocated_cidrs() prefix_set = netaddr.IPSet(iterable=prefixes) allocation_set = netaddr.IPSet(iterable=allocations) available_set = prefix_set.difference(allocation_set) @@ -57,11 +58,11 @@ class SubnetAllocator(driver.Pool): def _num_quota_units_in_prefixlen(self, prefixlen, quota_unit): return math.pow(2, quota_unit - prefixlen) - def _allocations_used_by_tenant(self, session, quota_unit): + def _allocations_used_by_tenant(self, quota_unit): subnetpool_id = self._subnetpool['id'] tenant_id = self._subnetpool['tenant_id'] - with session.begin(subtransactions=True): - qry = session.query( + with self._context.session.begin(subtransactions=True): + qry = self._context.session.query( models_v2.Subnet).with_lockmode('update') allocations = 
qry.filter_by(subnetpool_id=subnetpool_id, tenant_id=tenant_id) @@ -72,60 +73,60 @@ class SubnetAllocator(driver.Pool): quota_unit) return value - def _check_subnetpool_tenant_quota(self, session, tenant_id, prefixlen): + def _check_subnetpool_tenant_quota(self, tenant_id, prefixlen): quota_unit = self._sp_helper.ip_version_subnetpool_quota_unit( self._subnetpool['ip_version']) quota = self._subnetpool.get('default_quota') if quota: - used = self._allocations_used_by_tenant(session, quota_unit) + used = self._allocations_used_by_tenant(quota_unit) requested_units = self._num_quota_units_in_prefixlen(prefixlen, quota_unit) if used + requested_units > quota: raise n_exc.SubnetPoolQuotaExceeded() - def _allocate_any_subnet(self, session, request): - with session.begin(subtransactions=True): - self._check_subnetpool_tenant_quota(session, - request.tenant_id, + def _allocate_any_subnet(self, request): + with self._context.session.begin(subtransactions=True): + self._check_subnetpool_tenant_quota(request.tenant_id, request.prefixlen) - prefix_pool = self._get_available_prefix_list(session) + prefix_pool = self._get_available_prefix_list() for prefix in prefix_pool: if request.prefixlen >= prefix.prefixlen: subnet = prefix.subnet(request.prefixlen).next() gateway_ip = request.gateway_ip if not gateway_ip: gateway_ip = subnet.network + 1 + pools = ipam_utils.generate_pools(subnet.cidr, + gateway_ip) return IpamSubnet(request.tenant_id, request.subnet_id, subnet.cidr, gateway_ip=gateway_ip, - allocation_pools=None) + allocation_pools=pools) msg = _("Insufficient prefix space to allocate subnet size /%s") raise n_exc.SubnetAllocationError(reason=msg % str(request.prefixlen)) - def _allocate_specific_subnet(self, session, request): - with session.begin(subtransactions=True): - self._check_subnetpool_tenant_quota(session, - request.tenant_id, + def _allocate_specific_subnet(self, request): + with self._context.session.begin(subtransactions=True): + 
self._check_subnetpool_tenant_quota(request.tenant_id, request.prefixlen) - subnet = request.subnet - available = self._get_available_prefix_list(session) - matched = netaddr.all_matching_cidrs(subnet, available) - if len(matched) is 1 and matched[0].prefixlen <= subnet.prefixlen: + cidr = request.subnet_cidr + available = self._get_available_prefix_list() + matched = netaddr.all_matching_cidrs(cidr, available) + if len(matched) is 1 and matched[0].prefixlen <= cidr.prefixlen: return IpamSubnet(request.tenant_id, request.subnet_id, - subnet.cidr, + cidr, gateway_ip=request.gateway_ip, allocation_pools=request.allocation_pools) msg = _("Cannot allocate requested subnet from the available " "set of prefixes") raise n_exc.SubnetAllocationError(reason=msg) - def allocate_subnet(self, session, request): + def allocate_subnet(self, request): max_prefixlen = int(self._subnetpool['max_prefixlen']) min_prefixlen = int(self._subnetpool['min_prefixlen']) if request.prefixlen > max_prefixlen: @@ -138,20 +139,20 @@ class SubnetAllocator(driver.Pool): min_prefixlen=min_prefixlen) if isinstance(request, ipam.AnySubnetRequest): - return self._allocate_any_subnet(session, request) + return self._allocate_any_subnet(request) elif isinstance(request, ipam.SpecificSubnetRequest): - return self._allocate_specific_subnet(session, request) + return self._allocate_specific_subnet(request) else: msg = _("Unsupported request type") raise n_exc.SubnetAllocationError(reason=msg) - def get_subnet(self, subnet, subnet_id): + def get_subnet(self, subnet_id): raise NotImplementedError() def update_subnet(self, request): raise NotImplementedError() - def remove_subnet(self, subnet, subnet_id): + def remove_subnet(self, subnet_id): raise NotImplementedError() @@ -163,11 +164,12 @@ class IpamSubnet(driver.Subnet): cidr, gateway_ip=None, allocation_pools=None): - self._req = ipam.SpecificSubnetRequest(tenant_id, - subnet_id, - cidr, - gateway_ip=gateway_ip, - allocation_pools=None) + self._req = 
ipam.SpecificSubnetRequest( + tenant_id, + subnet_id, + cidr, + gateway_ip=gateway_ip, + allocation_pools=allocation_pools) def allocate(self, address_request): raise NotImplementedError() @@ -178,6 +180,9 @@ class IpamSubnet(driver.Subnet): def get_details(self): return self._req + def associate_neutron_subnet(self, subnet_id): + pass + class SubnetPoolReader(object): '''Class to assist with reading a subnetpool, loading defaults, and diff --git a/neutron/ipam/utils.py b/neutron/ipam/utils.py new file mode 100644 index 00000000000..74927769ad7 --- /dev/null +++ b/neutron/ipam/utils.py @@ -0,0 +1,48 @@ +# Copyright 2015 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import netaddr + + +def check_subnet_ip(cidr, ip_address): + """Validate that the IP address is on the subnet.""" + ip = netaddr.IPAddress(ip_address) + net = netaddr.IPNetwork(cidr) + # Check that the IP is valid on subnet. This cannot be the + # network or the broadcast address + return (ip != net.network and ip != net.broadcast + and net.netmask & ip == net.network) + + +def generate_pools(cidr, gateway_ip): + """Create IP allocation pools for a specified subnet + + The Neutron API defines a subnet's allocation pools as a list of + IPRange objects for defining the pool range. 
+ """ + pools = [] + # Auto allocate the pool around gateway_ip + net = netaddr.IPNetwork(cidr) + first_ip = net.first + 1 + last_ip = net.last - 1 + gw_ip = int(netaddr.IPAddress(gateway_ip or net.last)) + # Use the gw_ip to find a point for splitting allocation pools + # for this subnet + split_ip = min(max(gw_ip, net.first), net.last) + if split_ip > first_ip: + pools.append(netaddr.IPRange(first_ip, split_ip - 1)) + if split_ip < last_ip: + pools.append(netaddr.IPRange(split_ip + 1, last_ip)) + return pools diff --git a/neutron/tests/unit/common/test_ipv6_utils.py b/neutron/tests/unit/common/test_ipv6_utils.py index 0788d1a80af..9ec11c8fafa 100644 --- a/neutron/tests/unit/common/test_ipv6_utils.py +++ b/neutron/tests/unit/common/test_ipv6_utils.py @@ -124,3 +124,29 @@ class TestIsAutoAddressSubnet(base.BaseTestCase): self.subnet['ipv6_ra_mode'] = subnet.ra_mode self.assertEqual(subnet.is_auto_address, ipv6_utils.is_auto_address_subnet(self.subnet)) + + +class TestIsEui64Address(base.BaseTestCase): + + def _test_eui_64(self, ips, expected): + for ip in ips: + self.assertEqual(expected, ipv6_utils.is_eui64_address(ip), + "Error on %s" % ip) + + def test_valid_eui64_addresses(self): + ips = ('fffe::0cad:12ff:fe44:5566', + ipv6_utils.get_ipv6_addr_by_EUI64('2001:db8::', + '00:16:3e:33:44:55')) + self._test_eui_64(ips, True) + + def test_invalid_eui64_addresses(self): + ips = ('192.168.1.1', + '192.168.1.0', + '255.255.255.255', + '0.0.0.0', + 'fffe::', + 'ff80::1', + 'fffe::0cad:12ff:ff44:5566', + 'fffe::0cad:12fe:fe44:5566', + 'ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff') + self._test_eui_64(ips, False) diff --git a/neutron/tests/unit/ipam/__init__.py b/neutron/tests/unit/ipam/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/neutron/tests/unit/ipam/drivers/__init__.py b/neutron/tests/unit/ipam/drivers/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/neutron/tests/unit/ipam/drivers/neutrondb_ipam/__init__.py 
b/neutron/tests/unit/ipam/drivers/neutrondb_ipam/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/neutron/tests/unit/ipam/drivers/neutrondb_ipam/test_db_api.py b/neutron/tests/unit/ipam/drivers/neutrondb_ipam/test_db_api.py new file mode 100644 index 00000000000..c632efb978c --- /dev/null +++ b/neutron/tests/unit/ipam/drivers/neutrondb_ipam/test_db_api.py @@ -0,0 +1,170 @@ +# Copyright 2015 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from neutron import context +from neutron.ipam.drivers.neutrondb_ipam import db_api +from neutron.ipam.drivers.neutrondb_ipam import db_models +from neutron.openstack.common import uuidutils +from neutron.tests.unit import testlib_api + + +class TestIpamSubnetManager(testlib_api.SqlTestCase): + """Test case for SubnetManager DB helper class""" + + def setUp(self): + super(TestIpamSubnetManager, self).setUp() + self.ctx = context.get_admin_context() + self.neutron_subnet_id = uuidutils.generate_uuid() + self.ipam_subnet_id = uuidutils.generate_uuid() + self.subnet_ip = '1.2.3.4' + self.single_pool = ('1.2.3.4', '1.2.3.10') + self.multi_pool = (('1.2.3.2', '1.2.3.12'), ('1.2.3.15', '1.2.3.24')) + self.subnet_manager = db_api.IpamSubnetManager(self.ipam_subnet_id, + self.neutron_subnet_id) + self.subnet_manager_id = self.subnet_manager.create(self.ctx.session) + self.ctx.session.flush() + + def test_create(self): + self.assertEqual(self.ipam_subnet_id, self.subnet_manager_id) + subnets = self.ctx.session.query(db_models.IpamSubnet).filter_by( + id=self.ipam_subnet_id).all() + self.assertEqual(1, len(subnets)) + + def test_associate_neutron_id(self): + self.subnet_manager.associate_neutron_id(self.ctx.session, + 'test-id') + subnet = self.ctx.session.query(db_models.IpamSubnet).filter_by( + id=self.ipam_subnet_id).first() + self.assertEqual('test-id', subnet['neutron_subnet_id']) + + def _create_pools(self, pools): + db_pools = [] + for pool in pools: + db_pool = self.subnet_manager.create_pool(self.ctx.session, + pool[0], + pool[1]) + db_pools.append(db_pool) + return db_pools + + def _validate_ips(self, pool, db_pool): + self.assertEqual(pool[0], db_pool.first_ip) + self.assertEqual(pool[1], db_pool.last_ip) + + def test_create_pool(self): + db_pools = self._create_pools([self.single_pool]) + + ipam_pool = self.ctx.session.query(db_models.IpamAllocationPool).\ + filter_by(ipam_subnet_id=self.ipam_subnet_id).first() + self._validate_ips(self.single_pool, ipam_pool) 
+ + range = self.ctx.session.query(db_models.IpamAvailabilityRange).\ + filter_by(allocation_pool_id=db_pools[0].id).first() + self._validate_ips(self.single_pool, range) + + def _test_get_first_range(self, locking): + self._create_pools(self.multi_pool) + range = self.subnet_manager.get_first_range(self.ctx.session, + locking=locking) + self._validate_ips(self.multi_pool[0], range) + + def test_get_first_range(self): + self._test_get_first_range(False) + + def test_get_first_range_locking(self): + self._test_get_first_range(True) + + def test_list_ranges_by_subnet_id(self): + self._create_pools(self.multi_pool) + + db_ranges = self.subnet_manager.list_ranges_by_subnet_id( + self.ctx.session, + self.ipam_subnet_id).all() + self.assertEqual(2, len(db_ranges)) + self.assertEqual(db_models.IpamAvailabilityRange, type(db_ranges[0])) + + def test_list_ranges_by_allocation_pool(self): + db_pools = self._create_pools([self.single_pool]) + # generate ids for allocation pools on flush + self.ctx.session.flush() + db_ranges = self.subnet_manager.list_ranges_by_allocation_pool( + self.ctx.session, + db_pools[0].id).all() + self.assertEqual(1, len(db_ranges)) + self.assertEqual(db_models.IpamAvailabilityRange, type(db_ranges[0])) + self._validate_ips(self.single_pool, db_ranges[0]) + + def test_create_range(self): + self._create_pools([self.single_pool]) + pool = self.ctx.session.query(db_models.IpamAllocationPool).\ + filter_by(ipam_subnet_id=self.ipam_subnet_id).first() + self._validate_ips(self.single_pool, pool) + allocation_pool_id = pool.id + + # delete the range + db_range = self.subnet_manager.list_ranges_by_allocation_pool( + self.ctx.session, + pool.id).first() + self._validate_ips(self.single_pool, db_range) + self.ctx.session.delete(db_range) + + # create a new range + range_start = '1.2.3.5' + range_end = '1.2.3.9' + new_range = self.subnet_manager.create_range(self.ctx.session, + allocation_pool_id, + range_start, + range_end) + self.assertEqual(range_start, 
new_range.first_ip) + self.assertEqual(range_end, new_range.last_ip) + + def test_check_unique_allocation(self): + self.assertTrue(self.subnet_manager.check_unique_allocation( + self.ctx.session, self.subnet_ip)) + + def test_check_unique_allocation_negative(self): + self.subnet_manager.create_allocation(self.ctx.session, + self.subnet_ip) + self.assertFalse(self.subnet_manager.check_unique_allocation( + self.ctx.session, self.subnet_ip)) + + def test_list_allocations(self): + ips = ['1.2.3.4', '1.2.3.6', '1.2.3.7'] + for ip in ips: + self.subnet_manager.create_allocation(self.ctx.session, ip) + allocs = self.subnet_manager.list_allocations(self.ctx.session).all() + self.assertEqual(len(ips), len(allocs)) + for allocation in allocs: + self.assertIn(allocation.ip_address, ips) + + def _test_create_allocation(self): + self.subnet_manager.create_allocation(self.ctx.session, + self.subnet_ip) + alloc = self.ctx.session.query(db_models.IpamAllocation).filter_by( + ipam_subnet_id=self.ipam_subnet_id).all() + self.assertEqual(1, len(alloc)) + self.assertEqual(self.subnet_ip, alloc[0].ip_address) + return alloc + + def test_create_allocation(self): + self._test_create_allocation() + + def test_delete_allocation(self): + allocs = self._test_create_allocation() + self.subnet_manager.delete_allocation(self.ctx.session, + allocs[0].ip_address) + + allocs = self.ctx.session.query(db_models.IpamAllocation).filter_by( + ipam_subnet_id=self.ipam_subnet_id).all() + self.assertEqual(0, len(allocs)) diff --git a/neutron/tests/unit/ipam/drivers/neutrondb_ipam/test_driver.py b/neutron/tests/unit/ipam/drivers/neutrondb_ipam/test_driver.py new file mode 100644 index 00000000000..4719e5e04f0 --- /dev/null +++ b/neutron/tests/unit/ipam/drivers/neutrondb_ipam/test_driver.py @@ -0,0 +1,442 @@ +# Copyright 2015 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import netaddr + +from neutron.api.v2 import attributes +from neutron.common import constants +from neutron.common import exceptions as n_exc +from neutron import context +from neutron import ipam +from neutron.ipam.drivers.neutrondb_ipam import driver +from neutron.ipam import exceptions as ipam_exc +from neutron import manager + +from neutron.tests.unit.db import test_db_base_plugin_v2 as test_db_plugin +from neutron.tests.unit import testlib_api + + +def convert_firstip_to_ipaddress(range_item): + return netaddr.IPAddress(range_item['first_ip']) + + +class TestNeutronDbIpamMixin(object): + + def _create_network(self, plugin, ctx, shared=False): + network = {'network': {'name': 'net', + 'shared': shared, + 'admin_state_up': True, + 'tenant_id': self._tenant_id}} + created_network = plugin.create_network(ctx, network) + return (created_network, created_network['id']) + + def _create_subnet(self, plugin, ctx, network_id, cidr, ip_version=4, + v6_address_mode=attributes.ATTR_NOT_SPECIFIED, + allocation_pools=attributes.ATTR_NOT_SPECIFIED): + subnet = {'subnet': {'name': 'sub', + 'cidr': cidr, + 'ip_version': ip_version, + 'gateway_ip': attributes.ATTR_NOT_SPECIFIED, + 'allocation_pools': allocation_pools, + 'enable_dhcp': True, + 'dns_nameservers': attributes.ATTR_NOT_SPECIFIED, + 'host_routes': attributes.ATTR_NOT_SPECIFIED, + 'ipv6_address_mode': v6_address_mode, + 'ipv6_ra_mode': attributes.ATTR_NOT_SPECIFIED, + 'network_id': network_id, + 'tenant_id': self._tenant_id}} + return plugin.create_subnet(ctx, subnet) + + +class 
TestNeutronDbIpamPool(testlib_api.SqlTestCase, + TestNeutronDbIpamMixin): + """Test case for the Neutron's DB IPAM driver subnet pool interface.""" + + def setUp(self): + super(TestNeutronDbIpamPool, self).setUp() + self._tenant_id = 'test-tenant' + + # Configure plugin for tests + self.setup_coreplugin(test_db_plugin.DB_PLUGIN_KLASS) + + # Prepare environment for tests + self.plugin = manager.NeutronManager.get_plugin() + self.ctx = context.get_admin_context() + self.network, self.net_id = self._create_network(self.plugin, + self.ctx) + + # Allocate IPAM driver + self.ipam_pool = driver.NeutronDbPool(None, self.ctx) + + def _verify_ipam_subnet_details(self, ipam_subnet, + cidr=None, + tenant_id=None, + gateway_ip=None, + allocation_pools=None): + ipam_subnet_details = ipam_subnet.get_details() + gateway_ip_address = None + cidr_ip_network = None + if gateway_ip: + gateway_ip_address = netaddr.IPAddress(gateway_ip) + if cidr: + cidr_ip_network = netaddr.IPNetwork(cidr) + self.assertEqual(tenant_id, ipam_subnet_details.tenant_id) + self.assertEqual(gateway_ip_address, ipam_subnet_details.gateway_ip) + self.assertEqual(cidr_ip_network, ipam_subnet_details.subnet_cidr) + self.assertEqual(allocation_pools, + ipam_subnet_details.allocation_pools) + + def test_allocate_ipam_subnet_no_neutron_subnet_id(self): + cidr = '10.0.0.0/24' + allocation_pools = [netaddr.IPRange('10.0.0.100', '10.0.0.150'), + netaddr.IPRange('10.0.0.200', '10.0.0.250')] + subnet_req = ipam.SpecificSubnetRequest( + self._tenant_id, + None, + cidr, + allocation_pools=allocation_pools, + gateway_ip='10.0.0.101') + ipam_subnet = self.ipam_pool.allocate_subnet(subnet_req) + self._verify_ipam_subnet_details(ipam_subnet, + cidr, + self._tenant_id, + '10.0.0.101', + allocation_pools) + + def _prepare_specific_subnet_request(self, cidr): + subnet = self._create_subnet( + self.plugin, self.ctx, self.net_id, cidr) + subnet_req = ipam.SpecificSubnetRequest( + self._tenant_id, + subnet['id'], + cidr, + 
gateway_ip=subnet['gateway_ip']) + return subnet, subnet_req + + def test_allocate_ipam_subnet_with_neutron_subnet_id(self): + cidr = '10.0.0.0/24' + subnet, subnet_req = self._prepare_specific_subnet_request(cidr) + ipam_subnet = self.ipam_pool.allocate_subnet(subnet_req) + self._verify_ipam_subnet_details( + ipam_subnet, + cidr, self._tenant_id, subnet['gateway_ip'], + [netaddr.IPRange('10.0.0.2', '10.0.0.254')]) + + def test_allocate_any_subnet_fails(self): + self.assertRaises( + ipam_exc.InvalidSubnetRequestType, + self.ipam_pool.allocate_subnet, + ipam.AnySubnetRequest(self._tenant_id, 'meh', constants.IPv4, 24)) + + def test_update_subnet_pools(self): + cidr = '10.0.0.0/24' + subnet, subnet_req = self._prepare_specific_subnet_request(cidr) + ipam_subnet = self.ipam_pool.allocate_subnet(subnet_req) + ipam_subnet.associate_neutron_subnet(subnet['id']) + allocation_pools = [netaddr.IPRange('10.0.0.100', '10.0.0.150'), + netaddr.IPRange('10.0.0.200', '10.0.0.250')] + update_subnet_req = ipam.SpecificSubnetRequest( + self._tenant_id, + subnet['id'], + cidr, + gateway_ip=subnet['gateway_ip'], + allocation_pools=allocation_pools) + ipam_subnet = self.ipam_pool.update_subnet(update_subnet_req) + self._verify_ipam_subnet_details( + ipam_subnet, + cidr, self._tenant_id, subnet['gateway_ip'], allocation_pools) + + def test_get_subnet(self): + cidr = '10.0.0.0/24' + subnet, subnet_req = self._prepare_specific_subnet_request(cidr) + ipam_subnet = self.ipam_pool.allocate_subnet(subnet_req) + ipam_subnet.associate_neutron_subnet(subnet['id']) + # Retrieve the subnet + ipam_subnet = self.ipam_pool.get_subnet(subnet['id']) + self._verify_ipam_subnet_details( + ipam_subnet, + cidr, self._tenant_id, subnet['gateway_ip'], + [netaddr.IPRange('10.0.0.2', '10.0.0.254')]) + + def test_get_non_existing_subnet_fails(self): + self.assertRaises(n_exc.SubnetNotFound, + self.ipam_pool.get_subnet, + 'boo') + + +class TestNeutronDbIpamSubnet(testlib_api.SqlTestCase, + 
TestNeutronDbIpamMixin): + """Test case for Subnet interface for Nuetron's DB IPAM driver. + + This test case exercises the reference IPAM driver. + Even if it loads a plugin, the unit tests in this class do not exercise + it at all; they simply perform white box testing on the IPAM driver. + The plugin is exclusively used to create the neutron objects on which + the IPAM driver will operate. + """ + + def _create_and_allocate_ipam_subnet( + self, cidr, allocation_pools=attributes.ATTR_NOT_SPECIFIED, + ip_version=4, v6_auto_address=False, tenant_id=None): + v6_address_mode = attributes.ATTR_NOT_SPECIFIED + if v6_auto_address: + # set ip version to 6 regardless of what's been passed to the + # method + ip_version = 6 + v6_address_mode = constants.IPV6_SLAAC + subnet = self._create_subnet( + self.plugin, self.ctx, self.net_id, cidr, + ip_version=ip_version, + allocation_pools=allocation_pools, + v6_address_mode=v6_address_mode) + # Build netaddr.IPRanges from allocation pools since IPAM SubnetRequest + # objects are strongly typed + allocation_pool_ranges = [netaddr.IPRange( + pool['start'], pool['end']) for pool in + subnet['allocation_pools']] + subnet_req = ipam.SpecificSubnetRequest( + tenant_id, + subnet['id'], + cidr, + gateway_ip=subnet['gateway_ip'], + allocation_pools=allocation_pool_ranges) + ipam_subnet = self.ipam_pool.allocate_subnet(subnet_req) + ipam_subnet.associate_neutron_subnet(subnet['id']) + return ipam_subnet, subnet + + def setUp(self): + super(TestNeutronDbIpamSubnet, self).setUp() + self._tenant_id = 'test-tenant' + + # Configure plugin for tests + self.setup_coreplugin(test_db_plugin.DB_PLUGIN_KLASS) + + # Prepare environment for tests + self.plugin = manager.NeutronManager.get_plugin() + self.ctx = context.get_admin_context() + self.network, self.net_id = self._create_network(self.plugin, + self.ctx) + + # Allocate IPAM driver + self.ipam_pool = driver.NeutronDbPool(None, self.ctx) + + def test__verify_ip_succeeds(self): + cidr = 
'10.0.0.0/24' + ipam_subnet = self._create_and_allocate_ipam_subnet(cidr)[0] + ipam_subnet._verify_ip(self.ctx.session, '10.0.0.2') + + def test__verify_ip_not_in_subnet_fails(self): + cidr = '10.0.0.0/24' + ipam_subnet = self._create_and_allocate_ipam_subnet(cidr)[0] + self.assertRaises(ipam_exc.InvalidIpForSubnet, + ipam_subnet._verify_ip, + self.ctx.session, + '192.168.0.2') + + def test__verify_ip_bcast_and_network_fail(self): + cidr = '10.0.0.0/24' + ipam_subnet = self._create_and_allocate_ipam_subnet(cidr)[0] + self.assertRaises(ipam_exc.InvalidIpForSubnet, + ipam_subnet._verify_ip, + self.ctx.session, + '10.0.0.255') + self.assertRaises(ipam_exc.InvalidIpForSubnet, + ipam_subnet._verify_ip, + self.ctx.session, + '10.0.0.0') + + def test__allocate_specific_ip(self): + cidr = '10.0.0.0/24' + ipam_subnet = self._create_and_allocate_ipam_subnet(cidr)[0] + with self.ctx.session.begin(): + ranges = ipam_subnet._allocate_specific_ip( + self.ctx.session, '10.0.0.33') + self.assertEqual(2, len(ranges)) + # 10.0.0.1 should be allocated for gateway ip + ranges.sort(key=convert_firstip_to_ipaddress) + self.assertEqual('10.0.0.2', ranges[0]['first_ip']) + self.assertEqual('10.0.0.32', ranges[0]['last_ip']) + self.assertEqual('10.0.0.34', ranges[1]['first_ip']) + self.assertEqual('10.0.0.254', ranges[1]['last_ip']) + # Limit test - first address in range + ranges = ipam_subnet._allocate_specific_ip( + self.ctx.session, '10.0.0.2') + self.assertEqual(2, len(ranges)) + ranges.sort(key=convert_firstip_to_ipaddress) + self.assertEqual('10.0.0.3', ranges[0]['first_ip']) + self.assertEqual('10.0.0.32', ranges[0]['last_ip']) + self.assertEqual('10.0.0.34', ranges[1]['first_ip']) + self.assertEqual('10.0.0.254', ranges[1]['last_ip']) + # Limit test - last address in range + ranges = ipam_subnet._allocate_specific_ip( + self.ctx.session, '10.0.0.254') + self.assertEqual(2, len(ranges)) + ranges.sort(key=convert_firstip_to_ipaddress) + self.assertEqual('10.0.0.3', 
ranges[0]['first_ip']) + self.assertEqual('10.0.0.32', ranges[0]['last_ip']) + self.assertEqual('10.0.0.34', ranges[1]['first_ip']) + self.assertEqual('10.0.0.253', ranges[1]['last_ip']) + + def test__allocate_specific_ips_multiple_ranges(self): + cidr = '10.0.0.0/24' + ipam_subnet = self._create_and_allocate_ipam_subnet( + cidr, + allocation_pools=[{'start': '10.0.0.10', 'end': '10.0.0.19'}, + {'start': '10.0.0.30', 'end': '10.0.0.39'}])[0] + with self.ctx.session.begin(): + ranges = ipam_subnet._allocate_specific_ip( + self.ctx.session, '10.0.0.33') + self.assertEqual(3, len(ranges)) + # 10.0.0.1 should be allocated for gateway ip + ranges.sort(key=convert_firstip_to_ipaddress) + self.assertEqual('10.0.0.10', ranges[0]['first_ip']) + self.assertEqual('10.0.0.19', ranges[0]['last_ip']) + self.assertEqual('10.0.0.30', ranges[1]['first_ip']) + self.assertEqual('10.0.0.32', ranges[1]['last_ip']) + self.assertEqual('10.0.0.34', ranges[2]['first_ip']) + self.assertEqual('10.0.0.39', ranges[2]['last_ip']) + + def test__allocate_specific_ip_out_of_range(self): + cidr = '10.0.0.0/24' + subnet = self._create_subnet( + self.plugin, self.ctx, self.net_id, cidr) + subnet_req = ipam.SpecificSubnetRequest( + 'tenant_id', subnet, cidr, gateway_ip=subnet['gateway_ip']) + ipam_subnet = self.ipam_pool.allocate_subnet(subnet_req) + with self.ctx.session.begin(): + ranges = ipam_subnet._allocate_specific_ip( + self.ctx.session, '192.168.0.1') + # In this case _allocate_specific_ips does not fail, but + # simply does not update availability ranges at all + self.assertEqual(1, len(ranges)) + # 10.0.0.1 should be allocated for gateway ip + ranges.sort(key=convert_firstip_to_ipaddress) + self.assertEqual('10.0.0.2', ranges[0]['first_ip']) + self.assertEqual('10.0.0.254', ranges[0]['last_ip']) + + def _allocate_address(self, cidr, ip_version, address_request): + ipam_subnet = self._create_and_allocate_ipam_subnet( + cidr, ip_version=ip_version)[0] + return 
ipam_subnet.allocate(address_request) + + def test_allocate_any_v4_address_succeeds(self): + ip_address = self._allocate_address( + '10.0.0.0/24', 4, ipam.AnyAddressRequest) + # As the DB IPAM driver allocation logic is strictly sequential, we can + # expect this test to allocate the .2 address as .1 is used by default + # as subnet gateway + self.assertEqual('10.0.0.2', ip_address) + + def test_allocate_any_v6_address_succeeds(self): + ip_address = self._allocate_address( + 'fde3:abcd:4321:1::/64', 6, ipam.AnyAddressRequest) + # As the DB IPAM driver allocation logic is strictly sequential, we can + # expect this test to allocate the .2 address as .1 is used by default + # as subnet gateway + self.assertEqual('fde3:abcd:4321:1::2', ip_address) + + def test_allocate_specific_v4_address_succeeds(self): + ip_address = self._allocate_address( + '10.0.0.0/24', 4, ipam.SpecificAddressRequest('10.0.0.33')) + self.assertEqual('10.0.0.33', ip_address) + + def test_allocate_specific_v6_address_succeeds(self): + ip_address = self._allocate_address( + 'fde3:abcd:4321:1::/64', 6, + ipam.SpecificAddressRequest('fde3:abcd:4321:1::33')) + self.assertEqual('fde3:abcd:4321:1::33', ip_address) + + def test_allocate_specific_v4_address_out_of_range_fails(self): + self.assertRaises(ipam_exc.InvalidIpForSubnet, + self._allocate_address, + '10.0.0.0/24', 4, + ipam.SpecificAddressRequest('192.168.0.1')) + + def test_allocate_specific_v6_address_out_of_range_fails(self): + self.assertRaises(ipam_exc.InvalidIpForSubnet, + self._allocate_address, + 'fde3:abcd:4321:1::/64', 6, + ipam.SpecificAddressRequest( + 'fde3:abcd:eeee:1::33')) + + def test_allocate_specific_address_in_use_fails(self): + ipam_subnet = self._create_and_allocate_ipam_subnet( + 'fde3:abcd:4321:1::/64', ip_version=6)[0] + addr_req = ipam.SpecificAddressRequest('fde3:abcd:4321:1::33') + ipam_subnet.allocate(addr_req) + self.assertRaises(ipam_exc.IpAddressAlreadyAllocated, + ipam_subnet.allocate, + addr_req) + + def 
test_allocate_any_address_exhausted_pools_fails(self): + # Same as above, the ranges will be recalculated always + ipam_subnet = self._create_and_allocate_ipam_subnet( + '192.168.0.0/30', ip_version=4)[0] + ipam_subnet.allocate(ipam.AnyAddressRequest) + # The second address generation request on a /30 for v4 net must fail + self.assertRaises(ipam_exc.IpAddressGenerationFailure, + ipam_subnet.allocate, + ipam.AnyAddressRequest) + + def _test_deallocate_address(self, cidr, ip_version): + ipam_subnet = self._create_and_allocate_ipam_subnet( + cidr, ip_version=ip_version)[0] + ip_address = ipam_subnet.allocate(ipam.AnyAddressRequest) + ipam_subnet.deallocate(ip_address) + + def test_deallocate_v4_address(self): + self._test_deallocate_address('10.0.0.0/24', 4) + + def test_deallocate_v6_address(self): + # This test does not really exercise any different code path wrt + # test_deallocate_v4_address. It is provided for completeness and for + # future proofing in case v6-specific logic will be added. + self._test_deallocate_address('fde3:abcd:4321:1::/64', 6) + + def test_allocate_unallocated_address_fails(self): + ipam_subnet = self._create_and_allocate_ipam_subnet( + '10.0.0.0/24', ip_version=4)[0] + self.assertRaises(ipam_exc.IpAddressAllocationNotFound, + ipam_subnet.deallocate, '10.0.0.2') + + def test_allocate_all_pool_addresses_triggers_range_recalculation(self): + # This test instead might be made to pass, but for the wrong reasons! 
+ pass + + def _test_allocate_subnet(self, subnet_id): + subnet_req = ipam.SpecificSubnetRequest( + 'tenant_id', subnet_id, '192.168.0.0/24') + return self.ipam_pool.allocate_subnet(subnet_req) + + def test_allocate_subnet_for_non_existent_subnet_pass(self): + # This test should pass because neutron subnet is not checked + # until associate neutron subnet step + subnet_req = ipam.SpecificSubnetRequest( + 'tenant_id', 'meh', '192.168.0.0/24') + self.ipam_pool.allocate_subnet(subnet_req) + + def test_associate_neutron_subnet(self): + ipam_subnet, subnet = self._create_and_allocate_ipam_subnet( + '192.168.0.0/24', ip_version=4) + details = ipam_subnet.get_details() + self.assertEqual(subnet['id'], details.subnet_id) + + def test_associate_non_existing_neutron_subnet_fails(self): + subnet_req = ipam.SpecificSubnetRequest( + 'tenant_id', 'meh', '192.168.0.0/24') + ipam_subnet = self.ipam_pool.allocate_subnet(subnet_req) + self.assertRaises(n_exc.SubnetNotFound, + ipam_subnet.associate_neutron_subnet, + 'meh') diff --git a/neutron/tests/unit/ipam/test_subnet_alloc.py b/neutron/tests/unit/ipam/test_subnet_alloc.py index 592fca00477..25021af2fdc 100644 --- a/neutron/tests/unit/ipam/test_subnet_alloc.py +++ b/neutron/tests/unit/ipam/test_subnet_alloc.py @@ -63,14 +63,14 @@ class TestSubnetAllocation(testlib_api.SqlTestCase): prefix_list, 21, 4) sp = self.plugin._get_subnetpool(self.ctx, sp['id']) with self.ctx.session.begin(subtransactions=True): - sa = subnet_alloc.SubnetAllocator(sp) + sa = subnet_alloc.SubnetAllocator(sp, self.ctx) req = ipam.AnySubnetRequest(self._tenant_id, uuidutils.generate_uuid(), constants.IPv4, 21) - res = sa.allocate_subnet(self.ctx.session, req) + res = sa.allocate_subnet(req) detail = res.get_details() prefix_set = netaddr.IPSet(iterable=prefix_list) - allocated_set = netaddr.IPSet(iterable=[detail.subnet.cidr]) + allocated_set = netaddr.IPSet(iterable=[detail.subnet_cidr]) self.assertTrue(allocated_set.issubset(prefix_set)) 
self.assertEqual(detail.prefixlen, 21) @@ -80,14 +80,14 @@ class TestSubnetAllocation(testlib_api.SqlTestCase): 21, 4) with self.ctx.session.begin(subtransactions=True): sp = self.plugin._get_subnetpool(self.ctx, sp['id']) - sa = subnet_alloc.SubnetAllocator(sp) + sa = subnet_alloc.SubnetAllocator(sp, self.ctx) req = ipam.SpecificSubnetRequest(self._tenant_id, uuidutils.generate_uuid(), '10.1.2.0/24') - res = sa.allocate_subnet(self.ctx.session, req) + res = sa.allocate_subnet(req) detail = res.get_details() sp = self._get_subnetpool(self.ctx, self.plugin, sp['id']) - self.assertEqual(str(detail.subnet.cidr), '10.1.2.0/24') + self.assertEqual(str(detail.subnet_cidr), '10.1.2.0/24') self.assertEqual(detail.prefixlen, 24) def test_insufficient_prefix_space_for_any_allocation(self): @@ -95,25 +95,25 @@ class TestSubnetAllocation(testlib_api.SqlTestCase): ['10.1.1.0/24', '192.168.1.0/24'], 21, 4) sp = self.plugin._get_subnetpool(self.ctx, sp['id']) - sa = subnet_alloc.SubnetAllocator(sp) + sa = subnet_alloc.SubnetAllocator(sp, self.ctx) req = ipam.AnySubnetRequest(self._tenant_id, uuidutils.generate_uuid(), constants.IPv4, 21) self.assertRaises(n_exc.SubnetAllocationError, - sa.allocate_subnet, self.ctx.session, req) + sa.allocate_subnet, req) def test_insufficient_prefix_space_for_specific_allocation(self): sp = self._create_subnet_pool(self.plugin, self.ctx, 'test-sp', ['10.1.0.0/24'], 21, 4) sp = self.plugin._get_subnetpool(self.ctx, sp['id']) - sa = subnet_alloc.SubnetAllocator(sp) + sa = subnet_alloc.SubnetAllocator(sp, self.ctx) req = ipam.SpecificSubnetRequest(self._tenant_id, uuidutils.generate_uuid(), '10.1.0.0/21') self.assertRaises(n_exc.SubnetAllocationError, - sa.allocate_subnet, self.ctx.session, req) + sa.allocate_subnet, req) def test_allocate_any_subnet_gateway(self): sp = self._create_subnet_pool(self.plugin, self.ctx, 'test-sp', @@ -121,13 +121,14 @@ class TestSubnetAllocation(testlib_api.SqlTestCase): 21, 4) sp = 
self.plugin._get_subnetpool(self.ctx, sp['id']) with self.ctx.session.begin(subtransactions=True): - sa = subnet_alloc.SubnetAllocator(sp) + sa = subnet_alloc.SubnetAllocator(sp, self.ctx) req = ipam.AnySubnetRequest(self._tenant_id, uuidutils.generate_uuid(), constants.IPv4, 21) - res = sa.allocate_subnet(self.ctx.session, req) + res = sa.allocate_subnet(req) detail = res.get_details() - self.assertEqual(detail.gateway_ip, detail.subnet.network + 1) + self.assertEqual(detail.gateway_ip, + detail.subnet_cidr.network + 1) def test_allocate_specific_subnet_specific_gateway(self): sp = self._create_subnet_pool(self.plugin, self.ctx, 'test-sp', @@ -135,12 +136,12 @@ class TestSubnetAllocation(testlib_api.SqlTestCase): 21, 4) sp = self.plugin._get_subnetpool(self.ctx, sp['id']) with self.ctx.session.begin(subtransactions=True): - sa = subnet_alloc.SubnetAllocator(sp) + sa = subnet_alloc.SubnetAllocator(sp, self.ctx) req = ipam.SpecificSubnetRequest(self._tenant_id, uuidutils.generate_uuid(), '10.1.2.0/24', gateway_ip='10.1.2.254') - res = sa.allocate_subnet(self.ctx.session, req) + res = sa.allocate_subnet(req) detail = res.get_details() self.assertEqual(detail.gateway_ip, netaddr.IPAddress('10.1.2.254')) @@ -149,8 +150,8 @@ class TestSubnetAllocation(testlib_api.SqlTestCase): sp = self._create_subnet_pool(self.plugin, self.ctx, 'test-sp', ['10.1.0.0/16', '192.168.1.0/24'], 21, 4) - sa = subnet_alloc.SubnetAllocator(sp) - value = sa._allocations_used_by_tenant(self.ctx.session, 32) + sa = subnet_alloc.SubnetAllocator(sp, self.ctx) + value = sa._allocations_used_by_tenant(32) self.assertEqual(value, 0) def test_subnetpool_default_quota_exceeded(self): @@ -158,11 +159,10 @@ class TestSubnetAllocation(testlib_api.SqlTestCase): ['fe80::/48'], 48, 6, default_quota=1) sp = self.plugin._get_subnetpool(self.ctx, sp['id']) - sa = subnet_alloc.SubnetAllocator(sp) + sa = subnet_alloc.SubnetAllocator(sp, self.ctx) req = ipam.SpecificSubnetRequest(self._tenant_id, 
uuidutils.generate_uuid(), 'fe80::/63') self.assertRaises(n_exc.SubnetPoolQuotaExceeded, sa.allocate_subnet, - self.ctx.session, req) diff --git a/neutron/tests/unit/test_ipam.py b/neutron/tests/unit/test_ipam.py index 7d27f38f7f7..aeec959a5da 100644 --- a/neutron/tests/unit/test_ipam.py +++ b/neutron/tests/unit/test_ipam.py @@ -13,7 +13,9 @@ import netaddr from neutron.common import constants +from neutron.common import ipv6_utils from neutron import ipam +from neutron.ipam import exceptions as ipam_exc from neutron.openstack.common import uuidutils from neutron.tests import base @@ -161,7 +163,7 @@ class TestIpamSpecificSubnetRequest(IpamSubnetRequestTestCase): gateway_ip='1.2.3.1') self.assertEqual(24, request.prefixlen) self.assertEqual(netaddr.IPAddress('1.2.3.1'), request.gateway_ip) - self.assertEqual(netaddr.IPNetwork('1.2.3.0/24'), request.subnet) + self.assertEqual(netaddr.IPNetwork('1.2.3.0/24'), request.subnet_cidr) def test_subnet_request_bad_gateway(self): self.assertRaises(ValueError, @@ -176,6 +178,12 @@ class TestAddressRequest(base.BaseTestCase): # This class doesn't test much. At least running through all of the # constructors may shake out some trivial bugs. 
+ + EUI64 = ipam.AutomaticAddressRequest.EUI64 + + def setUp(self): + super(TestAddressRequest, self).setUp() + def test_specific_address_ipv6(self): request = ipam.SpecificAddressRequest('2000::45') self.assertEqual(netaddr.IPAddress('2000::45'), request.address) @@ -186,3 +194,33 @@ class TestAddressRequest(base.BaseTestCase): def test_any_address(self): ipam.AnyAddressRequest() + + def test_automatic_address_request_eui64(self): + subnet_cidr = '2607:f0d0:1002:51::/64' + port_mac = 'aa:bb:cc:dd:ee:ff' + eui_addr = str(ipv6_utils.get_ipv6_addr_by_EUI64(subnet_cidr, + port_mac)) + request = ipam.AutomaticAddressRequest( + address_type=self.EUI64, + prefix=subnet_cidr, + mac=port_mac) + self.assertEqual(request.address, netaddr.IPAddress(eui_addr)) + + def test_automatic_address_request_invalid_address_type_raises(self): + self.assertRaises(ipam_exc.InvalidAddressType, + ipam.AutomaticAddressRequest, + address_type='kaboom') + + def test_automatic_address_request_eui64_no_mac_raises(self): + self.assertRaises(ipam_exc.AddressCalculationFailure, + ipam.AutomaticAddressRequest, + address_type=self.EUI64, + prefix='meh') + + def test_automatic_address_request_eui64_alien_param_raises(self): + self.assertRaises(ipam_exc.AddressCalculationFailure, + ipam.AutomaticAddressRequest, + address_type=self.EUI64, + mac='meh', + alien='et', + prefix='meh') From 3d2543d710c7071ffeb5c9857ac30a4d95695a7b Mon Sep 17 00:00:00 2001 From: dql Date: Mon, 9 Mar 2015 12:52:11 +0800 Subject: [PATCH 038/292] fix DHCP port changed when dhcp-agent restart When DHCP server is started, the periodic task is running before loading cache state.The method port_update_end need to use the cache information, but the cache information has not been loaded. 
Change-Id: I0d1da11bb559b7f0f9d4428b82573fb26916a933 Closes-Bug: #1420042 --- neutron/agent/dhcp/agent.py | 3 +++ neutron/tests/unit/agent/dhcp/test_agent.py | 6 ++++++ 2 files changed, 9 insertions(+) diff --git a/neutron/agent/dhcp/agent.py b/neutron/agent/dhcp/agent.py index 89fd0773ed5..4d52df8d61c 100644 --- a/neutron/agent/dhcp/agent.py +++ b/neutron/agent/dhcp/agent.py @@ -70,6 +70,9 @@ class DhcpAgent(manager.Manager): config=self.conf, resource_type='dhcp') + def init_host(self): + self.sync_state() + def _populate_networks_cache(self): """Populate the networks cache when the DHCP-agent starts.""" try: diff --git a/neutron/tests/unit/agent/dhcp/test_agent.py b/neutron/tests/unit/agent/dhcp/test_agent.py index 6ea10afefd5..19dfcaa59fe 100644 --- a/neutron/tests/unit/agent/dhcp/test_agent.py +++ b/neutron/tests/unit/agent/dhcp/test_agent.py @@ -221,6 +221,12 @@ class TestDhcpAgent(base.BaseTestCase): self.mock_makedirs_p = mock.patch("os.makedirs") self.mock_makedirs = self.mock_makedirs_p.start() + def test_init_host(self): + dhcp = dhcp_agent.DhcpAgent(HOSTNAME) + with mock.patch.object(dhcp, 'sync_state') as sync_state: + dhcp.init_host() + sync_state.assert_called_once_with() + def test_dhcp_agent_manager(self): state_rpc_str = 'neutron.agent.rpc.PluginReportStateAPI' # sync_state is needed for this test From 032847b3030106a9e605b3088826b599789d7991 Mon Sep 17 00:00:00 2001 From: ankitagrawal Date: Thu, 14 May 2015 04:08:36 -0700 Subject: [PATCH 039/292] Remove use of contextlib.nested Removed use of contextlib.nested call from codebase, as it has been deprecated since Python 2.7. There are also known issues with contextlib.nested that were addressed by the native support for multiple "with" variables. For instance, if the first object is created but the second one throws an exception, the first object's __exit__ is never called. 
For more information see https://docs.python.org/2/library/contextlib.html#contextlib.nested contextlib.nested is also not compatible with Python 3. Multi-patch set for easier chunks. This one addresses the neutron/plugins/ml2 directory. Line continuation markers (e.g. '\') had to be used or syntax errors were thrown. While using parentheses is the preferred way for multiple line statements, but in case of long with statements backslashes are acceptable. Partial-Bug: 1428424 Change-Id: I7bbe4cec511125b4b2c954aa93e2d9ff6871b9e0 --- neutron/hacking/checks.py | 1 - neutron/plugins/ml2/plugin.py | 30 ++++++++++++++---------------- 2 files changed, 14 insertions(+), 17 deletions(-) diff --git a/neutron/hacking/checks.py b/neutron/hacking/checks.py index 0426d8c2954..e013b7deb36 100644 --- a/neutron/hacking/checks.py +++ b/neutron/hacking/checks.py @@ -146,7 +146,6 @@ def check_no_contextlib_nested(logical_line, filename): # these issues. It should be removed completely # when bug 1428424 is closed. ignore_dirs = [ - "neutron/plugins/ml2", "neutron/tests/unit/api", "neutron/tests/unit/db", "neutron/tests/unit/extensions", diff --git a/neutron/plugins/ml2/plugin.py b/neutron/plugins/ml2/plugin.py index 2012417d1f4..0a4eb5d43aa 100644 --- a/neutron/plugins/ml2/plugin.py +++ b/neutron/plugins/ml2/plugin.py @@ -13,8 +13,6 @@ # License for the specific language governing permissions and limitations # under the License. -import contextlib - from eventlet import greenthread from oslo_concurrency import lockutils from oslo_config import cfg @@ -353,8 +351,8 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2, # prevent deadlock waiting to acquire a DB lock held by # another thread in the same process, leading to 'lock wait # timeout' errors. 
- with contextlib.nested(lockutils.lock('db-access'), - session.begin(subtransactions=True)): + with lockutils.lock('db-access'),\ + session.begin(subtransactions=True): # Get the current port state and build a new PortContext # reflecting this state as original state for subsequent # mechanism driver update_port_*commit() calls. @@ -740,8 +738,8 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2, self._process_l3_delete(context, id) # Using query().with_lockmode isn't necessary. Foreign-key # constraints prevent deletion if concurrent creation happens. - with contextlib.nested(lockutils.lock('db-access'), - session.begin(subtransactions=True)): + with lockutils.lock('db-access'),\ + session.begin(subtransactions=True): # Get ports to auto-delete. ports = (session.query(models_v2.Port). enable_eagerloads(False). @@ -860,8 +858,8 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2, # prevent deadlock waiting to acquire a DB lock held by # another thread in the same process, leading to 'lock # wait timeout' errors. - with contextlib.nested(lockutils.lock('db-access'), - session.begin(subtransactions=True)): + with lockutils.lock('db-access'),\ + session.begin(subtransactions=True): record = self._get_subnet(context, id) subnet = self._make_subnet_dict(record, None) qry_allocated = (session.query(models_v2.IPAllocation). @@ -1107,8 +1105,8 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2, # prevent deadlock waiting to acquire a DB lock held by # another thread in the same process, leading to 'lock wait # timeout' errors. 
- with contextlib.nested(lockutils.lock('db-access'), - session.begin(subtransactions=True)): + with lockutils.lock('db-access'),\ + session.begin(subtransactions=True): port_db, binding = db.get_locked_port_and_binding(session, id) if not port_db: raise exc.PortNotFound(port_id=id) @@ -1259,8 +1257,8 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2, # prevent deadlock waiting to acquire a DB lock held by # another thread in the same process, leading to 'lock wait # timeout' errors. - with contextlib.nested(lockutils.lock('db-access'), - session.begin(subtransactions=True)): + with lockutils.lock('db-access'),\ + session.begin(subtransactions=True): port_db, binding = db.get_locked_port_and_binding(session, id) if not port_db: LOG.debug("The port '%s' was deleted", id) @@ -1386,8 +1384,8 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2, # prevent deadlock waiting to acquire a DB lock held by # another thread in the same process, leading to 'lock wait # timeout' errors. - with contextlib.nested(lockutils.lock('db-access'), - session.begin(subtransactions=True)): + with lockutils.lock('db-access'),\ + session.begin(subtransactions=True): port = db.get_port(session, port_id) if not port: LOG.warning(_LW("Port %(port)s updated up by agent not found"), @@ -1418,8 +1416,8 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2, if (updated and port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE): - with contextlib.nested(lockutils.lock('db-access'), - session.begin(subtransactions=True)): + with lockutils.lock('db-access'),\ + session.begin(subtransactions=True): port = db.get_port(session, port_id) if not port: LOG.warning(_LW("Port %s not found during update"), From 9b893767a5214177e30f596b6eed8324278d5050 Mon Sep 17 00:00:00 2001 From: Anand Shanmugam Date: Sat, 23 May 2015 01:22:23 -0700 Subject: [PATCH 040/292] Ensure mac address added to iptables is always in unix format When a allowed address pair entry is added with a mac format other than unix 
format the ovs-vs agent keeps on restarting as it is not able to save the proper iptables due to the error "Error while processing VIF ports". This fix makes sure that the mac address sent to the iptables firewall is always in the unix format Change-Id: I86bbf3cb2adf9b998190e472691c01d068ebab9c Closes-Bug: #1457971 --- neutron/agent/linux/iptables_firewall.py | 1 + .../agent/linux/test_iptables_firewall.py | 20 +++++++++++++++++++ 2 files changed, 21 insertions(+) diff --git a/neutron/agent/linux/iptables_firewall.py b/neutron/agent/linux/iptables_firewall.py index dc1d8901bae..840fba7f6f7 100644 --- a/neutron/agent/linux/iptables_firewall.py +++ b/neutron/agent/linux/iptables_firewall.py @@ -313,6 +313,7 @@ class IptablesFirewallDriver(firewall.FirewallDriver): def _build_ipv4v6_mac_ip_list(self, mac, ip_address, mac_ipv4_pairs, mac_ipv6_pairs): + mac = str(netaddr.EUI(mac, dialect=netaddr.mac_unix)) if netaddr.IPNetwork(ip_address).version == 4: mac_ipv4_pairs.append((mac, ip_address)) else: diff --git a/neutron/tests/unit/agent/linux/test_iptables_firewall.py b/neutron/tests/unit/agent/linux/test_iptables_firewall.py index 77d98e8b185..97fd1920809 100644 --- a/neutron/tests/unit/agent/linux/test_iptables_firewall.py +++ b/neutron/tests/unit/agent/linux/test_iptables_firewall.py @@ -1674,3 +1674,23 @@ class IptablesFirewallEnhancedIpsetTestCase(BaseIptablesFirewallTestCase): [dict(rule.items() + [('source_ip_prefix', '%s/32' % ip)]) for ip in other_ips]) + + def test_build_ipv4v6_mac_ip_list(self): + mac_oth = 'ffff-ffff-ffff' + mac_unix = 'ff:ff:ff:ff:ff:ff' + ipv4 = FAKE_IP['IPv4'] + ipv6 = FAKE_IP['IPv6'] + fake_ipv4_pair = [] + fake_ipv4_pair.append((mac_unix, ipv4)) + fake_ipv6_pair = [] + fake_ipv6_pair.append((mac_unix, ipv6)) + + mac_ipv4_pairs = [] + mac_ipv6_pairs = [] + + self.firewall._build_ipv4v6_mac_ip_list(mac_oth, ipv4, + mac_ipv4_pairs, mac_ipv6_pairs) + self.assertEqual(fake_ipv4_pair, mac_ipv4_pairs) + 
self.firewall._build_ipv4v6_mac_ip_list(mac_oth, ipv6, + mac_ipv4_pairs, mac_ipv6_pairs) + self.assertEqual(fake_ipv6_pair, mac_ipv6_pairs) From 514245d63fe902e4548829b9bb66fa60c86509f1 Mon Sep 17 00:00:00 2001 From: Gary Kotton Date: Sun, 24 May 2015 21:19:51 -0700 Subject: [PATCH 041/292] Remove unnecessary brackets TrivialFix Change-Id: I9bd552110785c09b3eaa8762a8141446e51ea02a --- neutron/ipam/exceptions.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/neutron/ipam/exceptions.py b/neutron/ipam/exceptions.py index 4400e557e3c..8170f2ac2b8 100644 --- a/neutron/ipam/exceptions.py +++ b/neutron/ipam/exceptions.py @@ -53,9 +53,9 @@ class InvalidSubnetRequest(exceptions.BadRequest): class AllocationOnAutoAddressSubnet(exceptions.NeutronException): - message = (_("IPv6 address %(ip)s cannot be directly " - "assigned to a port on subnet %(subnet_id)s as the " - "subnet is configured for automatic addresses")) + message = _("IPv6 address %(ip)s cannot be directly " + "assigned to a port on subnet %(subnet_id)s as the " + "subnet is configured for automatic addresses") class IpAddressGenerationFailure(exceptions.Conflict): From 3425be06bf069a256dbb0fdb9528459544e9947f Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Mon, 25 May 2015 06:15:25 +0000 Subject: [PATCH 042/292] Imported Translations from Transifex For more information about this automatic import see: https://wiki.openstack.org/wiki/Translations/Infrastructure Change-Id: If91cdcd85d6fad9b9d37aea367aa11c83ff4b343 --- neutron/locale/neutron-log-error.pot | 213 ++++++------ neutron/locale/neutron-log-warning.pot | 55 +-- neutron/locale/neutron.pot | 312 +++++++++++------- .../pt_BR/LC_MESSAGES/neutron-log-info.po | 20 +- 4 files changed, 353 insertions(+), 247 deletions(-) diff --git a/neutron/locale/neutron-log-error.pot b/neutron/locale/neutron-log-error.pot index c52c2cf597b..1382968ca1d 100644 --- a/neutron/locale/neutron-log-error.pot +++ 
b/neutron/locale/neutron-log-error.pot @@ -6,9 +6,9 @@ #, fuzzy msgid "" msgstr "" -"Project-Id-Version: neutron 2015.2.dev178\n" +"Project-Id-Version: neutron 2015.2.0.dev422\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2015-04-24 06:13+0000\n" +"POT-Creation-Date: 2015-05-25 06:15+0000\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME \n" "Language-Team: LANGUAGE \n" @@ -39,97 +39,97 @@ msgstr "" msgid "Policy check error while calling %s!" msgstr "" -#: neutron/service.py:106 neutron/service.py:164 +#: neutron/service.py:107 neutron/service.py:165 msgid "Unrecoverable error: please check log for details." msgstr "" -#: neutron/service.py:147 +#: neutron/service.py:148 #, python-format msgid "'rpc_workers = %d' ignored because start_rpc_listeners is not implemented." msgstr "" -#: neutron/service.py:171 +#: neutron/service.py:172 msgid "No known API applications configured." msgstr "" -#: neutron/service.py:278 +#: neutron/service.py:279 msgid "Exception occurs when timer stops" msgstr "" -#: neutron/service.py:287 +#: neutron/service.py:288 msgid "Exception occurs when waiting for timer" msgstr "" -#: neutron/wsgi.py:147 +#: neutron/wsgi.py:152 #, python-format msgid "Unable to listen on %(host)s:%(port)s" msgstr "" -#: neutron/wsgi.py:785 +#: neutron/wsgi.py:788 #, python-format msgid "InvalidContentType: %s" msgstr "" -#: neutron/wsgi.py:789 +#: neutron/wsgi.py:792 #, python-format msgid "MalformedRequestBody: %s" msgstr "" -#: neutron/wsgi.py:798 +#: neutron/wsgi.py:801 msgid "Internal error" msgstr "" -#: neutron/agent/common/ovs_lib.py:211 neutron/agent/common/ovs_lib.py:306 +#: neutron/agent/common/ovs_lib.py:212 neutron/agent/common/ovs_lib.py:307 #, python-format msgid "Unable to execute %(cmd)s. Exception: %(exception)s" msgstr "" -#: neutron/agent/common/ovs_lib.py:232 +#: neutron/agent/common/ovs_lib.py:233 #, python-format msgid "Timed out retrieving ofport on port %(pname)s. 
Exception: %(exception)s" msgstr "" -#: neutron/agent/common/ovs_lib.py:510 +#: neutron/agent/common/ovs_lib.py:526 #, python-format msgid "OVS flows could not be applied on bridge %s" msgstr "" -#: neutron/agent/dhcp/agent.py:129 +#: neutron/agent/dhcp/agent.py:134 #, python-format msgid "Unable to %(action)s dhcp for %(net_id)s." msgstr "" -#: neutron/agent/dhcp/agent.py:156 +#: neutron/agent/dhcp/agent.py:161 #, python-format msgid "Unable to sync network state on deleted network %s" msgstr "" -#: neutron/agent/dhcp/agent.py:169 +#: neutron/agent/dhcp/agent.py:174 msgid "Unable to sync network state." msgstr "" -#: neutron/agent/dhcp/agent.py:200 +#: neutron/agent/dhcp/agent.py:205 #, python-format msgid "Network %s info call failed." msgstr "" -#: neutron/agent/dhcp/agent.py:577 neutron/agent/l3/agent.py:614 +#: neutron/agent/dhcp/agent.py:582 neutron/agent/l3/agent.py:614 #: neutron/agent/metadata/agent.py:311 #: neutron/plugins/hyperv/agent/l2_agent.py:94 #: neutron/plugins/ibm/agent/sdnve_neutron_agent.py:108 -#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:779 -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:284 +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:787 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:289 #: neutron/plugins/sriovnicagent/sriov_nic_agent.py:129 #: neutron/services/metering/agents/metering_agent.py:283 msgid "Failed reporting state!" 
msgstr "" -#: neutron/agent/l3/agent.py:172 neutron/tests/unit/agent/l3/test_agent.py:2086 +#: neutron/agent/l3/agent.py:172 neutron/tests/unit/agent/l3/test_agent.py:2130 #, python-format msgid "Error importing interface driver '%s'" msgstr "" -#: neutron/agent/l3/agent.py:232 neutron/agent/linux/dhcp.py:789 +#: neutron/agent/l3/agent.py:232 neutron/agent/linux/dhcp.py:825 msgid "An interface driver must be specified" msgstr "" @@ -171,23 +171,23 @@ msgstr "" msgid "Failed synchronizing routers due to RPC error" msgstr "" -#: neutron/agent/l3/dvr_router.py:200 +#: neutron/agent/l3/dvr_router.py:202 msgid "DVR: Failed updating arp entry" msgstr "" -#: neutron/agent/l3/dvr_router.py:225 +#: neutron/agent/l3/dvr_router.py:227 msgid "DVR: no map match_port found!" msgstr "" -#: neutron/agent/l3/dvr_router.py:281 +#: neutron/agent/l3/dvr_router.py:292 msgid "DVR: error adding redirection logic" msgstr "" -#: neutron/agent/l3/dvr_router.py:283 +#: neutron/agent/l3/dvr_router.py:294 msgid "DVR: removed snat failed" msgstr "" -#: neutron/agent/l3/dvr_router.py:505 +#: neutron/agent/l3/dvr_router.py:517 msgid "Missing subnet/agent_gateway_port" msgstr "" @@ -211,11 +211,11 @@ msgstr "" msgid "Failed to destroy stale namespace %s" msgstr "" -#: neutron/agent/l3/namespace_manager.py:119 +#: neutron/agent/l3/namespace_manager.py:120 msgid "RuntimeError in obtaining namespace list for namespace cleanup." msgstr "" -#: neutron/agent/l3/namespaces.py:52 +#: neutron/agent/l3/namespaces.py:83 #, python-format msgid "Failed trying to delete namespace: %s" msgstr "" @@ -230,21 +230,21 @@ msgstr "" msgid "An error occurred while communicating with async process [%s]." 
msgstr "" -#: neutron/agent/linux/daemon.py:115 +#: neutron/agent/linux/daemon.py:117 #, python-format msgid "Error while handling pidfile: %s" msgstr "" -#: neutron/agent/linux/daemon.py:176 +#: neutron/agent/linux/daemon.py:178 msgid "Fork failed" msgstr "" -#: neutron/agent/linux/daemon.py:218 +#: neutron/agent/linux/daemon.py:221 #, python-format msgid "Pidfile %s already exist. Daemon already running?" msgstr "" -#: neutron/agent/linux/dhcp.py:795 +#: neutron/agent/linux/dhcp.py:831 #, python-format msgid "Error importing interface driver '%(driver)s': %(inner)s" msgstr "" @@ -272,23 +272,36 @@ msgid "" "identified by uuid %(uuid)s" msgstr "" -#: neutron/agent/linux/interface.py:155 +#: neutron/agent/linux/interface.py:158 #, python-format msgid "Failed deleting ingress connection state of floatingip %s" msgstr "" -#: neutron/agent/linux/interface.py:164 +#: neutron/agent/linux/interface.py:167 #, python-format msgid "Failed deleting egress connection state of floatingip %s" msgstr "" -#: neutron/agent/linux/interface.py:286 neutron/agent/linux/interface.py:327 -#: neutron/agent/linux/interface.py:389 neutron/agent/linux/interface.py:429 +#: neutron/agent/linux/interface.py:294 neutron/agent/linux/interface.py:331 +#: neutron/agent/linux/interface.py:389 neutron/agent/linux/interface.py:425 #, python-format msgid "Failed unplugging interface '%s'" msgstr "" -#: neutron/agent/linux/ip_lib.py:678 +#: neutron/agent/linux/ip_lib.py:407 +msgid "Address not present on interface" +msgstr "" + +#: neutron/agent/linux/ip_lib.py:412 +msgid "Duplicate adddress detected" +msgstr "" + +#: neutron/agent/linux/ip_lib.py:413 +#, python-format +msgid "Exceeded %s second limit waiting for address to leave the tentative state." 
+msgstr "" + +#: neutron/agent/linux/ip_lib.py:718 #, python-format msgid "Failed sending gratuitous ARP to %(addr)s on %(iface)s in namespace %(ns)s" msgstr "" @@ -305,11 +318,11 @@ msgstr "" msgid "Unable to parse route \"%s\"" msgstr "" -#: neutron/agent/linux/iptables_manager.py:393 +#: neutron/agent/linux/iptables_manager.py:402 msgid "Failure applying iptables rules" msgstr "" -#: neutron/agent/linux/iptables_manager.py:471 +#: neutron/agent/linux/iptables_manager.py:480 #, python-format msgid "" "IPTablesManager.apply failed to apply the following set of iptables " @@ -337,12 +350,12 @@ msgstr "" msgid "OVSDB Error: %s" msgstr "" -#: neutron/agent/ovsdb/impl_vsctl.py:67 +#: neutron/agent/ovsdb/impl_vsctl.py:68 #, python-format msgid "Unable to execute %(cmd)s." msgstr "" -#: neutron/agent/ovsdb/impl_vsctl.py:126 +#: neutron/agent/ovsdb/impl_vsctl.py:127 #, python-format msgid "Could not parse: %s" msgstr "" @@ -530,12 +543,12 @@ msgid "" "%(agent)s" msgstr "" -#: neutron/db/db_base_plugin_v2.py:937 neutron/plugins/ml2/plugin.py:565 +#: neutron/db/db_base_plugin_v2.py:935 neutron/plugins/ml2/plugin.py:571 #, python-format msgid "An exception occurred while creating the %(resource)s:%(item)s" msgstr "" -#: neutron/db/db_base_plugin_v2.py:1778 +#: neutron/db/db_base_plugin_v2.py:1799 #, python-format msgid "Unable to generate mac address after %s attempts" msgstr "" @@ -559,7 +572,7 @@ msgstr "" msgid "Exception encountered during router rescheduling." 
msgstr "" -#: neutron/db/l3_db.py:540 +#: neutron/db/l3_db.py:541 msgid "Cannot have multiple IPv4 subnets on router port" msgstr "" @@ -573,6 +586,13 @@ msgstr "" msgid "No plugin for L3 routing registered to handle router scheduling" msgstr "" +#: neutron/ipam/drivers/neutrondb_ipam/driver.py:91 +#, python-format +msgid "" +"Unable to retrieve IPAM subnet as the referenced Neutron subnet %s does " +"not exist" +msgstr "" + #: neutron/notifiers/nova.py:248 #, python-format msgid "Failed to notify nova on events: %s" @@ -702,8 +722,8 @@ msgid "" msgstr "" #: neutron/plugins/ibm/agent/sdnve_neutron_agent.py:255 -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1711 -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1723 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1770 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1782 #, python-format msgid "%s Agent terminated!" msgstr "" @@ -733,33 +753,33 @@ msgstr "" msgid "Unknown network_type %(network_type)s for network %(network_id)s." msgstr "" -#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:453 +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:461 #, python-format msgid "Cannot delete bridge %s, does not exist" msgstr "" -#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:532 +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:540 msgid "No valid Segmentation ID to perform UCAST test." msgstr "" -#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:789 +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:797 msgid "Unable to obtain MAC address for unique ID. Agent terminated!" msgstr "" -#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:986 +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:994 #: neutron/plugins/sriovnicagent/sriov_nic_agent.py:270 #, python-format msgid "Error in agent loop. 
Devices info: %s" msgstr "" -#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:1009 +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:1017 #: neutron/plugins/ml2/drivers/mlnx/agent/eswitch_neutron_agent.py:40 #, python-format msgid "Parsing physical_interface_mappings failed: %s. Agent terminated!" msgstr "" -#: neutron/plugins/ml2/db.py:242 neutron/plugins/ml2/db.py:327 -#: neutron/plugins/ml2/plugin.py:1332 +#: neutron/plugins/ml2/db.py:241 neutron/plugins/ml2/db.py:325 +#: neutron/plugins/ml2/plugin.py:1339 #, python-format msgid "Multiple ports have port_id starting with %s" msgstr "" @@ -818,97 +838,97 @@ msgstr "" msgid "Extension driver '%(name)s' failed in %(method)s" msgstr "" -#: neutron/plugins/ml2/plugin.py:288 +#: neutron/plugins/ml2/plugin.py:289 #, python-format msgid "Failed to commit binding results for %(port)s after %(max)s tries" msgstr "" -#: neutron/plugins/ml2/plugin.py:445 +#: neutron/plugins/ml2/plugin.py:451 #, python-format msgid "Serialized vif_details DB value '%(value)s' for port %(port)s is invalid" msgstr "" -#: neutron/plugins/ml2/plugin.py:456 +#: neutron/plugins/ml2/plugin.py:462 #, python-format msgid "Serialized profile DB value '%(value)s' for port %(port)s is invalid" msgstr "" -#: neutron/plugins/ml2/plugin.py:542 +#: neutron/plugins/ml2/plugin.py:548 #, python-format msgid "Could not find %s to delete." msgstr "" -#: neutron/plugins/ml2/plugin.py:545 +#: neutron/plugins/ml2/plugin.py:551 #, python-format msgid "Could not delete %(res)s %(id)s." msgstr "" -#: neutron/plugins/ml2/plugin.py:578 +#: neutron/plugins/ml2/plugin.py:584 #, python-format msgid "" "mechanism_manager.create_%(res)s_postcommit failed for %(res)s: " "'%(failed_id)s'. 
Deleting %(res)ss %(resource_ids)s" msgstr "" -#: neutron/plugins/ml2/plugin.py:624 +#: neutron/plugins/ml2/plugin.py:630 #, python-format msgid "mechanism_manager.create_network_postcommit failed, deleting network '%s'" msgstr "" -#: neutron/plugins/ml2/plugin.py:694 +#: neutron/plugins/ml2/plugin.py:700 #, python-format msgid "Exception auto-deleting port %s" msgstr "" -#: neutron/plugins/ml2/plugin.py:706 +#: neutron/plugins/ml2/plugin.py:713 #, python-format msgid "Exception auto-deleting subnet %s" msgstr "" -#: neutron/plugins/ml2/plugin.py:788 +#: neutron/plugins/ml2/plugin.py:795 msgid "mechanism_manager.delete_network_postcommit failed" msgstr "" -#: neutron/plugins/ml2/plugin.py:809 +#: neutron/plugins/ml2/plugin.py:816 #, python-format msgid "mechanism_manager.create_subnet_postcommit failed, deleting subnet '%s'" msgstr "" -#: neutron/plugins/ml2/plugin.py:930 +#: neutron/plugins/ml2/plugin.py:937 #, python-format msgid "Exception deleting fixed_ip from port %s" msgstr "" -#: neutron/plugins/ml2/plugin.py:939 +#: neutron/plugins/ml2/plugin.py:946 msgid "mechanism_manager.delete_subnet_postcommit failed" msgstr "" -#: neutron/plugins/ml2/plugin.py:1004 +#: neutron/plugins/ml2/plugin.py:1011 #, python-format msgid "mechanism_manager.create_port_postcommit failed, deleting port '%s'" msgstr "" -#: neutron/plugins/ml2/plugin.py:1016 +#: neutron/plugins/ml2/plugin.py:1023 #, python-format msgid "_bind_port_if_needed failed, deleting port '%s'" msgstr "" -#: neutron/plugins/ml2/plugin.py:1046 +#: neutron/plugins/ml2/plugin.py:1053 #, python-format msgid "_bind_port_if_needed failed. 
Deleting all ports from create bulk '%s'" msgstr "" -#: neutron/plugins/ml2/plugin.py:1191 +#: neutron/plugins/ml2/plugin.py:1198 #, python-format msgid "No Host supplied to bind DVR Port %s" msgstr "" -#: neutron/plugins/ml2/plugin.py:1313 +#: neutron/plugins/ml2/plugin.py:1320 #, python-format msgid "mechanism_manager.delete_port_postcommit failed for port %s" msgstr "" -#: neutron/plugins/ml2/plugin.py:1345 +#: neutron/plugins/ml2/plugin.py:1352 #, python-format msgid "Binding info for DVR port %s not found" msgstr "" @@ -1070,110 +1090,107 @@ msgstr "" #: neutron/plugins/openvswitch/agent/ovs_dvr_neutron_agent.py:603 #, python-format -msgid "Centralized-SNAT port %s already seen on " +msgid "" +"Centralized-SNAT port %(port)s on subnet %(port_subnet)s already seen on " +"a different subnet %(orig_subnet)s" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_dvr_neutron_agent.py:605 -#, python-format -msgid "a different subnet %s" -msgstr "" - -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:356 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:376 msgid "No tunnel_type specified, cannot create tunnels" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:359 -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:382 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:379 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:402 #, python-format msgid "tunnel_type %s not supported by agent" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:375 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:395 msgid "No tunnel_ip specified, cannot delete tunnels" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:379 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:399 msgid "No tunnel_type specified, cannot delete tunnels" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:517 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:541 #, 
python-format msgid "No local VLAN available for net-id=%s" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:556 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:580 #, python-format msgid "" "Cannot provision %(network_type)s network for net-id=%(net_uuid)s - " "tunneling disabled" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:575 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:599 #, python-format msgid "" "Cannot provision flat network for net-id=%(net_uuid)s - no bridge for " "physical_network %(physical_network)s" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:603 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:627 #, python-format msgid "" "Cannot provision VLAN network for net-id=%(net_uuid)s - no bridge for " "physical_network %(physical_network)s" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:612 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:636 #, python-format msgid "" "Cannot provision unknown network type %(network_type)s for net-" "id=%(net_uuid)s" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:668 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:692 #, python-format msgid "" "Cannot reclaim unknown network type %(network_type)s for net-" "id=%(net_uuid)s" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:855 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:911 msgid "" "Failed to create OVS patch port. Cannot have tunneling enabled on this " "agent, since this version of OVS does not support tunnels or patch ports." " Agent terminated!" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:982 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1038 #, python-format msgid "" "Bridge %(bridge)s for physical network %(physical_network)s does not " "exist. Agent terminated!" 
msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1174 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1233 #, python-format msgid "Failed to set-up %(type)s tunnel port to %(ip)s" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1379 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1426 #, python-format msgid "" "process_network_ports - iteration:%d - failure while retrieving port " "details from server" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1408 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1462 #, python-format msgid "" "process_ancillary_network_ports - iteration:%d - failure while retrieving" " port details from server" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1553 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1608 msgid "Error while synchronizing tunnels" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1624 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1683 msgid "Error while processing VIF ports" msgstr "" diff --git a/neutron/locale/neutron-log-warning.pot b/neutron/locale/neutron-log-warning.pot index de38046a22a..0d10ecfd440 100644 --- a/neutron/locale/neutron-log-warning.pot +++ b/neutron/locale/neutron-log-warning.pot @@ -6,9 +6,9 @@ #, fuzzy msgid "" msgstr "" -"Project-Id-Version: neutron 2015.2.0.dev319\n" +"Project-Id-Version: neutron 2015.2.0.dev422\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2015-05-10 06:14+0000\n" +"POT-Creation-Date: 2015-05-25 06:15+0000\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME \n" "Language-Team: LANGUAGE \n" @@ -29,17 +29,24 @@ msgstr "" msgid "Unable to find data type descriptor for attribute %s" msgstr "" -#: neutron/quota.py:223 +#: neutron/quota.py:227 msgid "" "The quota driver neutron.quota.ConfDriver is deprecated as of Liberty. 
" "neutron.db.quota_db.DbQuotaDriver should be used in its place" msgstr "" -#: neutron/quota.py:237 +#: neutron/quota.py:241 #, python-format msgid "%s is already registered." msgstr "" +#: neutron/quota.py:341 +msgid "" +"Registering resources to apply quota limits to using the quota_items " +"option is deprecated as of Liberty.Resource REST controllers should take " +"care of registering resources with the quota engine." +msgstr "" + #: neutron/agent/rpc.py:113 msgid "DVR functionality requires a server upgrade." msgstr "" @@ -135,24 +142,30 @@ msgstr "" msgid "Unable to configure IP address for floating IP: %s" msgstr "" -#: neutron/agent/linux/dhcp.py:226 +#: neutron/agent/linux/dhcp.py:227 #, python-format msgid "Failed trying to delete interface: %s" msgstr "" -#: neutron/agent/linux/dhcp.py:234 +#: neutron/agent/linux/dhcp.py:235 #, python-format msgid "Failed trying to delete namespace: %s" msgstr "" -#: neutron/agent/linux/iptables_manager.py:239 +#: neutron/agent/linux/ebtables_manager.py:168 +#, python-format +msgid "Attempted to remove chain %s which does not exist" +msgstr "" + +#: neutron/agent/linux/ebtables_manager.py:237 +#: neutron/agent/linux/iptables_manager.py:247 #, python-format msgid "" "Tried to remove rule that was not there: %(chain)r %(rule)r %(wrap)r " "%(top)r" msgstr "" -#: neutron/agent/linux/iptables_manager.py:689 +#: neutron/agent/linux/iptables_manager.py:696 #, python-format msgid "Attempted to get traffic counters of chain %s which does not exist" msgstr "" @@ -276,12 +289,12 @@ msgstr "" msgid "No active L3 agents found for SNAT" msgstr "" -#: neutron/db/securitygroups_rpc_base.py:371 +#: neutron/db/securitygroups_rpc_base.py:372 #, python-format msgid "No valid gateway port on subnet %s is found for IPv6 RA" msgstr "" -#: neutron/db/migration/alembic_migrations/heal_script.py:90 +#: neutron/db/migration/alembic_migrations/heal_script.py:91 #, python-format msgid "Ignoring alembic command %s" msgstr "" @@ -385,28 +398,28 @@ 
msgid "" "VXLAN MCAST mode" msgstr "" -#: neutron/plugins/ml2/driver_context.py:184 +#: neutron/plugins/ml2/driver_context.py:191 #, python-format msgid "Could not expand segment %s" msgstr "" -#: neutron/plugins/ml2/plugin.py:526 +#: neutron/plugins/ml2/plugin.py:532 #, python-format msgid "" "In _notify_port_updated(), no bound segment for port %(port_id)s on " "network %(network_id)s" msgstr "" -#: neutron/plugins/ml2/plugin.py:776 +#: neutron/plugins/ml2/plugin.py:783 msgid "A concurrent port creation has occurred" msgstr "" -#: neutron/plugins/ml2/plugin.py:1384 +#: neutron/plugins/ml2/plugin.py:1391 #, python-format msgid "Port %(port)s updated up by agent not found" msgstr "" -#: neutron/plugins/ml2/plugin.py:1416 +#: neutron/plugins/ml2/plugin.py:1423 #, python-format msgid "Port %s not found during update" msgstr "" @@ -528,33 +541,33 @@ msgstr "" msgid "Action %s not supported" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1013 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1010 #, python-format msgid "" "Creating an interface named %(name)s exceeds the %(limit)d character " "limitation. It was shortened to %(new_name)s to fit." msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1214 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1211 #, python-format msgid "VIF port: %s has no ofport configured, and might not be able to transmit" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1326 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1323 #, python-format msgid "Device %s not defined on plugin" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1484 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1483 #, python-format msgid "Invalid remote IP: %s" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1527 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1526 msgid "OVS is restarted. 
OVSNeutronAgent will reset bridges and recover ports." msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1531 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1530 msgid "" "OVS is dead. OVSNeutronAgent will keep running and checking OVS status " "periodically." diff --git a/neutron/locale/neutron.pot b/neutron/locale/neutron.pot index 23bf2df194a..a6c4be0300a 100644 --- a/neutron/locale/neutron.pot +++ b/neutron/locale/neutron.pot @@ -6,9 +6,9 @@ #, fuzzy msgid "" msgstr "" -"Project-Id-Version: neutron 2015.2.0.dev319\n" +"Project-Id-Version: neutron 2015.2.0.dev422\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2015-05-10 06:14+0000\n" +"POT-Creation-Date: 2015-05-25 06:14+0000\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME \n" "Language-Team: LANGUAGE \n" @@ -59,33 +59,35 @@ msgid "" "found" msgstr "" -#: neutron/quota.py:37 -msgid "Resource name(s) that are supported in quota features" +#: neutron/quota.py:40 +msgid "" +"Resource name(s) that are supported in quota features. This option is now" +" deprecated for removal." msgstr "" -#: neutron/quota.py:41 +#: neutron/quota.py:45 msgid "" "Default number of resource allowed per tenant. A negative value means " "unlimited." msgstr "" -#: neutron/quota.py:45 +#: neutron/quota.py:49 msgid "Number of networks allowed per tenant. A negative value means unlimited." msgstr "" -#: neutron/quota.py:49 +#: neutron/quota.py:53 msgid "Number of subnets allowed per tenant, A negative value means unlimited." msgstr "" -#: neutron/quota.py:53 +#: neutron/quota.py:57 msgid "Number of ports allowed per tenant. A negative value means unlimited." msgstr "" -#: neutron/quota.py:57 +#: neutron/quota.py:61 msgid "Default driver to use for quota checks" msgstr "" -#: neutron/quota.py:136 neutron/quota.py:141 +#: neutron/quota.py:140 neutron/quota.py:145 msgid "Access to this resource was denied." 
msgstr "" @@ -493,39 +495,39 @@ msgstr "" msgid "Process is not running." msgstr "" -#: neutron/agent/linux/daemon.py:42 +#: neutron/agent/linux/daemon.py:44 #, python-format msgid "Failed to set uid %s" msgstr "" -#: neutron/agent/linux/daemon.py:56 +#: neutron/agent/linux/daemon.py:58 #, python-format msgid "Failed to set gid %s" msgstr "" -#: neutron/agent/linux/daemon.py:86 +#: neutron/agent/linux/daemon.py:88 msgid "Root permissions are required to drop privileges." msgstr "" -#: neutron/agent/linux/daemon.py:94 +#: neutron/agent/linux/daemon.py:96 msgid "Failed to remove supplemental groups" msgstr "" -#: neutron/agent/linux/daemon.py:123 +#: neutron/agent/linux/daemon.py:125 msgid "Unable to unlock pid file" msgstr "" -#: neutron/agent/linux/dhcp.py:240 +#: neutron/agent/linux/dhcp.py:241 #, python-format msgid "Error while reading %s" msgstr "" -#: neutron/agent/linux/dhcp.py:247 +#: neutron/agent/linux/dhcp.py:248 #, python-format msgid "Unable to convert value in %s" msgstr "" -#: neutron/agent/linux/dhcp.py:249 +#: neutron/agent/linux/dhcp.py:250 #, python-format msgid "Unable to access %s" msgstr "" @@ -534,6 +536,12 @@ msgstr "" msgid "Location of temporary ebtables table files." 
msgstr "" +#: neutron/agent/linux/ebtables_manager.py:210 +#: neutron/agent/linux/iptables_manager.py:210 +#, python-format +msgid "Unknown chain: %r" +msgstr "" + #: neutron/agent/linux/external_process.py:37 msgid "Location to store child pid files" msgstr "" @@ -595,6 +603,11 @@ msgstr "" msgid "Force ip_lib calls to use the root helper" msgstr "" +#: neutron/agent/linux/ip_lib.py:42 +#, python-format +msgid "Failure waiting for address %(address)s to become ready: %(reason)s" +msgstr "" + #: neutron/agent/linux/ip_link_support.py:33 #, python-format msgid "ip link command is not supported: %(reason)s" @@ -605,11 +618,6 @@ msgstr "" msgid "ip link capability %(capability)s is not supported" msgstr "" -#: neutron/agent/linux/iptables_manager.py:202 -#, python-format -msgid "Unknown chain: %r" -msgstr "" - #: neutron/agent/linux/keepalived.py:52 #, python-format msgid "" @@ -846,7 +854,7 @@ msgid "" " and '%(desc)s'" msgstr "" -#: neutron/api/api_common.py:316 neutron/api/v2/base.py:614 +#: neutron/api/api_common.py:316 neutron/api/v2/base.py:617 #, python-format msgid "Unable to find '%s' in request body" msgstr "" @@ -873,116 +881,116 @@ msgstr "" msgid "Unrecognized action" msgstr "" -#: neutron/api/v2/attributes.py:54 +#: neutron/api/v2/attributes.py:55 #, python-format msgid "" "Invalid input. '%(target_dict)s' must be a dictionary with keys: " "%(expected_keys)s" msgstr "" -#: neutron/api/v2/attributes.py:66 +#: neutron/api/v2/attributes.py:67 #, python-format msgid "" "Validation of dictionary's keys failed. 
Expected keys: %(expected_keys)s " "Provided keys: %(provided_keys)s" msgstr "" -#: neutron/api/v2/attributes.py:81 +#: neutron/api/v2/attributes.py:82 #, python-format msgid "'%(data)s' is not in %(valid_values)s" msgstr "" -#: neutron/api/v2/attributes.py:97 +#: neutron/api/v2/attributes.py:98 #, python-format msgid "'%s' Blank strings are not permitted" msgstr "" -#: neutron/api/v2/attributes.py:109 +#: neutron/api/v2/attributes.py:110 #, python-format msgid "'%s' is not a valid string" msgstr "" -#: neutron/api/v2/attributes.py:114 +#: neutron/api/v2/attributes.py:115 #, python-format msgid "'%(data)s' exceeds maximum length of %(max_len)s" msgstr "" -#: neutron/api/v2/attributes.py:124 +#: neutron/api/v2/attributes.py:125 #, python-format msgid "'%s' is not a valid boolean value" msgstr "" -#: neutron/api/v2/attributes.py:143 neutron/api/v2/attributes.py:473 +#: neutron/api/v2/attributes.py:144 neutron/api/v2/attributes.py:474 #, python-format msgid "'%s' is not an integer" msgstr "" -#: neutron/api/v2/attributes.py:147 +#: neutron/api/v2/attributes.py:148 #, python-format msgid "'%(data)s' is too small - must be at least '%(limit)d'" msgstr "" -#: neutron/api/v2/attributes.py:152 +#: neutron/api/v2/attributes.py:153 #, python-format msgid "'%(data)s' is too large - must be no larger than '%(limit)d'" msgstr "" -#: neutron/api/v2/attributes.py:161 +#: neutron/api/v2/attributes.py:162 #, python-format msgid "'%s' contains whitespace" msgstr "" -#: neutron/api/v2/attributes.py:176 +#: neutron/api/v2/attributes.py:177 #, python-format msgid "'%s' is not a valid MAC address" msgstr "" -#: neutron/api/v2/attributes.py:206 +#: neutron/api/v2/attributes.py:207 #, python-format msgid "'%s' is not a valid IP address" msgstr "" -#: neutron/api/v2/attributes.py:217 +#: neutron/api/v2/attributes.py:218 #, python-format msgid "Invalid data format for IP pool: '%s'" msgstr "" -#: neutron/api/v2/attributes.py:234 neutron/api/v2/attributes.py:241 +#: 
neutron/api/v2/attributes.py:235 neutron/api/v2/attributes.py:242 #, python-format msgid "Invalid data format for fixed IP: '%s'" msgstr "" -#: neutron/api/v2/attributes.py:249 +#: neutron/api/v2/attributes.py:250 #, python-format msgid "Duplicate IP address '%s'" msgstr "" -#: neutron/api/v2/attributes.py:264 +#: neutron/api/v2/attributes.py:265 #, python-format msgid "Invalid data format for nameserver: '%s'" msgstr "" -#: neutron/api/v2/attributes.py:273 +#: neutron/api/v2/attributes.py:274 #, python-format msgid "'%(host)s' is not a valid nameserver. %(msg)s" msgstr "" -#: neutron/api/v2/attributes.py:278 +#: neutron/api/v2/attributes.py:279 #, python-format msgid "Duplicate nameserver '%s'" msgstr "" -#: neutron/api/v2/attributes.py:286 +#: neutron/api/v2/attributes.py:287 #, python-format msgid "Invalid data format for hostroute: '%s'" msgstr "" -#: neutron/api/v2/attributes.py:303 +#: neutron/api/v2/attributes.py:304 #, python-format msgid "Duplicate hostroute '%s'" msgstr "" -#: neutron/api/v2/attributes.py:320 +#: neutron/api/v2/attributes.py:321 #: neutron/tests/unit/api/v2/test_attributes.py:501 #: neutron/tests/unit/api/v2/test_attributes.py:515 #: neutron/tests/unit/api/v2/test_attributes.py:523 @@ -990,59 +998,59 @@ msgstr "" msgid "'%(data)s' isn't a recognized IP subnet cidr, '%(cidr)s' is recommended" msgstr "" -#: neutron/api/v2/attributes.py:326 +#: neutron/api/v2/attributes.py:327 #, python-format msgid "'%s' is not a valid IP subnet" msgstr "" -#: neutron/api/v2/attributes.py:334 neutron/api/v2/attributes.py:387 +#: neutron/api/v2/attributes.py:335 neutron/api/v2/attributes.py:388 #, python-format msgid "'%s' is not a list" msgstr "" -#: neutron/api/v2/attributes.py:339 neutron/api/v2/attributes.py:397 +#: neutron/api/v2/attributes.py:340 neutron/api/v2/attributes.py:398 #, python-format msgid "Duplicate items in the list: '%s'" msgstr "" -#: neutron/api/v2/attributes.py:362 +#: neutron/api/v2/attributes.py:363 #, python-format msgid "'%s' is 
not a valid input" msgstr "" -#: neutron/api/v2/attributes.py:375 +#: neutron/api/v2/attributes.py:376 #: neutron/plugins/cisco/n1kv/n1kv_neutron_plugin.py:532 #, python-format msgid "'%s' is not a valid UUID" msgstr "" -#: neutron/api/v2/attributes.py:417 +#: neutron/api/v2/attributes.py:418 #, python-format msgid "Validator '%s' does not exist." msgstr "" -#: neutron/api/v2/attributes.py:429 +#: neutron/api/v2/attributes.py:430 #, python-format msgid "'%s' is not a dictionary" msgstr "" -#: neutron/api/v2/attributes.py:478 +#: neutron/api/v2/attributes.py:479 #, python-format msgid "'%s' should be non-negative" msgstr "" -#: neutron/api/v2/attributes.py:497 +#: neutron/api/v2/attributes.py:498 #, python-format msgid "'%s' cannot be converted to boolean" msgstr "" -#: neutron/api/v2/attributes.py:510 +#: neutron/api/v2/attributes.py:511 #: neutron/plugins/nec/extensions/packetfilter.py:72 #, python-format msgid "'%s' is not a integer" msgstr "" -#: neutron/api/v2/attributes.py:529 +#: neutron/api/v2/attributes.py:530 #, python-format msgid "'%s' is not of the form =[value]" msgstr "" @@ -1070,41 +1078,45 @@ msgstr "" msgid "Resource body required" msgstr "" -#: neutron/api/v2/base.py:600 +#: neutron/api/v2/base.py:601 msgid "Bulk operation not supported" msgstr "" -#: neutron/api/v2/base.py:603 +#: neutron/api/v2/base.py:604 msgid "Resources required" msgstr "" -#: neutron/api/v2/base.py:625 +#: neutron/api/v2/base.py:614 +msgid "Body contains invalid data" +msgstr "" + +#: neutron/api/v2/base.py:628 #, python-format msgid "Failed to parse request. 
Required attribute '%s' not specified" msgstr "" -#: neutron/api/v2/base.py:632 +#: neutron/api/v2/base.py:635 #, python-format msgid "Attribute '%s' not allowed in POST" msgstr "" -#: neutron/api/v2/base.py:637 +#: neutron/api/v2/base.py:640 #, python-format msgid "Cannot update read-only attribute %s" msgstr "" -#: neutron/api/v2/base.py:655 +#: neutron/api/v2/base.py:658 #, python-format msgid "Invalid input for %(attr)s. Reason: %(reason)s." msgstr "" -#: neutron/api/v2/base.py:664 neutron/extensions/allowedaddresspairs.py:75 +#: neutron/api/v2/base.py:667 neutron/extensions/allowedaddresspairs.py:75 #: neutron/extensions/multiprovidernet.py:45 #, python-format msgid "Unrecognized attribute(s) '%s'" msgstr "" -#: neutron/api/v2/base.py:683 +#: neutron/api/v2/base.py:686 #, python-format msgid "Tenant %(tenant_id)s not allowed to create %(resource)s on this network" msgstr "" @@ -1791,6 +1803,12 @@ msgstr "" msgid "Device '%(device_name)s' does not exist" msgstr "" +#: neutron/common/exceptions.py:463 +msgid "" +"Subnets hosted on the same network must be allocated from the same subnet" +" pool" +msgstr "" + #: neutron/common/ipv6_utils.py:36 msgid "Unable to generate IP address by EUI64 for IPv4 prefix" msgstr "" @@ -1888,57 +1906,57 @@ msgid "" "such agents is available if this option is True." 
msgstr "" -#: neutron/db/common_db_mixin.py:122 +#: neutron/db/common_db_mixin.py:123 msgid "Cannot create resource for another tenant" msgstr "" -#: neutron/db/db_base_plugin_v2.py:392 +#: neutron/db/db_base_plugin_v2.py:380 msgid "IP allocation requires subnet_id or ip_address" msgstr "" -#: neutron/db/db_base_plugin_v2.py:409 +#: neutron/db/db_base_plugin_v2.py:397 #, python-format msgid "" "Failed to create port on network %(network_id)s, because fixed_ips " "included invalid subnet %(subnet_id)s" msgstr "" -#: neutron/db/db_base_plugin_v2.py:435 +#: neutron/db/db_base_plugin_v2.py:423 #, python-format msgid "" "IPv6 address %(address)s can not be directly assigned to a port on subnet" " %(id)s since the subnet is configured for automatic addresses" msgstr "" -#: neutron/db/db_base_plugin_v2.py:454 neutron/db/db_base_plugin_v2.py:497 +#: neutron/db/db_base_plugin_v2.py:442 neutron/db/db_base_plugin_v2.py:485 #: neutron/plugins/opencontrail/contrail_plugin.py:388 msgid "Exceeded maximim amount of fixed ips per port" msgstr "" -#: neutron/db/db_base_plugin_v2.py:621 +#: neutron/db/db_base_plugin_v2.py:609 msgid "0 is not allowed as CIDR prefix length" msgstr "" -#: neutron/db/db_base_plugin_v2.py:631 +#: neutron/db/db_base_plugin_v2.py:619 #, python-format msgid "" "Requested subnet with cidr: %(cidr)s for network: %(network_id)s overlaps" " with another subnet" msgstr "" -#: neutron/db/db_base_plugin_v2.py:716 neutron/db/db_base_plugin_v2.py:720 +#: neutron/db/db_base_plugin_v2.py:714 neutron/db/db_base_plugin_v2.py:718 #, python-format msgid "Invalid route: %s" msgstr "" -#: neutron/db/db_base_plugin_v2.py:798 +#: neutron/db/db_base_plugin_v2.py:796 #, python-format msgid "" "Invalid CIDR %s for IPv6 address mode. OpenStack uses the EUI-64 address " "format, which requires the prefix to be /64." 
msgstr "" -#: neutron/db/db_base_plugin_v2.py:806 +#: neutron/db/db_base_plugin_v2.py:804 #, python-format msgid "" "ipv6_ra_mode set to '%(ra_mode)s' with ipv6_address_mode set to " @@ -1946,77 +1964,77 @@ msgid "" "the same value" msgstr "" -#: neutron/db/db_base_plugin_v2.py:814 +#: neutron/db/db_base_plugin_v2.py:812 msgid "" "ipv6_ra_mode or ipv6_address_mode cannot be set when enable_dhcp is set " "to False." msgstr "" -#: neutron/db/db_base_plugin_v2.py:820 +#: neutron/db/db_base_plugin_v2.py:818 msgid "Cannot disable enable_dhcp with ipv6 attributes set" msgstr "" -#: neutron/db/db_base_plugin_v2.py:1036 +#: neutron/db/db_base_plugin_v2.py:1034 #, python-format msgid "%(name)s '%(addr)s' does not match the ip_version '%(ip_version)s'" msgstr "" -#: neutron/db/db_base_plugin_v2.py:1063 +#: neutron/db/db_base_plugin_v2.py:1061 msgid "Subnet has a prefix length that is incompatible with DHCP service enabled." msgstr "" -#: neutron/db/db_base_plugin_v2.py:1084 +#: neutron/db/db_base_plugin_v2.py:1082 msgid "Gateway is not valid on subnet" msgstr "" -#: neutron/db/db_base_plugin_v2.py:1104 neutron/db/db_base_plugin_v2.py:1118 +#: neutron/db/db_base_plugin_v2.py:1102 neutron/db/db_base_plugin_v2.py:1116 #: neutron/plugins/opencontrail/contrail_plugin.py:312 msgid "new subnet" msgstr "" -#: neutron/db/db_base_plugin_v2.py:1111 +#: neutron/db/db_base_plugin_v2.py:1109 #, python-format msgid "Error parsing dns address %s" msgstr "" -#: neutron/db/db_base_plugin_v2.py:1127 +#: neutron/db/db_base_plugin_v2.py:1125 msgid "ipv6_ra_mode is not valid when ip_version is 4" msgstr "" -#: neutron/db/db_base_plugin_v2.py:1131 +#: neutron/db/db_base_plugin_v2.py:1129 msgid "ipv6_address_mode is not valid when ip_version is 4" msgstr "" -#: neutron/db/db_base_plugin_v2.py:1277 +#: neutron/db/db_base_plugin_v2.py:1278 msgid "allocation_pools allowed only for specific subnet requests." 
msgstr "" -#: neutron/db/db_base_plugin_v2.py:1288 +#: neutron/db/db_base_plugin_v2.py:1289 #, python-format msgid "Cannot allocate IPv%(req_ver)s subnet from IPv%(pool_ver)s subnet pool" msgstr "" -#: neutron/db/db_base_plugin_v2.py:1366 +#: neutron/db/db_base_plugin_v2.py:1367 msgid "ip_version must be specified in the absence of cidr and subnetpool_id" msgstr "" -#: neutron/db/db_base_plugin_v2.py:1383 +#: neutron/db/db_base_plugin_v2.py:1384 msgid "cidr and prefixlen must not be supplied together" msgstr "" -#: neutron/db/db_base_plugin_v2.py:1394 +#: neutron/db/db_base_plugin_v2.py:1395 msgid "A cidr must be specified in the absence of a subnet pool" msgstr "" -#: neutron/db/db_base_plugin_v2.py:1694 +#: neutron/db/db_base_plugin_v2.py:1695 msgid "Existing prefixes must be a subset of the new prefixes" msgstr "" -#: neutron/db/db_base_plugin_v2.py:1761 +#: neutron/db/db_base_plugin_v2.py:1762 msgid "Subnet pool has existing allocations" msgstr "" -#: neutron/db/db_base_plugin_v2.py:1768 +#: neutron/db/db_base_plugin_v2.py:1769 msgid "mac address update" msgstr "" @@ -2154,17 +2172,17 @@ msgstr "" msgid "has device owner %s" msgstr "" -#: neutron/db/l3_dvr_db.py:51 +#: neutron/db/l3_dvr_db.py:52 msgid "" "System-wide flag to determine the type of router that tenants can create." " Only admin can override." msgstr "" -#: neutron/db/l3_dvr_db.py:565 +#: neutron/db/l3_dvr_db.py:566 msgid "Unable to create the Agent Gateway Port" msgstr "" -#: neutron/db/l3_dvr_db.py:597 +#: neutron/db/l3_dvr_db.py:598 msgid "Unable to create the SNAT Interface Port" msgstr "" @@ -2845,16 +2863,68 @@ msgstr "" msgid "Backend does not support VLAN Transparency." 
msgstr "" -#: neutron/ipam/subnet_alloc.py:106 +#: neutron/ipam/exceptions.py:20 +#, python-format +msgid "Cannot handle subnet of type %(subnet_type)s" +msgstr "" + +#: neutron/ipam/exceptions.py:24 +#, python-format +msgid "Unable to calculate %(address_type)s address because of:%(reason)s" +msgstr "" + +#: neutron/ipam/exceptions.py:29 +#, python-format +msgid "Unknown address type %(address_type)s" +msgstr "" + +#: neutron/ipam/exceptions.py:33 +#, python-format +msgid "Unable to find IP address %(ip_address)s on subnet %(subnet_id)s" +msgstr "" + +#: neutron/ipam/exceptions.py:38 +#, python-format +msgid "IP address %(ip)s already allocated in subnet %(subnet_id)s" +msgstr "" + +#: neutron/ipam/exceptions.py:42 +#, python-format +msgid "IP address %(ip)s does not belong to subnet %(subnet_id)s" +msgstr "" + +#: neutron/ipam/exceptions.py:46 +#, python-format +msgid "The address allocation request could not be satisfied because: %(reason)s" +msgstr "" + +#: neutron/ipam/exceptions.py:51 +#, python-format +msgid "The subnet request could not be satisfied because: %(reason)s" +msgstr "" + +#: neutron/ipam/exceptions.py:56 +#, python-format +msgid "" +"IPv6 address %(ip)s cannot be directly assigned to a port on subnet " +"%(subnet_id)s as the subnet is configured for automatic addresses" +msgstr "" + +#: neutron/ipam/exceptions.py:62 +#, python-format +msgid "No more IP addresses available for subnet %(subnet_id)s." 
+msgstr "" + +#: neutron/ipam/subnet_alloc.py:108 #, python-format msgid "Insufficient prefix space to allocate subnet size /%s" msgstr "" -#: neutron/ipam/subnet_alloc.py:124 +#: neutron/ipam/subnet_alloc.py:125 msgid "Cannot allocate requested subnet from the available set of prefixes" msgstr "" -#: neutron/ipam/subnet_alloc.py:145 +#: neutron/ipam/subnet_alloc.py:146 msgid "Unsupported request type" msgstr "" @@ -4005,7 +4075,7 @@ msgstr "" msgid "network_type value '%s' not supported" msgstr "" -#: neutron/plugins/ml2/plugin.py:232 +#: neutron/plugins/ml2/plugin.py:233 msgid "binding:profile value too large" msgstr "" @@ -4394,21 +4464,7 @@ msgstr "" #: neutron/plugins/ml2/drivers/mech_sriov/mech_driver.py:186 #, python-format -msgid "Invalid pci_vendor_info: '%s'" -msgstr "" - -#: neutron/plugins/ml2/drivers/mech_sriov/mech_driver.py:189 -#, python-format -msgid "Missing vendor_id in: '%s'" -msgstr "" - -#: neutron/plugins/ml2/drivers/mech_sriov/mech_driver.py:192 -#, python-format -msgid "Missing product_id in: '%s'" -msgstr "" - -#: neutron/plugins/ml2/drivers/mlnx/config.py:24 -msgid "Type of VM network interface: mlnx_direct or hostdev" +msgid "Incorrect pci_vendor_info: \"%s\", should be pair vendor_id:product_id" msgstr "" #: neutron/plugins/ml2/drivers/mlnx/agent/config.py:28 @@ -4633,23 +4689,23 @@ msgid "" "error: %(error)s" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1712 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1711 msgid "" "DVR deployments for VXLAN/GRE underlays require L2-pop to be enabled, in " "both the Agent and Server side." msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1730 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1729 #, python-format msgid "Parsing bridge_mappings failed: %s." 
msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1752 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1751 #, python-format msgid "Invalid tunnel type specified: %s" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1755 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1754 msgid "Tunneling cannot be enabled without a valid local_ip." msgstr "" @@ -5044,6 +5100,10 @@ msgstr "" msgid "Unknown attribute '%s'." msgstr "" +#: neutron/tests/common/agents/l3_agent.py:64 +msgid "Suffix to append to all namespace names." +msgstr "" + #: neutron/tests/functional/agent/linux/simple_daemon.py:37 msgid "" "uuid provided from the command line so external_process can track us via " @@ -5074,12 +5134,12 @@ msgid "" "operation." msgstr "" -#: neutron/tests/unit/plugins/ml2/test_plugin.py:417 +#: neutron/tests/unit/plugins/ml2/test_plugin.py:424 #, python-format msgid "Deleting port %s" msgstr "" -#: neutron/tests/unit/plugins/ml2/test_plugin.py:418 +#: neutron/tests/unit/plugins/ml2/test_plugin.py:425 #, python-format msgid "The port '%s' was deleted" msgstr "" @@ -5102,17 +5162,19 @@ msgstr "" #, python-format msgid "" "%(method)s called with port settings %(current)s (original settings " -"%(original)s) binding levels %(levels)s (original binding levels " -"%(original_levels)s) on network %(network)s with segments to bind " -"%(segments_to_bind)s" +"%(original)s) host %(host)s (original host %(original_host)s) vif type " +"%(vif_type)s (original vif type %(original_vif_type)s) vif details " +"%(vif_details)s (original vif details %(original_vif_details)s) binding " +"levels %(levels)s (original binding levels %(original_levels)s) on " +"network %(network)s with segments to bind %(segments_to_bind)s" msgstr "" #: neutron/tests/unit/plugins/ml2/extensions/fake_extension.py:54 msgid "Adds test attributes to core resources." 
msgstr "" -#: neutron/tests/unit/plugins/openvswitch/agent/test_ovs_neutron_agent.py:955 -#: neutron/tests/unit/plugins/openvswitch/agent/test_ovs_neutron_agent.py:972 +#: neutron/tests/unit/plugins/openvswitch/agent/test_ovs_neutron_agent.py:963 +#: neutron/tests/unit/plugins/openvswitch/agent/test_ovs_neutron_agent.py:980 #, python-format msgid "Failed to set-up %(type)s tunnel port to %(ip)s" msgstr "" diff --git a/neutron/locale/pt_BR/LC_MESSAGES/neutron-log-info.po b/neutron/locale/pt_BR/LC_MESSAGES/neutron-log-info.po index 27f0cc0759a..7fcb0eb5cc4 100644 --- a/neutron/locale/pt_BR/LC_MESSAGES/neutron-log-info.po +++ b/neutron/locale/pt_BR/LC_MESSAGES/neutron-log-info.po @@ -3,13 +3,14 @@ # This file is distributed under the same license as the neutron project. # # Translators: +# Andre Campos Bezerra , 2015 msgid "" msgstr "" "Project-Id-Version: Neutron\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2015-05-04 06:08+0000\n" -"PO-Revision-Date: 2015-04-28 07:35+0000\n" -"Last-Translator: openstackjenkins \n" +"POT-Creation-Date: 2015-05-25 06:15+0000\n" +"PO-Revision-Date: 2015-05-22 16:09+0000\n" +"Last-Translator: Andre Campos Bezerra \n" "Language-Team: Portuguese (Brazil) (http://www.transifex.com/projects/p/" "neutron/language/pt_BR/)\n" "Language: pt_BR\n" @@ -88,6 +89,9 @@ msgstr "Agente DHCP iniciado" msgid "Synchronizing state" msgstr "Sincronizando estado" +msgid "Synchronizing state complete" +msgstr "Sincronizando estado finalizado" + #, python-format msgid "agent_updated by server side %s!" msgstr "agent_updated por lado do servidor %s!" 
@@ -120,6 +124,10 @@ msgstr "" "Permitir que a classificação seja ativada porque a paginação nativa requer " "classificação nativa" +#, python-format +msgid "Deleting port: %s" +msgstr "Deletando porta: %s" + msgid "OVS cleanup completed successfully" msgstr "Limpeza de OVS concluída com êxito" @@ -164,6 +172,9 @@ msgstr "Localizados intervalos de sobreposição: %(l_range)s e %(r_range)s" msgid "Skipping port %s as no IP is configure on it" msgstr "Ignorando a porta %s porque nenhum IP está configurado nela" +msgid "SNAT already bound to a service node." +msgstr "SNAT já conectado a um nó de serviço." + #, python-format msgid "Eventlet backdoor listening on %(port)s for process %(pid)d" msgstr "Backdoor de Eventlet escutando na porta %(port)s pelo processo %(pid)d" @@ -210,6 +221,9 @@ msgstr "Filho %(pid)s encerrando com status %(code)d" msgid "Caught %s, stopping children" msgstr "%s capturado, parando filhos" +msgid "Wait called after thread killed. Cleaning up." +msgstr "Espera requisitada depois que thread foi morta. Limpando." + #, python-format msgid "Waiting on %d children to exit" msgstr "Aguardando em %d filhos para sair" From eab71473c3a1d40ec3f1d8a84b839b06b140e4fe Mon Sep 17 00:00:00 2001 From: YAMAMOTO Takashi Date: Mon, 2 Mar 2015 13:14:48 +0900 Subject: [PATCH 043/292] OVS-agent: Separate ovs-ofctl using code as a driver This is a preparation to introduce another Ryu-based implementation. The aim is to replace this with the new Ryu-based implementation eventually. Add a config option for OVS-agent which selects the implementation. Currently, the only available choice is 'ovs-ofctl'. Also, this commit simplifies DVR logics by reducing duplications and makes some of DVR UTs actually check the flows rather than just "add_flow is called". 
Partially-Implements: blueprint ovs-ofctl-to-python Change-Id: Ie1224f8a1c17268cd7d1c474ed82fdfb8852eaa8 --- .../openvswitch/ovs_neutron_plugin.ini | 4 + .../cmd/eventlet/plugins/ovs_neutron_agent.py | 4 +- neutron/plugins/openvswitch/agent/main.py | 45 + .../openvswitch/agent/openflow/__init__.py | 0 .../agent/openflow/ovs_ofctl/__init__.py | 0 .../openflow/ovs_ofctl/br_dvr_process.py | 89 + .../agent/openflow/ovs_ofctl/br_int.py | 133 ++ .../agent/openflow/ovs_ofctl/br_phys.py | 59 + .../agent/openflow/ovs_ofctl/br_tun.py | 246 +++ .../agent/openflow/ovs_ofctl/main.py | 33 + .../agent/openflow/ovs_ofctl/ofswitch.py | 74 + .../agent/openflow/ovs_ofctl/ovs_bridge.py | 30 + .../agent/ovs_dvr_neutron_agent.py | 403 ++--- .../openvswitch/agent/ovs_neutron_agent.py | 384 ++--- neutron/plugins/openvswitch/common/config.py | 2 + .../tests/functional/agent/test_ovs_flows.py | 85 +- .../openvswitch/agent/openflow/__init__.py | 0 .../agent/openflow/ovs_ofctl/__init__.py | 0 .../ovs_ofctl/ovs_bridge_test_base.py | 160 ++ .../agent/openflow/ovs_ofctl/test_br_int.py | 213 +++ .../agent/openflow/ovs_ofctl/test_br_phys.py | 97 ++ .../agent/openflow/ovs_ofctl/test_br_tun.py | 259 +++ .../openvswitch/agent/ovs_test_base.py | 54 + .../agent/test_ovs_neutron_agent.py | 1455 ++++++++--------- .../plugins/openvswitch/test_ovs_tunnel.py | 363 ++-- 25 files changed, 2713 insertions(+), 1479 deletions(-) create mode 100644 neutron/plugins/openvswitch/agent/main.py create mode 100644 neutron/plugins/openvswitch/agent/openflow/__init__.py create mode 100644 neutron/plugins/openvswitch/agent/openflow/ovs_ofctl/__init__.py create mode 100644 neutron/plugins/openvswitch/agent/openflow/ovs_ofctl/br_dvr_process.py create mode 100644 neutron/plugins/openvswitch/agent/openflow/ovs_ofctl/br_int.py create mode 100644 neutron/plugins/openvswitch/agent/openflow/ovs_ofctl/br_phys.py create mode 100644 neutron/plugins/openvswitch/agent/openflow/ovs_ofctl/br_tun.py create mode 100644 
neutron/plugins/openvswitch/agent/openflow/ovs_ofctl/main.py create mode 100644 neutron/plugins/openvswitch/agent/openflow/ovs_ofctl/ofswitch.py create mode 100644 neutron/plugins/openvswitch/agent/openflow/ovs_ofctl/ovs_bridge.py create mode 100644 neutron/tests/unit/plugins/openvswitch/agent/openflow/__init__.py create mode 100644 neutron/tests/unit/plugins/openvswitch/agent/openflow/ovs_ofctl/__init__.py create mode 100644 neutron/tests/unit/plugins/openvswitch/agent/openflow/ovs_ofctl/ovs_bridge_test_base.py create mode 100644 neutron/tests/unit/plugins/openvswitch/agent/openflow/ovs_ofctl/test_br_int.py create mode 100644 neutron/tests/unit/plugins/openvswitch/agent/openflow/ovs_ofctl/test_br_phys.py create mode 100644 neutron/tests/unit/plugins/openvswitch/agent/openflow/ovs_ofctl/test_br_tun.py create mode 100644 neutron/tests/unit/plugins/openvswitch/agent/ovs_test_base.py diff --git a/etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini b/etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini index 42673cf1b8b..85586c5969b 100644 --- a/etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini +++ b/etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini @@ -53,6 +53,10 @@ # ovs-vsctl set-manager ptcp:6640:127.0.0.1 # ovsdb_connection = tcp:127.0.0.1:6640 +# (StrOpt) OpenFlow interface to use. +# 'ovs-ofctl' is currently the only available choice. +# of_interface = ovs-ofctl + [agent] # Agent's polling interval in seconds # polling_interval = 2 diff --git a/neutron/cmd/eventlet/plugins/ovs_neutron_agent.py b/neutron/cmd/eventlet/plugins/ovs_neutron_agent.py index ef686f86c6c..2d545bc5316 100644 --- a/neutron/cmd/eventlet/plugins/ovs_neutron_agent.py +++ b/neutron/cmd/eventlet/plugins/ovs_neutron_agent.py @@ -13,8 +13,8 @@ # License for the specific language governing permissions and limitations # under the License. 
-from neutron.plugins.openvswitch.agent import ovs_neutron_agent +import neutron.plugins.openvswitch.agent.main as agent_main def main(): - ovs_neutron_agent.main() + agent_main.main() diff --git a/neutron/plugins/openvswitch/agent/main.py b/neutron/plugins/openvswitch/agent/main.py new file mode 100644 index 00000000000..7dbac9fbea7 --- /dev/null +++ b/neutron/plugins/openvswitch/agent/main.py @@ -0,0 +1,45 @@ +# Copyright (C) 2014,2015 VA Linux Systems Japan K.K. +# Copyright (C) 2014 Fumihiko Kakuma +# Copyright (C) 2014,2015 YAMAMOTO Takashi +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import sys + +from oslo_config import cfg +from oslo_log import log as logging +from oslo_utils import importutils + +from neutron.common import config as common_config +from neutron.common import utils as n_utils + + +LOG = logging.getLogger(__name__) +cfg.CONF.import_group('OVS', 'neutron.plugins.openvswitch.common.config') + + +_main_modules = { + 'ovs-ofctl': 'neutron.plugins.openvswitch.agent.openflow.ovs_ofctl.main', +} + + +def main(): + common_config.init(sys.argv[1:]) + driver_name = cfg.CONF.OVS.of_interface + mod_name = _main_modules[driver_name] + mod = importutils.import_module(mod_name) + mod.init_config() + common_config.setup_logging() + n_utils.log_opt_values(LOG) + mod.main() diff --git a/neutron/plugins/openvswitch/agent/openflow/__init__.py b/neutron/plugins/openvswitch/agent/openflow/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/neutron/plugins/openvswitch/agent/openflow/ovs_ofctl/__init__.py b/neutron/plugins/openvswitch/agent/openflow/ovs_ofctl/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/neutron/plugins/openvswitch/agent/openflow/ovs_ofctl/br_dvr_process.py b/neutron/plugins/openvswitch/agent/openflow/ovs_ofctl/br_dvr_process.py new file mode 100644 index 00000000000..46db4ec697b --- /dev/null +++ b/neutron/plugins/openvswitch/agent/openflow/ovs_ofctl/br_dvr_process.py @@ -0,0 +1,89 @@ +# Copyright (C) 2014,2015 VA Linux Systems Japan K.K. +# Copyright (C) 2014,2015 YAMAMOTO Takashi +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +# Copyright 2011 VMware, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +class OVSDVRProcessMixin(object): + """Common logic for br-tun and br-phys' DVR_PROCESS tables. + + Inheriters should provide self.dvr_process_table_id and + self.dvr_process_next_table_id. + """ + + def install_dvr_process_ipv4(self, vlan_tag, gateway_ip): + # block ARP + self.add_flow(table=self.dvr_process_table_id, + priority=3, + dl_vlan=vlan_tag, + proto='arp', + nw_dst=gateway_ip, + actions='drop') + + def delete_dvr_process_ipv4(self, vlan_tag, gateway_ip): + self.delete_flows(table=self.dvr_process_table_id, + dl_vlan=vlan_tag, + proto='arp', + nw_dst=gateway_ip) + + def install_dvr_process_ipv6(self, vlan_tag, gateway_mac): + # block RA + self.add_flow(table=self.dvr_process_table_id, + priority=3, + dl_vlan=vlan_tag, + proto='icmp6', + dl_src=gateway_mac, + actions='drop') + + def delete_dvr_process_ipv6(self, vlan_tag, gateway_mac): + self.delete_flows(table=self.dvr_process_table_id, + dl_vlan=vlan_tag, + proto='icmp6', + dl_src=gateway_mac) + + def install_dvr_process(self, vlan_tag, vif_mac, dvr_mac_address): + self.add_flow(table=self.dvr_process_table_id, + priority=2, + dl_vlan=vlan_tag, + dl_dst=vif_mac, + actions="drop") + self.add_flow(table=self.dvr_process_table_id, + priority=1, + dl_vlan=vlan_tag, + dl_src=vif_mac, + 
actions="mod_dl_src:%s,resubmit(,%s)" % + (dvr_mac_address, self.dvr_process_next_table_id)) + + def delete_dvr_process(self, vlan_tag, vif_mac): + self.delete_flows(table=self.dvr_process_table_id, + dl_vlan=vlan_tag, + dl_dst=vif_mac) + self.delete_flows(table=self.dvr_process_table_id, + dl_vlan=vlan_tag, + dl_src=vif_mac) diff --git a/neutron/plugins/openvswitch/agent/openflow/ovs_ofctl/br_int.py b/neutron/plugins/openvswitch/agent/openflow/ovs_ofctl/br_int.py new file mode 100644 index 00000000000..34e0c2f4153 --- /dev/null +++ b/neutron/plugins/openvswitch/agent/openflow/ovs_ofctl/br_int.py @@ -0,0 +1,133 @@ +# Copyright (C) 2014,2015 VA Linux Systems Japan K.K. +# Copyright (C) 2014,2015 YAMAMOTO Takashi +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +""" +* references +** OVS agent https://wiki.openstack.org/wiki/Ovs-flow-logic +""" + +from neutron.plugins.common import constants as p_const +from neutron.plugins.openvswitch.agent.openflow.ovs_ofctl import ovs_bridge +from neutron.plugins.openvswitch.common import constants + + +class OVSIntegrationBridge(ovs_bridge.OVSAgentBridge): + """openvswitch agent br-int specific logic.""" + + def setup_default_table(self): + self.delete_flows() + self.install_normal() + self.setup_canary_table() + self.install_drop(table_id=constants.ARP_SPOOF_TABLE) + + def setup_canary_table(self): + self.install_drop(constants.CANARY_TABLE) + + def check_canary_table(self): + canary_flows = self.dump_flows(constants.CANARY_TABLE) + if canary_flows == '': + return constants.OVS_RESTARTED + elif canary_flows is None: + return constants.OVS_DEAD + else: + return constants.OVS_NORMAL + + def provision_local_vlan(self, port, lvid, segmentation_id): + if segmentation_id is None: + dl_vlan = 0xffff + else: + dl_vlan = segmentation_id + self.add_flow(priority=3, + in_port=port, + dl_vlan=dl_vlan, + actions="mod_vlan_vid:%s,normal" % lvid) + + def reclaim_local_vlan(self, port, segmentation_id): + if segmentation_id is None: + dl_vlan = 0xffff + else: + dl_vlan = segmentation_id + self.delete_flows(in_port=port, dl_vlan=dl_vlan) + + @staticmethod + def _dvr_to_src_mac_table_id(network_type): + if network_type == p_const.TYPE_VLAN: + return constants.DVR_TO_SRC_MAC_VLAN + else: + return constants.DVR_TO_SRC_MAC + + def install_dvr_to_src_mac(self, network_type, + vlan_tag, gateway_mac, dst_mac, dst_port): + table_id = self._dvr_to_src_mac_table_id(network_type) + self.add_flow(table=table_id, + priority=4, + dl_vlan=vlan_tag, + dl_dst=dst_mac, + actions="strip_vlan,mod_dl_src:%s," + "output:%s" % (gateway_mac, dst_port)) + + def delete_dvr_to_src_mac(self, network_type, vlan_tag, dst_mac): + table_id = self._dvr_to_src_mac_table_id(network_type) + self.delete_flows(table=table_id, + 
dl_vlan=vlan_tag, + dl_dst=dst_mac) + + def add_dvr_mac_vlan(self, mac, port): + self.install_goto(table_id=constants.LOCAL_SWITCHING, + priority=4, + in_port=port, + eth_src=mac, + dest_table_id=constants.DVR_TO_SRC_MAC_VLAN) + + def remove_dvr_mac_vlan(self, mac): + # REVISIT(yamamoto): match in_port as well? + self.delete_flows(table_id=constants.LOCAL_SWITCHING, + eth_src=mac) + + def add_dvr_mac_tun(self, mac, port): + # Table LOCAL_SWITCHING will now sort DVR traffic from other + # traffic depending on in_port + self.install_goto(table_id=constants.LOCAL_SWITCHING, + priority=2, + in_port=port, + eth_src=mac, + dest_table_id=constants.DVR_TO_SRC_MAC) + + def remove_dvr_mac_tun(self, mac, port): + self.delete_flows(table_id=constants.LOCAL_SWITCHING, + in_port=port, eth_src=mac) + + def install_arp_spoofing_protection(self, port, ip_addresses): + # allow ARPs as long as they match addresses that actually + # belong to the port. + for ip in ip_addresses: + self.install_normal( + table_id=constants.ARP_SPOOF_TABLE, priority=2, + proto='arp', arp_spa=ip, in_port=port) + + # Now that the rules are ready, direct ARP traffic from the port into + # the anti-spoof table. + # This strategy fails gracefully because OVS versions that can't match + # on ARP headers will just process traffic normally. 
+ self.add_flow(table=constants.LOCAL_SWITCHING, + priority=10, proto='arp', in_port=port, + actions=("resubmit(,%s)" % constants.ARP_SPOOF_TABLE)) + + def delete_arp_spoofing_protection(self, port): + self.delete_flows(table_id=constants.LOCAL_SWITCHING, + in_port=port, proto='arp') + self.delete_flows(table_id=constants.ARP_SPOOF_TABLE, + in_port=port) diff --git a/neutron/plugins/openvswitch/agent/openflow/ovs_ofctl/br_phys.py b/neutron/plugins/openvswitch/agent/openflow/ovs_ofctl/br_phys.py new file mode 100644 index 00000000000..3498bb86fb5 --- /dev/null +++ b/neutron/plugins/openvswitch/agent/openflow/ovs_ofctl/br_phys.py @@ -0,0 +1,59 @@ +# Copyright (C) 2014,2015 VA Linux Systems Japan K.K. +# Copyright (C) 2014,2015 YAMAMOTO Takashi +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from neutron.plugins.openvswitch.agent.openflow.ovs_ofctl import br_dvr_process +from neutron.plugins.openvswitch.agent.openflow.ovs_ofctl import ovs_bridge +from neutron.plugins.openvswitch.common import constants + + +class OVSPhysicalBridge(ovs_bridge.OVSAgentBridge, + br_dvr_process.OVSDVRProcessMixin): + """openvswitch agent physical bridge specific logic.""" + + # Used by OVSDVRProcessMixin + dvr_process_table_id = constants.DVR_PROCESS_VLAN + dvr_process_next_table_id = constants.LOCAL_VLAN_TRANSLATION + + def setup_default_table(self): + self.delete_flows() + self.install_normal() + + def provision_local_vlan(self, port, lvid, segmentation_id, distributed): + table_id = constants.LOCAL_VLAN_TRANSLATION if distributed else 0 + if segmentation_id is None: + self.add_flow(table=table_id, + priority=4, + in_port=port, + dl_vlan=lvid, + actions="strip_vlan,normal") + else: + self.add_flow(table=table_id, + priority=4, + in_port=port, + dl_vlan=lvid, + actions="mod_vlan_vid:%s,normal" % segmentation_id) + + def reclaim_local_vlan(self, port, lvid): + self.delete_flows(in_port=port, dl_vlan=lvid) + + def add_dvr_mac_vlan(self, mac, port): + self.install_output(table_id=constants.DVR_NOT_LEARN_VLAN, + priority=2, eth_src=mac, port=port) + + def remove_dvr_mac_vlan(self, mac): + # REVISIT(yamamoto): match in_port as well? + self.delete_flows(table_id=constants.DVR_NOT_LEARN_VLAN, + eth_src=mac) diff --git a/neutron/plugins/openvswitch/agent/openflow/ovs_ofctl/br_tun.py b/neutron/plugins/openvswitch/agent/openflow/ovs_ofctl/br_tun.py new file mode 100644 index 00000000000..4407e2fe01b --- /dev/null +++ b/neutron/plugins/openvswitch/agent/openflow/ovs_ofctl/br_tun.py @@ -0,0 +1,246 @@ +# Copyright (C) 2014,2015 VA Linux Systems Japan K.K. +# Copyright (C) 2014,2015 YAMAMOTO Takashi +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +# Copyright 2011 VMware, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import functools + +import netaddr + +from neutron.agent.common import ovs_lib +from neutron.plugins.openvswitch.agent.openflow.ovs_ofctl import br_dvr_process +from neutron.plugins.openvswitch.agent.openflow.ovs_ofctl import ovs_bridge +from neutron.plugins.openvswitch.common import constants + + +class OVSTunnelBridge(ovs_bridge.OVSAgentBridge, + br_dvr_process.OVSDVRProcessMixin): + """openvswitch agent tunnel bridge specific logic.""" + + # Used by OVSDVRProcessMixin + dvr_process_table_id = constants.DVR_PROCESS + dvr_process_next_table_id = constants.PATCH_LV_TO_TUN + + def setup_default_table(self, patch_int_ofport, arp_responder_enabled): + # Table 0 (default) will sort incoming traffic depending on in_port + self.add_flow(priority=1, + in_port=patch_int_ofport, + actions="resubmit(,%s)" % + constants.PATCH_LV_TO_TUN) + self.add_flow(priority=0, actions="drop") + + if arp_responder_enabled: + # ARP broadcast-ed request go to the local ARP_RESPONDER table to + # be locally resolved + # REVISIT(yamamoto): arp_op=arp.ARP_REQUEST + self.add_flow(table=constants.PATCH_LV_TO_TUN, + priority=1, + proto='arp', + dl_dst="ff:ff:ff:ff:ff:ff", + actions=("resubmit(,%s)" % + constants.ARP_RESPONDER)) + + # PATCH_LV_TO_TUN table will handle packets coming from patch_int + # unicasts go to table UCAST_TO_TUN where remote addresses are learnt + self.add_flow(table=constants.PATCH_LV_TO_TUN, + priority=0, + dl_dst="00:00:00:00:00:00/01:00:00:00:00:00", + actions="resubmit(,%s)" % constants.UCAST_TO_TUN) + + # Broadcasts/multicasts go to table FLOOD_TO_TUN that handles flooding + self.add_flow(table=constants.PATCH_LV_TO_TUN, + priority=0, + dl_dst="01:00:00:00:00:00/01:00:00:00:00:00", + actions="resubmit(,%s)" % constants.FLOOD_TO_TUN) + + # Tables [tunnel_type]_TUN_TO_LV will set lvid depending on tun_id + # for each tunnel type, and resubmit to table LEARN_FROM_TUN where + # remote mac addresses will be learnt + for tunnel_type in constants.TUNNEL_NETWORK_TYPES: + 
self.add_flow(table=constants.TUN_TABLE[tunnel_type], + priority=0, + actions="drop") + + # LEARN_FROM_TUN table will have a single flow using a learn action to + # dynamically set-up flows in UCAST_TO_TUN corresponding to remote mac + # addresses (assumes that lvid has already been set by a previous flow) + learned_flow = ("table=%s," + "priority=1," + "hard_timeout=300," + "NXM_OF_VLAN_TCI[0..11]," + "NXM_OF_ETH_DST[]=NXM_OF_ETH_SRC[]," + "load:0->NXM_OF_VLAN_TCI[]," + "load:NXM_NX_TUN_ID[]->NXM_NX_TUN_ID[]," + "output:NXM_OF_IN_PORT[]" % + constants.UCAST_TO_TUN) + # Once remote mac addresses are learnt, output packet to patch_int + self.add_flow(table=constants.LEARN_FROM_TUN, + priority=1, + actions="learn(%s),output:%s" % + (learned_flow, patch_int_ofport)) + + # Egress unicast will be handled in table UCAST_TO_TUN, where remote + # mac addresses will be learned. For now, just add a default flow that + # will resubmit unknown unicasts to table FLOOD_TO_TUN to treat them + # as broadcasts/multicasts + self.add_flow(table=constants.UCAST_TO_TUN, + priority=0, + actions="resubmit(,%s)" % + constants.FLOOD_TO_TUN) + + if arp_responder_enabled: + # If none of the ARP entries correspond to the requested IP, the + # broadcast-ed packet is resubmitted to the flooding table + self.add_flow(table=constants.ARP_RESPONDER, + priority=0, + actions="resubmit(,%s)" % + constants.FLOOD_TO_TUN) + + # FLOOD_TO_TUN will handle flooding in tunnels based on lvid, + # for now, add a default drop action + self.install_drop(table_id=constants.FLOOD_TO_TUN) + + def provision_local_vlan(self, network_type, lvid, segmentation_id, + distributed=False): + if distributed: + table_id = constants.DVR_NOT_LEARN + else: + table_id = constants.LEARN_FROM_TUN + self.add_flow(table=constants.TUN_TABLE[network_type], + priority=1, + tun_id=segmentation_id, + actions="mod_vlan_vid:%s," + "resubmit(,%s)" % + (lvid, table_id)) + + def reclaim_local_vlan(self, network_type, segmentation_id): + 
self.delete_flows(table=constants.TUN_TABLE[network_type], + tun_id=segmentation_id) + + @staticmethod + def _ofport_set_to_str(ports_set): + return ",".join(map(str, ports_set)) + + def install_flood_to_tun(self, vlan, tun_id, ports, deferred_br=None): + br = deferred_br if deferred_br else self + br.mod_flow(table=constants.FLOOD_TO_TUN, + dl_vlan=vlan, + actions="strip_vlan,set_tunnel:%s,output:%s" % + (tun_id, self._ofport_set_to_str(ports))) + + def delete_flood_to_tun(self, vlan, deferred_br=None): + br = deferred_br if deferred_br else self + br.delete_flows(table=constants.FLOOD_TO_TUN, dl_vlan=vlan) + + def install_unicast_to_tun(self, vlan, tun_id, port, mac, + deferred_br=None): + br = deferred_br if deferred_br else self + br.add_flow(table=constants.UCAST_TO_TUN, + priority=2, + dl_vlan=vlan, + dl_dst=mac, + actions="strip_vlan,set_tunnel:%s,output:%s" % + (tun_id, port)) + + def delete_unicast_to_tun(self, vlan, mac, deferred_br=None): + br = deferred_br if deferred_br else self + if mac is None: + br.delete_flows(table=constants.UCAST_TO_TUN, + dl_vlan=vlan) + else: + br.delete_flows(table=constants.UCAST_TO_TUN, + dl_vlan=vlan, + dl_dst=mac) + + def install_arp_responder(self, vlan, ip, mac, deferred_br=None): + br = deferred_br if deferred_br else self + actions = constants.ARP_RESPONDER_ACTIONS % { + 'mac': netaddr.EUI(mac, dialect=netaddr.mac_unix), + 'ip': netaddr.IPAddress(ip), + } + br.add_flow(table=constants.ARP_RESPONDER, + priority=1, + proto='arp', + dl_vlan=vlan, + nw_dst='%s' % ip, + actions=actions) + + def delete_arp_responder(self, vlan, ip, deferred_br=None): + br = deferred_br if deferred_br else self + if ip is None: + br.delete_flows(table=constants.ARP_RESPONDER, + proto='arp', + dl_vlan=vlan) + else: + br.delete_flows(table=constants.ARP_RESPONDER, + proto='arp', + dl_vlan=vlan, + nw_dst='%s' % ip) + + def setup_tunnel_port(self, network_type, port): + self.add_flow(priority=1, + in_port=port, + actions="resubmit(,%s)" % + 
constants.TUN_TABLE[network_type]) + + def cleanup_tunnel_port(self, port): + self.delete_flows(in_port=port) + + def add_dvr_mac_tun(self, mac, port): + # Table DVR_NOT_LEARN ensures unique dvr macs in the cloud + # are not learnt, as they may result in flow explosions + self.install_output(table_id=constants.DVR_NOT_LEARN, + priority=1, + eth_src=mac, + port=port) + + def remove_dvr_mac_tun(self, mac): + # REVISIT(yamamoto): match in_port as well? + self.delete_flows(table_id=constants.DVR_NOT_LEARN, + eth_src=mac) + + def deferred(self, **kwargs): + return DeferredOVSTunnelBridge(self, **kwargs) + + +class DeferredOVSTunnelBridge(ovs_lib.DeferredOVSBridge): + _METHODS = [ + 'install_unicast_to_tun', + 'delete_unicast_to_tun', + 'install_flood_to_tun', + 'delete_flood_to_tun', + 'install_arp_responder', + 'delete_arp_responder', + ] + + def __getattr__(self, name): + if name in self._METHODS: + m = getattr(self.br, name) + return functools.partial(m, deferred_br=self) + raise AttributeError(name) diff --git a/neutron/plugins/openvswitch/agent/openflow/ovs_ofctl/main.py b/neutron/plugins/openvswitch/agent/openflow/ovs_ofctl/main.py new file mode 100644 index 00000000000..537c324b173 --- /dev/null +++ b/neutron/plugins/openvswitch/agent/openflow/ovs_ofctl/main.py @@ -0,0 +1,33 @@ +# Copyright (C) 2015 VA Linux Systems Japan K.K. +# Copyright (C) 2015 YAMAMOTO Takashi +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from neutron.plugins.openvswitch.agent.openflow.ovs_ofctl import br_int +from neutron.plugins.openvswitch.agent.openflow.ovs_ofctl import br_phys +from neutron.plugins.openvswitch.agent.openflow.ovs_ofctl import br_tun +from neutron.plugins.openvswitch.agent import ovs_neutron_agent + + +def init_config(): + pass + + +def main(): + bridge_classes = { + 'br_int': br_int.OVSIntegrationBridge, + 'br_phys': br_phys.OVSPhysicalBridge, + 'br_tun': br_tun.OVSTunnelBridge, + } + ovs_neutron_agent.main(bridge_classes) diff --git a/neutron/plugins/openvswitch/agent/openflow/ovs_ofctl/ofswitch.py b/neutron/plugins/openvswitch/agent/openflow/ovs_ofctl/ofswitch.py new file mode 100644 index 00000000000..578e3e2196a --- /dev/null +++ b/neutron/plugins/openvswitch/agent/openflow/ovs_ofctl/ofswitch.py @@ -0,0 +1,74 @@ +# Copyright (C) 2014,2015 VA Linux Systems Japan K.K. +# Copyright (C) 2014,2015 YAMAMOTO Takashi +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +# Field name mappings (from Ryu to ovs-ofctl) +_keywords = { + 'eth_src': 'dl_src', + 'eth_dst': 'dl_dst', + 'ipv4_src': 'nw_src', + 'ipv4_dst': 'nw_dst', + 'table_id': 'table', +} + + +class OpenFlowSwitchMixin(object): + """Mixin to provide common convenient routines for an openflow switch.""" + + @staticmethod + def _conv_args(kwargs): + for our_name, ovs_ofctl_name in _keywords.items(): + if our_name in kwargs: + kwargs[ovs_ofctl_name] = kwargs.pop(our_name) + return kwargs + + def dump_flows(self, table_id): + return self.dump_flows_for_table(table_id) + + def install_goto_next(self, table_id): + self.install_goto(table_id=table_id, dest_table_id=table_id + 1) + + def install_output(self, port, table_id=0, priority=0, **kwargs): + self.add_flow(table=table_id, + priority=priority, + actions="output:%s" % port, + **self._conv_args(kwargs)) + + def install_normal(self, table_id=0, priority=0, **kwargs): + self.add_flow(table=table_id, + priority=priority, + actions="normal", + **self._conv_args(kwargs)) + + def install_goto(self, dest_table_id, table_id=0, priority=0, **kwargs): + self.add_flow(table=table_id, + priority=priority, + actions="resubmit(,%s)" % dest_table_id, + **self._conv_args(kwargs)) + + def install_drop(self, table_id=0, priority=0, **kwargs): + self.add_flow(table=table_id, + priority=priority, + actions="drop", + **self._conv_args(kwargs)) + + def delete_flows(self, **kwargs): + # NOTE(yamamoto): super() points to ovs_lib.OVSBridge. + # See ovs_bridge.py how this class is actually used. 
+ if kwargs: + super(OpenFlowSwitchMixin, self).delete_flows( + **self._conv_args(kwargs)) + else: + super(OpenFlowSwitchMixin, self).remove_all_flows() diff --git a/neutron/plugins/openvswitch/agent/openflow/ovs_ofctl/ovs_bridge.py b/neutron/plugins/openvswitch/agent/openflow/ovs_ofctl/ovs_bridge.py new file mode 100644 index 00000000000..179994d1806 --- /dev/null +++ b/neutron/plugins/openvswitch/agent/openflow/ovs_ofctl/ovs_bridge.py @@ -0,0 +1,30 @@ +# Copyright (C) 2014,2015 VA Linux Systems Japan K.K. +# Copyright (C) 2014,2015 YAMAMOTO Takashi +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ + +from neutron.agent.common import ovs_lib +from neutron.plugins.openvswitch.agent.openflow.ovs_ofctl import ofswitch + + +class OVSAgentBridge(ofswitch.OpenFlowSwitchMixin, ovs_lib.OVSBridge): + """Common code for bridges used by OVS agent""" + + def setup_controllers(self, conf): + self.set_protocols("[OpenFlow10]") + self.del_controller() + + def drop_port(self, in_port): + self.install_drop(priority=2, in_port=in_port) diff --git a/neutron/plugins/openvswitch/agent/ovs_dvr_neutron_agent.py b/neutron/plugins/openvswitch/agent/ovs_dvr_neutron_agent.py index 43f4ac5e7e0..0243a555bdd 100644 --- a/neutron/plugins/openvswitch/agent/ovs_dvr_neutron_agent.py +++ b/neutron/plugins/openvswitch/agent/ovs_dvr_neutron_agent.py @@ -171,8 +171,9 @@ class OVSDVRNeutronAgent(object): if not self.in_distributed_mode(): # switch all traffic using L2 learning - self.int_br.add_flow(table=constants.LOCAL_SWITCHING, - priority=1, actions="normal") + # REVISIT(yamamoto): why to install the same flow as + # setup_integration_br? + self.int_br.install_normal() def get_dvr_mac_address_with_retry(self): # Get the local DVR MAC Address from the Neutron Server. 
@@ -204,52 +205,42 @@ class OVSDVRNeutronAgent(object): LOG.info(_LI("L2 Agent operating in DVR Mode with MAC %s"), self.dvr_mac_address) # Remove existing flows in integration bridge - self.int_br.remove_all_flows() + self.int_br.delete_flows() # Add a canary flow to int_br to track OVS restarts - self.int_br.add_flow(table=constants.CANARY_TABLE, priority=0, - actions="drop") + self.int_br.setup_canary_table() # Insert 'drop' action as the default for Table DVR_TO_SRC_MAC - self.int_br.add_flow(table=constants.DVR_TO_SRC_MAC, - priority=1, - actions="drop") + self.int_br.install_drop(table_id=constants.DVR_TO_SRC_MAC, priority=1) - self.int_br.add_flow(table=constants.DVR_TO_SRC_MAC_VLAN, - priority=1, - actions="drop") + self.int_br.install_drop(table_id=constants.DVR_TO_SRC_MAC_VLAN, + priority=1) # Insert 'normal' action as the default for Table LOCAL_SWITCHING - self.int_br.add_flow(table=constants.LOCAL_SWITCHING, - priority=1, - actions="normal") + self.int_br.install_normal(table_id=constants.LOCAL_SWITCHING, + priority=1) for physical_network in self.bridge_mappings: - self.int_br.add_flow(table=constants.LOCAL_SWITCHING, - priority=2, - in_port=self.int_ofports[physical_network], - actions="drop") + self.int_br.install_drop(table_id=constants.LOCAL_SWITCHING, + priority=2, + in_port=self.int_ofports[ + physical_network]) def setup_dvr_flows_on_tun_br(self): '''Setup up initial dvr flows into br-tun''' if not self.enable_tunneling or not self.in_distributed_mode(): return - self.tun_br.add_flow(priority=1, - in_port=self.patch_int_ofport, - actions="resubmit(,%s)" % - constants.DVR_PROCESS) + self.tun_br.install_goto(dest_table_id=constants.DVR_PROCESS, + priority=1, + in_port=self.patch_int_ofport) # table-miss should be sent to learning table - self.tun_br.add_flow(table=constants.DVR_NOT_LEARN, - priority=0, - actions="resubmit(,%s)" % - constants.LEARN_FROM_TUN) + self.tun_br.install_goto(table_id=constants.DVR_NOT_LEARN, + 
dest_table_id=constants.LEARN_FROM_TUN) - self.tun_br.add_flow(table=constants.DVR_PROCESS, - priority=0, - actions="resubmit(,%s)" % - constants.PATCH_LV_TO_TUN) + self.tun_br.install_goto(table_id=constants.DVR_PROCESS, + dest_table_id=constants.PATCH_LV_TO_TUN) def setup_dvr_flows_on_phys_br(self): '''Setup up initial dvr flows into br-phys''' @@ -257,27 +248,63 @@ class OVSDVRNeutronAgent(object): return for physical_network in self.bridge_mappings: - self.phys_brs[physical_network].add_flow(priority=2, + self.phys_brs[physical_network].install_goto( in_port=self.phys_ofports[physical_network], - actions="resubmit(,%s)" % - constants.DVR_PROCESS_VLAN) - self.phys_brs[physical_network].add_flow(priority=1, - actions="resubmit(,%s)" % - constants.DVR_NOT_LEARN_VLAN) - self.phys_brs[physical_network].add_flow( - table=constants.DVR_PROCESS_VLAN, - priority=0, - actions="resubmit(,%s)" % - constants.LOCAL_VLAN_TRANSLATION) - self.phys_brs[physical_network].add_flow( - table=constants.LOCAL_VLAN_TRANSLATION, priority=2, - in_port=self.phys_ofports[physical_network], - actions="drop") - self.phys_brs[physical_network].add_flow( - table=constants.DVR_NOT_LEARN_VLAN, + dest_table_id=constants.DVR_PROCESS_VLAN) + self.phys_brs[physical_network].install_goto( priority=1, - actions="NORMAL") + dest_table_id=constants.DVR_NOT_LEARN_VLAN) + self.phys_brs[physical_network].install_goto( + table_id=constants.DVR_PROCESS_VLAN, + priority=0, + dest_table_id=constants.LOCAL_VLAN_TRANSLATION) + self.phys_brs[physical_network].install_drop( + table_id=constants.LOCAL_VLAN_TRANSLATION, + in_port=self.phys_ofports[physical_network], + priority=2) + self.phys_brs[physical_network].install_normal( + table_id=constants.DVR_NOT_LEARN_VLAN, + priority=1) + + def _add_dvr_mac_for_phys_br(self, physical_network, mac): + self.int_br.add_dvr_mac_vlan(mac=mac, + port=self.int_ofports[physical_network]) + phys_br = self.phys_brs[physical_network] + phys_br.add_dvr_mac_vlan(mac=mac, + 
port=self.phys_ofports[physical_network]) + + def _remove_dvr_mac_for_phys_br(self, physical_network, mac): + # REVISIT(yamamoto): match in_port as well? + self.int_br.remove_dvr_mac_vlan(mac=mac) + phys_br = self.phys_brs[physical_network] + # REVISIT(yamamoto): match in_port as well? + phys_br.remove_dvr_mac_vlan(mac=mac) + + def _add_dvr_mac_for_tun_br(self, mac): + self.int_br.add_dvr_mac_tun(mac=mac, port=self.patch_tun_ofport) + self.tun_br.add_dvr_mac_tun(mac=mac, port=self.patch_int_ofport) + + def _remove_dvr_mac_for_tun_br(self, mac): + self.int_br.remove_dvr_mac_tun(mac=mac, port=self.patch_tun_ofport) + # REVISIT(yamamoto): match in_port as well? + self.tun_br.remove_dvr_mac_tun(mac=mac) + + def _add_dvr_mac(self, mac): + for physical_network in self.bridge_mappings: + self._add_dvr_mac_for_phys_br(physical_network, mac) + if self.enable_tunneling: + self._add_dvr_mac_for_tun_br(mac) + LOG.debug("Added DVR MAC flow for %s", mac) + self.registered_dvr_macs.add(mac) + + def _remove_dvr_mac(self, mac): + for physical_network in self.bridge_mappings: + self._remove_dvr_mac_for_phys_br(physical_network, mac) + if self.enable_tunneling: + self._remove_dvr_mac_for_tun_br(mac) + LOG.debug("Removed DVR MAC flow for %s", mac) + self.registered_dvr_macs.remove(mac) def setup_dvr_mac_flows_on_all_brs(self): if not self.in_distributed_mode(): @@ -289,38 +316,7 @@ class OVSDVRNeutronAgent(object): for mac in dvr_macs: if mac['mac_address'] == self.dvr_mac_address: continue - for physical_network in self.bridge_mappings: - self.int_br.add_flow(table=constants.LOCAL_SWITCHING, - priority=4, - in_port=self.int_ofports[physical_network], - dl_src=mac['mac_address'], - actions="resubmit(,%s)" % - constants.DVR_TO_SRC_MAC_VLAN) - self.phys_brs[physical_network].add_flow( - table=constants.DVR_NOT_LEARN_VLAN, - priority=2, - dl_src=mac['mac_address'], - actions="output:%s" % - self.phys_ofports[physical_network]) - - if self.enable_tunneling: - # Table 0 (default) will now 
sort DVR traffic from other - # traffic depending on in_port - self.int_br.add_flow(table=constants.LOCAL_SWITCHING, - priority=2, - in_port=self.patch_tun_ofport, - dl_src=mac['mac_address'], - actions="resubmit(,%s)" % - constants.DVR_TO_SRC_MAC) - # Table DVR_NOT_LEARN ensures unique dvr macs in the cloud - # are not learnt, as they may - # result in flow explosions - self.tun_br.add_flow(table=constants.DVR_NOT_LEARN, - priority=1, - dl_src=mac['mac_address'], - actions="output:%s" % - self.patch_int_ofport) - self.registered_dvr_macs.add(mac['mac_address']) + self._add_dvr_mac(mac['mac_address']) def dvr_mac_address_update(self, dvr_macs): if not self.dvr_mac_address: @@ -342,50 +338,10 @@ class OVSDVRNeutronAgent(object): dvr_macs_removed = self.registered_dvr_macs - dvr_host_macs for oldmac in dvr_macs_removed: - for physical_network in self.bridge_mappings: - self.int_br.delete_flows(table=constants.LOCAL_SWITCHING, - in_port=self.int_ofports[physical_network], - dl_src=oldmac) - self.phys_brs[physical_network].delete_flows( - table=constants.DVR_NOT_LEARN_VLAN, - dl_src=oldmac) - if self.enable_tunneling: - self.int_br.delete_flows(table=constants.LOCAL_SWITCHING, - in_port=self.patch_tun_ofport, - dl_src=oldmac) - self.tun_br.delete_flows(table=constants.DVR_NOT_LEARN, - dl_src=oldmac) - LOG.debug("Removed DVR MAC flow for %s", oldmac) - self.registered_dvr_macs.remove(oldmac) + self._remove_dvr_mac(oldmac) for newmac in dvr_macs_added: - for physical_network in self.bridge_mappings: - self.int_br.add_flow(table=constants.LOCAL_SWITCHING, - priority=4, - in_port=self.int_ofports[physical_network], - dl_src=newmac, - actions="resubmit(,%s)" % - constants.DVR_TO_SRC_MAC_VLAN) - self.phys_brs[physical_network].add_flow( - table=constants.DVR_NOT_LEARN_VLAN, - priority=2, - dl_src=newmac, - actions="output:%s" % - self.phys_ofports[physical_network]) - if self.enable_tunneling: - self.int_br.add_flow(table=constants.LOCAL_SWITCHING, - priority=2, - 
in_port=self.patch_tun_ofport, - dl_src=newmac, - actions="resubmit(,%s)" % - constants.DVR_TO_SRC_MAC) - self.tun_br.add_flow(table=constants.DVR_NOT_LEARN, - priority=1, - dl_src=newmac, - actions="output:%s" % - self.patch_int_ofport) - LOG.debug("Added DVR MAC flow for %s", newmac) - self.registered_dvr_macs.add(newmac) + self._add_dvr_mac(newmac) def in_distributed_mode(self): return self.dvr_mac_address is not None @@ -394,16 +350,11 @@ class OVSDVRNeutronAgent(object): return device_owner == n_const.DEVICE_OWNER_DVR_INTERFACE def process_tunneled_network(self, network_type, lvid, segmentation_id): - if self.in_distributed_mode(): - table_id = constants.DVR_NOT_LEARN - else: - table_id = constants.LEARN_FROM_TUN - self.tun_br.add_flow(table=constants.TUN_TABLE[network_type], - priority=1, - tun_id=segmentation_id, - actions="mod_vlan_vid:%s," - "resubmit(,%s)" % - (lvid, table_id)) + self.tun_br.provision_local_vlan( + network_type=network_type, + lvid=lvid, + segmentation_id=segmentation_id, + distributed=self.in_distributed_mode()) def _bind_distributed_router_interface_port(self, port, lvm, fixed_ips, device_owner): @@ -436,10 +387,8 @@ class OVSDVRNeutronAgent(object): # DVR takes over ldm.set_dvr_owned(True) - table_id = constants.DVR_TO_SRC_MAC vlan_to_use = lvm.vlan if lvm.network_type == p_const.TYPE_VLAN: - table_id = constants.DVR_TO_SRC_MAC_VLAN vlan_to_use = lvm.segmentation_id subnet_info = ldm.get_subnet_info() @@ -469,73 +418,31 @@ class OVSDVRNeutronAgent(object): comp_ovsport.add_subnet(subnet_uuid) self.local_ports[vif.vif_id] = comp_ovsport # create rule for just this vm port - self.int_br.add_flow(table=table_id, - priority=4, - dl_vlan=vlan_to_use, - dl_dst=comp_ovsport.get_mac(), - actions="strip_vlan,mod_dl_src:%s," - "output:%s" % - (subnet_info['gateway_mac'], - comp_ovsport.get_ofport())) + self.int_br.install_dvr_to_src_mac( + network_type=lvm.network_type, + vlan_tag=vlan_to_use, + gateway_mac=subnet_info['gateway_mac'], + 
dst_mac=comp_ovsport.get_mac(), + dst_port=comp_ovsport.get_ofport()) if lvm.network_type == p_const.TYPE_VLAN: - args = {'table': constants.DVR_PROCESS_VLAN, - 'priority': 3, - 'dl_vlan': lvm.vlan, - 'actions': "drop"} - if ip_version == 4: - args['proto'] = 'arp' - args['nw_dst'] = subnet_info['gateway_ip'] - else: - args['proto'] = 'icmp6' - args['icmp_type'] = n_const.ICMPV6_TYPE_RA - args['dl_src'] = subnet_info['gateway_mac'] - # TODO(vivek) remove the IPv6 related add_flow once SNAT is not + # TODO(vivek) remove the IPv6 related flows once SNAT is not # used for IPv6 DVR. - self.phys_brs[lvm.physical_network].add_flow(**args) - self.phys_brs[lvm.physical_network].add_flow( - table=constants.DVR_PROCESS_VLAN, - priority=2, - dl_vlan=lvm.vlan, - dl_dst=port.vif_mac, - actions="drop") - - self.phys_brs[lvm.physical_network].add_flow( - table=constants.DVR_PROCESS_VLAN, - priority=1, - dl_vlan=lvm.vlan, - dl_src=port.vif_mac, - actions="mod_dl_src:%s,resubmit(,%s)" % - (self.dvr_mac_address, constants.LOCAL_VLAN_TRANSLATION)) - + br = self.phys_brs[lvm.physical_network] if lvm.network_type in constants.TUNNEL_NETWORK_TYPES: - args = {'table': constants.DVR_PROCESS, - 'priority': 3, - 'dl_vlan': lvm.vlan, - 'actions': "drop"} - if ip_version == 4: - args['proto'] = 'arp' - args['nw_dst'] = subnet_info['gateway_ip'] - else: - args['proto'] = 'icmp6' - args['icmp_type'] = n_const.ICMPV6_TYPE_RA - args['dl_src'] = subnet_info['gateway_mac'] - # TODO(vivek) remove the IPv6 related add_flow once SNAT is not - # used for IPv6 DVR. - self.tun_br.add_flow(**args) - self.tun_br.add_flow(table=constants.DVR_PROCESS, - priority=2, - dl_vlan=lvm.vlan, - dl_dst=port.vif_mac, - actions="drop") + br = self.tun_br + # TODO(vivek) remove the IPv6 related flows once SNAT is not + # used for IPv6 DVR. 
+ if ip_version == 4: + br.install_dvr_process_ipv4( + vlan_tag=lvm.vlan, gateway_ip=subnet_info['gateway_ip']) + else: + br.install_dvr_process_ipv6( + vlan_tag=lvm.vlan, gateway_mac=subnet_info['gateway_mac']) + br.install_dvr_process( + vlan_tag=lvm.vlan, vif_mac=port.vif_mac, + dvr_mac_address=self.dvr_mac_address) - self.tun_br.add_flow(table=constants.DVR_PROCESS, - priority=1, - dl_vlan=lvm.vlan, - dl_src=port.vif_mac, - actions="mod_dl_src:%s,resubmit(,%s)" % - (self.dvr_mac_address, - constants.PATCH_LV_TO_TUN)) # the dvr router interface is itself a port, so capture it # queue this subnet to that port. A subnet appears only once as # a router interface on any given router @@ -578,20 +485,16 @@ class OVSDVRNeutronAgent(object): port.vif_mac, device_owner) ovsport.add_subnet(subnet_uuid) self.local_ports[port.vif_id] = ovsport - table_id = constants.DVR_TO_SRC_MAC vlan_to_use = lvm.vlan if lvm.network_type == p_const.TYPE_VLAN: - table_id = constants.DVR_TO_SRC_MAC_VLAN vlan_to_use = lvm.segmentation_id # create a rule for this vm port - self.int_br.add_flow(table=table_id, - priority=4, - dl_vlan=vlan_to_use, - dl_dst=ovsport.get_mac(), - actions="strip_vlan,mod_dl_src:%s," - "output:%s" % - (subnet_info['gateway_mac'], - ovsport.get_ofport())) + self.int_br.install_dvr_to_src_mac( + network_type=lvm.network_type, + vlan_tag=vlan_to_use, + gateway_mac=subnet_info['gateway_mac'], + dst_mac=ovsport.get_mac(), + dst_port=ovsport.get_ofport()) def _bind_centralized_snat_port_on_dvr_subnet(self, port, lvm, fixed_ips, device_owner): @@ -631,19 +534,15 @@ class OVSDVRNeutronAgent(object): port.vif_mac, device_owner) ovsport.add_subnet(subnet_uuid) self.local_ports[port.vif_id] = ovsport - table_id = constants.DVR_TO_SRC_MAC vlan_to_use = lvm.vlan if lvm.network_type == p_const.TYPE_VLAN: - table_id = constants.DVR_TO_SRC_MAC_VLAN vlan_to_use = lvm.segmentation_id - self.int_br.add_flow(table=table_id, - priority=4, - dl_vlan=vlan_to_use, - 
dl_dst=ovsport.get_mac(), - actions="strip_vlan,mod_dl_src:%s," - " output:%s" % - (subnet_info['gateway_mac'], - ovsport.get_ofport())) + self.int_br.install_dvr_to_src_mac( + network_type=lvm.network_type, + vlan_tag=vlan_to_use, + gateway_mac=subnet_info['gateway_mac'], + dst_mac=ovsport.get_mac(), + dst_port=ovsport.get_ofport()) def bind_port_to_dvr(self, port, local_vlan_map, fixed_ips, device_owner): @@ -681,10 +580,8 @@ class OVSDVRNeutronAgent(object): subnet_set = set(subnet_ids) network_type = lvm.network_type physical_network = lvm.physical_network - table_id = constants.DVR_TO_SRC_MAC vlan_to_use = lvm.vlan if network_type == p_const.TYPE_VLAN: - table_id = constants.DVR_TO_SRC_MAC_VLAN vlan_to_use = lvm.segmentation_id # ensure we process for all the subnets laid on this removed port for sub_uuid in subnet_set: @@ -700,9 +597,9 @@ class OVSDVRNeutronAgent(object): compute_ports = ldm.get_compute_ofports() for vif_id in compute_ports: comp_port = self.local_ports[vif_id] - self.int_br.delete_flows(table=table_id, - dl_vlan=vlan_to_use, - dl_dst=comp_port.get_mac()) + self.int_br.delete_dvr_to_src_mac( + network_type=network_type, + vlan_tag=vlan_to_use, dst_mac=comp_port.get_mac()) ldm.remove_all_compute_ofports() if ldm.get_csnat_ofport() == constants.OFPORT_INVALID: @@ -711,47 +608,23 @@ class OVSDVRNeutronAgent(object): # ports available on this agent anymore self.local_dvr_map.pop(sub_uuid, None) if network_type == p_const.TYPE_VLAN: - args = {'table': constants.DVR_PROCESS_VLAN, - 'dl_vlan': lvm.vlan} - if ip_version == 4: - args['proto'] = 'arp' - args['nw_dst'] = subnet_info['gateway_ip'] - else: - args['proto'] = 'icmp6' - args['icmp_type'] = n_const.ICMPV6_TYPE_RA - args['dl_src'] = subnet_info['gateway_mac'] - self.phys_br[physical_network].delete_flows(**args) - + br = self.phys_br[physical_network] if network_type in constants.TUNNEL_NETWORK_TYPES: - args = {'table': constants.DVR_PROCESS, - 'dl_vlan': lvm.vlan} - if ip_version == 4: - 
args['proto'] = 'arp' - args['nw_dst'] = subnet_info['gateway_ip'] - else: - args['proto'] = 'icmp6' - args['icmp_type'] = n_const.ICMPV6_TYPE_RA - args['dl_src'] = subnet_info['gateway_mac'] - self.tun_br.delete_flows(**args) + br = self.tun_br + if ip_version == 4: + br.delete_dvr_process_ipv4( + vlan_tag=lvm.vlan, gateway_ip=subnet_info['gateway_ip']) + else: + br.delete_dvr_process_ipv6( + vlan_tag=lvm.vlan, gateway_mac=subnet_info['gateway_mac']) ovsport.remove_subnet(sub_uuid) if lvm.network_type == p_const.TYPE_VLAN: - self.phys_br[physical_network].delete_flows( - table=constants.DVR_PROCESS_VLAN, - dl_vlan=lvm.vlan, - dl_dst=port.vif_mac) - self.phys_br[physical_network].delete_flows( - table=constants.DVR_PROCESS_VLAN, - dl_vlan=lvm.vlan, - dl_src=port.vif_mac) - + br = self.phys_br[physical_network] if lvm.network_type in constants.TUNNEL_NETWORK_TYPES: - self.tun_br.delete_flows(table=constants.DVR_PROCESS, - dl_vlan=lvm.vlan, - dl_dst=port.vif_mac) - self.tun_br.delete_flows(table=constants.DVR_PROCESS, - dl_vlan=lvm.vlan, - dl_src=port.vif_mac) + br = self.tun_br + br.delete_dvr_process(vlan_tag=lvm.vlan, vif_mac=port.vif_mac) + # release port state self.local_ports.pop(port.vif_id, None) @@ -767,15 +640,13 @@ class OVSDVRNeutronAgent(object): continue ldm = self.local_dvr_map[sub_uuid] ldm.remove_compute_ofport(port.vif_id) - table_id = constants.DVR_TO_SRC_MAC vlan_to_use = lvm.vlan if lvm.network_type == p_const.TYPE_VLAN: - table_id = constants.DVR_TO_SRC_MAC_VLAN vlan_to_use = lvm.segmentation_id # first remove this vm port rule - self.int_br.delete_flows(table=table_id, - dl_vlan=vlan_to_use, - dl_dst=ovsport.get_mac()) + self.int_br.delete_dvr_to_src_mac( + network_type=lvm.network_type, + vlan_tag=vlan_to_use, dst_mac=ovsport.get_mac()) # release port state self.local_ports.pop(port.vif_id, None) @@ -790,15 +661,13 @@ class OVSDVRNeutronAgent(object): return ldm = self.local_dvr_map[sub_uuid] ldm.set_csnat_ofport(constants.OFPORT_INVALID) - 
table_id = constants.DVR_TO_SRC_MAC vlan_to_use = lvm.vlan if lvm.network_type == p_const.TYPE_VLAN: - table_id = constants.DVR_TO_SRC_MAC_VLAN vlan_to_use = lvm.segmentation_id # then remove csnat port rule - self.int_br.delete_flows(table=table_id, - dl_vlan=vlan_to_use, - dl_dst=ovsport.get_mac()) + self.int_br.delete_dvr_to_src_mac( + network_type=lvm.network_type, + vlan_tag=vlan_to_use, dst_mac=ovsport.get_mac()) if not ldm.is_dvr_owned(): # if not owned by DVR (only used for csnat), remove this # subnet state altogether diff --git a/neutron/plugins/openvswitch/agent/ovs_neutron_agent.py b/neutron/plugins/openvswitch/agent/ovs_neutron_agent.py index 168e9093377..cb3d44827c1 100644 --- a/neutron/plugins/openvswitch/agent/ovs_neutron_agent.py +++ b/neutron/plugins/openvswitch/agent/ovs_neutron_agent.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python # Copyright 2011 VMware, Inc. # All Rights Reserved. # @@ -25,7 +24,6 @@ from oslo_log import log as logging import oslo_messaging from six import moves -from neutron.agent.common import config from neutron.agent.common import ovs_lib from neutron.agent.common import polling from neutron.agent.common import utils @@ -34,7 +32,6 @@ from neutron.agent.linux import ip_lib from neutron.agent import rpc as agent_rpc from neutron.agent import securitygroups_rpc as sg_rpc from neutron.api.rpc.handlers import dvr_rpc -from neutron.common import config as common_config from neutron.common import constants as q_const from neutron.common import exceptions from neutron.common import topics @@ -49,11 +46,16 @@ from neutron.plugins.openvswitch.common import constants LOG = logging.getLogger(__name__) cfg.CONF.import_group('AGENT', 'neutron.plugins.openvswitch.common.config') +cfg.CONF.import_group('OVS', 'neutron.plugins.openvswitch.common.config') # A placeholder for dead vlans. 
DEAD_VLAN_TAG = p_const.MAX_VLAN_TAG + 1 +class _mac_mydialect(netaddr.mac_unix): + word_fmt = '%.2x' + + class DeviceListRetrievalError(exceptions.NeutronException): message = _("Unable to retrieve port details for devices: %(devices)s " "because of error: %(error)s") @@ -119,7 +121,7 @@ class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin, # 1.2 Support DVR (Distributed Virtual Router) RPC target = oslo_messaging.Target(version='1.2') - def __init__(self, integ_br, tun_br, local_ip, + def __init__(self, bridge_classes, integ_br, tun_br, local_ip, bridge_mappings, polling_interval, tunnel_types=None, veth_mtu=None, l2_population=False, enable_distributed_routing=False, @@ -132,6 +134,7 @@ class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin, quitting_rpc_timeout=None): '''Constructor. + :param bridge_classes: a dict for bridge classes. :param integ_br: name of the integration bridge. :param tun_br: name of the tunnel bridge. :param local_ip: local IP address of this hypervisor. 
@@ -159,6 +162,9 @@ class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin, SIGTERM is received ''' super(OVSNeutronAgent, self).__init__() + self.br_int_cls = bridge_classes['br_int'] + self.br_phys_cls = bridge_classes['br_phys'] + self.br_tun_cls = bridge_classes['br_tun'] self.use_veth_interconnection = use_veth_interconnection self.veth_mtu = veth_mtu self.available_local_vlans = set(moves.range(p_const.MIN_VLAN_TAG, @@ -197,7 +203,7 @@ class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin, # Keep track of int_br's device count for use by _report_state() self.int_br_device_count = 0 - self.int_br = ovs_lib.OVSBridge(integ_br) + self.int_br = self.br_int_cls(integ_br) self.setup_integration_br() # Stores port update notifications for processing in main rpc loop self.updated_ports = set() @@ -440,21 +446,16 @@ class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin, def add_fdb_flow(self, br, port_info, remote_ip, lvm, ofport): if port_info == q_const.FLOODING_ENTRY: lvm.tun_ofports.add(ofport) - ofports = _ofport_set_to_str(lvm.tun_ofports) - br.mod_flow(table=constants.FLOOD_TO_TUN, - dl_vlan=lvm.vlan, - actions="strip_vlan,set_tunnel:%s,output:%s" % - (lvm.segmentation_id, ofports)) + br.install_flood_to_tun(lvm.vlan, lvm.segmentation_id, + lvm.tun_ofports) else: self.setup_entry_for_arp_reply(br, 'add', lvm.vlan, port_info.mac_address, port_info.ip_address) - br.add_flow(table=constants.UCAST_TO_TUN, - priority=2, - dl_vlan=lvm.vlan, - dl_dst=port_info.mac_address, - actions="strip_vlan,set_tunnel:%s,output:%s" % - (lvm.segmentation_id, ofport)) + br.install_unicast_to_tun(lvm.vlan, + lvm.segmentation_id, + ofport, + port_info.mac_address) def del_fdb_flow(self, br, port_info, remote_ip, lvm, ofport): if port_info == q_const.FLOODING_ENTRY: @@ -463,21 +464,16 @@ class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin, return lvm.tun_ofports.remove(ofport) if len(lvm.tun_ofports) > 0: - ofports = 
_ofport_set_to_str(lvm.tun_ofports) - br.mod_flow(table=constants.FLOOD_TO_TUN, - dl_vlan=lvm.vlan, - actions="strip_vlan,set_tunnel:%s,output:%s" % - (lvm.segmentation_id, ofports)) + br.install_flood_to_tun(lvm.vlan, lvm.segmentation_id, + lvm.tun_ofports) else: # This local vlan doesn't require any more tunnelling - br.delete_flows(table=constants.FLOOD_TO_TUN, dl_vlan=lvm.vlan) + br.delete_flood_to_tun(lvm.vlan) else: self.setup_entry_for_arp_reply(br, 'remove', lvm.vlan, port_info.mac_address, port_info.ip_address) - br.delete_flows(table=constants.UCAST_TO_TUN, - dl_vlan=lvm.vlan, - dl_dst=port_info.mac_address) + br.delete_unicast_to_tun(lvm.vlan, port_info.mac_address) def _fdb_chg_ip(self, context, fdb_entries): LOG.debug("update chg_ip received") @@ -496,25 +492,39 @@ class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin, if not self.arp_responder_enabled: return - mac = netaddr.EUI(mac_address, dialect=netaddr.mac_unix) - ip = netaddr.IPAddress(ip_address) + mac = str(netaddr.EUI(mac_address, dialect=_mac_mydialect)) + ip = str(netaddr.IPAddress(ip_address)) if action == 'add': - actions = constants.ARP_RESPONDER_ACTIONS % {'mac': mac, 'ip': ip} - br.add_flow(table=constants.ARP_RESPONDER, - priority=1, - proto='arp', - dl_vlan=local_vid, - nw_dst='%s' % ip, - actions=actions) + br.install_arp_responder(local_vid, ip, mac) elif action == 'remove': - br.delete_flows(table=constants.ARP_RESPONDER, - proto='arp', - dl_vlan=local_vid, - nw_dst='%s' % ip) + br.delete_arp_responder(local_vid, ip) else: LOG.warning(_LW('Action %s not supported'), action) + def _local_vlan_for_flat(self, lvid, physical_network): + phys_br = self.phys_brs[physical_network] + phys_port = self.phys_ofports[physical_network] + int_br = self.int_br + int_port = self.int_ofports[physical_network] + phys_br.provision_local_vlan(port=phys_port, lvid=lvid, + segmentation_id=None, + distributed=False) + int_br.provision_local_vlan(port=int_port, lvid=lvid, + 
segmentation_id=None) + + def _local_vlan_for_vlan(self, lvid, physical_network, segmentation_id): + distributed = self.enable_distributed_routing + phys_br = self.phys_brs[physical_network] + phys_port = self.phys_ofports[physical_network] + int_br = self.int_br + int_port = self.int_ofports[physical_network] + phys_br.provision_local_vlan(port=phys_port, lvid=lvid, + segmentation_id=segmentation_id, + distributed=distributed) + int_br.provision_local_vlan(port=int_port, lvid=lvid, + segmentation_id=segmentation_id) + def provision_local_vlan(self, net_uuid, network_type, physical_network, segmentation_id, local_vlan=None): '''Provisions a local VLAN. @@ -554,28 +564,20 @@ class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin, if network_type in constants.TUNNEL_NETWORK_TYPES: if self.enable_tunneling: # outbound broadcast/multicast - ofports = _ofport_set_to_str( - self.tun_br_ofports[network_type].values()) + ofports = self.tun_br_ofports[network_type].values() if ofports: - self.tun_br.mod_flow(table=constants.FLOOD_TO_TUN, - dl_vlan=lvid, - actions="strip_vlan," - "set_tunnel:%s,output:%s" % - (segmentation_id, ofports)) + self.tun_br.install_flood_to_tun(lvid, + segmentation_id, + ofports) # inbound from tunnels: set lvid in the right table # and resubmit to Table LEARN_FROM_TUN for mac learning if self.enable_distributed_routing: self.dvr_agent.process_tunneled_network( network_type, lvid, segmentation_id) else: - self.tun_br.add_flow( - table=constants.TUN_TABLE[network_type], - priority=1, - tun_id=segmentation_id, - actions="mod_vlan_vid:%s," - "resubmit(,%s)" % - (lvid, constants.LEARN_FROM_TUN)) - + self.tun_br.provision_local_vlan( + network_type=network_type, lvid=lvid, + segmentation_id=segmentation_id) else: LOG.error(_LE("Cannot provision %(network_type)s network for " "net-id=%(net_uuid)s - tunneling disabled"), @@ -583,18 +585,7 @@ class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin, 'net_uuid': net_uuid}) elif network_type 
== p_const.TYPE_FLAT: if physical_network in self.phys_brs: - # outbound - br = self.phys_brs[physical_network] - br.add_flow(priority=4, - in_port=self.phys_ofports[physical_network], - dl_vlan=lvid, - actions="strip_vlan,normal") - # inbound - self.int_br.add_flow( - priority=3, - in_port=self.int_ofports[physical_network], - dl_vlan=0xffff, - actions="mod_vlan_vid:%s,normal" % lvid) + self._local_vlan_for_flat(lvid, physical_network) else: LOG.error(_LE("Cannot provision flat network for " "net-id=%(net_uuid)s - no bridge for " @@ -603,26 +594,8 @@ class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin, 'physical_network': physical_network}) elif network_type == p_const.TYPE_VLAN: if physical_network in self.phys_brs: - # outbound - br = self.phys_brs[physical_network] - if self.enable_distributed_routing: - br.add_flow(table=constants.LOCAL_VLAN_TRANSLATION, - priority=4, - in_port=self.phys_ofports[physical_network], - dl_vlan=lvid, - actions="mod_vlan_vid:%s,normal" % segmentation_id) - else: - br.add_flow(priority=4, - in_port=self.phys_ofports[physical_network], - dl_vlan=lvid, - actions="mod_vlan_vid:%s,normal" % segmentation_id) - - # inbound - self.int_br.add_flow(priority=3, - in_port=self. 
- int_ofports[physical_network], - dl_vlan=segmentation_id, - actions="mod_vlan_vid:%s,normal" % lvid) + self._local_vlan_for_vlan(lvid, physical_network, + segmentation_id) else: LOG.error(_LE("Cannot provision VLAN network for " "net-id=%(net_uuid)s - no bridge for " @@ -654,10 +627,12 @@ class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin, if lvm.network_type in constants.TUNNEL_NETWORK_TYPES: if self.enable_tunneling: - self.tun_br.delete_flows( - table=constants.TUN_TABLE[lvm.network_type], - tun_id=lvm.segmentation_id) - self.tun_br.delete_flows(dl_vlan=lvm.vlan) + self.tun_br.reclaim_local_vlan( + network_type=lvm.network_type, + segmentation_id=lvm.segmentation_id) + self.tun_br.delete_flood_to_tun(lvm.vlan) + self.tun_br.delete_unicast_to_tun(lvm.vlan, None) + self.tun_br.delete_arp_responder(lvm.vlan, None) if self.l2_pop: # Try to remove tunnel ports if not used by other networks for ofport in lvm.tun_ofports: @@ -667,24 +642,26 @@ class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin, if lvm.physical_network in self.phys_brs: # outbound br = self.phys_brs[lvm.physical_network] - br.delete_flows(in_port=self.phys_ofports[lvm. - physical_network], - dl_vlan=lvm.vlan) + br.reclaim_local_vlan( + port=self.phys_ofports[lvm.physical_network], + lvid=lvm.vlan) # inbound br = self.int_br - br.delete_flows(in_port=self.int_ofports[lvm.physical_network], - dl_vlan=0xffff) + br.reclaim_local_vlan( + port=self.int_ofports[lvm.physical_network], + segmentation_id=None) elif lvm.network_type == p_const.TYPE_VLAN: if lvm.physical_network in self.phys_brs: # outbound br = self.phys_brs[lvm.physical_network] - br.delete_flows(in_port=self.phys_ofports[lvm. 
- physical_network], - dl_vlan=lvm.vlan) + br.reclaim_local_vlan( + port=self.phys_ofports[lvm.physical_network], + lvid=lvm.vlan) # inbound br = self.int_br - br.delete_flows(in_port=self.int_ofports[lvm.physical_network], - dl_vlan=lvm.segmentation_id) + br.reclaim_local_vlan( + port=self.int_ofports[lvm.physical_network], + segmentation_id=lvm.segmentation_id) elif lvm.network_type == p_const.TYPE_LOCAL: # no flows needed for local networks pass @@ -746,6 +723,8 @@ class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin, self.int_br.set_db_attribute( "Port", port.port_name, "tag", lvm.vlan) if port.ofport != -1: + # NOTE(yamamoto): Remove possible drop_port flow + # installed by port_dead. self.int_br.delete_flows(in_port=port.ofport) # update plugin about port status @@ -766,43 +745,22 @@ class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin, @staticmethod def setup_arp_spoofing_protection(bridge, vif, port_details): # clear any previous flows related to this port in our ARP table - bridge.delete_flows(table=constants.LOCAL_SWITCHING, - in_port=vif.ofport, proto='arp') - bridge.delete_flows(table=constants.ARP_SPOOF_TABLE, - in_port=vif.ofport) + bridge.delete_arp_spoofing_protection(port=vif.ofport) if not port_details.get('port_security_enabled', True): LOG.info(_LI("Skipping ARP spoofing rules for port '%s' because " "it has port security disabled"), vif.port_name) return - # all of the rules here are based on 'in_port' match criteria - # so their cleanup will be handled by 'update_stale_ofport_rules' - # collect all of the addresses and cidrs that belong to the port - addresses = [f['ip_address'] for f in port_details['fixed_ips']] + addresses = {f['ip_address'] for f in port_details['fixed_ips']} if port_details.get('allowed_address_pairs'): - addresses += [p['ip_address'] - for p in port_details['allowed_address_pairs']] + addresses |= {p['ip_address'] + for p in port_details['allowed_address_pairs']} - # allow ARPs as long as they match 
addresses that actually - # belong to the port. - for ip in addresses: - if netaddr.IPNetwork(ip).version != 4: - continue - bridge.add_flow(table=constants.ARP_SPOOF_TABLE, priority=2, - proto='arp', arp_spa=ip, in_port=vif.ofport, - actions="NORMAL") + addresses = {ip for ip in addresses + if netaddr.IPNetwork(ip).version == 4} - # drop any ARPs in this table that aren't explicitly allowed - bridge.add_flow(table=constants.ARP_SPOOF_TABLE, priority=1, - proto='arp', actions="DROP") - - # Now that the rules are ready, direct ARP traffic from the port into - # the anti-spoof table. - # This strategy fails gracefully because OVS versions that can't match - # on ARP headers will just process traffic normally. - bridge.add_flow(table=constants.LOCAL_SWITCHING, - priority=10, proto='arp', in_port=vif.ofport, - actions=("resubmit(,%s)" % constants.ARP_SPOOF_TABLE)) + bridge.install_arp_spoofing_protection(port=vif.ofport, + ip_addresses=addresses) def port_unbound(self, vif_id, net_uuid=None): '''Unbind port. @@ -841,8 +799,7 @@ class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin, if cur_tag != DEAD_VLAN_TAG: self.int_br.set_db_attribute("Port", port.port_name, "tag", DEAD_VLAN_TAG) - self.int_br.add_flow(priority=2, in_port=port.ofport, - actions="drop") + self.int_br.drop_port(in_port=port.ofport) def setup_integration_br(self): '''Setup the integration bridge. @@ -855,14 +812,11 @@ class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin, # which does nothing if bridge already exists. 
self.int_br.create() self.int_br.set_secure_mode() + self.int_br.setup_controllers(cfg.CONF) self.int_br.delete_port(cfg.CONF.OVS.int_peer_patch_port) - self.int_br.remove_all_flows() - # switch all traffic using L2 learning - self.int_br.add_flow(priority=1, actions="normal") - # Add a canary flow to int_br to track OVS restarts - self.int_br.add_flow(table=constants.CANARY_TABLE, priority=0, - actions="drop") + + self.int_br.setup_default_table() def setup_ancillary_bridges(self, integ_br, tun_br): '''Setup ancillary bridges - for example br-ex.''' @@ -899,9 +853,10 @@ class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin, :param tun_br_name: the name of the tunnel bridge. ''' if not self.tun_br: - self.tun_br = ovs_lib.OVSBridge(tun_br_name) + self.tun_br = self.br_tun_cls(tun_br_name) self.tun_br.reset_bridge(secure_mode=True) + self.tun_br.setup_controllers(cfg.CONF) self.patch_tun_ofport = self.int_br.add_patch_port( cfg.CONF.OVS.int_peer_patch_port, cfg.CONF.OVS.tun_peer_patch_port) self.patch_int_ofport = self.tun_br.add_patch_port( @@ -913,83 +868,15 @@ class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin, "version of OVS does not support tunnels or patch " "ports. Agent terminated!")) exit(1) - self.tun_br.remove_all_flows() + self.tun_br.delete_flows() def setup_tunnel_br(self): '''Setup the tunnel bridge. Add all flows to the tunnel bridge. 
''' - # Table 0 (default) will sort incoming traffic depending on in_port - self.tun_br.add_flow(priority=1, - in_port=self.patch_int_ofport, - actions="resubmit(,%s)" % - constants.PATCH_LV_TO_TUN) - self.tun_br.add_flow(priority=0, actions="drop") - if self.arp_responder_enabled: - # ARP broadcast-ed request go to the local ARP_RESPONDER table to - # be locally resolved - self.tun_br.add_flow(table=constants.PATCH_LV_TO_TUN, - priority=1, - proto='arp', - dl_dst="ff:ff:ff:ff:ff:ff", - actions=("resubmit(,%s)" % - constants.ARP_RESPONDER)) - # PATCH_LV_TO_TUN table will handle packets coming from patch_int - # unicasts go to table UCAST_TO_TUN where remote addresses are learnt - self.tun_br.add_flow(table=constants.PATCH_LV_TO_TUN, - priority=0, - dl_dst="00:00:00:00:00:00/01:00:00:00:00:00", - actions="resubmit(,%s)" % constants.UCAST_TO_TUN) - # Broadcasts/multicasts go to table FLOOD_TO_TUN that handles flooding - self.tun_br.add_flow(table=constants.PATCH_LV_TO_TUN, - priority=0, - dl_dst="01:00:00:00:00:00/01:00:00:00:00:00", - actions="resubmit(,%s)" % constants.FLOOD_TO_TUN) - # Tables [tunnel_type]_TUN_TO_LV will set lvid depending on tun_id - # for each tunnel type, and resubmit to table LEARN_FROM_TUN where - # remote mac addresses will be learnt - for tunnel_type in constants.TUNNEL_NETWORK_TYPES: - self.tun_br.add_flow(table=constants.TUN_TABLE[tunnel_type], - priority=0, - actions="drop") - # LEARN_FROM_TUN table will have a single flow using a learn action to - # dynamically set-up flows in UCAST_TO_TUN corresponding to remote mac - # addresses (assumes that lvid has already been set by a previous flow) - learned_flow = ("table=%s," - "priority=1," - "hard_timeout=300," - "NXM_OF_VLAN_TCI[0..11]," - "NXM_OF_ETH_DST[]=NXM_OF_ETH_SRC[]," - "load:0->NXM_OF_VLAN_TCI[]," - "load:NXM_NX_TUN_ID[]->NXM_NX_TUN_ID[]," - "output:NXM_OF_IN_PORT[]" % - constants.UCAST_TO_TUN) - # Once remote mac addresses are learnt, output packet to patch_int - 
self.tun_br.add_flow(table=constants.LEARN_FROM_TUN, - priority=1, - actions="learn(%s),output:%s" % - (learned_flow, self.patch_int_ofport)) - # Egress unicast will be handled in table UCAST_TO_TUN, where remote - # mac addresses will be learned. For now, just add a default flow that - # will resubmit unknown unicasts to table FLOOD_TO_TUN to treat them - # as broadcasts/multicasts - self.tun_br.add_flow(table=constants.UCAST_TO_TUN, - priority=0, - actions="resubmit(,%s)" % - constants.FLOOD_TO_TUN) - if self.arp_responder_enabled: - # If none of the ARP entries correspond to the requested IP, the - # broadcast-ed packet is resubmitted to the flooding table - self.tun_br.add_flow(table=constants.ARP_RESPONDER, - priority=0, - actions="resubmit(,%s)" % - constants.FLOOD_TO_TUN) - # FLOOD_TO_TUN will handle flooding in tunnels based on lvid, - # for now, add a default drop action - self.tun_br.add_flow(table=constants.FLOOD_TO_TUN, - priority=0, - actions="drop") + self.tun_br.setup_default_table(self.patch_int_ofport, + self.arp_responder_enabled) def get_peer_name(self, prefix, name): """Construct a peer name based on the prefix and name. 
@@ -1041,9 +928,9 @@ class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin, {'physical_network': physical_network, 'bridge': bridge}) sys.exit(1) - br = ovs_lib.OVSBridge(bridge) - br.remove_all_flows() - br.add_flow(priority=1, actions="normal") + br = self.br_phys_cls(bridge) + br.setup_controllers(cfg.CONF) + br.setup_default_table() self.phys_brs[physical_network] = br # interconnect physical and integration bridges using veth/patchs @@ -1076,9 +963,8 @@ class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin, self.phys_ofports[physical_network] = phys_ofport # block all untranslated traffic between bridges - self.int_br.add_flow(priority=2, in_port=int_ofport, - actions="drop") - br.add_flow(priority=2, in_port=phys_ofport, actions="drop") + self.int_br.drop_port(in_port=int_ofport) + br.drop_port(in_port=phys_ofport) if self.use_veth_interconnection: # enable veth to pass traffic @@ -1114,7 +1000,7 @@ class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin, # delete any stale rules based on removed ofports ofports_deleted = set(previous.values()) - set(current.values()) for ofport in ofports_deleted: - self.int_br.delete_flows(in_port=ofport) + self.int_br.delete_arp_spoofing_protection(port=ofport) # store map for next iteration self.vifname_to_ofport_map = current @@ -1237,20 +1123,16 @@ class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin, self.tun_br_ofports[tunnel_type][remote_ip] = ofport # Add flow in default table to resubmit to the right # tunnelling table (lvid will be set in the latter) - br.add_flow(priority=1, - in_port=ofport, - actions="resubmit(,%s)" % - constants.TUN_TABLE[tunnel_type]) + br.setup_tunnel_port(tunnel_type, ofport) - ofports = _ofport_set_to_str(self.tun_br_ofports[tunnel_type].values()) + ofports = self.tun_br_ofports[tunnel_type].values() if ofports and not self.l2_pop: # Update flooding flows to include the new tunnel for vlan_mapping in list(self.local_vlan_map.values()): if 
vlan_mapping.network_type == tunnel_type: - br.mod_flow(table=constants.FLOOD_TO_TUN, - dl_vlan=vlan_mapping.vlan, - actions="strip_vlan,set_tunnel:%s,output:%s" % - (vlan_mapping.segmentation_id, ofports)) + br.install_flood_to_tun(vlan_mapping.vlan, + vlan_mapping.segmentation_id, + ofports) return ofport def setup_tunnel_port(self, br, remote_ip, network_type): @@ -1276,7 +1158,7 @@ class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin, port_name = '%s-%s' % (tunnel_type, self.get_ip_in_hex(remote_ip)) br.delete_port(port_name) - br.delete_flows(in_port=ofport) + br.cleanup_tunnel_port(ofport) self.tun_br_ofports[tunnel_type].pop(remote_ip, None) def treat_devices_added_or_updated(self, devices, ovs_restarted): @@ -1521,18 +1403,14 @@ class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin, def check_ovs_status(self): # Check for the canary flow - canary_flow = self.int_br.dump_flows_for_table(constants.CANARY_TABLE) - if canary_flow == '': + status = self.int_br.check_canary_table() + if status == constants.OVS_RESTARTED: LOG.warn(_LW("OVS is restarted. OVSNeutronAgent will reset " "bridges and recover ports.")) - return constants.OVS_RESTARTED - elif canary_flow is None: + elif status == constants.OVS_DEAD: LOG.warn(_LW("OVS is dead. OVSNeutronAgent will keep running " "and checking OVS status periodically.")) - return constants.OVS_DEAD - else: - # OVS is in normal status - return constants.OVS_NORMAL + return status def loop_count_and_wait(self, start_time, port_stats): # sleep till end of polling interval @@ -1688,6 +1566,9 @@ class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin, self.loop_count_and_wait(start, port_stats) def daemon_loop(self): + # Start everything. + LOG.info(_LI("Agent initialized successfully, now running... 
")) + signal.signal(signal.SIGTERM, self._handle_sigterm) with polling.get_polling_manager( self.minimize_polling, self.ovsdb_monitor_respawn_interval) as pm: @@ -1713,10 +1594,6 @@ class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin, "Agent and Server side.")) -def _ofport_set_to_str(ofport_set): - return ",".join(map(str, ofport_set)) - - def create_agent_config_map(config): """Create a map of agent config parameters. @@ -1757,36 +1634,25 @@ def create_agent_config_map(config): return kwargs -def main(): - cfg.CONF.register_opts(ip_lib.OPTS) - config.register_root_helper(cfg.CONF) - common_config.init(sys.argv[1:]) - common_config.setup_logging() - q_utils.log_opt_values(LOG) - - try: - agent_config = create_agent_config_map(cfg.CONF) - except ValueError as e: - LOG.error(_LE('%s Agent terminated!'), e) - sys.exit(1) - +def prepare_xen_compute(): is_xen_compute_host = 'rootwrap-xen-dom0' in cfg.CONF.AGENT.root_helper if is_xen_compute_host: # Force ip_lib to always use the root helper to ensure that ip # commands target xen dom0 rather than domU. + cfg.CONF.register_opts(ip_lib.OPTS) cfg.CONF.set_default('ip_lib_force_root', True) + +def main(bridge_classes): try: - agent = OVSNeutronAgent(**agent_config) + agent_config = create_agent_config_map(cfg.CONF) + except ValueError: + LOG.exception(_LE("Agent failed to create agent config map")) + raise SystemExit(1) + prepare_xen_compute() + try: + agent = OVSNeutronAgent(bridge_classes, **agent_config) except RuntimeError as e: LOG.error(_LE("%s Agent terminated!"), e) sys.exit(1) - signal.signal(signal.SIGTERM, agent._handle_sigterm) - - # Start everything. - LOG.info(_LI("Agent initialized successfully, now running... 
")) agent.daemon_loop() - - -if __name__ == "__main__": - main() diff --git a/neutron/plugins/openvswitch/common/config.py b/neutron/plugins/openvswitch/common/config.py index 887a6c5cb86..cbde4173338 100644 --- a/neutron/plugins/openvswitch/common/config.py +++ b/neutron/plugins/openvswitch/common/config.py @@ -44,6 +44,8 @@ ovs_opts = [ cfg.BoolOpt('use_veth_interconnection', default=False, help=_("Use veths instead of patch ports to interconnect the " "integration bridge to physical bridges.")), + cfg.StrOpt('of_interface', default='ovs-ofctl', choices=['ovs-ofctl'], + help=_("OpenFlow interface to use.")), ] agent_opts = [ diff --git a/neutron/tests/functional/agent/test_ovs_flows.py b/neutron/tests/functional/agent/test_ovs_flows.py index 73719d1c448..bf9936d633a 100644 --- a/neutron/tests/functional/agent/test_ovs_flows.py +++ b/neutron/tests/functional/agent/test_ovs_flows.py @@ -13,9 +13,16 @@ # License for the specific language governing permissions and limitations # under the License. 
+import eventlet +import mock + +from oslo_config import cfg +from oslo_utils import importutils + from neutron.agent.linux import ip_lib from neutron.cmd.sanity import checks from neutron.plugins.openvswitch.agent import ovs_neutron_agent as ovsagt +from neutron.plugins.openvswitch.common import constants from neutron.tests.common import machine_fixtures from neutron.tests.common import net_helpers from neutron.tests.functional.agent import test_ovs_lib @@ -23,16 +30,68 @@ from neutron.tests.functional import base from neutron.tests import tools -class ARPSpoofTestCase(test_ovs_lib.OVSBridgeTestBase, - base.BaseSudoTestCase): +cfg.CONF.import_group('OVS', 'neutron.plugins.openvswitch.common.config') + +class _OVSAgentTestBase(test_ovs_lib.OVSBridgeTestBase, + base.BaseSudoTestCase): + def setUp(self): + super(_OVSAgentTestBase, self).setUp() + self.br = self.useFixture(net_helpers.OVSBridgeFixture()).bridge + self.of_interface_mod = importutils.import_module(self._MAIN_MODULE) + self.br_int_cls = None + self.br_tun_cls = None + self.br_phys_cls = None + self.br_int = None + self.init_done = False + self.init_done_ev = eventlet.event.Event() + self._main_thread = eventlet.spawn(self._kick_main) + self.addCleanup(self._kill_main) + + # Wait for _kick_main -> of_interface main -> _agent_main + # NOTE(yamamoto): This complexity came from how "native" of_interface + # runs its openflow controller. "native" of_interface's main routine + # blocks while running the embedded openflow controller. In that case, + # the agent rpc_loop runs in another thread. However, for FT we need + # to run setUp() and test_xxx() in the same thread. So I made this + # run of_interface's main in a separate thread instead. 
+ while not self.init_done: + self.init_done_ev.wait() + + def _kick_main(self): + with mock.patch.object(ovsagt, 'main', self._agent_main): + self.of_interface_mod.main() + + def _kill_main(self): + self._main_thread.kill() + self._main_thread.wait() + + def _agent_main(self, bridge_classes): + self.br_int_cls = bridge_classes['br_int'] + self.br_phys_cls = bridge_classes['br_phys'] + self.br_tun_cls = bridge_classes['br_tun'] + self.br_int = self.br_int_cls(self.br.br_name) + self.br_int.set_secure_mode() + self.br_int.setup_controllers(cfg.CONF) + self.br_int.setup_default_table() + + # signal to setUp() + self.init_done = True + self.init_done_ev.send() + + +class _OVSAgentOFCtlTestBase(_OVSAgentTestBase): + _MAIN_MODULE = 'neutron.plugins.openvswitch.agent.openflow.ovs_ofctl.main' + + +class _ARPSpoofTestCase(object): def setUp(self): if not checks.arp_header_match_supported(): self.skipTest("ARP header matching not supported") # NOTE(kevinbenton): it would be way cooler to use scapy for # these but scapy requires the python process to be running as # root to bind to the ports. 
- super(ARPSpoofTestCase, self).setUp() + super(_ARPSpoofTestCase, self).setUp() self.src_addr = '192.168.0.1' self.dst_addr = '192.168.0.2' self.src_namespace = self.useFixture( @@ -120,4 +179,22 @@ class ARPSpoofTestCase(test_ovs_lib.OVSBridgeTestBase, 'allowed_address_pairs': [ dict(ip_address=ip) for ip in addrs]} ovsagt.OVSNeutronAgent.setup_arp_spoofing_protection( - self.br, VifPort(), details) + self.br_int, VifPort(), details) + + +class ARPSpoofOFCtlTestCase(_ARPSpoofTestCase, _OVSAgentOFCtlTestBase): + pass + + +class _CanaryTableTestCase(object): + def test_canary_table(self): + self.br_int.delete_flows() + self.assertEqual(constants.OVS_RESTARTED, + self.br_int.check_canary_table()) + self.br_int.setup_canary_table() + self.assertEqual(constants.OVS_NORMAL, + self.br_int.check_canary_table()) + + +class CanaryTableOFCtlTestCase(_CanaryTableTestCase, _OVSAgentOFCtlTestBase): + pass diff --git a/neutron/tests/unit/plugins/openvswitch/agent/openflow/__init__.py b/neutron/tests/unit/plugins/openvswitch/agent/openflow/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/neutron/tests/unit/plugins/openvswitch/agent/openflow/ovs_ofctl/__init__.py b/neutron/tests/unit/plugins/openvswitch/agent/openflow/ovs_ofctl/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/neutron/tests/unit/plugins/openvswitch/agent/openflow/ovs_ofctl/ovs_bridge_test_base.py b/neutron/tests/unit/plugins/openvswitch/agent/openflow/ovs_ofctl/ovs_bridge_test_base.py new file mode 100644 index 00000000000..5d071fe3bbd --- /dev/null +++ b/neutron/tests/unit/plugins/openvswitch/agent/openflow/ovs_ofctl/ovs_bridge_test_base.py @@ -0,0 +1,160 @@ +# Copyright (C) 2014,2015 VA Linux Systems Japan K.K. +# Copyright (C) 2014,2015 YAMAMOTO Takashi +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import mock + +from neutron.tests.unit.plugins.openvswitch.agent import ovs_test_base + + +call = mock.call # short hand + + +class OVSBridgeTestBase(ovs_test_base.OVSOFCtlTestBase): + def setup_bridge_mock(self, name, cls): + self.br = cls(name) + mock_add_flow = mock.patch.object(self.br, 'add_flow').start() + mock_mod_flow = mock.patch.object(self.br, 'mod_flow').start() + mock_delete_flows = mock.patch.object(self.br, 'delete_flows').start() + self.mock = mock.Mock() + self.mock.attach_mock(mock_add_flow, 'add_flow') + self.mock.attach_mock(mock_mod_flow, 'mod_flow') + self.mock.attach_mock(mock_delete_flows, 'delete_flows') + + def test_drop_port(self): + in_port = 2345 + self.br.drop_port(in_port=in_port) + expected = [ + call.add_flow(priority=2, table=0, actions='drop', + in_port=in_port), + ] + self.assertEqual(expected, self.mock.mock_calls) + + def test_install_goto(self): + dest_table_id = 123 + priority = 99 + in_port = 666 + self.br.install_goto(dest_table_id=dest_table_id, + priority=priority, in_port=in_port) + expected = [ + call.add_flow(priority=priority, table=0, + actions='resubmit(,%s)' % dest_table_id, + in_port=in_port), + ] + self.assertEqual(expected, self.mock.mock_calls) + + def test_install_drop(self): + priority = 99 + in_port = 666 + self.br.install_drop(priority=priority, in_port=in_port) + expected = [ + call.add_flow(priority=priority, table=0, + actions='drop', + in_port=in_port), + ] + self.assertEqual(expected, self.mock.mock_calls) + + def test_install_normal(self): + priority = 99 + in_port = 666 + 
self.br.install_normal(priority=priority, in_port=in_port) + expected = [ + call.add_flow(priority=priority, table=0, + actions='normal', + in_port=in_port), + ] + self.assertEqual(expected, self.mock.mock_calls) + + +class OVSDVRProcessTestMixin(object): + def test_install_dvr_process_ipv4(self): + vlan_tag = 999 + gateway_ip = '192.0.2.1' + self.br.install_dvr_process_ipv4(vlan_tag=vlan_tag, + gateway_ip=gateway_ip) + expected = [ + call.add_flow(table=self.dvr_process_table_id, + proto='arp', nw_dst=gateway_ip, actions='drop', + priority=3, dl_vlan=vlan_tag), + ] + self.assertEqual(expected, self.mock.mock_calls) + + def test_delete_dvr_process_ipv4(self): + vlan_tag = 999 + gateway_ip = '192.0.2.1' + self.br.delete_dvr_process_ipv4(vlan_tag=vlan_tag, + gateway_ip=gateway_ip) + expected = [ + call.delete_flows(table=self.dvr_process_table_id, + dl_vlan=vlan_tag, proto='arp', + nw_dst=gateway_ip), + ] + self.assertEqual(expected, self.mock.mock_calls) + + def test_install_dvr_process_ipv6(self): + vlan_tag = 999 + gateway_mac = '08:60:6e:7f:74:e7' + self.br.install_dvr_process_ipv6(vlan_tag=vlan_tag, + gateway_mac=gateway_mac) + expected = [ + call.add_flow(table=self.dvr_process_table_id, + proto='icmp6', dl_src=gateway_mac, actions='drop', + priority=3, dl_vlan=vlan_tag), + ] + self.assertEqual(expected, self.mock.mock_calls) + + def test_delete_dvr_process_ipv6(self): + vlan_tag = 999 + gateway_mac = '08:60:6e:7f:74:e7' + self.br.delete_dvr_process_ipv6(vlan_tag=vlan_tag, + gateway_mac=gateway_mac) + expected = [ + call.delete_flows(table=self.dvr_process_table_id, + dl_vlan=vlan_tag, dl_src=gateway_mac, + proto='icmp6'), + ] + self.assertEqual(expected, self.mock.mock_calls) + + def test_install_dvr_process(self): + vlan_tag = 999 + vif_mac = '00:0e:0c:5e:95:d0' + dvr_mac_address = 'f2:0b:a4:5b:b2:ab' + self.br.install_dvr_process(vlan_tag=vlan_tag, + vif_mac=vif_mac, + dvr_mac_address=dvr_mac_address) + expected = [ + call.add_flow(priority=2, 
table=self.dvr_process_table_id, + dl_dst=vif_mac, dl_vlan=vlan_tag, actions='drop'), + call.add_flow(priority=1, table=self.dvr_process_table_id, + dl_vlan=vlan_tag, dl_src=vif_mac, + actions='mod_dl_src:%(mac)s,resubmit(,%(next)s)' % { + 'mac': dvr_mac_address, + 'next': self.dvr_process_next_table_id, + }), + ] + self.assertEqual(expected, self.mock.mock_calls) + + def test_delete_dvr_process(self): + vlan_tag = 999 + vif_mac = '00:0e:0c:5e:95:d0' + self.br.delete_dvr_process(vlan_tag=vlan_tag, + vif_mac=vif_mac) + expected = [ + call.delete_flows(table=self.dvr_process_table_id, + dl_dst=vif_mac, dl_vlan=vlan_tag), + call.delete_flows(table=self.dvr_process_table_id, + dl_vlan=vlan_tag, dl_src=vif_mac), + ] + self.assertEqual(expected, self.mock.mock_calls) diff --git a/neutron/tests/unit/plugins/openvswitch/agent/openflow/ovs_ofctl/test_br_int.py b/neutron/tests/unit/plugins/openvswitch/agent/openflow/ovs_ofctl/test_br_int.py new file mode 100644 index 00000000000..27cf9de3f0a --- /dev/null +++ b/neutron/tests/unit/plugins/openvswitch/agent/openflow/ovs_ofctl/test_br_int.py @@ -0,0 +1,213 @@ +# Copyright (C) 2014,2015 VA Linux Systems Japan K.K. +# Copyright (C) 2014,2015 YAMAMOTO Takashi +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import mock + +from neutron.tests.unit.plugins.openvswitch.agent.openflow.ovs_ofctl \ + import ovs_bridge_test_base + + +call = mock.call # short hand + + +class OVSIntegrationBridgeTest(ovs_bridge_test_base.OVSBridgeTestBase): + def setUp(self): + super(OVSIntegrationBridgeTest, self).setUp() + self.setup_bridge_mock('br-int', self.br_int_cls) + + def test_setup_default_table(self): + self.br.setup_default_table() + expected = [ + call.delete_flows(), + call.add_flow(priority=0, table=0, actions='normal'), + call.add_flow(priority=0, table=23, actions='drop'), + call.add_flow(priority=0, table=24, actions='drop'), + ] + self.assertEqual(expected, self.mock.mock_calls) + + def test_provision_local_vlan(self): + port = 999 + lvid = 888 + segmentation_id = 777 + self.br.provision_local_vlan(port=port, lvid=lvid, + segmentation_id=segmentation_id) + expected = [ + call.add_flow(priority=3, dl_vlan=segmentation_id, + in_port=port, + actions='mod_vlan_vid:%s,normal' % lvid), + ] + self.assertEqual(expected, self.mock.mock_calls) + + def test_provision_local_vlan_novlan(self): + port = 999 + lvid = 888 + segmentation_id = None + self.br.provision_local_vlan(port=port, lvid=lvid, + segmentation_id=segmentation_id) + expected = [ + call.add_flow(priority=3, dl_vlan=0xffff, + in_port=port, + actions='mod_vlan_vid:%s,normal' % lvid), + ] + self.assertEqual(expected, self.mock.mock_calls) + + def test_reclaim_local_vlan(self): + port = 999 + segmentation_id = 777 + self.br.reclaim_local_vlan(port=port, segmentation_id=segmentation_id) + expected = [ + call.delete_flows(dl_vlan=segmentation_id, in_port=port), + ] + self.assertEqual(expected, self.mock.mock_calls) + + def test_reclaim_local_vlan_novlan(self): + port = 999 + segmentation_id = None + self.br.reclaim_local_vlan(port=port, segmentation_id=segmentation_id) + expected = [ + call.delete_flows(dl_vlan=0xffff, in_port=port), + ] + self.assertEqual(expected, self.mock.mock_calls) + + def 
test_install_dvr_to_src_mac(self): + network_type = 'vxlan' + vlan_tag = 1111 + gateway_mac = '08:60:6e:7f:74:e7' + dst_mac = '00:02:b3:13:fe:3d' + dst_port = 6666 + self.br.install_dvr_to_src_mac(network_type=network_type, + vlan_tag=vlan_tag, + gateway_mac=gateway_mac, + dst_mac=dst_mac, + dst_port=dst_port) + expected = [ + call.add_flow(priority=4, table=1, dl_dst=dst_mac, + dl_vlan=vlan_tag, + actions='strip_vlan,mod_dl_src:%(mac)s,' + 'output:%(port)s' % { + 'mac': gateway_mac, + 'port': dst_port, + }), + ] + self.assertEqual(expected, self.mock.mock_calls) + + def test_delete_dvr_to_src_mac(self): + network_type = 'vxlan' + vlan_tag = 1111 + dst_mac = '00:02:b3:13:fe:3d' + self.br.delete_dvr_to_src_mac(network_type=network_type, + vlan_tag=vlan_tag, + dst_mac=dst_mac) + expected = [ + call.delete_flows(table=1, dl_dst=dst_mac, dl_vlan=vlan_tag), + ] + self.assertEqual(expected, self.mock.mock_calls) + + def test_install_dvr_to_src_mac_vlan(self): + network_type = 'vlan' + vlan_tag = 1111 + gateway_mac = '08:60:6e:7f:74:e7' + dst_mac = '00:02:b3:13:fe:3d' + dst_port = 6666 + self.br.install_dvr_to_src_mac(network_type=network_type, + vlan_tag=vlan_tag, + gateway_mac=gateway_mac, + dst_mac=dst_mac, + dst_port=dst_port) + expected = [ + call.add_flow(priority=4, table=2, dl_dst=dst_mac, + dl_vlan=vlan_tag, + actions='strip_vlan,mod_dl_src:%(mac)s,' + 'output:%(port)s' % { + 'mac': gateway_mac, + 'port': dst_port, + }), + ] + self.assertEqual(expected, self.mock.mock_calls) + + def test_delete_dvr_to_src_mac_vlan(self): + network_type = 'vlan' + vlan_tag = 1111 + dst_mac = '00:02:b3:13:fe:3d' + self.br.delete_dvr_to_src_mac(network_type=network_type, + vlan_tag=vlan_tag, + dst_mac=dst_mac) + expected = [ + call.delete_flows(table=2, dl_dst=dst_mac, dl_vlan=vlan_tag), + ] + self.assertEqual(expected, self.mock.mock_calls) + + def test_add_dvr_mac_vlan(self): + mac = '00:02:b3:13:fe:3d' + port = 8888 + self.br.add_dvr_mac_vlan(mac=mac, port=port) + expected = [ + 
call.add_flow(priority=4, table=0, actions='resubmit(,2)', + dl_src=mac, in_port=port), + ] + self.assertEqual(expected, self.mock.mock_calls) + + def test_remove_dvr_mac_vlan(self): + mac = '00:02:b3:13:fe:3d' + self.br.remove_dvr_mac_vlan(mac=mac) + expected = [ + call.delete_flows(eth_src=mac, table_id=0), + ] + self.assertEqual(expected, self.mock.mock_calls) + + def test_add_dvr_mac_tun(self): + mac = '00:02:b3:13:fe:3d' + port = 8888 + self.br.add_dvr_mac_tun(mac=mac, port=port) + expected = [ + call.add_flow(priority=2, table=0, actions='resubmit(,1)', + dl_src=mac, in_port=port), + ] + self.assertEqual(expected, self.mock.mock_calls) + + def test_remove_dvr_mac_tun(self): + mac = '00:02:b3:13:fe:3d' + port = 8888 + self.br.remove_dvr_mac_tun(mac=mac, port=port) + expected = [ + call.delete_flows(eth_src=mac, table_id=0, in_port=port), + ] + self.assertEqual(expected, self.mock.mock_calls) + + def test_install_arp_spoofing_protection(self): + port = 8888 + ip_addresses = ['192.0.2.1', '192.0.2.2/32'] + self.br.install_arp_spoofing_protection(port, ip_addresses) + expected = [ + call.add_flow(proto='arp', actions='normal', + arp_spa='192.0.2.1', + priority=2, table=24, in_port=8888), + call.add_flow(proto='arp', actions='normal', + arp_spa='192.0.2.2/32', + priority=2, table=24, in_port=8888), + call.add_flow(priority=10, table=0, in_port=8888, + actions='resubmit(,24)', proto='arp') + ] + self.assertEqual(expected, self.mock.mock_calls) + + def test_delete_arp_spoofing_protection(self): + port = 8888 + self.br.delete_arp_spoofing_protection(port) + expected = [ + call.delete_flows(table_id=0, in_port=8888, proto='arp'), + call.delete_flows(table_id=24, in_port=8888), + ] + self.assertEqual(expected, self.mock.mock_calls) diff --git a/neutron/tests/unit/plugins/openvswitch/agent/openflow/ovs_ofctl/test_br_phys.py b/neutron/tests/unit/plugins/openvswitch/agent/openflow/ovs_ofctl/test_br_phys.py new file mode 100644 index 00000000000..ae8753afcff --- /dev/null 
+++ b/neutron/tests/unit/plugins/openvswitch/agent/openflow/ovs_ofctl/test_br_phys.py @@ -0,0 +1,97 @@ +# Copyright (C) 2014,2015 VA Linux Systems Japan K.K. +# Copyright (C) 2014,2015 YAMAMOTO Takashi +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import mock + +import neutron.plugins.openvswitch.common.constants as ovs_const +from neutron.tests.unit.plugins.openvswitch.agent.openflow.ovs_ofctl \ + import ovs_bridge_test_base + + +call = mock.call # short hand + + +class OVSPhysicalBridgeTest(ovs_bridge_test_base.OVSBridgeTestBase, + ovs_bridge_test_base.OVSDVRProcessTestMixin): + dvr_process_table_id = ovs_const.DVR_PROCESS_VLAN + dvr_process_next_table_id = ovs_const.LOCAL_VLAN_TRANSLATION + + def setUp(self): + super(OVSPhysicalBridgeTest, self).setUp() + self.setup_bridge_mock('br-phys', self.br_phys_cls) + + def test_setup_default_table(self): + self.br.setup_default_table() + expected = [ + call.delete_flows(), + call.add_flow(priority=0, table=0, actions='normal'), + ] + self.assertEqual(expected, self.mock.mock_calls) + + def test_provision_local_vlan(self): + port = 999 + lvid = 888 + segmentation_id = 777 + distributed = False + self.br.provision_local_vlan(port=port, lvid=lvid, + segmentation_id=segmentation_id, + distributed=distributed) + expected = [ + call.add_flow(priority=4, table=0, dl_vlan=lvid, in_port=port, + actions='mod_vlan_vid:%s,normal' % segmentation_id), + ] + self.assertEqual(expected, self.mock.mock_calls) + + 
def test_provision_local_vlan_novlan(self): + port = 999 + lvid = 888 + segmentation_id = None + distributed = False + self.br.provision_local_vlan(port=port, lvid=lvid, + segmentation_id=segmentation_id, + distributed=distributed) + expected = [ + call.add_flow(priority=4, table=0, dl_vlan=lvid, in_port=port, + actions='strip_vlan,normal') + ] + self.assertEqual(expected, self.mock.mock_calls) + + def test_reclaim_local_vlan(self): + port = 999 + lvid = 888 + self.br.reclaim_local_vlan(port=port, lvid=lvid) + expected = [ + call.delete_flows(dl_vlan=lvid, in_port=port), + ] + self.assertEqual(expected, self.mock.mock_calls) + + def test_add_dvr_mac_vlan(self): + mac = '00:02:b3:13:fe:3d' + port = 8888 + self.br.add_dvr_mac_vlan(mac=mac, port=port) + expected = [ + call.add_flow(priority=2, table=3, dl_src=mac, + actions='output:%s' % port), + ] + self.assertEqual(expected, self.mock.mock_calls) + + def test_remove_dvr_mac_vlan(self): + mac = '00:02:b3:13:fe:3d' + self.br.remove_dvr_mac_vlan(mac=mac) + expected = [ + call.delete_flows(eth_src=mac, table_id=3), + ] + self.assertEqual(expected, self.mock.mock_calls) diff --git a/neutron/tests/unit/plugins/openvswitch/agent/openflow/ovs_ofctl/test_br_tun.py b/neutron/tests/unit/plugins/openvswitch/agent/openflow/ovs_ofctl/test_br_tun.py new file mode 100644 index 00000000000..27a046d0a4b --- /dev/null +++ b/neutron/tests/unit/plugins/openvswitch/agent/openflow/ovs_ofctl/test_br_tun.py @@ -0,0 +1,259 @@ +# Copyright (C) 2014,2015 VA Linux Systems Japan K.K. +# Copyright (C) 2014,2015 YAMAMOTO Takashi +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import mock +import netaddr + +import neutron.plugins.openvswitch.common.constants as ovs_const +from neutron.tests.unit.plugins.openvswitch.agent.openflow.ovs_ofctl \ + import ovs_bridge_test_base + + +call = mock.call # short hand + + +class OVSTunnelBridgeTest(ovs_bridge_test_base.OVSBridgeTestBase, + ovs_bridge_test_base.OVSDVRProcessTestMixin): + dvr_process_table_id = ovs_const.DVR_PROCESS + dvr_process_next_table_id = ovs_const.PATCH_LV_TO_TUN + + def setUp(self): + super(OVSTunnelBridgeTest, self).setUp() + self.setup_bridge_mock('br-tun', self.br_tun_cls) + + def test_setup_default_table(self): + patch_int_ofport = 5555 + arp_responder_enabled = False + self.br.setup_default_table(patch_int_ofport=patch_int_ofport, + arp_responder_enabled=arp_responder_enabled) + expected = [ + call.add_flow(priority=1, in_port=patch_int_ofport, + actions='resubmit(,2)'), + call.add_flow(priority=0, actions='drop'), + call.add_flow(priority=0, table=2, + dl_dst='00:00:00:00:00:00/01:00:00:00:00:00', + actions='resubmit(,20)'), + call.add_flow(priority=0, table=2, + dl_dst='01:00:00:00:00:00/01:00:00:00:00:00', + actions='resubmit(,22)'), + call.add_flow(priority=0, table=3, actions='drop'), + call.add_flow(priority=0, table=4, actions='drop'), + call.add_flow(priority=1, table=10, + actions='learn(table=20,priority=1,' + 'hard_timeout=300,NXM_OF_VLAN_TCI[0..11],' + 'NXM_OF_ETH_DST[]=NXM_OF_ETH_SRC[],' + 'load:0->NXM_OF_VLAN_TCI[],' + 'load:NXM_NX_TUN_ID[]->NXM_NX_TUN_ID[],' + 'output:NXM_OF_IN_PORT[]),' + 'output:%s' % patch_int_ofport), + 
call.add_flow(priority=0, table=20, actions='resubmit(,22)'), + call.add_flow(priority=0, table=22, actions='drop'), + ] + self.assertEqual(expected, self.mock.mock_calls) + + def test_setup_default_table_arp_responder_enabled(self): + patch_int_ofport = 5555 + arp_responder_enabled = True + self.br.setup_default_table(patch_int_ofport=patch_int_ofport, + arp_responder_enabled=arp_responder_enabled) + expected = [ + call.add_flow(priority=1, in_port=patch_int_ofport, + actions='resubmit(,2)'), + call.add_flow(priority=0, actions='drop'), + call.add_flow(priority=1, table=2, dl_dst='ff:ff:ff:ff:ff:ff', + actions='resubmit(,21)', proto='arp'), + call.add_flow(priority=0, table=2, + dl_dst='00:00:00:00:00:00/01:00:00:00:00:00', + actions='resubmit(,20)'), + call.add_flow(priority=0, table=2, + dl_dst='01:00:00:00:00:00/01:00:00:00:00:00', + actions='resubmit(,22)'), + call.add_flow(priority=0, table=3, actions='drop'), + call.add_flow(priority=0, table=4, actions='drop'), + call.add_flow(priority=1, table=10, + actions='learn(table=20,priority=1,' + 'hard_timeout=300,NXM_OF_VLAN_TCI[0..11],' + 'NXM_OF_ETH_DST[]=NXM_OF_ETH_SRC[],' + 'load:0->NXM_OF_VLAN_TCI[],' + 'load:NXM_NX_TUN_ID[]->NXM_NX_TUN_ID[],' + 'output:NXM_OF_IN_PORT[]),' + 'output:%s' % patch_int_ofport), + call.add_flow(priority=0, table=20, actions='resubmit(,22)'), + call.add_flow(priority=0, table=21, actions='resubmit(,22)'), + call.add_flow(priority=0, table=22, actions='drop'), + ] + self.assertEqual(expected, self.mock.mock_calls) + + def test_provision_local_vlan(self): + network_type = 'vxlan' + lvid = 888 + segmentation_id = 777 + distributed = False + self.br.provision_local_vlan(network_type=network_type, lvid=lvid, + segmentation_id=segmentation_id, + distributed=distributed) + expected = [ + call.add_flow(priority=1, tun_id=segmentation_id, + actions='mod_vlan_vid:%s,resubmit(,10)' % lvid, + table=4), + ] + self.assertEqual(expected, self.mock.mock_calls) + + def 
test_reclaim_local_vlan(self): + network_type = 'vxlan' + segmentation_id = 777 + self.br.reclaim_local_vlan(network_type=network_type, + segmentation_id=segmentation_id) + expected = [ + call.delete_flows(tun_id=segmentation_id, table=4), + ] + self.assertEqual(expected, self.mock.mock_calls) + + def test_install_flood_to_tun(self): + vlan = 3333 + tun_id = 2222 + ports = [11, 44, 22, 33] + self.br.install_flood_to_tun(vlan=vlan, + tun_id=tun_id, + ports=ports) + expected = [ + call.mod_flow(table=22, dl_vlan=vlan, + actions='strip_vlan,set_tunnel:%(tun)s,' + 'output:%(ports)s' % { + 'tun': tun_id, + 'ports': ','.join(map(str, ports)), + }), + ] + self.assertEqual(expected, self.mock.mock_calls) + + def test_delete_flood_to_tun(self): + vlan = 3333 + self.br.delete_flood_to_tun(vlan=vlan) + expected = [ + call.delete_flows(table=22, dl_vlan=vlan), + ] + self.assertEqual(expected, self.mock.mock_calls) + + def test_install_unicast_to_tun(self): + vlan = 3333 + port = 55 + mac = '08:60:6e:7f:74:e7' + tun_id = 2222 + self.br.install_unicast_to_tun(vlan=vlan, + tun_id=tun_id, + port=port, + mac=mac) + expected = [ + call.add_flow(priority=2, table=20, dl_dst=mac, dl_vlan=vlan, + actions='strip_vlan,set_tunnel:%(tun)s,' + 'output:%(port)s' % { + 'tun': tun_id, + 'port': port, + }), + ] + self.assertEqual(expected, self.mock.mock_calls) + + def test_delete_unicast_to_tun(self): + vlan = 3333 + mac = '08:60:6e:7f:74:e7' + self.br.delete_unicast_to_tun(vlan=vlan, mac=mac) + expected = [ + call.delete_flows(table=20, dl_dst=mac, dl_vlan=vlan), + ] + self.assertEqual(expected, self.mock.mock_calls) + + def test_delete_unicast_to_tun_without_mac(self): + vlan = 3333 + mac = None + self.br.delete_unicast_to_tun(vlan=vlan, mac=mac) + expected = [ + call.delete_flows(table=20, dl_vlan=vlan), + ] + self.assertEqual(expected, self.mock.mock_calls) + + def test_install_arp_responder(self): + vlan = 3333 + ip = '192.0.2.1' + mac = '08:60:6e:7f:74:e7' + 
self.br.install_arp_responder(vlan=vlan, ip=ip, mac=mac) + expected = [ + call.add_flow(proto='arp', nw_dst=ip, + actions='move:NXM_OF_ETH_SRC[]->NXM_OF_ETH_DST[],' + 'mod_dl_src:%(mac)s,load:0x2->NXM_OF_ARP_OP[],' + 'move:NXM_NX_ARP_SHA[]->NXM_NX_ARP_THA[],' + 'move:NXM_OF_ARP_SPA[]->NXM_OF_ARP_TPA[],' + 'load:%(mac)#x->NXM_NX_ARP_SHA[],' + 'load:%(ip)#x->NXM_OF_ARP_SPA[],in_port' % { + 'mac': netaddr.EUI(mac, + dialect=netaddr.mac_unix), + 'ip': netaddr.IPAddress(ip), + }, + priority=1, table=21, dl_vlan=vlan), + ] + self.assertEqual(expected, self.mock.mock_calls) + + def test_delete_arp_responder(self): + vlan = 3333 + ip = '192.0.2.1' + self.br.delete_arp_responder(vlan=vlan, ip=ip) + expected = [ + call.delete_flows(table=21, dl_vlan=vlan, proto='arp', nw_dst=ip), + ] + self.assertEqual(expected, self.mock.mock_calls) + + def test_delete_arp_responder_without_ip(self): + vlan = 3333 + ip = None + self.br.delete_arp_responder(vlan=vlan, ip=ip) + expected = [ + call.delete_flows(table=21, dl_vlan=vlan, proto='arp'), + ] + self.assertEqual(expected, self.mock.mock_calls) + + def test_setup_tunnel_port(self): + network_type = 'vxlan' + port = 11111 + self.br.setup_tunnel_port(network_type=network_type, port=port) + expected = [ + call.add_flow(priority=1, in_port=port, actions='resubmit(,4)'), + ] + self.assertEqual(expected, self.mock.mock_calls) + + def test_cleanup_tunnel_port(self): + port = 11111 + self.br.cleanup_tunnel_port(port=port) + expected = [ + call.delete_flows(in_port=port), + ] + self.assertEqual(expected, self.mock.mock_calls) + + def test_add_dvr_mac_tun(self): + mac = '00:02:b3:13:fe:3d' + port = 8888 + self.br.add_dvr_mac_tun(mac=mac, port=port) + expected = [ + call.add_flow(priority=1, table=9, dl_src=mac, + actions='output:%s' % port), + ] + self.assertEqual(expected, self.mock.mock_calls) + + def test_remove_dvr_mac_tun(self): + mac = '00:02:b3:13:fe:3d' + self.br.remove_dvr_mac_tun(mac=mac) + expected = [ + call.delete_flows(eth_src=mac, 
table_id=9), + ] + self.assertEqual(expected, self.mock.mock_calls) diff --git a/neutron/tests/unit/plugins/openvswitch/agent/ovs_test_base.py b/neutron/tests/unit/plugins/openvswitch/agent/ovs_test_base.py new file mode 100644 index 00000000000..0932a1ea231 --- /dev/null +++ b/neutron/tests/unit/plugins/openvswitch/agent/ovs_test_base.py @@ -0,0 +1,54 @@ +# Copyright (C) 2014,2015 VA Linux Systems Japan K.K. +# Copyright (C) 2014 Fumihiko Kakuma +# Copyright (C) 2014,2015 YAMAMOTO Takashi +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from oslo_utils import importutils + +from neutron.tests import base + + +_AGENT_PACKAGE = 'neutron.plugins.openvswitch.agent' +_AGENT_NAME = _AGENT_PACKAGE + '.ovs_neutron_agent' +_DVR_AGENT_NAME = 'neutron.plugins.openvswitch.agent.ovs_dvr_neutron_agent' + + +class OVSAgentConfigTestBase(base.BaseTestCase): + def setUp(self): + super(OVSAgentConfigTestBase, self).setUp() + self.mod_agent = importutils.import_module(_AGENT_NAME) + self.mod_dvr_agent = importutils.import_module(_DVR_AGENT_NAME) + + +class OVSAgentTestBase(OVSAgentConfigTestBase): + def setUp(self): + super(OVSAgentTestBase, self).setUp() + self.br_int_cls = importutils.import_class(self._BR_INT_CLASS) + self.br_phys_cls = importutils.import_class(self._BR_PHYS_CLASS) + self.br_tun_cls = importutils.import_class(self._BR_TUN_CLASS) + + def _bridge_classes(self): + return { + 'br_int': self.br_int_cls, + 'br_phys': self.br_phys_cls, + 'br_tun': self.br_tun_cls, + } + + +class OVSOFCtlTestBase(OVSAgentTestBase): + _DRIVER_PACKAGE = _AGENT_PACKAGE + '.openflow.ovs_ofctl' + _BR_INT_CLASS = _DRIVER_PACKAGE + '.br_int.OVSIntegrationBridge' + _BR_TUN_CLASS = _DRIVER_PACKAGE + '.br_tun.OVSTunnelBridge' + _BR_PHYS_CLASS = _DRIVER_PACKAGE + '.br_phys.OVSPhysicalBridge' diff --git a/neutron/tests/unit/plugins/openvswitch/agent/test_ovs_neutron_agent.py b/neutron/tests/unit/plugins/openvswitch/agent/test_ovs_neutron_agent.py index c21d7da93d9..fb068790c35 100644 --- a/neutron/tests/unit/plugins/openvswitch/agent/test_ovs_neutron_agent.py +++ b/neutron/tests/unit/plugins/openvswitch/agent/test_ovs_neutron_agent.py @@ -17,7 +17,6 @@ import sys import time import mock -import netaddr from oslo_config import cfg from oslo_log import log import oslo_messaging @@ -30,9 +29,8 @@ from neutron.agent.linux import ip_lib from neutron.common import constants as n_const from neutron.plugins.common import constants as p_const from neutron.plugins.ml2.drivers.l2pop import rpc as l2pop_rpc -from 
neutron.plugins.openvswitch.agent import ovs_neutron_agent from neutron.plugins.openvswitch.common import constants -from neutron.tests import base +from neutron.tests.unit.plugins.openvswitch.agent import ovs_test_base NOTIFIER = 'neutron.plugins.ml2.rpc.AgentNotifierApi' @@ -48,10 +46,10 @@ class FakeVif(object): port_name = 'name' -class CreateAgentConfigMap(base.BaseTestCase): +class CreateAgentConfigMap(ovs_test_base.OVSAgentConfigTestBase): def test_create_agent_config_map_succeeds(self): - self.assertTrue(ovs_neutron_agent.create_agent_config_map(cfg.CONF)) + self.assertTrue(self.mod_agent.create_agent_config_map(cfg.CONF)) def test_create_agent_config_map_fails_for_invalid_tunnel_config(self): # An ip address is required for tunneling but there is no default, @@ -59,29 +57,29 @@ class CreateAgentConfigMap(base.BaseTestCase): cfg.CONF.set_override('tunnel_types', [p_const.TYPE_GRE], group='AGENT') with testtools.ExpectedException(ValueError): - ovs_neutron_agent.create_agent_config_map(cfg.CONF) + self.mod_agent.create_agent_config_map(cfg.CONF) cfg.CONF.set_override('tunnel_types', [p_const.TYPE_VXLAN], group='AGENT') with testtools.ExpectedException(ValueError): - ovs_neutron_agent.create_agent_config_map(cfg.CONF) + self.mod_agent.create_agent_config_map(cfg.CONF) def test_create_agent_config_map_fails_no_local_ip(self): # An ip address is required for tunneling but there is no default cfg.CONF.set_override('tunnel_types', [p_const.TYPE_VXLAN], group='AGENT') with testtools.ExpectedException(ValueError): - ovs_neutron_agent.create_agent_config_map(cfg.CONF) + self.mod_agent.create_agent_config_map(cfg.CONF) def test_create_agent_config_map_fails_for_invalid_tunnel_type(self): cfg.CONF.set_override('tunnel_types', ['foobar'], group='AGENT') with testtools.ExpectedException(ValueError): - ovs_neutron_agent.create_agent_config_map(cfg.CONF) + self.mod_agent.create_agent_config_map(cfg.CONF) def test_create_agent_config_map_multiple_tunnel_types(self): 
cfg.CONF.set_override('local_ip', '10.10.10.10', group='OVS') cfg.CONF.set_override('tunnel_types', [p_const.TYPE_GRE, p_const.TYPE_VXLAN], group='AGENT') - cfgmap = ovs_neutron_agent.create_agent_config_map(cfg.CONF) + cfgmap = self.mod_agent.create_agent_config_map(cfg.CONF) self.assertEqual(cfgmap['tunnel_types'], [p_const.TYPE_GRE, p_const.TYPE_VXLAN]) @@ -90,11 +88,11 @@ class CreateAgentConfigMap(base.BaseTestCase): # Verify setting only enable_tunneling will default tunnel_type to GRE cfg.CONF.set_override('enable_distributed_routing', True, group='AGENT') - cfgmap = ovs_neutron_agent.create_agent_config_map(cfg.CONF) + cfgmap = self.mod_agent.create_agent_config_map(cfg.CONF) self.assertEqual(cfgmap['enable_distributed_routing'], True) -class TestOvsNeutronAgent(base.BaseTestCase): +class TestOvsNeutronAgent(object): def setUp(self): super(TestOvsNeutronAgent, self).setUp() @@ -107,7 +105,7 @@ class TestOvsNeutronAgent(base.BaseTestCase): group='SECURITYGROUP') cfg.CONF.set_default('quitting_rpc_timeout', 10, 'AGENT') cfg.CONF.set_default('prevent_arp_spoofing', False, 'AGENT') - kwargs = ovs_neutron_agent.create_agent_config_map(cfg.CONF) + kwargs = self.mod_agent.create_agent_config_map(cfg.CONF) class MockFixedIntervalLoopingCall(object): def __init__(self, f): @@ -117,18 +115,11 @@ class TestOvsNeutronAgent(base.BaseTestCase): self.f() with contextlib.nested( - mock.patch('neutron.plugins.openvswitch.agent.ovs_neutron_agent.' - 'OVSNeutronAgent.setup_integration_br'), - mock.patch('neutron.plugins.openvswitch.agent.ovs_neutron_agent.' - 'OVSNeutronAgent.setup_ancillary_bridges', + mock.patch.object(self.mod_agent.OVSNeutronAgent, + 'setup_integration_br'), + mock.patch.object(self.mod_agent.OVSNeutronAgent, + 'setup_ancillary_bridges', return_value=[]), - mock.patch('neutron.agent.common.ovs_lib.OVSBridge.' - 'create'), - mock.patch('neutron.agent.common.ovs_lib.OVSBridge.' - 'set_secure_mode'), - mock.patch('neutron.agent.common.ovs_lib.OVSBridge.' 
- 'get_local_port_mac', - return_value='00:00:00:00:00:01'), mock.patch('neutron.agent.linux.utils.get_interface_mac', return_value='00:00:00:00:00:01'), mock.patch('neutron.agent.common.ovs_lib.BaseOVS.get_bridges'), @@ -137,11 +128,12 @@ class TestOvsNeutronAgent(base.BaseTestCase): new=MockFixedIntervalLoopingCall), mock.patch('neutron.agent.common.ovs_lib.OVSBridge.' 'get_vif_ports', return_value=[])): - self.agent = ovs_neutron_agent.OVSNeutronAgent(**kwargs) + self.agent = self.mod_agent.OVSNeutronAgent(self._bridge_classes(), + **kwargs) # set back to true because initial report state will succeed due # to mocked out RPC calls self.agent.use_call = True - self.agent.tun_br = mock.Mock() + self.agent.tun_br = self.br_tun_cls(br_name='br-tun') self.agent.sg_agent = mock.Mock() def _mock_port_bound(self, ofport=None, new_local_vlan=None, @@ -153,22 +145,18 @@ class TestOvsNeutronAgent(base.BaseTestCase): 'ip_address': '1.1.1.1'}] if old_local_vlan is not None: self.agent.local_vlan_map[net_uuid] = ( - ovs_neutron_agent.LocalVLANMapping( + self.mod_agent.LocalVLANMapping( old_local_vlan, None, None, None)) - with contextlib.nested( - mock.patch('neutron.agent.common.ovs_lib.OVSBridge.' - 'set_db_attribute', return_value=True), - mock.patch('neutron.agent.common.ovs_lib.OVSBridge.' 
- 'db_get_val', return_value={}), - mock.patch.object(self.agent.int_br, 'delete_flows') - ) as (set_ovs_db_func, get_ovs_db_func, delete_flows_func): + with mock.patch.object(self.agent, 'int_br', autospec=True) as int_br: + int_br.db_get_val.return_value = {} + int_br.set_db_attribute.return_value = True self.agent.port_bound(port, net_uuid, 'local', None, None, fixed_ips, "compute:None", False) vlan_mapping = {'net_uuid': net_uuid, 'network_type': 'local', 'physical_network': None, 'segmentation_id': None} - set_ovs_db_func.assert_called_once_with( + int_br.set_db_attribute.assert_called_once_with( "Port", mock.ANY, "other_config", vlan_mapping) def test_check_agent_configurations_for_dvr_raises(self): @@ -202,29 +190,24 @@ class TestOvsNeutronAgent(base.BaseTestCase): def _test_port_dead(self, cur_tag=None): port = mock.Mock() port.ofport = 1 - with contextlib.nested( - mock.patch('neutron.agent.common.ovs_lib.OVSBridge.' - 'set_db_attribute', return_value=True), - mock.patch('neutron.agent.common.ovs_lib.OVSBridge.' 
- 'db_get_val', return_value=cur_tag), - mock.patch.object(self.agent.int_br, 'add_flow') - ) as (set_ovs_db_func, get_ovs_db_func, add_flow_func): + with mock.patch.object(self.agent, 'int_br') as int_br: + int_br.db_get_val.return_value = cur_tag self.agent.port_dead(port) - get_ovs_db_func.assert_called_once_with("Port", mock.ANY, "tag") - if cur_tag == ovs_neutron_agent.DEAD_VLAN_TAG: - self.assertFalse(set_ovs_db_func.called) - self.assertFalse(add_flow_func.called) + if cur_tag == self.mod_agent.DEAD_VLAN_TAG: + self.assertFalse(int_br.set_db_attribute.called) + self.assertFalse(int_br.drop_port.called) else: - set_ovs_db_func.assert_called_once_with( - "Port", mock.ANY, "tag", ovs_neutron_agent.DEAD_VLAN_TAG) - add_flow_func.assert_called_once_with( - priority=2, in_port=port.ofport, actions="drop") + int_br.assert_has_calls([ + mock.call.set_db_attribute("Port", mock.ANY, "tag", + self.mod_agent.DEAD_VLAN_TAG), + mock.call.drop_port(in_port=port.ofport), + ]) def test_port_dead(self): self._test_port_dead() def test_port_dead_with_port_already_dead(self): - self._test_port_dead(ovs_neutron_agent.DEAD_VLAN_TAG) + self._test_port_dead(self.mod_agent.DEAD_VLAN_TAG) def mock_scan_ports(self, vif_port_set=None, registered_ports=None, updated_ports=None, port_tags_dict=None): @@ -289,10 +272,10 @@ class TestOvsNeutronAgent(base.BaseTestCase): self.assertEqual(expected, actual) def test_update_ports_returns_changed_vlan(self): - br = ovs_lib.OVSBridge('br-int') + br = self.br_int_cls('br-int') mac = "ca:fe:de:ad:be:ef" port = ovs_lib.VifPort(1, 1, 1, mac, br) - lvm = ovs_neutron_agent.LocalVLANMapping( + lvm = self.mod_agent.LocalVLANMapping( 1, '1', None, 1, {port.vif_id: port}) local_vlan_map = {'1': lvm} vif_port_set = set([1, 3]) @@ -302,7 +285,10 @@ class TestOvsNeutronAgent(base.BaseTestCase): added=set([3]), current=vif_port_set, removed=set([2]), updated=set([1]) ) - with mock.patch.dict(self.agent.local_vlan_map, local_vlan_map): + with contextlib.nested( 
+ mock.patch.dict(self.agent.local_vlan_map, local_vlan_map), + mock.patch.object(self.agent, 'tun_br', autospec=True), + ): actual = self.mock_scan_ports( vif_port_set, registered_ports, port_tags_dict=port_tags_dict) self.assertEqual(expected, actual) @@ -315,7 +301,7 @@ class TestOvsNeutronAgent(base.BaseTestCase): mock.patch.object(self.agent.int_br, 'get_vif_port_by_id', return_value=mock.Mock())): self.assertRaises( - ovs_neutron_agent.DeviceListRetrievalError, + self.mod_agent.DeviceListRetrievalError, self.agent.treat_devices_added_or_updated, [{}], False) def _mock_treat_devices_added_updated(self, details, port, func_name): @@ -543,46 +529,37 @@ class TestOvsNeutronAgent(base.BaseTestCase): mock.patch.object(ip_lib, "device_exists"), mock.patch.object(sys, "exit"), mock.patch.object(utils, "execute"), - mock.patch.object(ovs_lib.OVSBridge, "remove_all_flows"), - mock.patch.object(ovs_lib.OVSBridge, "add_flow"), - mock.patch.object(ovs_lib.OVSBridge, "add_patch_port"), - mock.patch.object(ovs_lib.OVSBridge, "delete_port"), - mock.patch.object(ovs_lib.OVSBridge, "set_db_attribute"), - mock.patch.object(self.agent.int_br, "add_flow"), - mock.patch.object(self.agent.int_br, "add_patch_port"), - mock.patch.object(self.agent.int_br, "delete_port"), - mock.patch.object(self.agent.int_br, "set_db_attribute"), - ) as (devex_fn, sysexit_fn, utilsexec_fn, remflows_fn, ovs_add_flow_fn, - ovs_addpatch_port_fn, ovs_delport_fn, ovs_set_attr_fn, - br_add_flow_fn, br_addpatch_port_fn, br_delport_fn, - br_set_attr_fn): + mock.patch.object(self.agent, 'br_phys_cls'), + mock.patch.object(self.agent, 'int_br'), + ) as (devex_fn, sysexit_fn, utilsexec_fn, + phys_br_cls, int_br): devex_fn.return_value = True parent = mock.MagicMock() - parent.attach_mock(ovs_addpatch_port_fn, 'phy_add_patch_port') - parent.attach_mock(ovs_add_flow_fn, 'phy_add_flow') - parent.attach_mock(ovs_set_attr_fn, 'phy_set_attr') - parent.attach_mock(br_addpatch_port_fn, 'int_add_patch_port') - 
parent.attach_mock(br_add_flow_fn, 'int_add_flow') - parent.attach_mock(br_set_attr_fn, 'int_set_attr') - - ovs_addpatch_port_fn.return_value = "phy_ofport" - br_addpatch_port_fn.return_value = "int_ofport" + phys_br = phys_br_cls() + parent.attach_mock(phys_br_cls, 'phys_br_cls') + parent.attach_mock(phys_br, 'phys_br') + parent.attach_mock(int_br, 'int_br') + phys_br.add_patch_port.return_value = "phy_ofport" + int_br.add_patch_port.return_value = "int_ofport" self.agent.setup_physical_bridges({"physnet1": "br-eth"}) expected_calls = [ - mock.call.phy_add_flow(priority=1, actions='normal'), - mock.call.int_add_patch_port('int-br-eth', - constants.NONEXISTENT_PEER), - mock.call.phy_add_patch_port('phy-br-eth', - constants.NONEXISTENT_PEER), - mock.call.int_add_flow(priority=2, in_port='int_ofport', - actions='drop'), - mock.call.phy_add_flow(priority=2, in_port='phy_ofport', - actions='drop'), - mock.call.int_set_attr('Interface', 'int-br-eth', - 'options:peer', 'phy-br-eth'), - mock.call.phy_set_attr('Interface', 'phy-br-eth', - 'options:peer', 'int-br-eth'), - + mock.call.phys_br_cls('br-eth'), + mock.call.phys_br.setup_controllers(mock.ANY), + mock.call.phys_br.setup_default_table(), + mock.call.int_br.delete_port('int-br-eth'), + mock.call.phys_br.delete_port('phy-br-eth'), + mock.call.int_br.add_patch_port('int-br-eth', + constants.NONEXISTENT_PEER), + mock.call.phys_br.add_patch_port('phy-br-eth', + constants.NONEXISTENT_PEER), + mock.call.int_br.drop_port(in_port='int_ofport'), + mock.call.phys_br.drop_port(in_port='phy_ofport'), + mock.call.int_br.set_db_attribute('Interface', 'int-br-eth', + 'options:peer', + 'phy-br-eth'), + mock.call.phys_br.set_db_attribute('Interface', 'phy-br-eth', + 'options:peer', + 'int-br-eth'), ] parent.assert_has_calls(expected_calls) self.assertEqual(self.agent.int_ofports["physnet1"], @@ -596,19 +573,14 @@ class TestOvsNeutronAgent(base.BaseTestCase): mock.patch.object(ip_lib, "device_exists"), mock.patch.object(sys, "exit"), 
mock.patch.object(utils, "execute"), - mock.patch.object(ovs_lib.OVSBridge, "remove_all_flows"), - mock.patch.object(ovs_lib.OVSBridge, "add_flow"), - mock.patch.object(ovs_lib.OVSBridge, "add_port"), - mock.patch.object(ovs_lib.OVSBridge, "delete_port"), - mock.patch.object(self.agent.int_br, "add_port"), - mock.patch.object(self.agent.int_br, "delete_port"), + mock.patch.object(self.agent, 'br_phys_cls'), + mock.patch.object(self.agent, 'int_br'), mock.patch.object(ip_lib.IPWrapper, "add_veth"), mock.patch.object(ip_lib.IpLinkCommand, "delete"), mock.patch.object(ip_lib.IpLinkCommand, "set_up"), mock.patch.object(ip_lib.IpLinkCommand, "set_mtu"), mock.patch.object(ovs_lib.BaseOVS, "get_bridges") - ) as (devex_fn, sysexit_fn, utilsexec_fn, remflows_fn, ovs_addfl_fn, - ovs_addport_fn, ovs_delport_fn, br_addport_fn, br_delport_fn, + ) as (devex_fn, sysexit_fn, utilsexec_fn, phys_br_cls, int_br, addveth_fn, linkdel_fn, linkset_fn, linkmtu_fn, get_br_fn): devex_fn.return_value = True parent = mock.MagicMock() @@ -617,8 +589,9 @@ class TestOvsNeutronAgent(base.BaseTestCase): parent.attach_mock(addveth_fn, 'add_veth') addveth_fn.return_value = (ip_lib.IPDevice("int-br-eth1"), ip_lib.IPDevice("phy-br-eth1")) - ovs_addport_fn.return_value = "phys_veth_ofport" - br_addport_fn.return_value = "int_veth_ofport" + phys_br = phys_br_cls() + phys_br.add_port.return_value = "phys_veth_ofport" + int_br.add_port.return_value = "int_veth_ofport" get_br_fn.return_value = ["br-eth"] self.agent.setup_physical_bridges({"physnet1": "br-eth"}) expected_calls = [mock.call.link_delete(), @@ -649,15 +622,10 @@ class TestOvsNeutronAgent(base.BaseTestCase): with contextlib.nested( mock.patch.object(self.agent.int_br, "add_patch_port", return_value=1), - mock.patch.object(self.agent.tun_br, "add_patch_port", - return_value=2), - mock.patch.object(self.agent.tun_br, "remove_all_flows"), - mock.patch.object(self.agent.tun_br, "add_flow"), - mock.patch.object(ovs_lib, "OVSBridge"), - 
mock.patch.object(self.agent.tun_br, "reset_bridge"), + mock.patch.object(self.agent, 'tun_br', autospec=True), mock.patch.object(sys, "exit") - ) as (intbr_patch_fn, tunbr_patch_fn, remove_all_fn, - add_flow_fn, ovs_br_fn, reset_br_fn, exit_fn): + ) as (intbr_patch_fn, tun_br, exit_fn): + tun_br.add_patch_port.return_value = 2 self.agent.reset_tunnel_br(None) self.agent.setup_tunnel_br() self.assertTrue(intbr_patch_fn.called) @@ -755,50 +723,22 @@ class TestOvsNeutronAgent(base.BaseTestCase): [l2pop_rpc.PortInfo(FAKE_MAC, FAKE_IP1), n_const.FLOODING_ENTRY]}}} - class ActionMatcher(object): - def __init__(self, action_str): - self.ordered = self.order_ports(action_str) - - def order_ports(self, action_str): - halves = action_str.split('output:') - ports = sorted(halves.pop().split(',')) - halves.append(','.join(ports)) - return 'output:'.join(halves) - - def __eq__(self, other): - return self.ordered == self.order_ports(other) - with contextlib.nested( - mock.patch.object(self.agent.tun_br, 'deferred'), - mock.patch.object(self.agent.tun_br, 'do_action_flows'), - mock.patch.object(self.agent, '_setup_tunnel_port'), - ) as (deferred_fn, do_action_flows_fn, add_tun_fn): - deferred_fn.return_value = ovs_lib.DeferredOVSBridge( - self.agent.tun_br) + mock.patch.object(self.agent, 'tun_br', autospec=True), + mock.patch.object(self.agent, '_setup_tunnel_port', autospec=True), + ) as (tun_br, add_tun_fn): self.agent.fdb_add(None, fdb_entry) self.assertFalse(add_tun_fn.called) - actions = (constants.ARP_RESPONDER_ACTIONS % - {'mac': netaddr.EUI(FAKE_MAC, dialect=netaddr.mac_unix), - 'ip': netaddr.IPAddress(FAKE_IP1)}) + deferred_br_call = mock.call.deferred().__enter__() expected_calls = [ - mock.call('add', [dict(table=constants.ARP_RESPONDER, - priority=1, - proto='arp', - dl_vlan='vlan1', - nw_dst=FAKE_IP1, - actions=actions), - dict(table=constants.UCAST_TO_TUN, - priority=2, - dl_vlan='vlan1', - dl_dst=FAKE_MAC, - actions='strip_vlan,' - 'set_tunnel:seg1,output:2')]), 
- mock.call('mod', [dict(table=constants.FLOOD_TO_TUN, - dl_vlan='vlan1', - actions=ActionMatcher('strip_vlan,' - 'set_tunnel:seg1,output:1,2'))]), + deferred_br_call.install_arp_responder('vlan1', FAKE_IP1, + FAKE_MAC), + deferred_br_call.install_unicast_to_tun('vlan1', 'seg1', '2', + FAKE_MAC), + deferred_br_call.install_flood_to_tun('vlan1', 'seg1', + set(['1', '2'])), ] - do_action_flows_fn.assert_has_calls(expected_calls) + tun_br.assert_has_calls(expected_calls) def test_fdb_del_flows(self): self._prepare_l2_pop_ofports() @@ -809,28 +749,21 @@ class TestOvsNeutronAgent(base.BaseTestCase): {'2.2.2.2': [l2pop_rpc.PortInfo(FAKE_MAC, FAKE_IP1), n_const.FLOODING_ENTRY]}}} - with contextlib.nested( - mock.patch.object(self.agent.tun_br, 'deferred'), - mock.patch.object(self.agent.tun_br, 'do_action_flows'), - ) as (deferred_fn, do_action_flows_fn): - deferred_fn.return_value = ovs_lib.DeferredOVSBridge( - self.agent.tun_br) + with mock.patch.object(self.agent, 'tun_br', autospec=True) as br_tun: self.agent.fdb_remove(None, fdb_entry) + deferred_br_call = mock.call.deferred().__enter__() expected_calls = [ - mock.call('mod', [dict(table=constants.FLOOD_TO_TUN, - dl_vlan='vlan2', - actions='strip_vlan,' - 'set_tunnel:seg2,output:1')]), - mock.call('del', [dict(table=constants.ARP_RESPONDER, - proto='arp', - dl_vlan='vlan2', - nw_dst=FAKE_IP1), - dict(table=constants.UCAST_TO_TUN, - dl_vlan='vlan2', - dl_dst=FAKE_MAC), - dict(in_port='2')]), + mock.call.deferred(), + mock.call.deferred().__enter__(), + deferred_br_call.delete_arp_responder('vlan2', FAKE_IP1), + deferred_br_call.delete_unicast_to_tun('vlan2', FAKE_MAC), + deferred_br_call.install_flood_to_tun('vlan2', 'seg2', + set(['1'])), + deferred_br_call.delete_port('gre-02020202'), + deferred_br_call.cleanup_tunnel_port('2'), + mock.call.deferred().__exit__(None, None, None), ] - do_action_flows_fn.assert_has_calls(expected_calls) + br_tun.assert_has_calls(expected_calls) def test_fdb_add_port(self): 
self._prepare_l2_pop_ofports() @@ -840,17 +773,15 @@ class TestOvsNeutronAgent(base.BaseTestCase): 'ports': {'1.1.1.1': [l2pop_rpc.PortInfo(FAKE_MAC, FAKE_IP1)]}}} with contextlib.nested( - mock.patch.object(self.agent.tun_br, 'deferred'), - mock.patch.object(self.agent.tun_br, 'do_action_flows'), + mock.patch.object(self.agent, 'tun_br', autospec=True), mock.patch.object(self.agent, '_setup_tunnel_port') - ) as (deferred_fn, do_action_flows_fn, add_tun_fn): - deferred_br = ovs_lib.DeferredOVSBridge(self.agent.tun_br) - deferred_fn.return_value = deferred_br + ) as (tun_br, add_tun_fn): self.agent.fdb_add(None, fdb_entry) self.assertFalse(add_tun_fn.called) fdb_entry['net1']['ports']['10.10.10.10'] = [ l2pop_rpc.PortInfo(FAKE_MAC, FAKE_IP1)] self.agent.fdb_add(None, fdb_entry) + deferred_br = tun_br.deferred().__enter__() add_tun_fn.assert_called_with( deferred_br, 'gre-0a0a0a0a', '10.10.10.10', 'gre') @@ -862,13 +793,12 @@ class TestOvsNeutronAgent(base.BaseTestCase): 'ports': {'2.2.2.2': [n_const.FLOODING_ENTRY]}}} with contextlib.nested( mock.patch.object(self.agent.tun_br, 'deferred'), - mock.patch.object(self.agent.tun_br, 'do_action_flows'), - mock.patch.object(self.agent.tun_br, 'delete_port') - ) as (deferred_fn, do_action_flows_fn, delete_port_fn): - deferred_br = ovs_lib.DeferredOVSBridge(self.agent.tun_br) - deferred_fn.return_value = deferred_br + mock.patch.object(self.agent.tun_br, 'delete_port'), + ) as (defer_fn, delete_port_fn): self.agent.fdb_remove(None, fdb_entry) - delete_port_fn.assert_called_once_with('gre-02020202') + deferred_br = defer_fn().__enter__() + deferred_br.delete_port.assert_called_once_with('gre-02020202') + self.assertFalse(delete_port_fn.called) def test_fdb_update_chg_ip(self): self._prepare_l2_pop_ofports() @@ -877,31 +807,13 @@ class TestOvsNeutronAgent(base.BaseTestCase): {'agent_ip': {'before': [l2pop_rpc.PortInfo(FAKE_MAC, FAKE_IP1)], 'after': [l2pop_rpc.PortInfo(FAKE_MAC, FAKE_IP2)]}}}} - with contextlib.nested( - 
mock.patch.object(self.agent.tun_br, 'deferred'), - mock.patch.object(self.agent.tun_br, 'do_action_flows'), - ) as (deferred_fn, do_action_flows_fn): - deferred_br = ovs_lib.DeferredOVSBridge(self.agent.tun_br) - deferred_fn.return_value = deferred_br + with mock.patch.object(self.agent.tun_br, 'deferred') as deferred_fn: self.agent.fdb_update(None, fdb_entries) - actions = (constants.ARP_RESPONDER_ACTIONS % - {'mac': netaddr.EUI(FAKE_MAC, dialect=netaddr.mac_unix), - 'ip': netaddr.IPAddress(FAKE_IP2)}) - expected_calls = [ - mock.call('add', [dict(table=constants.ARP_RESPONDER, - priority=1, - proto='arp', - dl_vlan='vlan1', - nw_dst=FAKE_IP2, - actions=actions)]), - mock.call('del', [dict(table=constants.ARP_RESPONDER, - proto='arp', - dl_vlan='vlan1', - nw_dst=FAKE_IP1)]) - ] - do_action_flows_fn.assert_has_calls(expected_calls) - self.assertEqual(len(expected_calls), - len(do_action_flows_fn.mock_calls)) + deferred_br = deferred_fn().__enter__() + deferred_br.assert_has_calls([ + mock.call.install_arp_responder('vlan1', FAKE_IP2, FAKE_MAC), + mock.call.delete_arp_responder('vlan1', FAKE_IP1) + ]) def test_del_fdb_flow_idempotency(self): lvm = mock.Mock() @@ -922,22 +834,17 @@ class TestOvsNeutronAgent(base.BaseTestCase): self._prepare_l2_pop_ofports() self.agent.l2_pop = True self.agent.enable_tunneling = True - with mock.patch.object( - self.agent.tun_br, 'cleanup_tunnel_port' - ) as clean_tun_fn: + with mock.patch.object(self.agent, 'tun_br', autospec=True) as tun_br: self.agent.reclaim_local_vlan('net1') - self.assertFalse(clean_tun_fn.called) + self.assertFalse(tun_br.cleanup_tunnel_port.called) def test_recl_lv_port_to_remove(self): self._prepare_l2_pop_ofports() self.agent.l2_pop = True self.agent.enable_tunneling = True - with contextlib.nested( - mock.patch.object(self.agent.tun_br, 'delete_port'), - mock.patch.object(self.agent.tun_br, 'delete_flows') - ) as (del_port_fn, del_flow_fn): + with mock.patch.object(self.agent, 'tun_br', autospec=True) as 
tun_br: self.agent.reclaim_local_vlan('net2') - del_port_fn.assert_called_once_with('gre-02020202') + tun_br.delete_port.assert_called_once_with('gre-02020202') def test_daemon_loop_uses_polling_manager(self): with mock.patch( @@ -952,7 +859,7 @@ class TestOvsNeutronAgent(base.BaseTestCase): with contextlib.nested( mock.patch.object(self.agent.tun_br, 'add_tunnel_port', return_value=ovs_lib.INVALID_OFPORT), - mock.patch.object(ovs_neutron_agent.LOG, 'error') + mock.patch.object(self.mod_agent.LOG, 'error') ) as (add_tunnel_port_fn, log_error_fn): ofport = self.agent._setup_tunnel_port( self.agent.tun_br, 'gre-1', 'remote_ip', p_const.TYPE_GRE) @@ -968,7 +875,7 @@ class TestOvsNeutronAgent(base.BaseTestCase): with contextlib.nested( mock.patch.object(self.agent.tun_br, 'add_tunnel_port', return_value=ovs_lib.INVALID_OFPORT), - mock.patch.object(ovs_neutron_agent.LOG, 'error') + mock.patch.object(self.mod_agent.LOG, 'error') ) as (add_tunnel_port_fn, log_error_fn): self.agent.dont_fragment = False ofport = self.agent._setup_tunnel_port( @@ -1045,18 +952,18 @@ class TestOvsNeutronAgent(base.BaseTestCase): with contextlib.nested( mock.patch.object(async_process.AsyncProcess, "_spawn"), mock.patch.object(log.KeywordArgumentAdapter, 'exception'), - mock.patch.object(ovs_neutron_agent.OVSNeutronAgent, + mock.patch.object(self.mod_agent.OVSNeutronAgent, 'scan_ports'), - mock.patch.object(ovs_neutron_agent.OVSNeutronAgent, + mock.patch.object(self.mod_agent.OVSNeutronAgent, 'process_network_ports'), - mock.patch.object(ovs_neutron_agent.OVSNeutronAgent, + mock.patch.object(self.mod_agent.OVSNeutronAgent, 'check_ovs_status'), - mock.patch.object(ovs_neutron_agent.OVSNeutronAgent, + mock.patch.object(self.mod_agent.OVSNeutronAgent, 'setup_integration_br'), - mock.patch.object(ovs_neutron_agent.OVSNeutronAgent, + mock.patch.object(self.mod_agent.OVSNeutronAgent, 'setup_physical_bridges'), mock.patch.object(time, 'sleep'), - mock.patch.object(ovs_neutron_agent.OVSNeutronAgent, 
+ mock.patch.object(self.mod_agent.OVSNeutronAgent, 'update_stale_ofport_rules') ) as (spawn_fn, log_exception, scan_ports, process_network_ports, check_ovs_status, setup_int_br, setup_phys_br, time_sleep, @@ -1113,7 +1020,7 @@ class TestOvsNeutronAgent(base.BaseTestCase): # all of this is required just to get to the part of # treat_devices_added_or_updated that checks the prevent_arp_spoofing # flag - self.agent.int_br = mock.Mock() + self.agent.int_br = mock.create_autospec(self.agent.int_br) self.agent.treat_vif_port = mock.Mock() self.agent.get_vif_port_by_id = mock.Mock(return_value=FakeVif()) self.agent.plugin_rpc = mock.Mock() @@ -1127,27 +1034,24 @@ class TestOvsNeutronAgent(base.BaseTestCase): self.assertFalse(self.agent.setup_arp_spoofing_protection.called) def test_arp_spoofing_port_security_disabled(self): - int_br = mock.Mock() + int_br = mock.create_autospec(self.agent.int_br) self.agent.setup_arp_spoofing_protection( int_br, FakeVif(), {'port_security_enabled': False}) - self.assertFalse(int_br.add_flows.called) + self.assertTrue(int_br.delete_arp_spoofing_protection.called) + self.assertFalse(int_br.install_arp_spoofing_protection.called) def test_arp_spoofing_basic_rule_setup(self): vif = FakeVif() fake_details = {'fixed_ips': []} self.agent.prevent_arp_spoofing = True - int_br = mock.Mock() + int_br = mock.create_autospec(self.agent.int_br) self.agent.setup_arp_spoofing_protection(int_br, vif, fake_details) - int_br.delete_flows.assert_has_calls( - [mock.call(table=mock.ANY, in_port=vif.ofport)]) - # make sure redirect into spoof table is installed - int_br.add_flow.assert_any_call( - table=constants.LOCAL_SWITCHING, in_port=vif.ofport, - proto='arp', actions=mock.ANY, priority=10) - # make sure drop rule for replies is installed - int_br.add_flow.assert_any_call( - table=constants.ARP_SPOOF_TABLE, - proto='arp', actions='DROP', priority=mock.ANY) + self.assertEqual( + [mock.call(port=vif.ofport)], + 
int_br.delete_arp_spoofing_protection.mock_calls) + self.assertEqual( + [mock.call(ip_addresses=set(), port=vif.ofport)], + int_br.install_arp_spoofing_protection.mock_calls) def test_arp_spoofing_fixed_and_allowed_addresses(self): vif = FakeVif() @@ -1158,14 +1062,14 @@ class TestOvsNeutronAgent(base.BaseTestCase): {'ip_address': '192.168.44.103/32'}] } self.agent.prevent_arp_spoofing = True - int_br = mock.Mock() + int_br = mock.create_autospec(self.agent.int_br) self.agent.setup_arp_spoofing_protection(int_br, vif, fake_details) # make sure all addresses are allowed - for addr in ('192.168.44.100', '192.168.44.101', '192.168.44.102/32', - '192.168.44.103/32'): - int_br.add_flow.assert_any_call( - table=constants.ARP_SPOOF_TABLE, in_port=vif.ofport, - proto='arp', actions='NORMAL', arp_spa=addr, priority=mock.ANY) + addresses = {'192.168.44.100', '192.168.44.101', '192.168.44.102/32', + '192.168.44.103/32'} + self.assertEqual( + [mock.call(port=vif.ofport, ip_addresses=addresses)], + int_br.install_arp_spoofing_protection.mock_calls) def test__get_ofport_moves(self): previous = {'port1': 1, 'port2': 2} @@ -1184,8 +1088,9 @@ class TestOvsNeutronAgent(base.BaseTestCase): self.agent.int_br.get_vif_port_to_ofport_map.return_value = newmap self.agent.update_stale_ofport_rules() # rules matching port 1 should have been deleted - self.assertEqual(self.agent.int_br.delete_flows.mock_calls, - [mock.call(in_port=1)]) + self.assertEqual( + [mock.call(port=1)], + self.agent.int_br.delete_arp_spoofing_protection.mock_calls) # make sure the state was updated with the new map self.assertEqual(self.agent.vifname_to_ofport_map, newmap) @@ -1210,22 +1115,24 @@ class TestOvsNeutronAgent(base.BaseTestCase): def add_new_vlan_mapping(*args, **kwargs): self.agent.local_vlan_map['bar'] = ( - ovs_neutron_agent.LocalVLANMapping(1, 2, 3, 4)) + self.mod_agent.LocalVLANMapping(1, 2, 3, 4)) bridge = mock.Mock() tunnel_type = 'vxlan' self.agent.tun_br_ofports = {tunnel_type: dict()} 
self.agent.l2_pop = False self.agent.local_vlan_map = { - 'foo': ovs_neutron_agent.LocalVLANMapping(4, tunnel_type, 2, 1)} - bridge.mod_flow.side_effect = add_new_vlan_mapping - with mock.patch('neutron.plugins.openvswitch.agent.ovs_neutron_agent.' - '_ofport_set_to_str', return_value=True): - self.agent._setup_tunnel_port(bridge, 1, 2, - tunnel_type=tunnel_type) + 'foo': self.mod_agent.LocalVLANMapping(4, tunnel_type, 2, 1)} + bridge.install_flood_to_tun.side_effect = add_new_vlan_mapping + self.agent._setup_tunnel_port(bridge, 1, 2, tunnel_type=tunnel_type) self.assertIn('bar', self.agent.local_vlan_map) -class AncillaryBridgesTest(base.BaseTestCase): +class TestOvsNeutronAgentOFCtl(TestOvsNeutronAgent, + ovs_test_base.OVSOFCtlTestBase): + pass + + +class AncillaryBridgesTest(object): def setUp(self): super(AncillaryBridgesTest, self).setUp() @@ -1237,7 +1144,7 @@ class AncillaryBridgesTest(base.BaseTestCase): 'neutron.agent.firewall.NoopFirewallDriver', group='SECURITYGROUP') cfg.CONF.set_override('report_interval', 0, 'AGENT') - self.kwargs = ovs_neutron_agent.create_agent_config_map(cfg.CONF) + self.kwargs = self.mod_agent.create_agent_config_map(cfg.CONF) def _test_ancillary_bridges(self, bridges, ancillary): device_ids = ancillary[:] @@ -1252,15 +1159,10 @@ class AncillaryBridgesTest(base.BaseTestCase): return None with contextlib.nested( - mock.patch('neutron.plugins.openvswitch.agent.ovs_neutron_agent.' - 'OVSNeutronAgent.setup_integration_br'), + mock.patch.object(self.mod_agent.OVSNeutronAgent, + 'setup_integration_br'), mock.patch('neutron.agent.linux.utils.get_interface_mac', return_value='00:00:00:00:00:01'), - mock.patch('neutron.agent.common.ovs_lib.OVSBridge.' - 'get_local_port_mac', - return_value='00:00:00:00:00:01'), - mock.patch('neutron.agent.common.ovs_lib.OVSBridge.' - 'set_secure_mode'), mock.patch('neutron.agent.common.ovs_lib.BaseOVS.get_bridges', return_value=bridges), mock.patch('neutron.agent.common.ovs_lib.BaseOVS.' 
@@ -1268,7 +1170,8 @@ class AncillaryBridgesTest(base.BaseTestCase): side_effect=pullup_side_effect), mock.patch('neutron.agent.common.ovs_lib.OVSBridge.' 'get_vif_ports', return_value=[])): - self.agent = ovs_neutron_agent.OVSNeutronAgent(**self.kwargs) + self.agent = self.mod_agent.OVSNeutronAgent(self._bridge_classes(), + **self.kwargs) self.assertEqual(len(ancillary), len(self.agent.ancillary_brs)) if ancillary: bridges = [br.br_name for br in self.agent.ancillary_brs] @@ -1288,7 +1191,12 @@ class AncillaryBridgesTest(base.BaseTestCase): self._test_ancillary_bridges(bridges, ['br-ex1', 'br-ex2']) -class TestOvsDvrNeutronAgent(base.BaseTestCase): +class AncillaryBridgesTestOFCtl(AncillaryBridgesTest, + ovs_test_base.OVSOFCtlTestBase): + pass + + +class TestOvsDvrNeutronAgent(object): def setUp(self): super(TestOvsDvrNeutronAgent, self).setUp() @@ -1299,7 +1207,7 @@ class TestOvsDvrNeutronAgent(base.BaseTestCase): cfg.CONF.set_default('firewall_driver', 'neutron.agent.firewall.NoopFirewallDriver', group='SECURITYGROUP') - kwargs = ovs_neutron_agent.create_agent_config_map(cfg.CONF) + kwargs = self.mod_agent.create_agent_config_map(cfg.CONF) class MockFixedIntervalLoopingCall(object): def __init__(self, f): @@ -1309,18 +1217,11 @@ class TestOvsDvrNeutronAgent(base.BaseTestCase): self.f() with contextlib.nested( - mock.patch('neutron.plugins.openvswitch.agent.ovs_neutron_agent.' - 'OVSNeutronAgent.setup_integration_br'), - mock.patch('neutron.plugins.openvswitch.agent.ovs_neutron_agent.' - 'OVSNeutronAgent.setup_ancillary_bridges', - return_value=[]), - mock.patch('neutron.agent.common.ovs_lib.OVSBridge.' - 'create'), - mock.patch('neutron.agent.common.ovs_lib.OVSBridge.' - 'set_secure_mode'), - mock.patch('neutron.agent.common.ovs_lib.OVSBridge.' 
- 'get_local_port_mac', - return_value='00:00:00:00:00:01'), + mock.patch.object(self.mod_agent.OVSNeutronAgent, + 'setup_integration_br'), + mock.patch.object(self.mod_agent.OVSNeutronAgent, + 'setup_ancillary_bridges', + return_value=[]), mock.patch('neutron.agent.linux.utils.get_interface_mac', return_value='00:00:00:00:00:01'), mock.patch('neutron.agent.common.ovs_lib.BaseOVS.get_bridges'), @@ -1329,11 +1230,12 @@ class TestOvsDvrNeutronAgent(base.BaseTestCase): new=MockFixedIntervalLoopingCall), mock.patch('neutron.agent.common.ovs_lib.OVSBridge.' 'get_vif_ports', return_value=[])): - self.agent = ovs_neutron_agent.OVSNeutronAgent(**kwargs) + self.agent = self.mod_agent.OVSNeutronAgent(self._bridge_classes(), + **kwargs) # set back to true because initial report state will succeed due # to mocked out RPC calls self.agent.use_call = True - self.agent.tun_br = mock.Mock() + self.agent.tun_br = self.br_tun_cls(br_name='br-tun') self.agent.sg_agent = mock.Mock() def _setup_for_dvr_test(self, ofport=10): @@ -1371,6 +1273,36 @@ class TestOvsDvrNeutronAgent(base.BaseTestCase): self._compute_fixed_ips = [{'subnet_id': 'my-subnet-uuid', 'ip_address': '1.1.1.3'}] + @staticmethod + def _expected_port_bound(port, lvid): + return [ + mock.call.db_get_val('Port', port.port_name, 'other_config'), + mock.call.set_db_attribute('Port', port.port_name, 'other_config', + mock.ANY), + ] + + def _expected_install_dvr_process(self, lvid, port, ip_version, + gateway_ip, gateway_mac): + if ip_version == 4: + ipvx_calls = [ + mock.call.install_dvr_process_ipv4( + vlan_tag=lvid, + gateway_ip=gateway_ip), + ] + else: + ipvx_calls = [ + mock.call.install_dvr_process_ipv6( + vlan_tag=lvid, + gateway_mac=gateway_mac), + ] + return ipvx_calls + [ + mock.call.install_dvr_process( + vlan_tag=lvid, + dvr_mac_address=self.agent.dvr_agent.dvr_mac_address, + vif_mac=port.vif_mac, + ), + ] + def _test_port_bound_for_dvr_on_vlan_network(self, device_owner, ip_version=4): self._setup_for_dvr_test() 
@@ -1385,105 +1317,84 @@ class TestOvsDvrNeutronAgent(base.BaseTestCase): physical_network = self._physical_network segmentation_id = self._segmentation_id network_type = p_const.TYPE_VLAN + int_br = mock.create_autospec(self.agent.int_br) + tun_br = mock.create_autospec(self.agent.tun_br) + phys_br = mock.create_autospec(self.br_phys_cls('br-phys')) + int_br.set_db_attribute.return_value = True + int_br.db_get_val.return_value = {} with contextlib.nested( - mock.patch('neutron.agent.common.ovs_lib.OVSBridge.' - 'set_db_attribute', - return_value=True), - mock.patch('neutron.agent.common.ovs_lib.OVSBridge.' - 'db_get_val', - return_value={})): - with contextlib.nested( - mock.patch.object(self.agent.dvr_agent.plugin_rpc, - 'get_subnet_for_dvr', - return_value={ - 'gateway_ip': gateway_ip, - 'cidr': cidr, - 'ip_version': ip_version, - 'gateway_mac': gateway_mac}), - mock.patch.object(self.agent.dvr_agent.plugin_rpc, - 'get_ports_on_host_by_subnet', - return_value=[]), - mock.patch.object(self.agent.dvr_agent.int_br, - 'get_vif_port_by_id', - return_value=self._port), - mock.patch.object(self.agent.dvr_agent.int_br, 'add_flow'), - mock.patch.object(self.agent.dvr_agent.tun_br, 'add_flow'), - mock.patch.object(self.agent.dvr_agent.tun_br, 'delete_flows'), - mock.patch.object( - self.agent.dvr_agent.phys_brs[physical_network], - 'add_flow'), - mock.patch.object( - self.agent.dvr_agent.phys_brs[physical_network], - 'delete_flows') - ) as (get_subnet_fn, get_cphost_fn, - get_vif_fn, add_flow_int_fn, - add_flow_tun_fn, delete_flows_tun_fn, add_flow_phys_fn, - delete_flows_phys_fn): - self.agent.port_bound( - self._port, self._net_uuid, network_type, - physical_network, segmentation_id, self._fixed_ips, - n_const.DEVICE_OWNER_DVR_INTERFACE, False) - lvm = self.agent.local_vlan_map[self._net_uuid] - phy_ofp = self.agent.dvr_agent.phys_ofports[physical_network] - int_ofp = self.agent.dvr_agent.int_ofports[physical_network] - expected_on_phys_br = [ - 
mock.call(table=constants.LOCAL_VLAN_TRANSLATION, - priority=4, - in_port=phy_ofp, - dl_vlan=lvm.vlan, - actions="mod_vlan_vid:%s,normal" % - (lvm.segmentation_id)), - mock.call(table=constants.DVR_PROCESS_VLAN, - priority=2, - dl_vlan=lvm.vlan, - dl_dst=self._port.vif_mac, - actions="drop"), - mock.call(table=constants.DVR_PROCESS_VLAN, - priority=1, - dl_vlan=lvm.vlan, - dl_src=self._port.vif_mac, - actions="mod_dl_src:%s,resubmit(,%s)" % - (self.agent.dvr_agent.dvr_mac_address, - constants.LOCAL_VLAN_TRANSLATION)) - ] - if ip_version == 4: - expected_on_phys_br.insert(1, mock.call( - proto='arp', - nw_dst=gateway_ip, actions='drop', - priority=3, table=constants.DVR_PROCESS_VLAN, - dl_vlan=lvm.vlan)) - else: - expected_on_phys_br.insert(1, mock.call( - icmp_type=n_const.ICMPV6_TYPE_RA, proto='icmp6', - dl_src=self._port.vif_mac, actions='drop', - priority=3, table=constants.DVR_PROCESS_VLAN, - dl_vlan=lvm.vlan)) - self.assertEqual(expected_on_phys_br, - add_flow_phys_fn.call_args_list) - self.agent.port_bound(self._compute_port, self._net_uuid, - network_type, physical_network, - segmentation_id, - self._compute_fixed_ips, - device_owner, False) - expected_on_int_br = [ - mock.call(priority=3, - in_port=int_ofp, - dl_vlan=lvm.segmentation_id, - actions="mod_vlan_vid:%s,normal" % lvm.vlan), - mock.call(table=constants.DVR_TO_SRC_MAC_VLAN, - priority=4, - dl_dst=self._compute_port.vif_mac, - dl_vlan=lvm.segmentation_id, - actions="strip_vlan,mod_dl_src:%s," - "output:%s" % - (gateway_mac, - self._compute_port.ofport)) - ] - self.assertEqual(expected_on_int_br, - add_flow_int_fn.call_args_list) - self.assertFalse(add_flow_tun_fn.called) - self.assertFalse(delete_flows_tun_fn.called) - self.assertFalse(delete_flows_phys_fn.called) + mock.patch.object(self.agent.dvr_agent.plugin_rpc, + 'get_subnet_for_dvr', + return_value={ + 'gateway_ip': gateway_ip, + 'cidr': cidr, + 'ip_version': ip_version, + 'gateway_mac': gateway_mac}), + 
mock.patch.object(self.agent.dvr_agent.plugin_rpc, + 'get_ports_on_host_by_subnet', + return_value=[]), + mock.patch.object(self.agent.dvr_agent.int_br, + 'get_vif_port_by_id', + return_value=self._port), + mock.patch.object(self.agent, 'int_br', new=int_br), + mock.patch.object(self.agent, 'tun_br', new=tun_br), + mock.patch.dict(self.agent.phys_brs, + {physical_network: phys_br}), + mock.patch.object(self.agent.dvr_agent, 'int_br', new=int_br), + mock.patch.object(self.agent.dvr_agent, 'tun_br', new=tun_br), + mock.patch.dict(self.agent.dvr_agent.phys_brs, + {physical_network: phys_br}), + ) as (get_subnet_fn, get_cphost_fn, get_vif_fn, _, _, _, _, _, _): + self.agent.port_bound( + self._port, self._net_uuid, network_type, + physical_network, segmentation_id, self._fixed_ips, + n_const.DEVICE_OWNER_DVR_INTERFACE, False) + phy_ofp = self.agent.dvr_agent.phys_ofports[physical_network] + int_ofp = self.agent.dvr_agent.int_ofports[physical_network] + lvid = self.agent.local_vlan_map[self._net_uuid].vlan + expected_on_phys_br = [ + mock.call.provision_local_vlan( + port=phy_ofp, + lvid=lvid, + segmentation_id=segmentation_id, + distributed=True, + ), + ] + self._expected_install_dvr_process( + port=self._port, + lvid=lvid, + ip_version=ip_version, + gateway_ip=gateway_ip, + gateway_mac=gateway_mac) + expected_on_int_br = [ + mock.call.provision_local_vlan( + port=int_ofp, + lvid=lvid, + segmentation_id=segmentation_id, + ), + ] + self._expected_port_bound(self._port, lvid) + self.assertEqual(expected_on_int_br, int_br.mock_calls) + self.assertEqual([], tun_br.mock_calls) + self.assertEqual(expected_on_phys_br, phys_br.mock_calls) + int_br.reset_mock() + tun_br.reset_mock() + phys_br.reset_mock() + self.agent.port_bound(self._compute_port, self._net_uuid, + network_type, physical_network, + segmentation_id, + self._compute_fixed_ips, + device_owner, False) + expected_on_int_br = [ + mock.call.install_dvr_to_src_mac( + network_type=network_type, + 
gateway_mac=gateway_mac, + dst_mac=self._compute_port.vif_mac, + dst_port=self._compute_port.ofport, + vlan_tag=segmentation_id, + ), + ] + self._expected_port_bound(self._compute_port, lvid) + self.assertEqual(expected_on_int_br, int_br.mock_calls) + self.assertFalse([], tun_br.mock_calls) + self.assertFalse([], phys_br.mock_calls) def _test_port_bound_for_dvr_on_vxlan_network(self, device_owner, ip_version=4): @@ -1499,99 +1410,76 @@ class TestOvsDvrNeutronAgent(base.BaseTestCase): self._compute_port.vif_mac = '77:88:99:00:11:22' physical_network = self._physical_network segmentation_id = self._segmentation_id + int_br = mock.create_autospec(self.agent.int_br) + tun_br = mock.create_autospec(self.agent.tun_br) + phys_br = mock.create_autospec(self.br_phys_cls('br-phys')) + int_br.set_db_attribute.return_value = True + int_br.db_get_val.return_value = {} with contextlib.nested( - mock.patch('neutron.agent.common.ovs_lib.OVSBridge.' - 'set_db_attribute', - return_value=True), - mock.patch('neutron.agent.common.ovs_lib.OVSBridge.' 
- 'db_get_val', - return_value={})): - with contextlib.nested( - mock.patch.object(self.agent.dvr_agent.plugin_rpc, - 'get_subnet_for_dvr', - return_value={ - 'gateway_ip': gateway_ip, - 'cidr': cidr, - 'ip_version': ip_version, - 'gateway_mac': gateway_mac}), - mock.patch.object(self.agent.dvr_agent.plugin_rpc, - 'get_ports_on_host_by_subnet', - return_value=[]), - mock.patch.object(self.agent.dvr_agent.int_br, - 'get_vif_port_by_id', - return_value=self._port), - mock.patch.object(self.agent.dvr_agent.int_br, 'add_flow'), - mock.patch.object(self.agent.dvr_agent.tun_br, 'add_flow'), - mock.patch.object(self.agent.dvr_agent.tun_br, 'delete_flows'), - mock.patch.object( - self.agent.dvr_agent.phys_brs[physical_network], - 'add_flow'), - mock.patch.object( - self.agent.dvr_agent.phys_brs[physical_network], - 'delete_flows') - ) as (get_subnet_fn, get_cphost_fn, - get_vif_fn, add_flow_int_fn, - add_flow_tun_fn, delete_flows_tun_fn, - add_flow_phys_fn, delete_flows_phys_fn): - self.agent.port_bound( - self._port, self._net_uuid, network_type, - physical_network, segmentation_id, self._fixed_ips, - n_const.DEVICE_OWNER_DVR_INTERFACE, False) - lvm = self.agent.local_vlan_map[self._net_uuid] - expected_on_tun_br = [ - mock.call( - table=constants.TUN_TABLE['vxlan'], - priority=1, tun_id=lvm.segmentation_id, - actions="mod_vlan_vid:%s," - "resubmit(,%s)" % - (lvm.vlan, constants.DVR_NOT_LEARN)), - mock.call( - table=constants.DVR_PROCESS, priority=2, - dl_vlan=lvm.vlan, - dl_dst=self._port.vif_mac, - actions='drop'), - mock.call( - table=constants.DVR_PROCESS, priority=1, - dl_vlan=lvm.vlan, - dl_src=self._port.vif_mac, - actions="mod_dl_src:%s,resubmit(,%s)" % ( - self.agent.dvr_agent.dvr_mac_address, - constants.PATCH_LV_TO_TUN))] - if ip_version == 4: - expected_on_tun_br.insert(1, mock.call( - proto='arp', - nw_dst=gateway_ip, actions='drop', - priority=3, table=constants.DVR_PROCESS, - dl_vlan=lvm.vlan)) - else: - expected_on_tun_br.insert(1, mock.call( - 
icmp_type=n_const.ICMPV6_TYPE_RA, - proto='icmp6', - dl_src=self._port.vif_mac, - actions='drop', - priority=3, table=constants.DVR_PROCESS, - dl_vlan=lvm.vlan)) - self.assertEqual(expected_on_tun_br, - add_flow_tun_fn.call_args_list) - self.agent.port_bound(self._compute_port, self._net_uuid, - network_type, physical_network, - segmentation_id, - self._compute_fixed_ips, - device_owner, False) - expected_on_int_br = [ - mock.call(table=constants.DVR_TO_SRC_MAC, priority=4, - dl_dst=self._compute_port.vif_mac, - dl_vlan=lvm.vlan, - actions="strip_vlan,mod_dl_src:%s," - "output:%s" % - (gateway_mac, self._compute_port.ofport)) - ] - self.assertEqual(expected_on_int_br, - add_flow_int_fn.call_args_list) - self.assertFalse(add_flow_phys_fn.called) - self.assertFalse(add_flow_phys_fn.called) - self.assertFalse(delete_flows_tun_fn.called) - self.assertFalse(delete_flows_phys_fn.called) + mock.patch.object(self.agent.dvr_agent.plugin_rpc, + 'get_subnet_for_dvr', + return_value={ + 'gateway_ip': gateway_ip, + 'cidr': cidr, + 'ip_version': ip_version, + 'gateway_mac': gateway_mac}), + mock.patch.object(self.agent.dvr_agent.plugin_rpc, + 'get_ports_on_host_by_subnet', + return_value=[]), + mock.patch.object(self.agent.dvr_agent.int_br, + 'get_vif_port_by_id', + return_value=self._port), + mock.patch.object(self.agent, 'int_br', new=int_br), + mock.patch.object(self.agent, 'tun_br', new=tun_br), + mock.patch.dict(self.agent.phys_brs, + {physical_network: phys_br}), + mock.patch.object(self.agent.dvr_agent, 'int_br', new=int_br), + mock.patch.object(self.agent.dvr_agent, 'tun_br', new=tun_br), + mock.patch.dict(self.agent.dvr_agent.phys_brs, + {physical_network: phys_br}), + ) as (get_subnet_fn, get_cphost_fn, get_vif_fn, _, _, _, _, _, _): + self.agent.port_bound( + self._port, self._net_uuid, network_type, + physical_network, segmentation_id, self._fixed_ips, + n_const.DEVICE_OWNER_DVR_INTERFACE, False) + lvid = self.agent.local_vlan_map[self._net_uuid].vlan + 
expected_on_int_br = self._expected_port_bound( + self._port, lvid) + expected_on_tun_br = [ + mock.call.provision_local_vlan( + network_type=network_type, + segmentation_id=segmentation_id, + lvid=lvid, + distributed=True), + ] + self._expected_install_dvr_process( + port=self._port, + lvid=lvid, + ip_version=ip_version, + gateway_ip=gateway_ip, + gateway_mac=gateway_mac) + self.assertEqual(expected_on_int_br, int_br.mock_calls) + self.assertEqual(expected_on_tun_br, tun_br.mock_calls) + self.assertEqual([], phys_br.mock_calls) + int_br.reset_mock() + tun_br.reset_mock() + phys_br.reset_mock() + self.agent.port_bound(self._compute_port, self._net_uuid, + network_type, physical_network, + segmentation_id, + self._compute_fixed_ips, + device_owner, False) + expected_on_int_br = [ + mock.call.install_dvr_to_src_mac( + network_type=network_type, + gateway_mac=gateway_mac, + dst_mac=self._compute_port.vif_mac, + dst_port=self._compute_port.ofport, + vlan_tag=lvid, + ), + ] + self._expected_port_bound(self._compute_port, lvid) + self.assertEqual(expected_on_int_br, int_br.mock_calls) + self.assertEqual([], tun_br.mock_calls) + self.assertEqual([], phys_br.mock_calls) def test_port_bound_for_dvr_with_compute_ports(self): self._test_port_bound_for_dvr_on_vlan_network( @@ -1625,38 +1513,53 @@ class TestOvsDvrNeutronAgent(base.BaseTestCase): def test_port_bound_for_dvr_with_csnat_ports(self, ofport=10): self._setup_for_dvr_test() + int_br = mock.create_autospec(self.agent.int_br) + tun_br = mock.create_autospec(self.agent.tun_br) + int_br.set_db_attribute.return_value = True + int_br.db_get_val.return_value = {} with contextlib.nested( - mock.patch('neutron.agent.common.ovs_lib.OVSBridge.' - 'set_db_attribute', - return_value=True), - mock.patch('neutron.agent.common.ovs_lib.OVSBridge.' 
- 'db_get_val', - return_value={})): - with contextlib.nested( - mock.patch.object( - self.agent.dvr_agent.plugin_rpc, 'get_subnet_for_dvr', - return_value={'gateway_ip': '1.1.1.1', - 'cidr': '1.1.1.0/24', - 'ip_version': 4, - 'gateway_mac': 'aa:bb:cc:11:22:33'}), - mock.patch.object(self.agent.dvr_agent.plugin_rpc, - 'get_ports_on_host_by_subnet', - return_value=[]), - mock.patch.object(self.agent.dvr_agent.int_br, - 'get_vif_port_by_id', - return_value=self._port), - mock.patch.object(self.agent.dvr_agent.int_br, 'add_flow'), - mock.patch.object(self.agent.dvr_agent.tun_br, 'add_flow'), - mock.patch.object(self.agent.dvr_agent.tun_br, 'delete_flows') - ) as (get_subnet_fn, get_cphost_fn, - get_vif_fn, add_flow_int_fn, - add_flow_tun_fn, delete_flows_tun_fn): - self.agent.port_bound( - self._port, self._net_uuid, 'vxlan', - None, None, self._fixed_ips, - n_const.DEVICE_OWNER_ROUTER_SNAT, - False) - self.assertTrue(add_flow_int_fn.called) + mock.patch.object( + self.agent.dvr_agent.plugin_rpc, 'get_subnet_for_dvr', + return_value={'gateway_ip': '1.1.1.1', + 'cidr': '1.1.1.0/24', + 'ip_version': 4, + 'gateway_mac': 'aa:bb:cc:11:22:33'}), + mock.patch.object(self.agent.dvr_agent.plugin_rpc, + 'get_ports_on_host_by_subnet', + return_value=[]), + mock.patch.object(self.agent.dvr_agent.int_br, + 'get_vif_port_by_id', + return_value=self._port), + mock.patch.object(self.agent, 'int_br', new=int_br), + mock.patch.object(self.agent, 'tun_br', new=tun_br), + mock.patch.object(self.agent.dvr_agent, 'int_br', new=int_br), + mock.patch.object(self.agent.dvr_agent, 'tun_br', new=tun_br), + ) as (get_subnet_fn, get_cphost_fn, get_vif_fn, _, _, _, _): + self.agent.port_bound( + self._port, self._net_uuid, 'vxlan', + None, None, self._fixed_ips, + n_const.DEVICE_OWNER_ROUTER_SNAT, + False) + lvid = self.agent.local_vlan_map[self._net_uuid].vlan + expected_on_int_br = [ + mock.call.install_dvr_to_src_mac( + network_type='vxlan', + gateway_mac='aa:bb:cc:11:22:33', + 
dst_mac=self._port.vif_mac, + dst_port=self._port.ofport, + vlan_tag=lvid, + ), + ] + self._expected_port_bound(self._port, lvid) + self.assertEqual(expected_on_int_br, int_br.mock_calls) + expected_on_tun_br = [ + mock.call.provision_local_vlan( + network_type='vxlan', + lvid=lvid, + segmentation_id=None, + distributed=True, + ), + ] + self.assertEqual(expected_on_tun_br, tun_br.mock_calls) def test_treat_devices_removed_for_dvr_interface(self, ofport=10): self._test_treat_devices_removed_for_dvr_interface(ofport) @@ -1672,77 +1575,79 @@ class TestOvsDvrNeutronAgent(base.BaseTestCase): else: gateway_ip = '2001:100::1' cidr = '2001:100::0/64' + gateway_mac = 'aa:bb:cc:11:22:33' + int_br = mock.create_autospec(self.agent.int_br) + tun_br = mock.create_autospec(self.agent.tun_br) + int_br.set_db_attribute.return_value = True + int_br.db_get_val.return_value = {} with contextlib.nested( - mock.patch('neutron.agent.common.ovs_lib.OVSBridge.' - 'set_db_attribute', - return_value=True), - mock.patch('neutron.agent.common.ovs_lib.OVSBridge.' 
- 'db_get_val', - return_value={})): - with contextlib.nested( - mock.patch.object( - self.agent.dvr_agent.plugin_rpc, 'get_subnet_for_dvr', - return_value={'gateway_ip': gateway_ip, - 'cidr': cidr, - 'ip_version': ip_version, - 'gateway_mac': 'aa:bb:cc:11:22:33'}), - mock.patch.object(self.agent.dvr_agent.plugin_rpc, - 'get_ports_on_host_by_subnet', - return_value=[]), - mock.patch.object(self.agent.dvr_agent.int_br, - 'get_vif_port_by_id', - return_value=self._port), - mock.patch.object(self.agent.dvr_agent.int_br, 'add_flow'), - mock.patch.object(self.agent.dvr_agent.tun_br, 'add_flow'), - mock.patch.object(self.agent.dvr_agent.tun_br, 'delete_flows') - ) as (get_subnet_fn, get_cphost_fn, - get_vif_fn, add_flow_int_fn, - add_flow_tun_fn, delete_flows_tun_fn): - self.agent.port_bound( - self._port, self._net_uuid, 'vxlan', - None, None, self._fixed_ips, - n_const.DEVICE_OWNER_DVR_INTERFACE, - False) - self.assertTrue(add_flow_tun_fn.called) + mock.patch.object( + self.agent.dvr_agent.plugin_rpc, 'get_subnet_for_dvr', + return_value={'gateway_ip': gateway_ip, + 'cidr': cidr, + 'ip_version': ip_version, + 'gateway_mac': gateway_mac}), + mock.patch.object(self.agent.dvr_agent.plugin_rpc, + 'get_ports_on_host_by_subnet', + return_value=[]), + mock.patch.object(self.agent, 'int_br', new=int_br), + mock.patch.object(self.agent, 'tun_br', new=tun_br), + mock.patch.object(self.agent.dvr_agent, 'int_br', new=int_br), + mock.patch.object(self.agent.dvr_agent, 'tun_br', new=tun_br), + mock.patch.object(self.agent.dvr_agent.int_br, + 'get_vif_port_by_id', + return_value=self._port), + ) as (get_subnet_fn, get_cphost_fn, _, _, _, _, get_vif_fn): + self.agent.port_bound( + self._port, self._net_uuid, 'vxlan', + None, None, self._fixed_ips, + n_const.DEVICE_OWNER_DVR_INTERFACE, + False) + lvid = self.agent.local_vlan_map[self._net_uuid].vlan + self.assertEqual(self._expected_port_bound(self._port, lvid), + int_br.mock_calls) + expected_on_tun_br = [ + 
mock.call.provision_local_vlan(network_type='vxlan', + lvid=lvid, segmentation_id=None, distributed=True), + ] + self._expected_install_dvr_process( + port=self._port, + lvid=lvid, + ip_version=ip_version, + gateway_ip=gateway_ip, + gateway_mac=gateway_mac) + self.assertEqual(expected_on_tun_br, tun_br.mock_calls) + int_br.reset_mock() + tun_br.reset_mock() with contextlib.nested( mock.patch.object(self.agent, 'reclaim_local_vlan'), mock.patch.object(self.agent.plugin_rpc, 'update_device_down', return_value=None), - mock.patch.object(self.agent.dvr_agent.int_br, 'delete_flows'), - mock.patch.object(self.agent.dvr_agent.tun_br, - 'delete_flows')) as (reclaim_vlan_fn, - update_dev_down_fn, - delete_flows_int_fn, - delete_flows_tun_fn): + mock.patch.object(self.agent, 'int_br', new=int_br), + mock.patch.object(self.agent, 'tun_br', new=tun_br), + mock.patch.object(self.agent.dvr_agent, 'int_br', new=int_br), + mock.patch.object(self.agent.dvr_agent, 'tun_br', new=tun_br), + ) as (reclaim_vlan_fn, update_dev_down_fn, _, _, _, _): self.agent.treat_devices_removed([self._port.vif_id]) if ip_version == 4: - expected = [mock.call( - proto='arp', - nw_dst=gateway_ip, - table=constants.DVR_PROCESS, - dl_vlan=( - self.agent.local_vlan_map[self._net_uuid].vlan))] + expected = [ + mock.call.delete_dvr_process_ipv4( + vlan_tag=lvid, + gateway_ip=gateway_ip), + ] else: - expected = [mock.call( - icmp_type=n_const.ICMPV6_TYPE_RA, proto='icmp6', - dl_src='aa:bb:cc:11:22:33', - table=constants.DVR_PROCESS, - dl_vlan=( - self.agent.local_vlan_map[self._net_uuid].vlan))] + expected = [ + mock.call.delete_dvr_process_ipv6( + vlan_tag=lvid, + gateway_mac=gateway_mac), + ] expected.extend([ - mock.call( - table=constants.DVR_PROCESS, - dl_dst=self._port.vif_mac, - dl_vlan=( - self.agent.local_vlan_map[self._net_uuid].vlan)), - mock.call( - table=constants.DVR_PROCESS, - dl_vlan=( - self.agent.local_vlan_map[self._net_uuid].vlan), - dl_src=self._port.vif_mac) + 
mock.call.delete_dvr_process( + vlan_tag=lvid, + vif_mac=self._port.vif_mac), ]) - self.assertEqual(expected, delete_flows_tun_fn.call_args_list) + self.assertEqual([], int_br.mock_calls) + self.assertEqual(expected, tun_br.mock_calls) def _test_treat_devices_removed_for_dvr(self, device_owner, ip_version=4): self._setup_for_dvr_test() @@ -1752,61 +1657,91 @@ class TestOvsDvrNeutronAgent(base.BaseTestCase): else: gateway_ip = '2001:100::1' cidr = '2001:100::0/64' + gateway_mac = 'aa:bb:cc:11:22:33' + int_br = mock.create_autospec(self.agent.int_br) + tun_br = mock.create_autospec(self.agent.tun_br) + int_br.set_db_attribute.return_value = True + int_br.db_get_val.return_value = {} with contextlib.nested( - mock.patch('neutron.agent.common.ovs_lib.OVSBridge.' - 'set_db_attribute', - return_value=True), - mock.patch('neutron.agent.common.ovs_lib.OVSBridge.' - 'db_get_val', - return_value={})): - with contextlib.nested( - mock.patch.object( - self.agent.dvr_agent.plugin_rpc, 'get_subnet_for_dvr', - return_value={'gateway_ip': gateway_ip, - 'cidr': cidr, - 'ip_version': ip_version, - 'gateway_mac': 'aa:bb:cc:11:22:33'}), - mock.patch.object(self.agent.dvr_agent.plugin_rpc, - 'get_ports_on_host_by_subnet', - return_value=[]), - mock.patch.object(self.agent.dvr_agent.int_br, - 'get_vif_port_by_id', - return_value=self._port), - mock.patch.object(self.agent.dvr_agent.int_br, 'add_flow'), - mock.patch.object(self.agent.dvr_agent.tun_br, 'add_flow'), - mock.patch.object(self.agent.dvr_agent.tun_br, 'delete_flows') - ) as (get_subnet_fn, get_cphost_fn, - get_vif_fn, add_flow_int_fn, - add_flow_tun_fn, delete_flows_tun_fn): - self.agent.port_bound( - self._port, self._net_uuid, 'vxlan', - None, None, self._fixed_ips, - n_const.DEVICE_OWNER_DVR_INTERFACE, - False) - self.agent.port_bound(self._compute_port, - self._net_uuid, 'vxlan', - None, None, - self._compute_fixed_ips, - device_owner, False) - self.assertTrue(add_flow_tun_fn.called) - 
self.assertTrue(add_flow_int_fn.called) + mock.patch.object( + self.agent.dvr_agent.plugin_rpc, 'get_subnet_for_dvr', + return_value={'gateway_ip': gateway_ip, + 'cidr': cidr, + 'ip_version': ip_version, + 'gateway_mac': gateway_mac}), + mock.patch.object(self.agent.dvr_agent.plugin_rpc, + 'get_ports_on_host_by_subnet', + return_value=[]), + mock.patch.object(self.agent.dvr_agent.int_br, + 'get_vif_port_by_id', + return_value=self._port), + mock.patch.object(self.agent, 'int_br', new=int_br), + mock.patch.object(self.agent, 'tun_br', new=tun_br), + mock.patch.object(self.agent.dvr_agent, 'int_br', new=int_br), + mock.patch.object(self.agent.dvr_agent, 'tun_br', new=tun_br), + ) as (get_subnet_fn, get_cphost_fn, get_vif_fn, _, _, _, _): + self.agent.port_bound( + self._port, self._net_uuid, 'vxlan', + None, None, self._fixed_ips, + n_const.DEVICE_OWNER_DVR_INTERFACE, + False) + lvid = self.agent.local_vlan_map[self._net_uuid].vlan + self.assertEqual( + self._expected_port_bound(self._port, lvid), + int_br.mock_calls) + expected_on_tun_br = [ + mock.call.provision_local_vlan( + network_type='vxlan', + segmentation_id=None, + lvid=lvid, + distributed=True), + ] + self._expected_install_dvr_process( + port=self._port, + lvid=lvid, + ip_version=ip_version, + gateway_ip=gateway_ip, + gateway_mac=gateway_mac) + self.assertEqual(expected_on_tun_br, tun_br.mock_calls) + int_br.reset_mock() + tun_br.reset_mock() + self.agent.port_bound(self._compute_port, + self._net_uuid, 'vxlan', + None, None, + self._compute_fixed_ips, + device_owner, False) + self.assertEqual( + [ + mock.call.install_dvr_to_src_mac( + network_type='vxlan', + gateway_mac='aa:bb:cc:11:22:33', + dst_mac=self._compute_port.vif_mac, + dst_port=self._compute_port.ofport, + vlan_tag=lvid, + ), + ] + self._expected_port_bound(self._compute_port, lvid), + int_br.mock_calls) + self.assertEqual([], tun_br.mock_calls) + int_br.reset_mock() + tun_br.reset_mock() with contextlib.nested( mock.patch.object(self.agent, 
'reclaim_local_vlan'), mock.patch.object(self.agent.plugin_rpc, 'update_device_down', return_value=None), - mock.patch.object(self.agent.dvr_agent.int_br, - 'delete_flows')) as (reclaim_vlan_fn, - update_dev_down_fn, - delete_flows_int_fn): + mock.patch.object(self.agent, 'int_br', new=int_br), + mock.patch.object(self.agent, 'tun_br', new=tun_br), + mock.patch.object(self.agent.dvr_agent, 'int_br', new=int_br), + mock.patch.object(self.agent.dvr_agent, 'tun_br', new=tun_br), + ) as (reclaim_vlan_fn, update_dev_down_fn, _, _, _, _): self.agent.treat_devices_removed([self._compute_port.vif_id]) - expected = [ - mock.call( - table=constants.DVR_TO_SRC_MAC, - dl_dst=self._compute_port.vif_mac, - dl_vlan=( - self.agent.local_vlan_map[self._net_uuid].vlan))] - self.assertEqual(expected, delete_flows_int_fn.call_args_list) + int_br.assert_has_calls([ + mock.call.delete_dvr_to_src_mac( + network_type='vxlan', + vlan_tag=lvid, + dst_mac=self._compute_port.vif_mac, + ), + ]) + self.assertEqual([], tun_br.mock_calls) def test_treat_devices_removed_for_dvr_with_compute_ports(self): self._test_treat_devices_removed_for_dvr( @@ -1828,90 +1763,115 @@ class TestOvsDvrNeutronAgent(base.BaseTestCase): def test_treat_devices_removed_for_dvr_csnat_port(self, ofport=10): self._setup_for_dvr_test() + gateway_mac = 'aa:bb:cc:11:22:33' + int_br = mock.create_autospec(self.agent.int_br) + tun_br = mock.create_autospec(self.agent.tun_br) + int_br.set_db_attribute.return_value = True + int_br.db_get_val.return_value = {} with contextlib.nested( - mock.patch('neutron.agent.common.ovs_lib.OVSBridge.' - 'set_db_attribute', - return_value=True), - mock.patch('neutron.agent.common.ovs_lib.OVSBridge.' 
- 'db_get_val', - return_value={})): - with contextlib.nested( - mock.patch.object( - self.agent.dvr_agent.plugin_rpc, 'get_subnet_for_dvr', - return_value={'gateway_ip': '1.1.1.1', - 'cidr': '1.1.1.0/24', - 'ip_version': 4, - 'gateway_mac': 'aa:bb:cc:11:22:33'}), - mock.patch.object(self.agent.dvr_agent.plugin_rpc, - 'get_ports_on_host_by_subnet', - return_value=[]), - mock.patch.object(self.agent.dvr_agent.int_br, - 'get_vif_port_by_id', - return_value=self._port), - mock.patch.object(self.agent.dvr_agent.int_br, 'add_flow'), - mock.patch.object(self.agent.dvr_agent.tun_br, 'add_flow'), - mock.patch.object(self.agent.dvr_agent.tun_br, 'delete_flows') - ) as (get_subnet_fn, get_cphost_fn, - get_vif_fn, add_flow_int_fn, - add_flow_tun_fn, delete_flows_tun_fn): - self.agent.port_bound( - self._port, self._net_uuid, 'vxlan', - None, None, self._fixed_ips, - n_const.DEVICE_OWNER_ROUTER_SNAT, - False) - self.assertTrue(add_flow_int_fn.called) + mock.patch.object( + self.agent.dvr_agent.plugin_rpc, 'get_subnet_for_dvr', + return_value={'gateway_ip': '1.1.1.1', + 'cidr': '1.1.1.0/24', + 'ip_version': 4, + 'gateway_mac': gateway_mac}), + mock.patch.object(self.agent.dvr_agent.plugin_rpc, + 'get_ports_on_host_by_subnet', + return_value=[]), + mock.patch.object(self.agent.dvr_agent.int_br, + 'get_vif_port_by_id', + return_value=self._port), + mock.patch.object(self.agent, 'int_br', new=int_br), + mock.patch.object(self.agent, 'tun_br', new=tun_br), + mock.patch.object(self.agent.dvr_agent, 'int_br', new=int_br), + mock.patch.object(self.agent.dvr_agent, 'tun_br', new=tun_br), + ) as (get_subnet_fn, get_cphost_fn, get_vif_fn, _, _, _, _): + self.agent.port_bound( + self._port, self._net_uuid, 'vxlan', + None, None, self._fixed_ips, + n_const.DEVICE_OWNER_ROUTER_SNAT, + False) + lvid = self.agent.local_vlan_map[self._net_uuid].vlan + expected_on_int_br = [ + mock.call.install_dvr_to_src_mac( + network_type='vxlan', + gateway_mac=gateway_mac, + dst_mac=self._port.vif_mac, + 
dst_port=self._port.ofport, + vlan_tag=lvid, + ), + ] + self._expected_port_bound(self._port, lvid) + self.assertEqual(expected_on_int_br, int_br.mock_calls) + expected_on_tun_br = [ + mock.call.provision_local_vlan( + network_type='vxlan', + lvid=lvid, + segmentation_id=None, + distributed=True, + ), + ] + self.assertEqual(expected_on_tun_br, tun_br.mock_calls) + int_br.reset_mock() + tun_br.reset_mock() with contextlib.nested( mock.patch.object(self.agent, 'reclaim_local_vlan'), mock.patch.object(self.agent.plugin_rpc, 'update_device_down', return_value=None), - mock.patch.object(self.agent.dvr_agent.int_br, - 'delete_flows')) as (reclaim_vlan_fn, - update_dev_down_fn, - delete_flows_int_fn): + mock.patch.object(self.agent, 'int_br', new=int_br), + mock.patch.object(self.agent, 'tun_br', new=tun_br), + mock.patch.object(self.agent.dvr_agent, 'int_br', new=int_br), + mock.patch.object(self.agent.dvr_agent, 'tun_br', new=tun_br), + ) as (reclaim_vlan_fn, update_dev_down_fn, _, _, _, _): self.agent.treat_devices_removed([self._port.vif_id]) - self.assertTrue(delete_flows_int_fn.called) + expected_on_int_br = [ + mock.call.delete_dvr_to_src_mac( + network_type='vxlan', + dst_mac=self._port.vif_mac, + vlan_tag=lvid, + ), + ] + self.assertEqual(expected_on_int_br, int_br.mock_calls) + expected_on_tun_br = [] + self.assertEqual(expected_on_tun_br, tun_br.mock_calls) def test_setup_dvr_flows_on_int_br(self): self._setup_for_dvr_test() + int_br = mock.create_autospec(self.agent.int_br) + tun_br = mock.create_autospec(self.agent.tun_br) with contextlib.nested( - mock.patch.object(self.agent.dvr_agent.int_br, - 'remove_all_flows'), - mock.patch.object(self.agent.dvr_agent.int_br, 'add_flow'), - mock.patch.object(self.agent.dvr_agent.tun_br, 'add_flow'), - mock.patch.object( - self.agent.dvr_agent.plugin_rpc, - 'get_dvr_mac_address_list', - return_value=[{'host': 'cn1', - 'mac_address': 'aa:bb:cc:dd:ee:ff'}, - {'host': 'cn2', - 'mac_address': '11:22:33:44:55:66'}])) as \ - 
(remove_flows_fn, add_int_flow_fn, add_tun_flow_fn, - get_mac_list_fn): + mock.patch.object(self.agent, 'int_br', new=int_br), + mock.patch.object(self.agent, 'tun_br', new=tun_br), + mock.patch.object(self.agent.dvr_agent, 'int_br', new=int_br), + mock.patch.object(self.agent.dvr_agent, 'tun_br', new=tun_br), + mock.patch.object( + self.agent.dvr_agent.plugin_rpc, + 'get_dvr_mac_address_list', + return_value=[{'host': 'cn1', + 'mac_address': 'aa:bb:cc:dd:ee:ff'}, + {'host': 'cn2', + 'mac_address': '11:22:33:44:55:66'}]) + ) as (_, _, _, _, get_mac_list_fn): self.agent.dvr_agent.setup_dvr_flows_on_integ_br() self.assertTrue(self.agent.dvr_agent.in_distributed_mode()) physical_networks = self.agent.dvr_agent.bridge_mappings.keys() ioport = self.agent.dvr_agent.int_ofports[physical_networks[0]] - expected = [ - mock.call(table=constants.CANARY_TABLE, - priority=0, - actions="drop"), - mock.call(table=constants.DVR_TO_SRC_MAC, - priority=1, - actions="drop"), - mock.call(table=constants.DVR_TO_SRC_MAC_VLAN, - priority=1, - actions="drop"), - mock.call(table=constants.LOCAL_SWITCHING, - priority=1, - actions="normal"), - mock.call( - table=constants.LOCAL_SWITCHING, priority=2, - actions="drop", - in_port=ioport)] - self.assertTrue(remove_flows_fn.called) - self.assertEqual(expected, add_int_flow_fn.call_args_list) - self.assertEqual(add_int_flow_fn.call_count, 5) + expected_on_int_br = [ + # setup_dvr_flows_on_integ_br + mock.call.delete_flows(), + mock.call.setup_canary_table(), + mock.call.install_drop(table_id=constants.DVR_TO_SRC_MAC, + priority=1), + mock.call.install_drop(table_id=constants.DVR_TO_SRC_MAC_VLAN, + priority=1), + mock.call.install_normal(table_id=constants.LOCAL_SWITCHING, + priority=1), + mock.call.install_drop(table_id=constants.LOCAL_SWITCHING, + priority=2, + in_port=ioport), + ] + self.assertEqual(expected_on_int_br, int_br.mock_calls) + self.assertEqual([], tun_br.mock_calls) def test_get_dvr_mac_address(self): self._setup_for_dvr_test() @@ 
-1928,17 +1888,18 @@ class TestOvsDvrNeutronAgent(base.BaseTestCase): def test_get_dvr_mac_address_exception(self): self._setup_for_dvr_test() self.agent.dvr_agent.dvr_mac_address = None + int_br = mock.create_autospec(self.agent.int_br) with contextlib.nested( mock.patch.object(self.agent.dvr_agent.plugin_rpc, 'get_dvr_mac_address_by_host', side_effect=oslo_messaging.RemoteError), - mock.patch.object(self.agent.dvr_agent.int_br, - 'add_flow')) as (gd_mac, add_int_flow_fn): - + mock.patch.object(self.agent, 'int_br', new=int_br), + mock.patch.object(self.agent.dvr_agent, 'int_br', new=int_br), + ) as (gd_mac, _, _): self.agent.dvr_agent.get_dvr_mac_address() self.assertIsNone(self.agent.dvr_agent.dvr_mac_address) self.assertFalse(self.agent.dvr_agent.in_distributed_mode()) - self.assertEqual(add_int_flow_fn.call_count, 1) + self.assertEqual([mock.call.install_normal()], int_br.mock_calls) def test_get_dvr_mac_address_retried(self): valid_entry = {'host': 'cn1', 'mac_address': 'aa:22:33:44:55:66'} @@ -1962,12 +1923,15 @@ class TestOvsDvrNeutronAgent(base.BaseTestCase): # Raise a timeout every time until we give up, currently 5 tries self._setup_for_dvr_test() self.agent.dvr_agent.dvr_mac_address = None + int_br = mock.create_autospec(self.agent.int_br) with contextlib.nested( - mock.patch.object(self.agent.dvr_agent.plugin_rpc, - 'get_dvr_mac_address_by_host', - side_effect=raise_timeout), - mock.patch.object(utils, "execute"), - ) as (rpc_mock, execute_mock): + mock.patch.object(self.agent.dvr_agent.plugin_rpc, + 'get_dvr_mac_address_by_host', + side_effect=raise_timeout), + mock.patch.object(utils, "execute"), + mock.patch.object(self.agent, 'int_br', new=int_br), + mock.patch.object(self.agent.dvr_agent, 'int_br', new=int_br), + ) as (rpc_mock, execute_mock, _, _): self.agent.dvr_agent.get_dvr_mac_address() self.assertIsNone(self.agent.dvr_agent.dvr_mac_address) self.assertFalse(self.agent.dvr_agent.in_distributed_mode()) @@ -1978,69 +1942,77 @@ class 
TestOvsDvrNeutronAgent(base.BaseTestCase): self._setup_for_dvr_test() newhost = 'cn2' newmac = 'aa:bb:cc:dd:ee:ff' - int_ofport = self.agent.dvr_agent.int_ofports['physeth1'] - patch_int_ofport = self.agent.dvr_agent.patch_int_ofport - patch_tun_ofport = self.agent.dvr_agent.patch_tun_ofport + int_br = mock.create_autospec(self.agent.int_br) + tun_br = mock.create_autospec(self.agent.tun_br) + phys_br = mock.create_autospec(self.br_phys_cls('br-phys')) + physical_network = 'physeth1' with contextlib.nested( - mock.patch.object(self.agent.dvr_agent.int_br, 'add_flow'), - mock.patch.object(self.agent.dvr_agent.tun_br, 'add_flow'), - mock.patch.object(self.agent.dvr_agent.int_br, 'delete_flows'), - mock.patch.object(self.agent.dvr_agent.phys_brs['physeth1'], - 'add_flow') - ) as (add_flow_fn, add_flow_tn_fn, del_flows_fn, add_flow_phys_fn): + mock.patch.object(self.agent, 'int_br', new=int_br), + mock.patch.object(self.agent, 'tun_br', new=tun_br), + mock.patch.dict(self.agent.phys_brs, + {physical_network: phys_br}), + mock.patch.object(self.agent.dvr_agent, 'int_br', new=int_br), + mock.patch.object(self.agent.dvr_agent, 'tun_br', new=tun_br), + mock.patch.dict(self.agent.dvr_agent.phys_brs, + {physical_network: phys_br}), + ): self.agent.dvr_agent.\ dvr_mac_address_update( dvr_macs=[{'host': newhost, 'mac_address': newmac}]) - expected = [ - mock.call(table=constants.LOCAL_SWITCHING, - priority=4, - in_port=int_ofport, - dl_src=newmac, - actions="resubmit(,%s)" % - constants.DVR_TO_SRC_MAC_VLAN), - mock.call(table=constants.LOCAL_SWITCHING, - priority=2, - in_port=patch_tun_ofport, - dl_src=newmac, - actions="resubmit(,%s)" % - constants.DVR_TO_SRC_MAC)] - self.assertEqual(expected, add_flow_fn.call_args_list) - add_flow_phys_fn.assert_called_with( - table=constants.DVR_NOT_LEARN_VLAN, - priority=2, - dl_src=newmac, - actions="output:%s" % - self.agent.dvr_agent.phys_ofports['physeth1']) - add_flow_tn_fn.assert_called_with(table=constants.DVR_NOT_LEARN, - 
priority=1, - dl_src=newmac, - actions="output:%s" - % patch_int_ofport) - self.assertFalse(del_flows_fn.called) + expected_on_int_br = [ + mock.call.add_dvr_mac_vlan( + mac=newmac, + port=self.agent.int_ofports[physical_network]), + mock.call.add_dvr_mac_tun( + mac=newmac, + port=self.agent.patch_tun_ofport), + ] + expected_on_tun_br = [ + mock.call.add_dvr_mac_tun( + mac=newmac, + port=self.agent.patch_int_ofport), + ] + expected_on_phys_br = [ + mock.call.add_dvr_mac_vlan( + mac=newmac, + port=self.agent.phys_ofports[physical_network]), + ] + self.assertEqual(expected_on_int_br, int_br.mock_calls) + self.assertEqual(expected_on_tun_br, tun_br.mock_calls) + self.assertEqual(expected_on_phys_br, phys_br.mock_calls) + int_br.reset_mock() + tun_br.reset_mock() + phys_br.reset_mock() with contextlib.nested( - mock.patch.object(self.agent.dvr_agent.int_br, 'add_flow'), - mock.patch.object(self.agent.dvr_agent.tun_br, 'delete_flows'), - mock.patch.object(self.agent.dvr_agent.int_br, 'delete_flows'), - mock.patch.object(self.agent.dvr_agent.phys_brs['physeth1'], - 'delete_flows'), - ) as (add_flow_fn, del_flows_tn_fn, del_flows_fn, del_flows_phys_fn): + mock.patch.object(self.agent, 'int_br', new=int_br), + mock.patch.object(self.agent, 'tun_br', new=tun_br), + mock.patch.dict(self.agent.phys_brs, + {physical_network: phys_br}), + mock.patch.object(self.agent.dvr_agent, 'int_br', new=int_br), + mock.patch.object(self.agent.dvr_agent, 'tun_br', new=tun_br), + mock.patch.dict(self.agent.dvr_agent.phys_brs, + {physical_network: phys_br}), + ): self.agent.dvr_agent.dvr_mac_address_update(dvr_macs=[]) - ioport = self.agent.dvr_agent.int_ofports['physeth1'] - expected = [ - mock.call(table=constants.LOCAL_SWITCHING, - in_port=ioport, - dl_src=newmac), - mock.call(table=constants.LOCAL_SWITCHING, - in_port=patch_tun_ofport, - dl_src=newmac)] - self.assertEqual(expected, del_flows_fn.call_args_list) - del_flows_phys_fn.asert_called_with( - table=constants.DVR_NOT_LEARN_VLAN, - 
dl_src=newmac) - del_flows_tn_fn.assert_called_with(table=constants.DVR_NOT_LEARN, - dl_src=newmac) - self.assertFalse(add_flow_fn.called) + expected_on_int_br = [ + mock.call.remove_dvr_mac_vlan( + mac=newmac), + mock.call.remove_dvr_mac_tun( + mac=newmac, + port=self.agent.patch_tun_ofport), + ] + expected_on_tun_br = [ + mock.call.remove_dvr_mac_tun( + mac=newmac), + ] + expected_on_phys_br = [ + mock.call.remove_dvr_mac_vlan( + mac=newmac), + ] + self.assertEqual(expected_on_int_br, int_br.mock_calls) + self.assertEqual(expected_on_tun_br, tun_br.mock_calls) + self.assertEqual(expected_on_phys_br, phys_br.mock_calls) def test_ovs_restart(self): self._setup_for_dvr_test() @@ -2050,11 +2022,13 @@ class TestOvsDvrNeutronAgent(base.BaseTestCase): 'setup_dvr_flows_on_phys_br', 'setup_dvr_mac_flows_on_all_brs') reset_mocks = [mock.patch.object(self.agent.dvr_agent, method).start() for method in reset_methods] + tun_br = mock.create_autospec(self.agent.tun_br) with contextlib.nested( mock.patch.object(self.agent, 'check_ovs_status', return_value=constants.OVS_RESTARTED), mock.patch.object(self.agent, '_agent_has_updates', - side_effect=TypeError('loop exit')) + side_effect=TypeError('loop exit')), + mock.patch.object(self.agent, 'tun_br', new=tun_br), ): # block RPC calls and bridge calls self.agent.setup_physical_bridges = mock.Mock() @@ -2066,3 +2040,8 @@ class TestOvsDvrNeutronAgent(base.BaseTestCase): except TypeError: pass self.assertTrue(all([x.called for x in reset_mocks])) + + +class TestOvsDvrNeutronAgentOFCtl(TestOvsDvrNeutronAgent, + ovs_test_base.OVSOFCtlTestBase): + pass diff --git a/neutron/tests/unit/plugins/openvswitch/test_ovs_tunnel.py b/neutron/tests/unit/plugins/openvswitch/test_ovs_tunnel.py index 7773d5a0ea7..a3f8600ed99 100644 --- a/neutron/tests/unit/plugins/openvswitch/test_ovs_tunnel.py +++ b/neutron/tests/unit/plugins/openvswitch/test_ovs_tunnel.py @@ -24,9 +24,8 @@ from oslo_log import log from neutron.agent.common import ovs_lib from 
neutron.agent.linux import ip_lib from neutron.plugins.common import constants as p_const -from neutron.plugins.openvswitch.agent import ovs_neutron_agent from neutron.plugins.openvswitch.common import constants -from neutron.tests import base +from neutron.tests.unit.plugins.openvswitch.agent import ovs_test_base # Useful global dummy variables. @@ -40,11 +39,6 @@ OFPORT_NUM = 1 VIF_PORT = ovs_lib.VifPort('port', OFPORT_NUM, VIF_ID, VIF_MAC, 'switch') VIF_PORTS = {VIF_ID: VIF_PORT} -LVM = ovs_neutron_agent.LocalVLANMapping(LV_ID, 'gre', None, LS_ID, VIF_PORTS) -LVM_FLAT = ovs_neutron_agent.LocalVLANMapping( - LV_ID, 'flat', 'net1', LS_ID, VIF_PORTS) -LVM_VLAN = ovs_neutron_agent.LocalVLANMapping( - LV_ID, 'vlan', 'net1', LS_ID, VIF_PORTS) FIXED_IPS = [{'subnet_id': 'my-subnet-uuid', 'ip_address': '1.1.1.1'}] VM_DEVICE_OWNER = "compute:None" @@ -66,7 +60,7 @@ class DummyVlanBinding(object): self.vlan_id = vlan_id -class TunnelTest(base.BaseTestCase): +class TunnelTest(object): USE_VETH_INTERCONNECTION = False VETH_MTU = None @@ -86,21 +80,41 @@ class TunnelTest(base.BaseTestCase): self.MAP_TUN_INT_OFPORT = 33333 self.MAP_TUN_PHY_OFPORT = 44444 + self.LVM = self.mod_agent.LocalVLANMapping( + LV_ID, 'gre', None, LS_ID, VIF_PORTS) + self.LVM_FLAT = self.mod_agent.LocalVLANMapping( + LV_ID, 'flat', 'net1', LS_ID, VIF_PORTS) + self.LVM_VLAN = self.mod_agent.LocalVLANMapping( + LV_ID, 'vlan', 'net1', LS_ID, VIF_PORTS) + self.inta = mock.Mock() self.intb = mock.Mock() - self.ovs_bridges = {self.INT_BRIDGE: mock.Mock(), - self.TUN_BRIDGE: mock.Mock(), - self.MAP_TUN_BRIDGE: mock.Mock(), - } + self.ovs_bridges = { + self.INT_BRIDGE: mock.create_autospec( + self.br_int_cls('br-int')), + self.TUN_BRIDGE: mock.create_autospec( + self.br_tun_cls('br-tun')), + self.MAP_TUN_BRIDGE: mock.create_autospec( + self.br_phys_cls('br-phys')), + } self.ovs_int_ofports = { 'patch-tun': self.TUN_OFPORT, 'int-%s' % self.MAP_TUN_BRIDGE: self.MAP_TUN_INT_OFPORT } - self.mock_bridge = 
mock.patch.object(ovs_lib, 'OVSBridge').start() - self.mock_bridge.side_effect = (lambda br_name: - self.ovs_bridges[br_name]) + def lookup_br(br_name, *args, **kwargs): + return self.ovs_bridges[br_name] + + self.mock_int_bridge_cls = mock.patch(self._BR_INT_CLASS, + autospec=True).start() + self.mock_int_bridge_cls.side_effect = lookup_br + self.mock_phys_bridge_cls = mock.patch(self._BR_PHYS_CLASS, + autospec=True).start() + self.mock_phys_bridge_cls.side_effect = lookup_br + self.mock_tun_bridge_cls = mock.patch(self._BR_TUN_CLASS, + autospec=True).start() + self.mock_tun_bridge_cls.side_effect = lookup_br self.mock_int_bridge = self.ovs_bridges[self.INT_BRIDGE] self.mock_int_bridge.add_port.return_value = self.MAP_TUN_INT_OFPORT @@ -139,10 +153,14 @@ class TunnelTest(base.BaseTestCase): self._define_expected_calls() - def _define_expected_calls(self): - self.mock_bridge_expected = [ + def _define_expected_calls(self, arp_responder=False): + self.mock_int_bridge_cls_expected = [ mock.call(self.INT_BRIDGE), + ] + self.mock_phys_bridge_cls_expected = [ mock.call(self.MAP_TUN_BRIDGE), + ] + self.mock_tun_bridge_cls_expected = [ mock.call(self.TUN_BRIDGE), ] @@ -150,20 +168,17 @@ class TunnelTest(base.BaseTestCase): self.mock_int_bridge_expected = [ mock.call.create(), mock.call.set_secure_mode(), + mock.call.setup_controllers(mock.ANY), mock.call.delete_port('patch-tun'), - mock.call.remove_all_flows(), - mock.call.add_flow(priority=1, actions='normal'), - mock.call.add_flow(priority=0, table=constants.CANARY_TABLE, - actions='drop'), + mock.call.setup_default_table(), ] self.mock_map_tun_bridge_expected = [ - mock.call.remove_all_flows(), - mock.call.add_flow(priority=1, actions='normal'), + mock.call.setup_controllers(mock.ANY), + mock.call.setup_default_table(), mock.call.delete_port('phy-%s' % self.MAP_TUN_BRIDGE), mock.call.add_patch_port('phy-%s' % self.MAP_TUN_BRIDGE, - constants.NONEXISTENT_PEER), - ] + constants.NONEXISTENT_PEER), ] 
self.mock_int_bridge_expected += [ mock.call.delete_port('int-%s' % self.MAP_TUN_BRIDGE), mock.call.add_patch_port('int-%s' % self.MAP_TUN_BRIDGE, @@ -171,17 +186,13 @@ class TunnelTest(base.BaseTestCase): ] self.mock_int_bridge_expected += [ - mock.call.add_flow(priority=2, - in_port=self.MAP_TUN_INT_OFPORT, - actions='drop'), + mock.call.drop_port(in_port=self.MAP_TUN_INT_OFPORT), mock.call.set_db_attribute( 'Interface', 'int-%s' % self.MAP_TUN_BRIDGE, 'options:peer', 'phy-%s' % self.MAP_TUN_BRIDGE), ] self.mock_map_tun_bridge_expected += [ - mock.call.add_flow(priority=2, - in_port=self.MAP_TUN_PHY_OFPORT, - actions='drop'), + mock.call.drop_port(in_port=self.MAP_TUN_PHY_OFPORT), mock.call.set_db_attribute( 'Interface', 'phy-%s' % self.MAP_TUN_BRIDGE, 'options:peer', 'int-%s' % self.MAP_TUN_BRIDGE), @@ -189,6 +200,7 @@ class TunnelTest(base.BaseTestCase): self.mock_tun_bridge_expected = [ mock.call.reset_bridge(secure_mode=True), + mock.call.setup_controllers(mock.ANY), mock.call.add_patch_port('patch-int', 'patch-tun'), ] self.mock_int_bridge_expected += [ @@ -199,48 +211,8 @@ class TunnelTest(base.BaseTestCase): ] self.mock_tun_bridge_expected += [ - mock.call.remove_all_flows(), - mock.call.add_flow(priority=1, - actions="resubmit(,%s)" % - constants.PATCH_LV_TO_TUN, - in_port=self.INT_OFPORT), - mock.call.add_flow(priority=0, actions="drop"), - mock.call.add_flow(priority=0, table=constants.PATCH_LV_TO_TUN, - dl_dst=UCAST_MAC, - actions="resubmit(,%s)" % - constants.UCAST_TO_TUN), - mock.call.add_flow(priority=0, table=constants.PATCH_LV_TO_TUN, - dl_dst=BCAST_MAC, - actions="resubmit(,%s)" % - constants.FLOOD_TO_TUN), - ] - for tunnel_type in constants.TUNNEL_NETWORK_TYPES: - self.mock_tun_bridge_expected.append( - mock.call.add_flow( - table=constants.TUN_TABLE[tunnel_type], - priority=0, - actions="drop")) - learned_flow = ("table=%s," - "priority=1," - "hard_timeout=300," - "NXM_OF_VLAN_TCI[0..11]," - "NXM_OF_ETH_DST[]=NXM_OF_ETH_SRC[]," - 
"load:0->NXM_OF_VLAN_TCI[]," - "load:NXM_NX_TUN_ID[]->NXM_NX_TUN_ID[]," - "output:NXM_OF_IN_PORT[]" % - constants.UCAST_TO_TUN) - self.mock_tun_bridge_expected += [ - mock.call.add_flow(table=constants.LEARN_FROM_TUN, - priority=1, - actions="learn(%s),output:%s" % - (learned_flow, self.INT_OFPORT)), - mock.call.add_flow(table=constants.UCAST_TO_TUN, - priority=0, - actions="resubmit(,%s)" % - constants.FLOOD_TO_TUN), - mock.call.add_flow(table=constants.FLOOD_TO_TUN, - priority=0, - actions="drop") + mock.call.delete_flows(), + mock.call.setup_default_table(self.INT_OFPORT, arp_responder), ] self.device_exists_expected = [] @@ -255,6 +227,12 @@ class TunnelTest(base.BaseTestCase): self.execute_expected = [] def _build_agent(self, **kwargs): + bridge_classes = { + 'br_int': self.mock_int_bridge_cls, + 'br_phys': self.mock_phys_bridge_cls, + 'br_tun': self.mock_tun_bridge_cls, + } + kwargs.setdefault('bridge_classes', bridge_classes) kwargs.setdefault('integ_br', self.INT_BRIDGE) kwargs.setdefault('tun_br', self.TUN_BRIDGE) kwargs.setdefault('local_ip', '10.0.0.1') @@ -264,14 +242,19 @@ class TunnelTest(base.BaseTestCase): kwargs.setdefault('veth_mtu', self.VETH_MTU) kwargs.setdefault('use_veth_interconnection', self.USE_VETH_INTERCONNECTION) - return ovs_neutron_agent.OVSNeutronAgent(**kwargs) + return self.mod_agent.OVSNeutronAgent(**kwargs) def _verify_mock_call(self, mock_obj, expected): mock_obj.assert_has_calls(expected) self.assertEqual(len(mock_obj.mock_calls), len(expected)) def _verify_mock_calls(self): - self._verify_mock_call(self.mock_bridge, self.mock_bridge_expected) + self._verify_mock_call(self.mock_int_bridge_cls, + self.mock_int_bridge_cls_expected) + self._verify_mock_call(self.mock_tun_bridge_cls, + self.mock_tun_bridge_cls_expected) + self._verify_mock_call(self.mock_phys_bridge_cls, + self.mock_phys_bridge_cls_expected) self._verify_mock_call(self.mock_int_bridge, self.mock_int_bridge_expected) self._verify_mock_call(self.mock_map_tun_bridge, 
@@ -296,20 +279,7 @@ class TunnelTest(base.BaseTestCase): # The next two tests use l2_pop flag to test ARP responder def test_construct_with_arp_responder(self): self._build_agent(l2_population=True, arp_responder=True) - self.mock_tun_bridge_expected.insert( - 5, mock.call.add_flow(table=constants.PATCH_LV_TO_TUN, - priority=1, - proto="arp", - dl_dst="ff:ff:ff:ff:ff:ff", - actions="resubmit(,%s)" % - constants.ARP_RESPONDER) - ) - self.mock_tun_bridge_expected.insert( - 12, mock.call.add_flow(table=constants.ARP_RESPONDER, - priority=0, - actions="resubmit(,%s)" % - constants.FLOOD_TO_TUN) - ) + self._define_expected_calls(True) self._verify_mock_calls() def test_construct_without_arp_responder(self): @@ -321,18 +291,13 @@ class TunnelTest(base.BaseTestCase): self._verify_mock_calls() def test_provision_local_vlan(self): - ofports = ','.join(TUN_OFPORTS[p_const.TYPE_GRE].values()) + ofports = TUN_OFPORTS[p_const.TYPE_GRE].values() self.mock_tun_bridge_expected += [ - mock.call.mod_flow(table=constants.FLOOD_TO_TUN, - dl_vlan=LV_ID, - actions="strip_vlan," - "set_tunnel:%s,output:%s" % - (LS_ID, ofports)), - mock.call.add_flow(table=constants.TUN_TABLE['gre'], - priority=1, - tun_id=LS_ID, - actions="mod_vlan_vid:%s,resubmit(,%s)" % - (LV_ID, constants.LEARN_FROM_TUN)), + mock.call.install_flood_to_tun(LV_ID, LS_ID, ofports), + mock.call.provision_local_vlan( + network_type=p_const.TYPE_GRE, + lvid=LV_ID, + segmentation_id=LS_ID), ] a = self._build_agent() @@ -342,15 +307,17 @@ class TunnelTest(base.BaseTestCase): self._verify_mock_calls() def test_provision_local_vlan_flat(self): - action_string = 'strip_vlan,normal' self.mock_map_tun_bridge_expected.append( - mock.call.add_flow(priority=4, in_port=self.MAP_TUN_PHY_OFPORT, - dl_vlan=LV_ID, actions=action_string)) - - action_string = 'mod_vlan_vid:%s,normal' % LV_ID + mock.call.provision_local_vlan( + port=self.MAP_TUN_PHY_OFPORT, + lvid=LV_ID, + segmentation_id=None, + distributed=False)) 
self.mock_int_bridge_expected.append( - mock.call.add_flow(priority=3, in_port=self.INT_OFPORT, - dl_vlan=65535, actions=action_string)) + mock.call.provision_local_vlan( + port=self.INT_OFPORT, + lvid=LV_ID, + segmentation_id=None)) a = self._build_agent() a.available_local_vlans = set([LV_ID]) @@ -366,16 +333,17 @@ class TunnelTest(base.BaseTestCase): self._verify_mock_calls() def test_provision_local_vlan_vlan(self): - action_string = 'mod_vlan_vid:%s,normal' % LS_ID self.mock_map_tun_bridge_expected.append( - mock.call.add_flow(priority=4, in_port=self.MAP_TUN_PHY_OFPORT, - dl_vlan=LV_ID, actions=action_string)) - - action_string = 'mod_vlan_vid:%s,normal' % LV_ID + mock.call.provision_local_vlan( + port=self.MAP_TUN_PHY_OFPORT, + lvid=LV_ID, + segmentation_id=LS_ID, + distributed=False)) self.mock_int_bridge_expected.append( - mock.call.add_flow(priority=3, in_port=self.INT_OFPORT, - dl_vlan=LS_ID, actions=action_string)) - + mock.call.provision_local_vlan( + port=self.INT_OFPORT, + lvid=LV_ID, + segmentation_id=LS_ID)) a = self._build_agent() a.available_local_vlans = set([LV_ID]) a.phys_brs['net1'] = self.mock_map_tun_bridge @@ -391,54 +359,58 @@ class TunnelTest(base.BaseTestCase): def test_reclaim_local_vlan(self): self.mock_tun_bridge_expected += [ - mock.call.delete_flows( - table=constants.TUN_TABLE['gre'], tun_id=LS_ID), - mock.call.delete_flows(dl_vlan=LVM.vlan) + mock.call.reclaim_local_vlan(network_type='gre', + segmentation_id=LS_ID), + mock.call.delete_flood_to_tun(LV_ID), + mock.call.delete_unicast_to_tun(LV_ID, None), + mock.call.delete_arp_responder(LV_ID, None), ] a = self._build_agent() a.available_local_vlans = set() - a.local_vlan_map[NET_UUID] = LVM + a.local_vlan_map[NET_UUID] = self.LVM a.reclaim_local_vlan(NET_UUID) - self.assertIn(LVM.vlan, a.available_local_vlans) + self.assertIn(self.LVM.vlan, a.available_local_vlans) self._verify_mock_calls() def test_reclaim_local_vlan_flat(self): self.mock_map_tun_bridge_expected.append( - 
mock.call.delete_flows( - in_port=self.MAP_TUN_PHY_OFPORT, dl_vlan=LVM_FLAT.vlan)) + mock.call.reclaim_local_vlan( + port=self.MAP_TUN_PHY_OFPORT, + lvid=self.LVM_FLAT.vlan)) self.mock_int_bridge_expected.append( - mock.call.delete_flows( - dl_vlan=65535, in_port=self.INT_OFPORT)) - + mock.call.reclaim_local_vlan( + port=self.INT_OFPORT, + segmentation_id=None)) a = self._build_agent() a.phys_brs['net1'] = self.mock_map_tun_bridge a.phys_ofports['net1'] = self.MAP_TUN_PHY_OFPORT a.int_ofports['net1'] = self.INT_OFPORT a.available_local_vlans = set() - a.local_vlan_map[NET_UUID] = LVM_FLAT + a.local_vlan_map[NET_UUID] = self.LVM_FLAT a.reclaim_local_vlan(NET_UUID) - self.assertIn(LVM_FLAT.vlan, a.available_local_vlans) + self.assertIn(self.LVM_FLAT.vlan, a.available_local_vlans) self._verify_mock_calls() def test_reclaim_local_vlan_vlan(self): self.mock_map_tun_bridge_expected.append( - mock.call.delete_flows( - in_port=self.MAP_TUN_PHY_OFPORT, dl_vlan=LVM_VLAN.vlan)) + mock.call.reclaim_local_vlan( + port=self.MAP_TUN_PHY_OFPORT, + lvid=self.LVM_VLAN.vlan)) self.mock_int_bridge_expected.append( - mock.call.delete_flows( - dl_vlan=LS_ID, in_port=self.INT_OFPORT)) - + mock.call.reclaim_local_vlan( + port=self.INT_OFPORT, + segmentation_id=LS_ID)) a = self._build_agent() a.phys_brs['net1'] = self.mock_map_tun_bridge a.phys_ofports['net1'] = self.MAP_TUN_PHY_OFPORT a.int_ofports['net1'] = self.INT_OFPORT a.available_local_vlans = set() - a.local_vlan_map[NET_UUID] = LVM_VLAN + a.local_vlan_map[NET_UUID] = self.LVM_VLAN a.reclaim_local_vlan(NET_UUID) - self.assertIn(LVM_VLAN.vlan, a.available_local_vlans) + self.assertIn(self.LVM_VLAN.vlan, a.available_local_vlans) self._verify_mock_calls() def test_port_bound(self): @@ -453,17 +425,18 @@ class TunnelTest(base.BaseTestCase): vlan_mapping)] a = self._build_agent() - a.local_vlan_map[NET_UUID] = LVM + a.local_vlan_map[NET_UUID] = self.LVM a.local_dvr_map = {} + self.ovs_bridges[self.INT_BRIDGE].db_get_val.return_value = 
{} a.port_bound(VIF_PORT, NET_UUID, 'gre', None, LS_ID, FIXED_IPS, VM_DEVICE_OWNER, False) self._verify_mock_calls() def test_port_unbound(self): - with mock.patch.object(ovs_neutron_agent.OVSNeutronAgent, + with mock.patch.object(self.mod_agent.OVSNeutronAgent, 'reclaim_local_vlan') as reclaim_local_vlan: a = self._build_agent() - a.local_vlan_map[NET_UUID] = LVM + a.local_vlan_map[NET_UUID] = self.LVM a.port_unbound(VIF_ID, NET_UUID) reclaim_local_vlan.assert_called_once_with(NET_UUID) @@ -474,14 +447,14 @@ class TunnelTest(base.BaseTestCase): mock.call.db_get_val('Port', VIF_PORT.port_name, 'tag'), mock.call.set_db_attribute( 'Port', VIF_PORT.port_name, - 'tag', ovs_neutron_agent.DEAD_VLAN_TAG), - mock.call.add_flow(priority=2, in_port=VIF_PORT.ofport, - actions='drop') + 'tag', self.mod_agent.DEAD_VLAN_TAG), + mock.call.drop_port(in_port=VIF_PORT.ofport), ] a = self._build_agent() a.available_local_vlans = set([LV_ID]) - a.local_vlan_map[NET_UUID] = LVM + a.local_vlan_map[NET_UUID] = self.LVM + self.ovs_bridges[self.INT_BRIDGE].db_get_val.return_value = mock.Mock() a.port_dead(VIF_PORT) self._verify_mock_calls() @@ -491,8 +464,7 @@ class TunnelTest(base.BaseTestCase): self.mock_tun_bridge_expected += [ mock.call.add_tunnel_port('gre-0a000a01', '10.0.10.1', '10.0.0.1', 'gre', 4789, True), - mock.call.add_flow(priority=1, in_port=tunnel_port, - actions='resubmit(,3)') + mock.call.setup_tunnel_port('gre', tunnel_port), ] a = self._build_agent() @@ -517,20 +489,22 @@ class TunnelTest(base.BaseTestCase): 'removed': set(['tap0'])} self.mock_int_bridge_expected += [ - mock.call.dump_flows_for_table(constants.CANARY_TABLE), - mock.call.dump_flows_for_table(constants.CANARY_TABLE) + mock.call.check_canary_table(), + mock.call.check_canary_table() ] + self.ovs_bridges[self.INT_BRIDGE].check_canary_table.return_value = \ + constants.OVS_NORMAL with contextlib.nested( mock.patch.object(log.KeywordArgumentAdapter, 'exception'), - 
mock.patch.object(ovs_neutron_agent.OVSNeutronAgent, + mock.patch.object(self.mod_agent.OVSNeutronAgent, 'scan_ports'), - mock.patch.object(ovs_neutron_agent.OVSNeutronAgent, + mock.patch.object(self.mod_agent.OVSNeutronAgent, 'process_network_ports'), - mock.patch.object(ovs_neutron_agent.OVSNeutronAgent, + mock.patch.object(self.mod_agent.OVSNeutronAgent, 'tunnel_sync'), mock.patch.object(time, 'sleep'), - mock.patch.object(ovs_neutron_agent.OVSNeutronAgent, + mock.patch.object(self.mod_agent.OVSNeutronAgent, 'update_stale_ofport_rules') ) as (log_exception, scan_ports, process_network_ports, ts, time_sleep, update_stale): @@ -569,29 +543,35 @@ class TunnelTest(base.BaseTestCase): self._verify_mock_calls() +class TunnelTestOFCtl(TunnelTest, ovs_test_base.OVSOFCtlTestBase): + pass + + class TunnelTestUseVethInterco(TunnelTest): USE_VETH_INTERCONNECTION = True - def _define_expected_calls(self): - self.mock_bridge_expected = [ + def _define_expected_calls(self, arp_responder=False): + self.mock_int_bridge_cls_expected = [ mock.call(self.INT_BRIDGE), + ] + self.mock_phys_bridge_cls_expected = [ mock.call(self.MAP_TUN_BRIDGE), + ] + self.mock_tun_bridge_cls_expected = [ mock.call(self.TUN_BRIDGE), ] self.mock_int_bridge_expected = [ mock.call.create(), mock.call.set_secure_mode(), + mock.call.setup_controllers(mock.ANY), mock.call.delete_port('patch-tun'), - mock.call.remove_all_flows(), - mock.call.add_flow(priority=1, actions='normal'), - mock.call.add_flow(table=constants.CANARY_TABLE, priority=0, - actions="drop") + mock.call.setup_default_table(), ] self.mock_map_tun_bridge_expected = [ - mock.call.remove_all_flows(), - mock.call.add_flow(priority=1, actions='normal'), + mock.call.setup_controllers(mock.ANY), + mock.call.setup_default_table(), mock.call.delete_port('phy-%s' % self.MAP_TUN_BRIDGE), mock.call.add_port(self.intb), ] @@ -601,18 +581,15 @@ class TunnelTestUseVethInterco(TunnelTest): ] self.mock_int_bridge_expected += [ - 
mock.call.add_flow(priority=2, - in_port=self.MAP_TUN_INT_OFPORT, - actions='drop') + mock.call.drop_port(in_port=self.MAP_TUN_INT_OFPORT), ] self.mock_map_tun_bridge_expected += [ - mock.call.add_flow(priority=2, - in_port=self.MAP_TUN_PHY_OFPORT, - actions='drop') + mock.call.drop_port(in_port=self.MAP_TUN_PHY_OFPORT), ] self.mock_tun_bridge_expected = [ mock.call.reset_bridge(secure_mode=True), + mock.call.setup_controllers(mock.ANY), mock.call.add_patch_port('patch-int', 'patch-tun'), ] self.mock_int_bridge_expected += [ @@ -622,50 +599,8 @@ class TunnelTestUseVethInterco(TunnelTest): mock.call.get_vif_ports(), ] self.mock_tun_bridge_expected += [ - mock.call.remove_all_flows(), - mock.call.add_flow(priority=1, - in_port=self.INT_OFPORT, - actions="resubmit(,%s)" % - constants.PATCH_LV_TO_TUN), - mock.call.add_flow(priority=0, actions='drop'), - mock.call.add_flow(priority=0, - table=constants.PATCH_LV_TO_TUN, - dl_dst=UCAST_MAC, - actions="resubmit(,%s)" % - constants.UCAST_TO_TUN), - mock.call.add_flow(priority=0, - table=constants.PATCH_LV_TO_TUN, - dl_dst=BCAST_MAC, - actions="resubmit(,%s)" % - constants.FLOOD_TO_TUN), - ] - for tunnel_type in constants.TUNNEL_NETWORK_TYPES: - self.mock_tun_bridge_expected.append( - mock.call.add_flow( - table=constants.TUN_TABLE[tunnel_type], - priority=0, - actions="drop")) - learned_flow = ("table=%s," - "priority=1," - "hard_timeout=300," - "NXM_OF_VLAN_TCI[0..11]," - "NXM_OF_ETH_DST[]=NXM_OF_ETH_SRC[]," - "load:0->NXM_OF_VLAN_TCI[]," - "load:NXM_NX_TUN_ID[]->NXM_NX_TUN_ID[]," - "output:NXM_OF_IN_PORT[]" % - constants.UCAST_TO_TUN) - self.mock_tun_bridge_expected += [ - mock.call.add_flow(table=constants.LEARN_FROM_TUN, - priority=1, - actions="learn(%s),output:%s" % - (learned_flow, self.INT_OFPORT)), - mock.call.add_flow(table=constants.UCAST_TO_TUN, - priority=0, - actions="resubmit(,%s)" % - constants.FLOOD_TO_TUN), - mock.call.add_flow(table=constants.FLOOD_TO_TUN, - priority=0, - actions="drop") + 
mock.call.delete_flows(), + mock.call.setup_default_table(self.INT_OFPORT, arp_responder), ] self.device_exists_expected = [ @@ -690,10 +625,20 @@ class TunnelTestUseVethInterco(TunnelTest): '--timeout=10'])] +class TunnelTestUseVethIntercoOFCtl(TunnelTestUseVethInterco, + ovs_test_base.OVSOFCtlTestBase): + pass + + class TunnelTestWithMTU(TunnelTestUseVethInterco): VETH_MTU = 1500 - def _define_expected_calls(self): - super(TunnelTestWithMTU, self)._define_expected_calls() + def _define_expected_calls(self, arp_responder=False): + super(TunnelTestWithMTU, self)._define_expected_calls(arp_responder) self.inta_expected.append(mock.call.link.set_mtu(self.VETH_MTU)) self.intb_expected.append(mock.call.link.set_mtu(self.VETH_MTU)) + + +class TunnelTestWithMTUOFCtl(TunnelTestWithMTU, + ovs_test_base.OVSOFCtlTestBase): + pass From 5f28762ae2a96b8062628d0cac81f5da94c6ed34 Mon Sep 17 00:00:00 2001 From: John Schwarz Date: Thu, 16 Apr 2015 12:01:26 +0300 Subject: [PATCH 044/292] Add callback prior to deleting a subnet When using LBaaS and trying to delete a subnet, neutron has no way of knowing if the subnet is associated to some pool. As a result, the subnet is deleted but the pool remains associated to the (now nonexistent) subnet_id. This patch lays the ground-work for adding a check in LBaaS' side to prevent such cases. 
Related-Bug: #1413817 Change-Id: I3d5e231b67c72ffd919c92d65b57da56c63e053c --- neutron/callbacks/resources.py | 2 ++ neutron/common/exceptions.py | 8 ++++- neutron/db/db_base_plugin_v2.py | 17 ++++++++++ neutron/plugins/ml2/plugin.py | 2 ++ .../plugins/opencontrail/contrail_plugin.py | 2 ++ .../tests/unit/db/test_db_base_plugin_v2.py | 31 +++++++++++++++++++ 6 files changed, 61 insertions(+), 1 deletion(-) diff --git a/neutron/callbacks/resources.py b/neutron/callbacks/resources.py index f7831b8efa5..d796faf4960 100644 --- a/neutron/callbacks/resources.py +++ b/neutron/callbacks/resources.py @@ -16,6 +16,7 @@ ROUTER_GATEWAY = 'router_gateway' ROUTER_INTERFACE = 'router_interface' SECURITY_GROUP = 'security_group' SECURITY_GROUP_RULE = 'security_group_rule' +SUBNET = 'subnet' VALID = ( PORT, @@ -24,4 +25,5 @@ VALID = ( ROUTER_INTERFACE, SECURITY_GROUP, SECURITY_GROUP_RULE, + SUBNET, ) diff --git a/neutron/common/exceptions.py b/neutron/common/exceptions.py index 227cf082150..aa164b9ac1d 100644 --- a/neutron/common/exceptions.py +++ b/neutron/common/exceptions.py @@ -119,7 +119,13 @@ class NetworkInUse(InUse): class SubnetInUse(InUse): message = _("Unable to complete operation on subnet %(subnet_id)s. 
" - "One or more ports have an IP allocation from this subnet.") + "%(reason)s") + + def __init__(self, **kwargs): + if 'reason' not in kwargs: + kwargs['reason'] = _("One or more ports have an IP allocation " + "from this subnet.") + super(SubnetInUse, self).__init__(**kwargs) class PortInUse(InUse): diff --git a/neutron/db/db_base_plugin_v2.py b/neutron/db/db_base_plugin_v2.py index 29edc707d9c..e28aa3fde65 100644 --- a/neutron/db/db_base_plugin_v2.py +++ b/neutron/db/db_base_plugin_v2.py @@ -25,6 +25,10 @@ from sqlalchemy import orm from sqlalchemy.orm import exc from neutron.api.v2 import attributes +from neutron.callbacks import events +from neutron.callbacks import exceptions +from neutron.callbacks import registry +from neutron.callbacks import resources from neutron.common import constants from neutron.common import exceptions as n_exc from neutron.common import ipv6_utils @@ -56,6 +60,15 @@ LOG = logging.getLogger(__name__) AUTO_DELETE_PORT_OWNERS = [constants.DEVICE_OWNER_DHCP] +def _check_subnet_not_used(context, subnet_id): + try: + kwargs = {'context': context, 'subnet_id': subnet_id} + registry.notify( + resources.SUBNET, events.BEFORE_DELETE, None, **kwargs) + except exceptions.CallbackFailure as e: + raise n_exc.SubnetInUse(subnet_id=subnet_id, reason=e) + + class NeutronDbPluginV2(neutron_plugin_base_v2.NeutronPluginBaseV2, common_db_mixin.CommonDbMixin): """V2 Neutron plugin interface implementation using SQLAlchemy models. @@ -1572,6 +1585,10 @@ class NeutronDbPluginV2(neutron_plugin_base_v2.NeutronPluginBaseV2, def delete_subnet(self, context, id): with context.session.begin(subtransactions=True): subnet = self._get_subnet(context, id) + + # Make sure the subnet isn't used by other resources + _check_subnet_not_used(context, id) + # Delete all network owned ports qry_network_ports = ( context.session.query(models_v2.IPAllocation). 
diff --git a/neutron/plugins/ml2/plugin.py b/neutron/plugins/ml2/plugin.py index 2f209db7723..bf184f1ddc4 100644 --- a/neutron/plugins/ml2/plugin.py +++ b/neutron/plugins/ml2/plugin.py @@ -900,6 +900,8 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2, raise os_db_exception.RetryRequest( exc.SubnetInUse(subnet_id=id)) + db_base_plugin_v2._check_subnet_not_used(context, id) + # If allocated is None, then all the IPAllocation were # correctly deleted during the previous pass. if not allocated: diff --git a/neutron/plugins/opencontrail/contrail_plugin.py b/neutron/plugins/opencontrail/contrail_plugin.py index 50baba60867..caf97a233ba 100644 --- a/neutron/plugins/opencontrail/contrail_plugin.py +++ b/neutron/plugins/opencontrail/contrail_plugin.py @@ -20,6 +20,7 @@ import requests from neutron.api.v2 import attributes as attr from neutron.common import exceptions as exc +from neutron.db import db_base_plugin_v2 from neutron.db import portbindings_base from neutron.extensions import external_net from neutron.extensions import portbindings @@ -345,6 +346,7 @@ class NeutronPluginContrailCoreV2(neutron_plugin_base_v2.NeutronPluginBaseV2, belonging to the specified tenant. 
""" + db_base_plugin_v2._check_subnet_not_used(context, subnet_id) self._delete_resource('subnet', context, subnet_id) def get_subnets(self, context, filters=None, fields=None): diff --git a/neutron/tests/unit/db/test_db_base_plugin_v2.py b/neutron/tests/unit/db/test_db_base_plugin_v2.py index 41624788472..1b3d7e2f57d 100644 --- a/neutron/tests/unit/db/test_db_base_plugin_v2.py +++ b/neutron/tests/unit/db/test_db_base_plugin_v2.py @@ -31,6 +31,8 @@ from neutron.api import api_common from neutron.api import extensions from neutron.api.v2 import attributes from neutron.api.v2 import router +from neutron.callbacks import exceptions +from neutron.callbacks import registry from neutron.common import constants from neutron.common import exceptions as n_exc from neutron.common import ipv6_utils @@ -4609,6 +4611,35 @@ class TestSubnetsV2(NeutronDbPluginV2TestCase): res = req.get_response(self.api) self.assertEqual(res.status_int, webob.exc.HTTPNoContent.code) + def test_delete_subnet_with_callback(self): + with contextlib.nested( + self.subnet(), + mock.patch.object(registry, 'notify')) as (subnet, notify): + + errors = [ + exceptions.NotificationError( + 'fake_id', n_exc.NeutronException()), + ] + notify.side_effect = [ + exceptions.CallbackFailure(errors=errors), None + ] + + # Make sure the delete request fails + delete_request = self.new_delete_request('subnets', + subnet['subnet']['id']) + delete_response = delete_request.get_response(self.api) + + self.assertTrue('NeutronError' in delete_response.json) + self.assertEqual('SubnetInUse', + delete_response.json['NeutronError']['type']) + + # Make sure the subnet wasn't deleted + list_request = self.new_list_request( + 'subnets', params="id=%s" % subnet['subnet']['id']) + list_response = list_request.get_response(self.api) + self.assertEqual(subnet['subnet']['id'], + list_response.json['subnets'][0]['id']) + def _helper_test_validate_subnet(self, option, exception): cfg.CONF.set_override(option, 0) with self.network() as 
network: From 3682e3391f188845d0c7f382f0ccd4b38db3904e Mon Sep 17 00:00:00 2001 From: Cedric Brandily Date: Mon, 4 May 2015 23:36:19 +0200 Subject: [PATCH 045/292] Ensure non-overlapping cidrs in subnetpools without galera _get_allocated_cidrs[1] locks only allocated subnets in a subnetpool (with mysql/postgresql at least). It ensures we don't allocate a cidr overlapping with existent cidrs but nothing disallows a concurrent subnet allocation to create a subnet in the same subnetpool. This change replaces the lock on subnetpool subnets by a lock on the subnetpool itself. It disallows to allocate concurrently 2 subnets in the same subnetpool and ensure non-overlapping cidrs in the same subnetpool. Moreover this change solves a trouble with postgresql which disallows to lock an empty select with an outer join: it happens on first subnet allocation in a subnetpool when no specific cidr is provided. Moving the lock ensures the lock is done on a non-empty select. But this change does not ensure non-overlapping cidrs in subnetpools with galera because galera doesn't support SELECT FOR UPDATE locks. A follow-up change will (try to?) remove locks from subnet allocation[1] in order to ensure non-overlapping cidrs in subnetpools also with galera. [1] in neutron.ipam.subnet_alloc.SubnetAllocator Closes-Bug: #1451558 Partial-Bug: #1451576 Change-Id: I73854f9863f44621ae0d89c5dc4893ccc16d07e4 --- neutron/ipam/subnet_alloc.py | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git a/neutron/ipam/subnet_alloc.py b/neutron/ipam/subnet_alloc.py index 49b6eda2ab5..fdcabcca861 100644 --- a/neutron/ipam/subnet_alloc.py +++ b/neutron/ipam/subnet_alloc.py @@ -38,9 +38,20 @@ class SubnetAllocator(driver.Pool): super(SubnetAllocator, self).__init__(subnetpool, context) self._sp_helper = SubnetPoolHelper() + def _lock_subnetpool(self): + """Lock subnetpool associated row. 
+ + This method disallows to allocate concurrently 2 subnets in the same + subnetpool, it's required to ensure non-overlapping cidrs in the same + subnetpool. + """ + # FIXME(cbrandily): not working with Galera + (self._context.session.query(models_v2.SubnetPool.id). + filter_by(id=self._subnetpool['id']). + with_lockmode('update').first()) + def _get_allocated_cidrs(self): - query = self._context.session.query( - models_v2.Subnet).with_lockmode('update') + query = self._context.session.query(models_v2.Subnet) subnets = query.filter_by(subnetpool_id=self._subnetpool['id']) return (x.cidr for x in subnets) @@ -62,8 +73,7 @@ class SubnetAllocator(driver.Pool): subnetpool_id = self._subnetpool['id'] tenant_id = self._subnetpool['tenant_id'] with self._context.session.begin(subtransactions=True): - qry = self._context.session.query( - models_v2.Subnet).with_lockmode('update') + qry = self._context.session.query(models_v2.Subnet) allocations = qry.filter_by(subnetpool_id=subnetpool_id, tenant_id=tenant_id) value = 0 @@ -88,6 +98,7 @@ class SubnetAllocator(driver.Pool): def _allocate_any_subnet(self, request): with self._context.session.begin(subtransactions=True): + self._lock_subnetpool() self._check_subnetpool_tenant_quota(request.tenant_id, request.prefixlen) prefix_pool = self._get_available_prefix_list() @@ -111,6 +122,7 @@ class SubnetAllocator(driver.Pool): def _allocate_specific_subnet(self, request): with self._context.session.begin(subtransactions=True): + self._lock_subnetpool() self._check_subnetpool_tenant_quota(request.tenant_id, request.prefixlen) cidr = request.subnet_cidr From c79d68a7ed347539026acf4c2347740adc617580 Mon Sep 17 00:00:00 2001 From: Sergey Kolekonov Date: Mon, 25 May 2015 18:11:35 +0300 Subject: [PATCH 046/292] Fix a typo in _schedule_network method Fix the small typo which prevented from writing correct DHCP agent id to logs Change-Id: Id8a872815f7cb2ba68ff1c674f17777da7858562 --- neutron/db/agentschedulers_db.py | 2 +- 1 file changed, 1 
insertion(+), 1 deletion(-) diff --git a/neutron/db/agentschedulers_db.py b/neutron/db/agentschedulers_db.py index 6f40ee37511..bac33a78f6b 100644 --- a/neutron/db/agentschedulers_db.py +++ b/neutron/db/agentschedulers_db.py @@ -213,7 +213,7 @@ class DhcpAgentSchedulerDbMixin(dhcpagentscheduler return for agent in agents: LOG.info(_LI("Adding network %(net)s to agent " - "%(agent)%s on host %(host)s"), + "%(agent)s on host %(host)s"), {'net': network_id, 'agent': agent.id, 'host': agent.host}) From 9fc7f56565925a53f2212706431af479752bb8d9 Mon Sep 17 00:00:00 2001 From: Henry Gessau Date: Mon, 25 May 2015 18:00:58 -0400 Subject: [PATCH 047/292] Skip external tables for neutron-db-manage --autogenerate DB tables that do not have models in the neutron tree cause neutron-db-manage --autogenerate to create commands to drop the tables. This fix hooks into alembic's environment with a include_object callback that ignores external tables. We already had a list of external tables for use by the migration tests, so re-use them for --autogenerate. 
Partial-bug: #1458682 Change-Id: I2c0bc73f72840c401c578e87d8178a79f05aad82 --- .../db/migration/alembic_migrations/env.py | 12 ++++++++- .../migration/alembic_migrations/external.py | 27 +++++++++++++++++++ .../tests/functional/db/test_migrations.py | 23 ++-------------- 3 files changed, 40 insertions(+), 22 deletions(-) create mode 100644 neutron/db/migration/alembic_migrations/external.py diff --git a/neutron/db/migration/alembic_migrations/env.py b/neutron/db/migration/alembic_migrations/env.py index 9966f55e797..f90bf43c307 100644 --- a/neutron/db/migration/alembic_migrations/env.py +++ b/neutron/db/migration/alembic_migrations/env.py @@ -20,6 +20,7 @@ from oslo_db.sqlalchemy import session import sqlalchemy as sa from sqlalchemy import event +from neutron.db.migration.alembic_migrations import external from neutron.db.migration.models import head # noqa from neutron.db import model_base @@ -50,6 +51,13 @@ def set_mysql_engine(): model_base.BASEV2.__table_args__['mysql_engine']) +def include_object(object, name, type_, reflected, compare_to): + if type_ == 'table' and name in external.TABLES: + return False + else: + return True + + def run_migrations_offline(): """Run migrations in 'offline' mode. 
@@ -67,6 +75,7 @@ def run_migrations_offline(): kwargs['url'] = neutron_config.database.connection else: kwargs['dialect_name'] = neutron_config.database.engine + kwargs['include_object'] = include_object context.configure(**kwargs) with context.begin_transaction(): @@ -92,7 +101,8 @@ def run_migrations_online(): connection = engine.connect() context.configure( connection=connection, - target_metadata=target_metadata + target_metadata=target_metadata, + include_object=include_object ) try: diff --git a/neutron/db/migration/alembic_migrations/external.py b/neutron/db/migration/alembic_migrations/external.py new file mode 100644 index 00000000000..412992db5cc --- /dev/null +++ b/neutron/db/migration/alembic_migrations/external.py @@ -0,0 +1,27 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +# These tables are in the neutron database, but their models have moved +# to separate repositories. We skip the migration checks for these tables. 
+ +VPNAAS_TABLES = ['vpnservices', 'ipsecpolicies', 'ipsecpeercidrs', + 'ipsec_site_connections', 'cisco_csr_identifier_map', + 'ikepolicies'] + +LBAAS_TABLES = ['vips', 'sessionpersistences', 'pools', 'healthmonitors', + 'poolstatisticss', 'members', 'poolloadbalanceragentbindings', + 'embrane_pool_port', 'poolmonitorassociations'] + +FWAAS_TABLES = ['firewall_rules', 'firewalls', 'firewall_policies'] + +TABLES = (FWAAS_TABLES + LBAAS_TABLES + VPNAAS_TABLES) diff --git a/neutron/tests/functional/db/test_migrations.py b/neutron/tests/functional/db/test_migrations.py index 98e178a158b..a05bbb215e8 100644 --- a/neutron/tests/functional/db/test_migrations.py +++ b/neutron/tests/functional/db/test_migrations.py @@ -27,6 +27,7 @@ from oslo_db.sqlalchemy import test_base from oslo_db.sqlalchemy import test_migrations import sqlalchemy +from neutron.db.migration.alembic_migrations import external from neutron.db.migration import cli as migration from neutron.db.migration.models import head as head_models @@ -37,26 +38,6 @@ cfg.CONF.import_opt('core_plugin', 'neutron.common.config') CORE_PLUGIN = 'neutron.plugins.ml2.plugin.Ml2Plugin' -# These tables are still in the neutron database, but their models have moved -# to the separate advanced services repositories. We skip the migration checks -# for these tables for now. The checks will be re-instated soon in the tests -# for each separate repository. -# TODO(akamyshnikova): delete these lists when the tables are removed from -# neutron database. 
-EXTERNAL_VPNAAS_TABLES = ['vpnservices', 'ipsecpolicies', 'ipsecpeercidrs', - 'ipsec_site_connections', 'cisco_csr_identifier_map', - 'ikepolicies'] - -EXTERNAL_LBAAS_TABLES = ['vips', 'sessionpersistences', 'pools', - 'healthmonitors', 'poolstatisticss', 'members', - 'poolloadbalanceragentbindings', 'embrane_pool_port', - 'poolmonitorassociations'] - -EXTERNAL_FWAAS_TABLES = ['firewall_rules', 'firewalls', 'firewall_policies'] - -EXTERNAL_TABLES = (EXTERNAL_FWAAS_TABLES + EXTERNAL_LBAAS_TABLES + - EXTERNAL_VPNAAS_TABLES) - class _TestModelsMigrations(test_migrations.ModelsMigrationsSync): '''Test for checking of equality models state and migrations. @@ -150,7 +131,7 @@ class _TestModelsMigrations(test_migrations.ModelsMigrationsSync): def include_object(self, object_, name, type_, reflected, compare_to): if type_ == 'table' and (name == 'alembic_version' - or name in EXTERNAL_TABLES): + or name in external.TABLES): return False return super(_TestModelsMigrations, self).include_object( From 54e91b01cf7b311f48a40c14c7f9c1d8c0926ab4 Mon Sep 17 00:00:00 2001 From: Angus Lees Date: Fri, 15 May 2015 17:50:33 +1000 Subject: [PATCH 048/292] test_fork_error: Fix incorrect test mock The previous os.fork mock returned a *function* that returned an OSError object. Oops. 
Change-Id: I6e5dff930cf5614f7061efdfd8429963703cc214 --- neutron/tests/unit/agent/linux/test_daemon.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/neutron/tests/unit/agent/linux/test_daemon.py b/neutron/tests/unit/agent/linux/test_daemon.py index b4bb355e1a9..4c01b6dbf88 100644 --- a/neutron/tests/unit/agent/linux/test_daemon.py +++ b/neutron/tests/unit/agent/linux/test_daemon.py @@ -235,7 +235,7 @@ class TestDaemon(base.BaseTestCase): self.assertIsNone(d._fork()) def test_fork_error(self): - self.os.fork.side_effect = lambda: OSError(1) + self.os.fork.side_effect = OSError(1) with mock.patch.object(daemon.sys, 'stderr'): with testtools.ExpectedException(SystemExit): d = daemon.Daemon('pidfile', 'stdin') From f0decf6a4061be18999c87eab6ae152d9f75f99f Mon Sep 17 00:00:00 2001 From: Angus Lees Date: Fri, 15 May 2015 17:54:18 +1000 Subject: [PATCH 049/292] Use os._exit after forking As the docs point out(*), _exit should be used after a fork() to avoid both processes flushing filehandles, calling destructors with side effects, etc. This change does just that. 
(*) https://docs.python.org/2/library/os.html#os._exit Change-Id: I68da6283c44ab8857baf217ac1443bd17988257d --- neutron/agent/linux/daemon.py | 2 +- neutron/tests/unit/agent/linux/test_daemon.py | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/neutron/agent/linux/daemon.py b/neutron/agent/linux/daemon.py index f180f543228..b4c7853b54a 100644 --- a/neutron/agent/linux/daemon.py +++ b/neutron/agent/linux/daemon.py @@ -173,7 +173,7 @@ class Daemon(object): try: pid = os.fork() if pid > 0: - sys.exit(0) + os._exit(0) except OSError: LOG.exception(_LE('Fork failed')) sys.exit(1) diff --git a/neutron/tests/unit/agent/linux/test_daemon.py b/neutron/tests/unit/agent/linux/test_daemon.py index 4c01b6dbf88..1ff0cc12268 100644 --- a/neutron/tests/unit/agent/linux/test_daemon.py +++ b/neutron/tests/unit/agent/linux/test_daemon.py @@ -225,9 +225,9 @@ class TestDaemon(base.BaseTestCase): def test_fork_parent(self): self.os.fork.return_value = 1 - with testtools.ExpectedException(SystemExit): - d = daemon.Daemon('pidfile') - d._fork() + d = daemon.Daemon('pidfile') + d._fork() + self.os._exit.assert_called_once_with(mock.ANY) def test_fork_child(self): self.os.fork.return_value = 0 From 8ee3020c47cd7eb716216852a90c3db595e2c3c6 Mon Sep 17 00:00:00 2001 From: ankitagrawal Date: Thu, 14 May 2015 05:19:44 -0700 Subject: [PATCH 050/292] Remove use of contextlib.nested (api-tests) Removed use of contextlib.nested call from codebase, as it has been deprecated since Python 2.7. There are also known issues with contextlib.nested that were addressed by the native support for multiple "with" variables. For instance, if the first object is created but the second one throws an exception, the first object's __exit__ is never called. For more information see https://docs.python.org/2/library/contextlib.html#contextlib.nested contextlib.nested is also not compatible with Python 3. Multi-patch set for easier chunks. 
This one addresses the tests from neutron/tests/unit/api directory. Line continuation markers (e.g. '\') had to be used or syntax errors were thrown. While using parentheses is the preferred way for multiple line statements, in the case of long with statements backslashes are acceptable. Partial-Bug: 1428424 Change-Id: I09673f9d4c7f07d3043804676fef018905dd1239 --- neutron/hacking/checks.py | 1 - .../unit/api/rpc/handlers/test_securitygroups_rpc.py | 9 ++------- 2 files changed, 2 insertions(+), 8 deletions(-) diff --git a/neutron/hacking/checks.py b/neutron/hacking/checks.py index e013b7deb36..c2eabbd7771 100644 --- a/neutron/hacking/checks.py +++ b/neutron/hacking/checks.py @@ -146,7 +146,6 @@ def check_no_contextlib_nested(logical_line, filename): # these issues. It should be removed completely # when bug 1428424 is closed. ignore_dirs = [ - "neutron/tests/unit/api", "neutron/tests/unit/db", "neutron/tests/unit/extensions", "neutron/tests/unit/plugins", diff --git a/neutron/tests/unit/api/rpc/handlers/test_securitygroups_rpc.py b/neutron/tests/unit/api/rpc/handlers/test_securitygroups_rpc.py index 7c8b79f67d1..6728062091e 100644 --- a/neutron/tests/unit/api/rpc/handlers/test_securitygroups_rpc.py +++ b/neutron/tests/unit/api/rpc/handlers/test_securitygroups_rpc.py @@ -12,7 +12,6 @@ # License for the specific language governing permissions and limitations # under the License. 
-import contextlib import mock from neutron.api.rpc.handlers import securitygroups_rpc @@ -24,12 +23,8 @@ class SecurityGroupServerRpcApiTestCase(base.BaseTestCase): def test_security_group_rules_for_devices(self): rpcapi = securitygroups_rpc.SecurityGroupServerRpcApi('fake_topic') - with contextlib.nested( - mock.patch.object(rpcapi.client, 'call'), - mock.patch.object(rpcapi.client, 'prepare'), - ) as ( - rpc_mock, prepare_mock - ): + with mock.patch.object(rpcapi.client, 'call') as rpc_mock,\ + mock.patch.object(rpcapi.client, 'prepare') as prepare_mock: prepare_mock.return_value = rpcapi.client rpcapi.security_group_rules_for_devices('context', ['fake_device']) From a1d679678daa560c86bb84303aee6163296ec653 Mon Sep 17 00:00:00 2001 From: Cedric Brandily Date: Tue, 26 May 2015 13:14:11 +0000 Subject: [PATCH 051/292] Force order of dhcp.needs_resync_reasons dictionary elements This fixes the test_periodoc_resync_helper unit test that breaks with a randomized PYTHONHASHSEED (see the bug report). The test assumed that the dhcp.needs_resync_reasons dictionary from neutron.agent.dhcp.agent had elements in a particular order. Found with PYTHONHASHSEED=2. The fix refactors the test case to force a sorted dhcp.needs_resync_reasons dictionary. Partial-bug: #1348818 Note: There are several other unrelated unit tests that also break with a randomized PYTHONHASHSEED, but they are not addressed here. They will be addressed in separate patches. Change-Id: Ia7fc2c3e605d92d8497d44e28054bdda613cebf2 --- neutron/tests/unit/agent/dhcp/test_agent.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/neutron/tests/unit/agent/dhcp/test_agent.py b/neutron/tests/unit/agent/dhcp/test_agent.py index afc7cf7d258..ebbcca29780 100644 --- a/neutron/tests/unit/agent/dhcp/test_agent.py +++ b/neutron/tests/unit/agent/dhcp/test_agent.py @@ -13,6 +13,7 @@ # License for the specific language governing permissions and limitations # under the License. 
+import collections import copy import sys import uuid @@ -404,7 +405,8 @@ class TestDhcpAgent(base.BaseTestCase): def test_periodoc_resync_helper(self): with mock.patch.object(dhcp_agent.eventlet, 'sleep') as sleep: dhcp = dhcp_agent.DhcpAgent(HOSTNAME) - dhcp.needs_resync_reasons = {'a': 'reason1', 'b': 'reason2'} + dhcp.needs_resync_reasons = collections.OrderedDict( + (('a', 'reason1'), ('b', 'reason2'))) with mock.patch.object(dhcp, 'sync_state') as sync_state: sync_state.side_effect = RuntimeError with testtools.ExpectedException(RuntimeError): From eac9fb143cf3e6f4dd2b3413f81dab92d42354ca Mon Sep 17 00:00:00 2001 From: Jack McCann Date: Wed, 15 Apr 2015 22:32:51 +0000 Subject: [PATCH 052/292] Remove hack for sending gratuitous arp from fip ns I just saw this note toward bottom of the lartc [1] page: "On Linux 2.4, you may need to execute 'echo 1 > /proc/sys/net/ipv4/ip_nonlocal_bind' before being able to send out unsolicited ARP messages!" I wonder if we set that in fip ns, if it will let us send grat arp without adding/removing the IP. It couldn't be that easy, could it? 
[1] http://lartc.org/howto/lartc.bridging.proxy-arp.html Change-Id: Ie55b402a6c46af00c8d4875264489fc4318544b3 --- neutron/agent/l3/dvr_fip_ns.py | 4 +++ neutron/agent/l3/dvr_router.py | 8 ++--- neutron/agent/linux/ip_lib.py | 24 -------------- neutron/tests/unit/agent/l3/test_agent.py | 4 --- .../tests/unit/agent/l3/test_dvr_router.py | 2 +- neutron/tests/unit/agent/linux/test_ip_lib.py | 32 ------------------- 6 files changed, 9 insertions(+), 65 deletions(-) diff --git a/neutron/agent/l3/dvr_fip_ns.py b/neutron/agent/l3/dvr_fip_ns.py index 050cb6331ae..e2e63eb2700 100644 --- a/neutron/agent/l3/dvr_fip_ns.py +++ b/neutron/agent/l3/dvr_fip_ns.py @@ -126,6 +126,10 @@ class FipNamespace(namespaces.Namespace): def create(self): # TODO(Carl) Get this functionality from mlavelle's namespace baseclass ip_wrapper_root = ip_lib.IPWrapper() + ip_wrapper_root.netns.execute(['sysctl', + '-w', + 'net.ipv4.ip_nonlocal_bind=1'], + run_as_root=True) ip_wrapper = ip_wrapper_root.ensure_namespace(self.get_name()) ip_wrapper.netns.execute(['sysctl', '-w', 'net.ipv4.ip_forward=1']) if self.use_ipv6: diff --git a/neutron/agent/l3/dvr_router.py b/neutron/agent/l3/dvr_router.py index dfd9cf2c543..8c1313acc9f 100755 --- a/neutron/agent/l3/dvr_router.py +++ b/neutron/agent/l3/dvr_router.py @@ -100,10 +100,10 @@ class DvrRouter(router.RouterInfo): interface_name = ( self.fip_ns.get_ext_device_name( self.fip_ns.agent_gateway_port['id'])) - ip_lib.send_garp_for_proxyarp(fip_ns_name, - interface_name, - floating_ip, - self.agent_conf.send_arp_for_ha) + ip_lib.send_gratuitous_arp(fip_ns_name, + interface_name, + floating_ip, + self.agent_conf.send_arp_for_ha) # update internal structures self.dist_fip_count = self.dist_fip_count + 1 diff --git a/neutron/agent/linux/ip_lib.py b/neutron/agent/linux/ip_lib.py index a22b46b4f5a..407ccf7165a 100644 --- a/neutron/agent/linux/ip_lib.py +++ b/neutron/agent/linux/ip_lib.py @@ -732,30 +732,6 @@ def send_gratuitous_arp(ns_name, iface_name, address, 
count): eventlet.spawn_n(arping) -def send_garp_for_proxyarp(ns_name, iface_name, address, count): - """ - Send a gratuitous arp using given namespace, interface, and address - - This version should be used when proxy arp is in use since the interface - won't actually have the address configured. We actually need to configure - the address on the interface and then remove it when the proxy arp has been - sent. - """ - def arping_with_temporary_address(): - # Configure the address on the interface - device = IPDevice(iface_name, namespace=ns_name) - net = netaddr.IPNetwork(str(address)) - device.addr.add(str(net)) - - _arping(ns_name, iface_name, address, count) - - # Delete the address from the interface - device.addr.delete(str(net)) - - if count > 0: - eventlet.spawn_n(arping_with_temporary_address) - - def add_namespace_to_cmd(cmd, namespace=None): """Add an optional namespace to the command.""" diff --git a/neutron/tests/unit/agent/l3/test_agent.py b/neutron/tests/unit/agent/l3/test_agent.py index e628cd465c7..47dbd109e32 100644 --- a/neutron/tests/unit/agent/l3/test_agent.py +++ b/neutron/tests/unit/agent/l3/test_agent.py @@ -324,10 +324,6 @@ class BasicRouterOperationsFramework(base.BaseTestCase): 'neutron.agent.linux.ip_lib.send_gratuitous_arp') self.send_arp = self.send_arp_p.start() - self.send_arp_proxyarp_p = mock.patch( - 'neutron.agent.linux.ip_lib.send_garp_for_proxyarp') - self.send_arp_proxyarp = self.send_arp_proxyarp_p.start() - self.dvr_cls_p = mock.patch('neutron.agent.linux.interface.NullDriver') driver_cls = self.dvr_cls_p.start() self.mock_driver = mock.MagicMock() diff --git a/neutron/tests/unit/agent/l3/test_dvr_router.py b/neutron/tests/unit/agent/l3/test_dvr_router.py index ecd092e47db..fbbf08c43a6 100644 --- a/neutron/tests/unit/agent/l3/test_dvr_router.py +++ b/neutron/tests/unit/agent/l3/test_dvr_router.py @@ -56,7 +56,7 @@ class TestDvrRouterOperations(base.BaseTestCase): self.assertEqual([{'host': mock.sentinel.myhost}], fips) - 
@mock.patch.object(ip_lib, 'send_garp_for_proxyarp') + @mock.patch.object(ip_lib, 'send_gratuitous_arp') @mock.patch.object(ip_lib, 'IPDevice') @mock.patch.object(ip_lib, 'IPRule') def test_floating_ip_added_dist(self, mIPRule, mIPDevice, mock_arp): diff --git a/neutron/tests/unit/agent/linux/test_ip_lib.py b/neutron/tests/unit/agent/linux/test_ip_lib.py index 7b34d353742..01ddf39997b 100644 --- a/neutron/tests/unit/agent/linux/test_ip_lib.py +++ b/neutron/tests/unit/agent/linux/test_ip_lib.py @@ -1041,38 +1041,6 @@ class TestArpPing(TestIPCmdBase): self._test_arping( ip_lib.send_gratuitous_arp, '20.0.0.1', spawn_n, mIPWrapper) - @mock.patch.object(ip_lib, 'IPDevice') - @mock.patch.object(ip_lib, 'IPWrapper') - @mock.patch('eventlet.spawn_n') - def test_send_garp_for_proxy_arp(self, spawn_n, mIPWrapper, mIPDevice): - addr = '20.0.0.1' - ip_wrapper = mIPWrapper(namespace=mock.sentinel.ns_name) - mIPWrapper.reset_mock() - device = mIPDevice(mock.sentinel.iface_name, - namespace=mock.sentinel.ns_name) - mIPDevice.reset_mock() - - # Check that the address was added to the interface before arping - def check_added_address(*args, **kwargs): - mIPDevice.assert_called_once_with(mock.sentinel.iface_name, - namespace=mock.sentinel.ns_name) - device.addr.add.assert_called_once_with(addr + '/32') - self.assertFalse(device.addr.delete.called) - device.addr.reset_mock() - - ip_wrapper.netns.execute.side_effect = check_added_address - - self._test_arping( - ip_lib.send_garp_for_proxyarp, addr, spawn_n, mIPWrapper) - - # Test that the address was removed after arping - device = mIPDevice(mock.sentinel.iface_name, - namespace=mock.sentinel.ns_name) - device.addr.delete.assert_called_once_with(addr + '/32') - - # If this was called then check_added_address probably had a assert - self.assertFalse(device.addr.add.called) - class TestAddNamespaceToCmd(base.BaseTestCase): def test_add_namespace_to_cmd_with_namespace(self): From 7ea278087c32235efa793c8c815aff3861410f13 Mon Sep 17 
00:00:00 2001 From: Gal Sagie Date: Tue, 26 May 2015 19:16:34 +0300 Subject: [PATCH 053/292] OVS_LIB support API for setting fail mode 'standalone' The current API only support setting a bridge fail mode to secure, this patch allow the user to set it to 'standalone' as well Change-Id: If7e6532dc7f8527c35834a37144ea4386fe1b861 Closes-Bug: #1458924 --- neutron/agent/common/ovs_lib.py | 5 +++++ neutron/tests/functional/agent/test_ovs_lib.py | 6 +++++- neutron/tests/unit/agent/common/test_ovs_lib.py | 4 ++++ 3 files changed, 14 insertions(+), 1 deletion(-) diff --git a/neutron/agent/common/ovs_lib.py b/neutron/agent/common/ovs_lib.py index 1b122ac099f..933ab67796c 100644 --- a/neutron/agent/common/ovs_lib.py +++ b/neutron/agent/common/ovs_lib.py @@ -40,6 +40,7 @@ UNASSIGNED_OFPORT = [] # OVS bridge fail modes FAILMODE_SECURE = 'secure' +FAILMODE_STANDALONE = 'standalone' OPTS = [ cfg.IntOpt('ovs_vsctl_timeout', @@ -160,6 +161,10 @@ class OVSBridge(BaseOVS): self.ovsdb.set_fail_mode(self.br_name, FAILMODE_SECURE).execute( check_error=True) + def set_standalone_mode(self): + self.ovsdb.set_fail_mode(self.br_name, FAILMODE_STANDALONE).execute( + check_error=True) + def set_protocols(self, protocols): self.set_db_attribute('Bridge', self.br_name, 'protocols', protocols, check_error=True) diff --git a/neutron/tests/functional/agent/test_ovs_lib.py b/neutron/tests/functional/agent/test_ovs_lib.py index 69589a56486..f430481899b 100644 --- a/neutron/tests/functional/agent/test_ovs_lib.py +++ b/neutron/tests/functional/agent/test_ovs_lib.py @@ -118,10 +118,14 @@ class OVSBridgeTestCase(OVSBridgeTestBase): self.br.db_get_val('Controller', self.br.br_name, 'connection_mode')) - def test_set_fail_mode(self): + def test_set_fail_mode_secure(self): self.br.set_secure_mode() self._assert_br_fail_mode(ovs_lib.FAILMODE_SECURE) + def test_set_fail_mode_standalone(self): + self.br.set_standalone_mode() + self._assert_br_fail_mode(ovs_lib.FAILMODE_STANDALONE) + def 
_assert_br_fail_mode(self, fail_mode): self.assertEqual( self.br.db_get_val('Bridge', self.br.br_name, 'fail_mode'), diff --git a/neutron/tests/unit/agent/common/test_ovs_lib.py b/neutron/tests/unit/agent/common/test_ovs_lib.py index 28633e8e478..d1bb0c88210 100644 --- a/neutron/tests/unit/agent/common/test_ovs_lib.py +++ b/neutron/tests/unit/agent/common/test_ovs_lib.py @@ -130,6 +130,10 @@ class OVS_Lib_Test(base.BaseTestCase): self.br.set_secure_mode() self._verify_vsctl_mock('set-fail-mode', self.BR_NAME, 'secure') + def test_set_standalone_mode(self): + self.br.set_standalone_mode() + self._verify_vsctl_mock('set-fail-mode', self.BR_NAME, 'standalone') + def test_set_protocols(self): protocols = 'OpenFlow13' self.br.set_protocols(protocols) From 874fa6e02c2f91d711e8186d4e7e7e26c6dc74b9 Mon Sep 17 00:00:00 2001 From: YAMAMOTO Takashi Date: Wed, 27 May 2015 05:10:33 +0900 Subject: [PATCH 054/292] OVS-agent: Remove optional flags from br_tun.deferred() method Currently they have no users. They are not appropriate for this higher-level interface anyway. 
Change-Id: I76b8d718e714728df88685ec41262958d2e78cb9 --- .../plugins/openvswitch/agent/openflow/ovs_ofctl/br_tun.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/neutron/plugins/openvswitch/agent/openflow/ovs_ofctl/br_tun.py b/neutron/plugins/openvswitch/agent/openflow/ovs_ofctl/br_tun.py index 4407e2fe01b..58301dfefe5 100644 --- a/neutron/plugins/openvswitch/agent/openflow/ovs_ofctl/br_tun.py +++ b/neutron/plugins/openvswitch/agent/openflow/ovs_ofctl/br_tun.py @@ -225,8 +225,8 @@ class OVSTunnelBridge(ovs_bridge.OVSAgentBridge, self.delete_flows(table_id=constants.DVR_NOT_LEARN, eth_src=mac) - def deferred(self, **kwargs): - return DeferredOVSTunnelBridge(self, **kwargs) + def deferred(self): + return DeferredOVSTunnelBridge(self) class DeferredOVSTunnelBridge(ovs_lib.DeferredOVSBridge): From 539738a8eeebcc9f22987fcb0a241c6b8c3d561a Mon Sep 17 00:00:00 2001 From: Cedric Brandily Date: Wed, 27 May 2015 00:23:09 +0200 Subject: [PATCH 055/292] Do not assume order of get_sync_data_metering response elements This fixes the test_add_metering_label_rpc_call[1] unit tests that breaks with a randomized PYTHONHASHSEED (see the bug report). The test assumed that the get_sync_data_metering[2] had response elements in a particular order. Found with PYTHONHASHSEED=1. The fix refactors the test_add_metering_label_rpc_call test case to handle an unsorted get_sync_data_metering response. The fix defines the class UnorderedList[3] which is equal to any permutation of itself. Partial-bug: #1348818 Note: There are several other unrelated unit tests that also break with a randomized PYTHONHASHSEED, but they are not addressed here. They will be addressed in separate patches. 
[1] neutron.tests.unit.services.metering.test_metering_plugin: TestMeteringPluginL3AgentScheduler TestMeteringPluginL3AgentSchedulerServicePlugin [2] neutron.services.metering.metering_plugin.MeteringPlugin [3] neutron.tests.tools Change-Id: I5d42b827bc72dcacd38eaa2377ce16c47a9e7dbb --- neutron/tests/tools.py | 12 ++++++++++++ .../unit/services/metering/test_metering_plugin.py | 4 +++- 2 files changed, 15 insertions(+), 1 deletion(-) diff --git a/neutron/tests/tools.py b/neutron/tests/tools.py index 449dd574ae5..c9b80b70ee9 100644 --- a/neutron/tests/tools.py +++ b/neutron/tests/tools.py @@ -89,3 +89,15 @@ def fail(msg=None): testcase instance (usefully for reducing coupling). """ raise unittest.TestCase.failureException(msg) + + +class UnorderedList(list): + """A list that is equals to any permutation of itself.""" + + def __eq__(self, other): + if not isinstance(other, list): + return False + return sorted(self) == sorted(other) + + def __neq__(self, other): + return not self == other diff --git a/neutron/tests/unit/services/metering/test_metering_plugin.py b/neutron/tests/unit/services/metering/test_metering_plugin.py index 408ea1921b5..87de8029768 100644 --- a/neutron/tests/unit/services/metering/test_metering_plugin.py +++ b/neutron/tests/unit/services/metering/test_metering_plugin.py @@ -25,6 +25,7 @@ from neutron import manager from neutron.openstack.common import uuidutils from neutron.plugins.common import constants from neutron.tests.common import helpers +from neutron.tests import tools from neutron.tests.unit.db.metering import test_metering_db from neutron.tests.unit.db import test_db_base_plugin_v2 from neutron.tests.unit.extensions import test_l3 @@ -371,7 +372,8 @@ class TestMeteringPluginL3AgentScheduler( set_context=True): with self.metering_label(tenant_id=self.tenant_id, set_context=True): - self.mock_add.assert_called_with(self.ctx, expected) + self.mock_add.assert_called_with( + self.ctx, tools.UnorderedList(expected)) class 
TestMeteringPluginL3AgentSchedulerServicePlugin( From e9d17b8bfff31072a3d017ce64e36ac99007a052 Mon Sep 17 00:00:00 2001 From: Cedric Brandily Date: Tue, 26 May 2015 19:54:22 +0200 Subject: [PATCH 056/292] Do not assume order of iptables_firewall method responses This fixes the iptables_firewall group unit tests[1] that breaks with a randomized PYTHONHASHSEED (see the bug report). The test assumed that the _get_remote_sg_ids and _determine_remote_sgs_to_remove methods from neutron.agent.linux.iptables_firewall returned elements in a particular order. Found with PYTHONHASHSEED=1. The fix refactors the test case to handle unsorted responses from _get_remote_sg_ids and _determine_remote_sgs_to_remove. [1] neutron.tests.unit.agent.linux.test_iptables_firewall: test_prepare_port_filter_with_new_members test_prepare_port_filter_with_sg_no_member test_remove_port_filter_with_destroy_ipset_chain Partial-bug: #1348818 Note: There are several other unrelated unit tests that also break with a randomized PYTHONHASHSEED, but they are not addressed here. They will be addressed in separate patches. 
Change-Id: I19e51452a2bde0721559df746047239f68614336 --- neutron/tests/unit/agent/linux/test_iptables_firewall.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/neutron/tests/unit/agent/linux/test_iptables_firewall.py b/neutron/tests/unit/agent/linux/test_iptables_firewall.py index 77d98e8b185..077cc5092d8 100644 --- a/neutron/tests/unit/agent/linux/test_iptables_firewall.py +++ b/neutron/tests/unit/agent/linux/test_iptables_firewall.py @@ -1465,7 +1465,7 @@ class IptablesFirewallEnhancedIpsetTestCase(BaseIptablesFirewallTestCase): mock.call.set_members('fake_sgid', 'IPv6', ['fe80::1']) ] - self.firewall.ipset.assert_has_calls(calls) + self.firewall.ipset.assert_has_calls(calls, any_order=True) def _setup_fake_firewall_members_and_rules(self, firewall): firewall.sg_rules = self._fake_sg_rules() @@ -1615,7 +1615,7 @@ class IptablesFirewallEnhancedIpsetTestCase(BaseIptablesFirewallTestCase): mock.call.destroy('fake_sgid', 'IPv4'), mock.call.destroy('fake_sgid', 'IPv6')] - self.firewall.ipset.assert_has_calls(calls) + self.firewall.ipset.assert_has_calls(calls, any_order=True) def test_prepare_port_filter_with_sg_no_member(self): self.firewall.sg_rules = self._fake_sg_rules() @@ -1633,7 +1633,7 @@ class IptablesFirewallEnhancedIpsetTestCase(BaseIptablesFirewallTestCase): ['10.0.0.1', '10.0.0.2']), mock.call.set_members('fake_sgid', 'IPv6', ['fe80::1'])] - self.firewall.ipset.assert_has_calls(calls) + self.firewall.ipset.assert_has_calls(calls, any_order=True) def test_filter_defer_apply_off_with_sg_only_ipv6_rule(self): self.firewall.sg_rules = self._fake_sg_rules() From ce957ac83a85a421f2e60cc39a3103291ba95e4a Mon Sep 17 00:00:00 2001 From: Cedric Brandily Date: Wed, 27 May 2015 14:54:35 +0200 Subject: [PATCH 057/292] Do not assume order of convert_kvp_list_to_dict method responses This fixes the test_attributes unit tests[1] that breaks with a randomized PYTHONHASHSEED (see the bug report). 
The test assumed that the convert_kvp_list_to_dict method from neutron.api.v2.attributes returned elements in a particular order. Found with PYTHONHASHSEED=1. The fix refactors the test case to handle unsorted responses from convert_kvp_list_to_dict. [1] neutron.tests.unit.api.v2.test_attributes.TestConvertKvp Partial-bug: #1348818 Note: There are several other unrelated unit tests that also break with a randomized PYTHONHASHSEED, but they are not addressed here. They will be addressed in separate patches. Change-Id: I864904db1428b88c482ad17a69b33e876a8d042c --- neutron/tests/unit/api/v2/test_attributes.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/neutron/tests/unit/api/v2/test_attributes.py b/neutron/tests/unit/api/v2/test_attributes.py index 7a79d900a26..28b8d6621b2 100644 --- a/neutron/tests/unit/api/v2/test_attributes.py +++ b/neutron/tests/unit/api/v2/test_attributes.py @@ -21,6 +21,7 @@ import mock from neutron.api.v2 import attributes from neutron.common import exceptions as n_exc from neutron.tests import base +from neutron.tests import tools class TestAttributes(base.BaseTestCase): @@ -807,7 +808,8 @@ class TestConvertKvp(base.BaseTestCase): def test_convert_kvp_list_to_dict_succeeds_for_multiple_values(self): result = attributes.convert_kvp_list_to_dict( ['a=b', 'a=c', 'a=c', 'b=a']) - self.assertEqual({'a': ['c', 'b'], 'b': ['a']}, result) + expected = {'a': tools.UnorderedList(['c', 'b']), 'b': ['a']} + self.assertEqual(expected, result) def test_convert_kvp_list_to_dict_succeeds_for_values(self): result = attributes.convert_kvp_list_to_dict(['a=b', 'c=d']) From 21f0fac3efd6aaa83494341aead1b6eaddb4d8fb Mon Sep 17 00:00:00 2001 From: Christian Berendt Date: Wed, 27 May 2015 15:08:30 +0200 Subject: [PATCH 058/292] Use the correct name for the "Repository Creator's Guide" Change-Id: Iaa32ab4bfa658a079da08792a7355ad64471f0eb --- doc/source/devref/contribute.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/doc/source/devref/contribute.rst b/doc/source/devref/contribute.rst index d316e6ba8a7..c966af3aee0 100644 --- a/doc/source/devref/contribute.rst +++ b/doc/source/devref/contribute.rst @@ -347,7 +347,7 @@ be the bare minimum you have to complete in order to get you off the ground. the previous step. In the latter case, you can do so by specifying the upstream section for your project in project-config/gerrit/project.yaml. Steps are documented on the - `Project Creators Manual `_. + `Repository Creator's Guide `_. * Ask for a Launchpad user to be assigned to the core team created. Steps are documented in `this section `_. From 1e0d201f1d71ce48a096f69700e4097e460ee53a Mon Sep 17 00:00:00 2001 From: Carl Baldwin Date: Tue, 26 May 2015 20:33:56 +0000 Subject: [PATCH 059/292] Isolate use of fixed_ips[0] to avoid confusion I was looking at the assumption around using fixed_ips[0] in a recent patch [1]. I thought at the least, the usage of fixed_ips[0] should be isolated with the comment explaining why it is okay. I thought that we could also use this patch as an opportunity to vet the validity of the claim made in the comment. 
[1] If4a310da06f9b0076a9f62926a16b574a8c109ce Change-Id: Iba5713dd7d852429997ff43d98266a9f022d5d86 --- .../openvswitch/agent/ovs_dvr_neutron_agent.py | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/neutron/plugins/openvswitch/agent/ovs_dvr_neutron_agent.py b/neutron/plugins/openvswitch/agent/ovs_dvr_neutron_agent.py index db7a41cd4dc..3c4f7886d71 100644 --- a/neutron/plugins/openvswitch/agent/ovs_dvr_neutron_agent.py +++ b/neutron/plugins/openvswitch/agent/ovs_dvr_neutron_agent.py @@ -407,9 +407,10 @@ class OVSDVRNeutronAgent(object): def _bind_distributed_router_interface_port(self, port, lvm, fixed_ips, device_owner): - # since router port must have only one fixed IP, directly - # use fixed_ips[0] - subnet_uuid = fixed_ips[0]['subnet_id'] + # since distributed router port must have only one fixed + # IP, directly use fixed_ips[0] + fixed_ip = fixed_ips[0] + subnet_uuid = fixed_ip['subnet_id'] csnat_ofport = constants.OFPORT_INVALID ldm = None if subnet_uuid in self.local_dvr_map: @@ -595,24 +596,25 @@ class OVSDVRNeutronAgent(object): def _bind_centralized_snat_port_on_dvr_subnet(self, port, lvm, fixed_ips, device_owner): + # since centralized-SNAT (CSNAT) port must have only one fixed + # IP, directly use fixed_ips[0] + fixed_ip = fixed_ips[0] if port.vif_id in self.local_ports: # throw an error if CSNAT port is already on a different # dvr routed subnet ovsport = self.local_ports[port.vif_id] subs = list(ovsport.get_subnets()) - if subs[0] == fixed_ips[0]['subnet_id']: + if subs[0] == fixed_ip['subnet_id']: return LOG.error(_LE("Centralized-SNAT port %(port)s on subnet " "%(port_subnet)s already seen on a different " "subnet %(orig_subnet)s"), { "port": port.vif_id, - "port_subnet": fixed_ips[0]['subnet_id'], + "port_subnet": fixed_ip['subnet_id'], "orig_subnet": subs[0], }) return - # since centralized-SNAT (CSNAT) port must have only one fixed - # IP, directly use fixed_ips[0] - subnet_uuid = fixed_ips[0]['subnet_id'] + 
subnet_uuid = fixed_ip['subnet_id'] ldm = None subnet_info = None if subnet_uuid not in self.local_dvr_map: From 4beb7426587556e0592c055552e0fd70eeb7ac19 Mon Sep 17 00:00:00 2001 From: Kyle Mestery Date: Fri, 24 Apr 2015 16:24:39 +0000 Subject: [PATCH 060/292] Introduce the Lieutenant system into Neutron As Neutron has grown, the need to continually evolve the way we land code has changed. As the project has grown, it's become obvious we need to subdivide responsibilites and merge rights across the repository. The advanced services split [1] was one way in which we did this, and it's proven effective. Plugin decomposition was another [2]. As a next step to evolve things, this changes the core reviewer paradigm into a system of Lieutenants, all reporting to the PTL in the project. The Lieutenants will be able to maintain their own core review team. The idea of "super cores" will fade away. And we begin to see the benefits of a layered, structured approach to managing Neutron. This proposal makes an attempt to comment on existing core reviewers. While the idea of existing core reviewers being labeled as "supercores" has been floated, the reality is a bit more nuanced than that, and I've taken steps to address that with this patch. It should be noted that like all things in Neutron governance, the system is based on a mutual trust. 
[1] http://specs.openstack.org/openstack/neutron-specs/specs/kilo/services-split.html [2] http://specs.openstack.org/openstack/neutron-specs/specs/kilo/core-vendor-decomposition.html Change-Id: Ia4060066a300e851172c0fd7a2910ce53a6c649e --- doc/source/policies/core-reviewers.rst | 239 ++++++++++++++++++------- 1 file changed, 179 insertions(+), 60 deletions(-) diff --git a/doc/source/policies/core-reviewers.rst b/doc/source/policies/core-reviewers.rst index 58e67a76002..6ac4f25d8a0 100644 --- a/doc/source/policies/core-reviewers.rst +++ b/doc/source/policies/core-reviewers.rst @@ -1,29 +1,125 @@ Neutron Core Reviewers ====================== -The `Neutron Core Reviewer Team `_ is responsible -for many things related to Neutron. A lot of these things include mundane tasks such as the -following: +The `Neutron Core Reviewer Team +`_ is responsible +for many things related to Neutron. A lot of these things include mundane +tasks such as the following: * Ensuring the bug count is low * Curating the gate and triaging failures * Working on integrating shared code from projects such as Oslo * Ensuring documentation is up to date and remains relevant -* Ensuring the level of testing for Neutron is adequate and remains relevant as features are added -* Helping new contributors with questions as they peel back the covers of Neutron +* Ensuring the level of testing for Neutron is adequate and remains relevant + as features are added +* Helping new contributors with questions as they peel back the covers of + Neutron * Answering questions and participating in mailing list discussions -* Interfacing with other OpenStack teams and ensuring they are going in the same parallel direction +* Interfacing with other OpenStack teams and ensuring they are going in the + same parallel direction * Reviewing and merging code into the neutron tree In essence, core reviewers share the following common ideals: 1. They share responsibility in the project's success. -2. 
They have made a long-term, recurring time investment to improve the project. -3. They spend their time doing what needs to be done to ensure the projects success, not necessarily - what is the most interesting or fun. +2. They have made a long-term, recurring time investment to improve the + project. +3. They spend their time doing what needs to be done to ensure the projects + success, not necessarily what is the most interesting or fun. -Given the above, Neutron has the following core reviewer teams with responsibility over the areas of -code listed below: +A core reviewer's responsibility doesn't end up with merging code. The above +lists are adding context around these responsibilities. + +Core Review Hierarchy +--------------------- + +As Neutron has grown in complexity, it has become impossible for any one +person to know enough to merge changes across the entire codebase. Areas of +expertise have developed organically, and it is not uncommon for existing +cores to defer to these experts when changes are proposed. Existing cores +should be aware of the implications when they do merge changes outside the +scope of their knowledge. It is with this in mind we propose a new system +built around Lieutenants through a model of trust. + +In order to scale development and responsibility in Neutron, we have adopted +a Lieutenant system. The PTL is the leader of the Neutron project, and +ultimately responsible for decisions made in the project. The PTL has +designated Lieutenants in place to help run portions of the Neutron project. +The Lieutenants are in charge of their own areas, and they can propose core +reviewers for their areas as well. The core reviewer addition and removal +polices are in place below. The Lieutenants for each system, while responsible +for their area, ultimately report to the PTL. The PTL may opt to have regular +one on one meetings with the lieutenants. 
The PTL will resolve disputes in +the project that arise between areas of focus, core reviewers, and other +projects. Please note Lieutenants should be leading their own area of focus, +not doing all the work themselves. + +As was mentioned in the previous section, a core's responsibilities do not +end with merging code. They are responsible for bug triage and gate issues +among other things. Lieutenants have an increased responsibility to ensure +gate and bug triage for their area of focus is under control. + +The following are the current Neutron Lieutenants. + ++------------------------+---------------------------+----------------------+ +| Area | Lieutenant | IRC nic | ++========================+===========================+======================+ +| API and DB | Akihiro Motoki | amotoki | +| | Henry Gessau | HenryG | ++------------------------+---------------------------+----------------------+ +| Built-In Control Plane | Kevin Benton | kevinbenton | ++------------------------+---------------------------+----------------------+ +| Client | Akihiro Motoki | amotoki | ++------------------------+---------------------------+----------------------+ +| Docs | Edgar Magana | emagana | ++------------------------+---------------------------+----------------------+ +| L3 | Carl Baldwin | carl_baldwin | ++------------------------+---------------------------+----------------------+ +| Services | Doug Wiegley | dougwig | ++------------------------+---------------------------+----------------------+ +| Testing | Assaf Muller | amuller | ++------------------------+---------------------------+----------------------+ + +Some notes on the above: +* "Built-In Control Plane" means the L2 agents, DHCP agents, SGs, metadata + agents and the portion of ML2 which communicates with the agents. +* The client includes commands installed server side. +* L3 includes the L3 agent, DVR, and IPAM. +* Services includes FWaaS, LBaaS, and VPNaaS. 
+* Note these areas may change as the project evolves due to code refactoring, + new feature areas, and libification of certain pieces of code. + +Existing Core Reviewers +----------------------- + +Existing core reviewers have been reviewing code for a varying degree of +cycles. With the new plan of Lieutenants and ownership, it's fair to try to +understand how they fit into the new model. Existing core reviewers seem +to mostly focus in particular areas and are cognizant of their own strengths +and weaknesses. These members may not be experts in all areas, but know their +limits, and will not exceed those limits when reviewing changes outside their +area of expertise. The model is built on trust, and when that trust is broken, +responsibilities will be taken away. + +Lieutenant Responsibilities +--------------------------- + +In the hierarchy of Neutron responsibilities, Lieutenants are expected to +partake in the following additional activities compared to other core +reviewers: + +* Ensuring feature requests for their areas have adequate testing and + documentation coverage. +* Gate triage and resolution. Lieutenants are expected to work to keep the + Neutron gate running smoothly by triaging issues, filing elastic recheck + queries, and closing gate bugs. +* Triaging bugs for the specific areas. + +Neutron Core Reviewer Teams +=========================== + +Given all of the above, Neutron has the following core reviewer teams with +responsibility over the areas of code listed below: Neutron Core Reviewer Team -------------------------- @@ -31,27 +127,34 @@ Neutron core reviewers have merge rights to the following git repositories: * `openstack/neutron `_ * `openstack/python-neutronclient `_ +Please note that as we adopt to the system above with core specialty in +particular areas, we expect this broad core team to shrink as people naturally +evolve into an area of specialization. 
+ Neutron FWaaS Core Reviewer Team -------------------------------- -Neutron FWaaS core reviewers have merge rights to the following git repositories: +Neutron FWaaS core reviewers have merge rights to the following git +repositories: * `openstack/neutron-fwaas `_ Neutron LBaaS Core Reviewer Team -------------------------------- -Neutron LBaaS core reviewers have merge rights to the following git repositories: +Neutron LBaaS core reviewers have merge rights to the following git +repositories: * `openstack/neutron-lbaas `_ Neutron VPNaaS Core Reviewer Team --------------------------------- -Neutron VPNaaS core reviewers have merge rights to the following git repositories: +Neutron VPNaaS core reviewers have merge rights to the following git +repositories: * `openstack/neutron-vpnaas `_ Neutron Core Reviewer Teams for Plugins and Drivers --------------------------------------------------- The plugin decomposition effort has led to having many drivers with code in -separate repositories with their own core reviewer teams. For each one of these -repositories in the following repository list, there is a core team associated -with it: +separate repositories with their own core reviewer teams. For each one of +these repositories in the following repository list, there is a core team +associated with it: * `Neutron project team `_ These teams are also responsible for handling their own specs/RFEs/features if @@ -61,50 +164,59 @@ arise. Neutron Specs Core Reviewer Team -------------------------------- -Neutron specs core reviewers have merge rights to the following git repositories: +Neutron specs core reviewers have merge rights to the following git +repositories: * `openstack/neutron-specs `_ -The Neutron specs core reviewer team is responsible for reviewing and merging specs into -the neutron-specs repository. For the Liberty release, the Specs core reviewer team will -review specs targeted to all neutron git repositories. 
+The Neutron specs core reviewer team is responsible for reviewing and merging +specs into the neutron-specs repository. For the Liberty release, the Specs +core reviewer team will review specs targeted to all neutron git repositories. -It's worth noting specs reviewers have the following attributes which are potentially -different than code reviewers: +It's worth noting specs reviewers have the following attributes which are +potentially different than code reviewers: * Broad understanding of cloud and networking technologies * Broad understanding of core OpenStack projects and technologies -* An understanding of the effect approved specs have on the teams development capacity - for each cycle +* An understanding of the effect approved specs have on the teams development + capacity for each cycle Code Merge Responsibilities =========================== -While everyone is encouraged to review changes for these repositories, members of the Neutron -core reviewer group have the ability to +2/-2 and +A changes to these repositories. This is an extra -level of responsibility not to be taken lightly. Correctly merging code requires not only -understanding the code itself, but also how the code affects things like documentation, testing, -and interactions with other projects. It also means you pay attention to release milestones and -understand if a patch you're merging is marked for the release, especially critical during the -feature freeze. +While everyone is encouraged to review changes for these repositories, members +of the Neutron core reviewer group have the ability to +2/-2 and +A changes to +these repositories. This is an extra level of responsibility not to be taken +lightly. Correctly merging code requires not only understanding the code +itself, but also how the code affects things like documentation, testing, and +interactions with other projects. 
It also means you pay attention to release +milestones and understand if a patch you're merging is marked for the release, +especially critical during the feature freeze. -The bottom line here is merging code is a responsibility Neutron core reviewers have. +The bottom line here is merging code is a responsibility Neutron core reviewers +have. Adding or Removing Core Reviewers --------------------------------- -A new Neutron core reviewer may be proposed at anytime on the openstack-dev mailing list. Typically, -the Neutron PTL will propose a new member after discussions with the existing core reviewers. Once -a proposal has been made, five existing Neutron core reviewers must respond to the email with a +1. -If the member is being added to a core reviewer team with less than five members, a simple majority -will be used to determine if the vote is successful. Another Neutron core reviewer can vote -1 to -veto the proposed new core reviewer. +A new Neutron core reviewer may be proposed at anytime on the openstack-dev +mailing list. Typically, the Lieutenant for a given area will propose a new +core reviewer for their specific area of coverage, though the Neutron PTL may +propose new core reviewers as well. The proposal is typically made after +discussions with existing core reviewers. Once a proposal has been made, +three existing Neutron core reviewers from the Lieutenant's area of focus must +respond to the email with a +1. If the member is being added by a Lieutenant +from an area of focus with less than three members, a simple majority will be +used to determine if the vote is successful. Another Neutron core reviewer +from the same area of focus can vote -1 to veto the proposed new core +reviewer. The PTL will mediate all disputes for core reviewer additions. -The PTL may remove a Neutron core reviewer at any time. 
Typically when a member has decreased their -involvement with the project through a drop in reviews and participation in general project development, -the PTL will propose their removal and remove them. Please note there is no voting or vetoing of -core reviewer removal. Members who have previously been a core reviewer may be fast-tracked back into -a core reviewer role if their involvement picks back up and the existing core reviewers support their -re-instatement. +The PTL may remove a Neutron core reviewer at any time. Typically when a +member has decreased their involvement with the project through a drop in +reviews and participation in general project development, the PTL will propose +their removal and remove them. Please note there is no voting or vetoing of +core reviewer removal. Members who have previously been a core reviewer may be +fast-tracked back into a core reviewer role if their involvement picks back up +and the existing core reviewers support their re-instatement. Neutron Core Reviewer Membership Expectations --------------------------------------------- @@ -112,27 +224,34 @@ Neutron Core Reviewer Membership Expectations Neutron core reviewers have the following expectations: * Reasonable attendance at the weekly Neutron IRC meetings. -* Participation in Neutron discussions on the mailing list, as well as in-channel in #openstack-neutron. -* Participation in Neutron related design summit sessions at the OpenStack Summits. +* Participation in Neutron discussions on the mailing list, as well as + in-channel in #openstack-neutron. +* Participation in Neutron related design summit sessions at the OpenStack + Summits. -Please note in-person attendance at design summits, mid-cycles, and other code sprints is not a requirement -to be a Neutron core reviewer. The Neutron team will do its best to facilitate virtual attendance at all events. 
-Travel is not to be taken lightly, and we realize the costs involved for those who partake in attending -these events. +Please note in-person attendance at design summits, mid-cycles, and other code +sprints is not a requirement to be a Neutron core reviewer. The Neutron team +will do its best to facilitate virtual attendance at all events. Travel is not +to be taken lightly, and we realize the costs involved for those who partake +in attending these events. -In addition to the above, code reviews are the most important requirement of Neutron core reviewers. -Neutron follows the documented OpenStack `code review guidelines `_. -We encourage all people to review Neutron patches, but core reviewers are required to maintain a level of -review numbers relatively close to other core reviewers. There are no hard statistics around code review -numbers, but in general we use 30, 60, 90 and 180 day stats when examining review stats. +In addition to the above, code reviews are the most important requirement of +Neutron core reviewers. Neutron follows the documented OpenStack `code review +guidelines `_. We encourage +all people to review Neutron patches, but core reviewers are required to +maintain a level of review numbers relatively close to other core reviewers. +There are no hard statistics around code review numbers, but in general we +use 30, 60, 90 and 180 day stats when examining review stats. * `30 day review stats `_ * `60 day review stats `_ * `90 day review stats `_ * `180 day review stats `_ -There are soft-touch items around being a Neutron core reviewer as well. Gaining trust with the existing Neutron -core reviewers is important. Being able to work together with the existing Neutron core reviewer team is critical -as well. Being a Neutron core reviewer means spending a significant amount of time with the existing Neutron -core reviewers team on IRC, the mailing list, at Summits, and in reviews. 
Ensuring you participate and engage +There are soft-touch items around being a Neutron core reviewer as well. +Gaining trust with the existing Neutron core reviewers is important. Being +able to work together with the existing Neutron core reviewer team is +critical as well. Being a Neutron core reviewer means spending a significant +amount of time with the existing Neutron core reviewers team on IRC, the +mailing list, at Summits, and in reviews. Ensuring you participate and engage here is critical to becoming and remaining a core reviewer. From e135bcea472e08347fc3dc3df01aa1fa909504fb Mon Sep 17 00:00:00 2001 From: Cedric Brandily Date: Wed, 27 May 2015 19:57:04 +0200 Subject: [PATCH 061/292] Do not assume order of dictionary elements in init_l3 This fixes the test_interface unit tests[1] that breaks with a randomized PYTHONHASHSEED (see the bug report). The test assumed that the init_l3 method from neutron.agent.linux.interface had dictionary elements in a particular order. Found with PYTHONHASHSEED=2. The fix refactors the test case to handle unsorted dictionaries in init_l3. [1] neutron.tests.unit.agent.linux.test_interface.TestABCDriver Partial-bug: #1348818 Note: There are several other unrelated unit tests that also break with a randomized PYTHONHASHSEED, but they are not addressed here. They will be addressed in separate patches. 
Change-Id: I1948593b4d7a0069ef060512942b548c74a6b369 --- neutron/tests/unit/agent/linux/test_interface.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/neutron/tests/unit/agent/linux/test_interface.py b/neutron/tests/unit/agent/linux/test_interface.py index fe7cfca2f60..ad08444c157 100644 --- a/neutron/tests/unit/agent/linux/test_interface.py +++ b/neutron/tests/unit/agent/linux/test_interface.py @@ -184,7 +184,8 @@ class TestABCDriver(TestBase): mock.call().addr.delete('2001:db8:a::123/64'), mock.call().route.list_onlink_routes(constants.IP_VERSION_4), mock.call().route.list_onlink_routes(constants.IP_VERSION_6), - mock.call().route.add_onlink_route('172.20.0.0/24')]) + mock.call().route.add_onlink_route('172.20.0.0/24')], + any_order=True) def test_l3_init_with_ipv6_delete_onlink_routes(self): addresses = [dict(scope='global', From 3b8113306a28eab53f0e2e30b0b58c9d8d9db452 Mon Sep 17 00:00:00 2001 From: Kyle Mestery Date: Wed, 27 May 2015 17:26:00 +0000 Subject: [PATCH 062/292] Flesh out the new RFE process and set deadlines for it's use The new RFE process is great in concept, but as was discovered in the first neutron-drivers meeting where we discussed these, there exist some rough edges. Specifically around deadlines and the conversion to using RFEs, the gray area was very obvious. This patch attempts to put a stake in the ground for when we transition fully to this new model, including distinct timelines. Given that we will need to work with people during the transition, what is proposed is a way to let us do that while not blocking existing specs and work. 
Change-Id: Ife50f65caf4b13405ad60dd4be3d347aa31ef3a7 --- doc/source/policies/blueprints.rst | 52 +++++++++++++++++++++++++++--- 1 file changed, 47 insertions(+), 5 deletions(-) diff --git a/doc/source/policies/blueprints.rst b/doc/source/policies/blueprints.rst index a272fd2794b..c8b5c9e6115 100644 --- a/doc/source/policies/blueprints.rst +++ b/doc/source/policies/blueprints.rst @@ -51,10 +51,52 @@ list. RFEs can be submitted by anyone and by having the community vote on them in Launchpad, we can gauge interest in features. The drivers team will evaluate -these on a weekly basis along with the specs. +these on a weekly basis along with the specs. RFEs will be evaluated in the +current cycle against existing project priorities and available resources. The process for moving work from RFEs into the code involves someone assigning -themselves the RFE bug and filing a matching spec in the neutron-specs -repository. The spec will then be reviewed by the community and approved by -the drivers team before landing in a release. This is the same process as -before RFFs existed in Neutron. +themselves the RFE bug and filing a matching spec using the slimmed down +template in the neutron-specs repository. The spec will then be reviewed by the +community and approved by the drivers team before landing in a release. This is +the same process as before RFEs existed in Neutron. + +The workflow for the life an RFE in Launchpad is as follows: + +* The bug is submitted and will by default land in the "New" state. +* As soon as a member of the neutron-drivers team acknowledges the bug, it will + be moved into the "Confirmed" state. No priority, assignee, or milestone is + set at this time. +* The bug goes into the "Triaged" state once a discussion around the RFE has + taken place. +* The neutron-drivers team will evaluate the RFE and may advise the submitter + to file a spec in neutron-specs to elaborate on the feature request. 
+* The PTL will work with the Lieutenant for the area being identified by the + RFE to evaluate resources against the current workload. +* In either case (a spec being required or not), once discussion has happened + the bug will get an assignee, priority and milestone. +* Once a patchset targeting the bug is submitted the bug will move into the + "In Progress" state. +* When all patches targeting the bug are merged or abandoned, the bug will be + moved to the "Completed" state. + +Cutover to RFEs From Pure Specs +------------------------------- + +Prior to the Liberty release, Neutron relied purely on a waterfall model for +handling specs. During Liberty, the goal is to move to the above referenced +RFE process. This will allow for the separation of the "What" from the "How", +and ideally allow for better scheduling of work by the PTL and Lieutenants. +However, given the fact we have a backlog of specs already and new specs +proposed, we need a path forward to not create extra work for everyone. + +For Liberty-1, we will allow the old specs to be reviewed as-is. The drivers +team will ensure all specs submitted a week before the Liberty-1 deadline are +given a review and approved or rejected. After Liberty-1, people will not be +required to convert their specs over to RFE bugs during Liberty-1. Once +Liberty-1 passes, all old specs will be moved to a "liberty-backlog" directory +and anything new will follow the new RFE process fully. + +RFE Submission Guidelines +------------------------- + +#TODO(marun) From 98d8ad911d07a20af18edb0cac4bcf141a83d969 Mon Sep 17 00:00:00 2001 From: Kevin Benton Date: Mon, 25 May 2015 18:55:44 -0700 Subject: [PATCH 063/292] Persist DHCP leases to a local database Due to issues caused by dnsmasq restarts sending DHCPNAKs, change Ieff0236670c1403b5d79ad8e50d7574c1b694e34 passed the 'dhcp-authoritative' option to dnsmasq. 
While this solved the restart issue, it broke the multi-DHCP server scenario because the dnsmasq instances will NAK requests to a server ID that isn't their own. Problem DHCP Request Lifecycle: Client: DHCPDISCOVER(broadcast) Server1: DHCPOFFER Server2: DHCPOFFER Client: DHCPREQUEST(broadcast with Server-ID=Server1) Server1: DHCPACK Server2: DHCPNAK(in response to observed DHCPREQUEST with other Server-ID) ^---Causes issues This change removes the authoritative option so NAKs are not send in response to DHCPREQUEST's to other servers. To handle the original issue that Ieff0236670c1403b5d79ad8e50d7574c1b694e34 was inteded to address, this patch also allows changes to be persisted to a local lease file. In order to handle the issue where a DHCP server may be scheduled to another agent, a fake lease file is generated for dnsmasq to start with. The contents are populated based on all of the known ports for a network. This should prevent dnsmasq from NAKing clients renewing leases issued before it was restarted/rescheduled. 
Closes-Bug: #1457900 Change-Id: Idc91602bf8c474467e596cbd5cbaa8898952c841 --- neutron/agent/linux/dhcp.py | 66 ++++++++++++++++++--- neutron/tests/unit/agent/linux/test_dhcp.py | 31 +++++++++- 2 files changed, 87 insertions(+), 10 deletions(-) diff --git a/neutron/agent/linux/dhcp.py b/neutron/agent/linux/dhcp.py index d4fbcb1d971..ba3431b8d4a 100644 --- a/neutron/agent/linux/dhcp.py +++ b/neutron/agent/linux/dhcp.py @@ -18,6 +18,7 @@ import collections import os import re import shutil +import time import netaddr from oslo_config import cfg @@ -313,8 +314,7 @@ class Dnsmasq(DhcpLocalProcess): '--dhcp-hostsfile=%s' % self.get_conf_file_name('host'), '--addn-hosts=%s' % self.get_conf_file_name('addn_hosts'), '--dhcp-optsfile=%s' % self.get_conf_file_name('opts'), - '--leasefile-ro', - '--dhcp-authoritative', + '--dhcp-leasefile=%s' % self.get_conf_file_name('leases'), ] possible_leases = 0 @@ -382,6 +382,9 @@ class Dnsmasq(DhcpLocalProcess): def spawn_process(self): """Spawn the process, if it's not spawned already.""" + # we only need to generate the lease file the first time dnsmasq starts + # rather than on every reload since dnsmasq will keep the file current + self._output_init_lease_file() self._spawn_or_reload_process(reload_with_HUP=False) def _spawn_or_reload_process(self, reload_with_HUP): @@ -469,6 +472,58 @@ class Dnsmasq(DhcpLocalProcess): def _get_port_extra_dhcp_opts(self, port): return getattr(port, edo_ext.EXTRADHCPOPTS, False) + def _output_init_lease_file(self): + """Write a fake lease file to bootstrap dnsmasq. + + The generated file is passed to the --dhcp-leasefile option of dnsmasq. + This is used as a bootstrapping mechanism to avoid NAKing active leases + when a dhcp server is scheduled to another agent. Using a leasefile + will also prevent dnsmasq from NAKing or ignoring renewals after a + restart. 
+ + Format is as follows: + epoch-timestamp mac_addr ip_addr hostname client-ID + """ + filename = self.get_conf_file_name('leases') + buf = six.StringIO() + + LOG.debug('Building initial lease file: %s', filename) + # we make up a lease time for the database entry + if self.conf.dhcp_lease_duration == -1: + # Even with an infinite lease, a client may choose to renew a + # previous lease on reboot or interface bounce so we should have + # an entry for it. + # Dnsmasq timestamp format for an infinite lease is is 0. + timestamp = 0 + else: + timestamp = int(time.time()) + self.conf.dhcp_lease_duration + dhcp_enabled_subnet_ids = [s.id for s in self.network.subnets + if s.enable_dhcp] + for (port, alloc, hostname, name) in self._iter_hosts(): + # don't write ip address which belongs to a dhcp disabled subnet. + if not alloc or alloc.subnet_id not in dhcp_enabled_subnet_ids: + continue + + ip_address = self._format_address_for_dnsmasq(alloc.ip_address) + # all that matters is the mac address and IP. the hostname and + # client ID will be overwritten on the next renewal. + buf.write('%s %s %s * *\n' % + (timestamp, port.mac_address, ip_address)) + contents = buf.getvalue() + utils.replace_file(filename, contents) + LOG.debug('Done building initial lease file %s with contents:\n%s', + filename, contents) + return filename + + @staticmethod + def _format_address_for_dnsmasq(address): + # (dzyu) Check if it is legal ipv6 address, if so, need wrap + # it with '[]' to let dnsmasq to distinguish MAC address from + # IPv6 address. + if netaddr.valid_ipv6(address): + return '[%s]' % address + return address + def _output_hosts_file(self): """Writes a dnsmasq compatible dhcp hosts file. @@ -504,12 +559,7 @@ class Dnsmasq(DhcpLocalProcess): if alloc.subnet_id not in dhcp_enabled_subnet_ids: continue - # (dzyu) Check if it is legal ipv6 address, if so, need wrap - # it with '[]' to let dnsmasq to distinguish MAC address from - # IPv6 address. 
- ip_address = alloc.ip_address - if netaddr.valid_ipv6(ip_address): - ip_address = '[%s]' % ip_address + ip_address = self._format_address_for_dnsmasq(alloc.ip_address) if self._get_port_extra_dhcp_opts(port): client_id = self._get_client_id(port) diff --git a/neutron/tests/unit/agent/linux/test_dhcp.py b/neutron/tests/unit/agent/linux/test_dhcp.py index fae303ab03c..380e8af804a 100644 --- a/neutron/tests/unit/agent/linux/test_dhcp.py +++ b/neutron/tests/unit/agent/linux/test_dhcp.py @@ -873,8 +873,7 @@ class TestDnsmasq(TestBase): '--dhcp-hostsfile=/dhcp/%s/host' % network.id, '--addn-hosts=/dhcp/%s/addn_hosts' % network.id, '--dhcp-optsfile=/dhcp/%s/opts' % network.id, - '--leasefile-ro', - '--dhcp-authoritative'] + '--dhcp-leasefile=/dhcp/%s/leases' % network.id] seconds = '' if lease_duration == -1: @@ -996,6 +995,34 @@ class TestDnsmasq(TestBase): self._test_spawn(['--conf-file=', '--domain=openstacklocal'], network) + def _test_output_init_lease_file(self, timestamp): + expected = [ + '00:00:80:aa:bb:cc 192.168.0.2 * *', + '00:00:f3:aa:bb:cc [fdca:3ba5:a17a:4ba3::2] * *', + '00:00:0f:aa:bb:cc 192.168.0.3 * *', + '00:00:0f:aa:bb:cc [fdca:3ba5:a17a:4ba3::3] * *', + '00:00:0f:rr:rr:rr 192.168.0.1 * *\n'] + expected = "\n".join(['%s %s' % (timestamp, l) for l in expected]) + with mock.patch.object(dhcp.Dnsmasq, 'get_conf_file_name') as conf_fn: + conf_fn.return_value = '/foo/leases' + dm = self._get_dnsmasq(FakeDualNetwork()) + dm._output_init_lease_file() + self.safe.assert_called_once_with('/foo/leases', expected) + + @mock.patch('time.time') + def test_output_init_lease_file(self, tmock): + self.conf.set_override('dhcp_lease_duration', 500) + tmock.return_value = 1000000 + # lease duration should be added to current time + timestamp = 1000000 + 500 + self._test_output_init_lease_file(timestamp) + + def test_output_init_lease_file_infinite_duration(self): + self.conf.set_override('dhcp_lease_duration', -1) + # when duration is infinite, lease db timestamp 
should be 0 + timestamp = 0 + self._test_output_init_lease_file(timestamp) + def _test_output_opts_file(self, expected, network, ipm_retval=None): with mock.patch.object(dhcp.Dnsmasq, 'get_conf_file_name') as conf_fn: conf_fn.return_value = '/foo/opts' From 2b7e6552e82f899a91ce3d6973d90778440fac8c Mon Sep 17 00:00:00 2001 From: Kevin Benton Date: Wed, 27 May 2015 14:52:06 -0700 Subject: [PATCH 064/292] Remove time formatting in agent clock error This removes time formatting that may be hiding timezone issues that are leading to a delta being calculated between the agent and the server even when it shows none. It also adds logging of the difference so we can see how far off it thinks they are. Example message: during the registration of Open vSwitch agent has a timestamp: 2015-05-19T18:15:27Z. This differs from the current server timestamp: 2015-05-19T18:15:27Z by more than the threshold agent downtime: 75. Note that the timestamps are exactly the same after formatting. Change-Id: Ibfc30444b7a167fb18ae9051a775266236d4ecce Related-Bug: #1456760 --- neutron/db/agents_db.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/neutron/db/agents_db.py b/neutron/db/agents_db.py index 26a6212d573..a9ba6dfe865 100644 --- a/neutron/db/agents_db.py +++ b/neutron/db/agents_db.py @@ -297,16 +297,17 @@ class AgentExtRpcCallback(object): diff = abs((time_server_now - agent_time).seconds) if diff > cfg.CONF.agent_down_time: agent_name = agent_state['agent_type'] - time_agent = timeutils.isotime(agent_time) host = agent_state['host'] log_dict = {'host': host, 'agent_name': agent_name, - 'agent_time': time_agent, + 'agent_time': agent_time, 'threshold': cfg.CONF.agent_down_time, - 'serv_time': timeutils.isotime(time_server_now)} + 'serv_time': time_server_now, + 'diff': diff} LOG.error(_LE("Message received from the host: %(host)s " "during the registration of %(agent_name)s has " "a timestamp: %(agent_time)s. 
This differs from " "the current server timestamp: %(serv_time)s by " - "more than the threshold agent down" + "%(diff)s seconds, which is more than the " + "threshold agent down" "time: %(threshold)s."), log_dict) From 817b45b6406d9730859535ac54d73ec5c85451d0 Mon Sep 17 00:00:00 2001 From: Kevin Benton Date: Wed, 27 May 2015 17:38:32 -0700 Subject: [PATCH 065/292] Process port IP requests before subnet requests When a port requests multiple fixed IPs, process the requests for specific IP addresses before the ones asking for a subnet. This prevents an error where the IP that was requested happens to be the next up for allocation so the subnet request takes it and causes a DBDuplicateEntry. Closes-Bug: #1459467 Change-Id: I645565c7fe0c47c58d686b25020bb49a0b9089f5 --- neutron/db/db_base_plugin_v2.py | 5 +++++ neutron/tests/unit/db/test_db_base_plugin_v2.py | 10 ++++++++++ 2 files changed, 15 insertions(+) diff --git a/neutron/db/db_base_plugin_v2.py b/neutron/db/db_base_plugin_v2.py index dab525befb0..29ffa85b4f2 100644 --- a/neutron/db/db_base_plugin_v2.py +++ b/neutron/db/db_base_plugin_v2.py @@ -459,6 +459,11 @@ class NeutronDbPluginV2(neutron_plugin_base_v2.NeutronPluginBaseV2, def _allocate_fixed_ips(self, context, fixed_ips, mac_address): """Allocate IP addresses according to the configured fixed_ips.""" ips = [] + + # we need to start with entries that asked for a specific IP in case + # those IPs happen to be next in the line for allocation for ones that + # didn't ask for a specific IP + fixed_ips.sort(key=lambda x: 'ip_address' not in x) for fixed in fixed_ips: subnet = self._get_subnet(context, fixed['subnet_id']) is_auto_addr = ipv6_utils.is_auto_address_subnet(subnet) diff --git a/neutron/tests/unit/db/test_db_base_plugin_v2.py b/neutron/tests/unit/db/test_db_base_plugin_v2.py index 7af0122d06a..f34beb85a30 100644 --- a/neutron/tests/unit/db/test_db_base_plugin_v2.py +++ b/neutron/tests/unit/db/test_db_base_plugin_v2.py @@ -903,6 +903,16 @@ class 
TestPortsV2(NeutronDbPluginV2TestCase): self.assertIn('mac_address', port['port']) self._delete('ports', port['port']['id']) + def test_create_port_anticipating_allocation(self): + with self.network(shared=True) as network: + with self.subnet(network=network, cidr='10.0.0.0/24') as subnet: + fixed_ips = [{'subnet_id': subnet['subnet']['id']}, + {'subnet_id': subnet['subnet']['id'], + 'ip_address': '10.0.0.2'}] + self._create_port(self.fmt, network['network']['id'], + webob.exc.HTTPCreated.code, + fixed_ips=fixed_ips) + def test_create_port_public_network_with_invalid_ip_no_subnet_id(self, expected_error='InvalidIpForNetwork'): with self.network(shared=True) as network: From 7a3934d982ef29d8851450b5586319201baa0122 Mon Sep 17 00:00:00 2001 From: Kevin Benton Date: Fri, 15 May 2015 17:10:15 -0700 Subject: [PATCH 066/292] Switch to dictionary for iptables find The code to find the matching entry was scanning through a list of all rules for every rule. This became extremely slow as the number of rules became large, leading to long delays waiting for firewall rules to be applied. This patch switches to the use of a dictionary so the cost becomes a hash lookup instead of a list scan. 
Closes-Bug: #1453264 Closes-Bug: #1455675 Change-Id: I1e6fe5e50b9c13066c966c252cadc8ed1d08f686 --- neutron/agent/linux/iptables_manager.py | 49 +++++++++++++++---- .../unit/agent/linux/test_iptables_manager.py | 19 +++++-- 2 files changed, 56 insertions(+), 12 deletions(-) diff --git a/neutron/agent/linux/iptables_manager.py b/neutron/agent/linux/iptables_manager.py index 63efcb3a1f7..38e40f0c735 100644 --- a/neutron/agent/linux/iptables_manager.py +++ b/neutron/agent/linux/iptables_manager.py @@ -18,6 +18,7 @@ """Implements iptables rules using linux utilities.""" +import collections import contextlib import os import re @@ -511,11 +512,12 @@ class IptablesManager(object): return rules_index - def _find_last_entry(self, filter_list, match_str): - # find a matching entry, starting from the bottom - for s in reversed(filter_list): - if match_str in s: - return s.strip() + def _find_last_entry(self, filter_map, match_str): + # find last matching entry + try: + return filter_map[match_str][-1] + except KeyError: + pass def _modify_rules(self, current_lines, table, table_name): # Chains are stored as sets to avoid duplicates. 
@@ -540,6 +542,9 @@ class IptablesManager(object): (old_filter if self.wrap_name in line else new_filter).append(line.strip()) + old_filter_map = make_filter_map(old_filter) + new_filter_map = make_filter_map(new_filter) + rules_index = self._find_rules_index(new_filter) all_chains = [':%s' % name for name in unwrapped_chains] @@ -551,9 +556,9 @@ class IptablesManager(object): for chain in all_chains: chain_str = str(chain).strip() - old = self._find_last_entry(old_filter, chain_str) + old = self._find_last_entry(old_filter_map, chain_str) if not old: - dup = self._find_last_entry(new_filter, chain_str) + dup = self._find_last_entry(new_filter_map, chain_str) new_filter = [s for s in new_filter if chain_str not in s.strip()] # if no old or duplicates, use original chain @@ -574,9 +579,9 @@ class IptablesManager(object): # Further down, we weed out duplicates from the bottom of the # list, so here we remove the dupes ahead of time. - old = self._find_last_entry(old_filter, rule_str) + old = self._find_last_entry(old_filter_map, rule_str) if not old: - dup = self._find_last_entry(new_filter, rule_str) + dup = self._find_last_entry(new_filter_map, rule_str) new_filter = [s for s in new_filter if rule_str not in s.strip()] # if no old or duplicates, use original rule @@ -722,3 +727,29 @@ class IptablesManager(object): acc['bytes'] += int(data[1]) return acc + + +def make_filter_map(filter_list): + filter_map = collections.defaultdict(list) + for data in filter_list: + # strip any [packet:byte] counts at start or end of lines, + # for example, chains look like ":neutron-foo - [0:0]" + # and rules look like "[0:0] -A neutron-foo..." 
+ if data.startswith('['): + key = data.rpartition('] ')[2] + elif data.endswith(']'): + key = data.rsplit(' [', 1)[0] + if key.endswith(' -'): + key = key[:-2] + else: + # things like COMMIT, *filter, and *nat land here + continue + filter_map[key].append(data) + # regular IP(v6) entries are translated into /32s or /128s so we + # include a lookup without the CIDR here to match as well + for cidr in ('/32', '/128'): + if cidr in key: + alt_key = key.replace(cidr, '') + filter_map[alt_key].append(data) + # return a regular dict so readers don't accidentally add entries + return dict(filter_map) diff --git a/neutron/tests/unit/agent/linux/test_iptables_manager.py b/neutron/tests/unit/agent/linux/test_iptables_manager.py index 8637c35801a..674b1a872f7 100644 --- a/neutron/tests/unit/agent/linux/test_iptables_manager.py +++ b/neutron/tests/unit/agent/linux/test_iptables_manager.py @@ -1007,11 +1007,11 @@ class IptablesManagerStateFulTestCase(base.BaseTestCase): '[0:0] -A FORWARD -j neutron-filter-top', '[0:0] -A OUTPUT -j neutron-filter-top' % IPTABLES_ARG] - - return self.iptables._find_last_entry(filter_list, find_str) + filter_map = iptables_manager.make_filter_map(filter_list) + return self.iptables._find_last_entry(filter_map, find_str) def test_find_last_entry_old_dup(self): - find_str = 'neutron-filter-top' + find_str = '-A OUTPUT -j neutron-filter-top' match_str = '[0:0] -A OUTPUT -j neutron-filter-top' ret_str = self._test_find_last_entry(find_str) self.assertEqual(ret_str, match_str) @@ -1021,6 +1021,19 @@ class IptablesManagerStateFulTestCase(base.BaseTestCase): ret_str = self._test_find_last_entry(find_str) self.assertIsNone(ret_str) + def test_make_filter_map_cidr_stripping(self): + filter_rules = ('[0:0] -A OUTPUT -j DROP', + '[0:0] -A INPUT -d 192.168.0.2/32 -j DROP', + '[0:0] -A INPUT -d 1234:31::001F/128 -j DROP', + 'OUTPUT - [0:0]') + filter_map = iptables_manager.make_filter_map(filter_rules) + # make sure /128 works without CIDR + 
self.assertEqual(filter_rules[2], + filter_map['-A INPUT -d 1234:31::001F -j DROP'][0]) + # make sure /32 works without CIDR + self.assertEqual(filter_rules[1], + filter_map['-A INPUT -d 192.168.0.2 -j DROP'][0]) + class IptablesManagerStateLessTestCase(base.BaseTestCase): From 046f4a0881aa7aea5d231de83e79af12460d9910 Mon Sep 17 00:00:00 2001 From: armando-migliaccio Date: Wed, 27 May 2015 15:40:06 -0700 Subject: [PATCH 067/292] Add RFE submission guidelines Change-Id: I864c8638a92f5f94e6f059a477ffb56de274ef1c --- doc/source/policies/blueprints.rst | 48 ++++++++++++++++++++++++++++-- 1 file changed, 45 insertions(+), 3 deletions(-) diff --git a/doc/source/policies/blueprints.rst b/doc/source/policies/blueprints.rst index c8b5c9e6115..0c2d67edee0 100644 --- a/doc/source/policies/blueprints.rst +++ b/doc/source/policies/blueprints.rst @@ -35,8 +35,8 @@ The specs which are moved in this way can be fast-tracked into the next release. Please note that it is required to re-propose the spec for the new release however. -Neutron Feature Requests ------------------------- +Neutron Request for Feature Enhancements +---------------------------------------- We are introducing the concept of feature requests. Feature requests are tracked as Launchpad bugs, tagged with the new 'rfe' tag, and allow for @@ -99,4 +99,46 @@ and anything new will follow the new RFE process fully. RFE Submission Guidelines ------------------------- -#TODO(marun) +Before we dive into the guidelines for writing a good RFE, it is worth mentioning +that depending on your level of engagement with the Neutron project and your role +(user, developer, deployer, operator, etc.), you are more than welcome to have +a preliminary discussion of a potential RFE by reaching out to other people involved +in the project. This usually happens by posting mails on the relevant mailing +lists (e.g. `openstack-dev `_ - include [neutron] in +the subject) or on #openstack-neutron IRC channel on Freenode. 
If current ongoing +code reviews are related to your feature, posting comments/questions on gerrit +may also be a way to engage. Some amount of interaction with Neutron developers +will give you an idea of the plausibility and form of your RFE before you submit +it. That said, this is not mandatory. + +When you submit a bug report on https://bugs.launchpad.net/neutron/+filebug, +there are two fields that must be filled: 'summary' and 'further information'. +The 'summary' must be brief enough to fit in one line: if you can't describe it +in a few words it may mean that you are either trying to capture more than one +RFE at once, or that you are having a hard time defining what you are trying to +solve at all. + +The 'further information' section must be a description of what you would like +to see implemented in Neutron. The description should provide enough details for +a knowledgeable developer to understand what is the existing problem in the +current platform that needs to be addressed, or what is the enhancement that +would make the platform more capable, both for a functional and a non-functional +standpoint. To this aim it is important to describe 'why' you believe the RFE +should be accepted, and motivate the reason why without it Neutron is a poorer +platform. The description should be self contained, and no external references +should be necessary to further explain the RFE. + +In other words, when you write an RFE you should ask yourself the following +questions: + +* What is that I (specify what user - a user can be a human or another system) + cannot do today when interacting with Neutron? On the other hand, is there a + Neutron component X that is unable to accomplish something? +* Is there something that you would like Neutron handle better, ie. in a more + scalable, or in a more reliable way? +* What is that I would like to see happen after the RFE is accepted and + implemented? +* Why do you think it is important? 
+ +Once you are happy with what you wrote, add 'rfe' as tag, and submit. Do not +worry, we are here to help you get it right! Happy hacking. From 88e499d1c10eaae59546d9f16c9c9c262766de84 Mon Sep 17 00:00:00 2001 From: Gong Zhang Date: Wed, 27 May 2015 17:10:17 +0800 Subject: [PATCH 068/292] Move pool dispose() before os.fork Currently pool dispose() is done after os.fork, but this will produce shared DB connections in child processes which may lead to DB errors. Move pool dispose() before os.fork. This will remove all existing connections in the parent process and child processes will create their own new ones. Change-Id: Ie36417a64f0eb39b53dad61517f834aec37bacfb Closes-Bug: 1458718 --- neutron/service.py | 8 ++++---- neutron/wsgi.py | 8 ++++---- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/neutron/service.py b/neutron/service.py index 4d8e9a85413..708882b7312 100644 --- a/neutron/service.py +++ b/neutron/service.py @@ -117,10 +117,6 @@ class RpcWorker(object): self._servers = [] def start(self): - # We may have just forked from parent process. A quick disposal of the - # existing sql connections avoids producing errors later when they are - # discovered to be broken. - session.dispose() self._servers = self._plugin.start_rpc_listeners() def wait(self): @@ -157,6 +153,10 @@ def serve_rpc(): rpc.start() return rpc else: + # dispose the whole pool before os.fork, otherwise there will + # be shared DB connections in child processes which may cause + # DB errors. + session.dispose() launcher = common_service.ProcessLauncher(wait_interval=1.0) launcher.launch_service(rpc, workers=cfg.CONF.rpc_workers) return launcher diff --git a/neutron/wsgi.py b/neutron/wsgi.py index a31367ac6a6..bbd2d8fd898 100644 --- a/neutron/wsgi.py +++ b/neutron/wsgi.py @@ -98,10 +98,6 @@ class WorkerService(object): self._server = None def start(self): - # We may have just forked from parent process. 
A quick disposal of the - # existing sql connections avoids producing 500 errors later when they - # are discovered to be broken. - api.dispose() if CONF.use_ssl: self._service._socket = self._service.wrap_ssl( self._service._socket) @@ -234,6 +230,10 @@ class Server(object): service.start() systemd.notify_once() else: + # dispose the whole pool before os.fork, otherwise there will + # be shared DB connections in child processes which may cause + # DB errors. + api.dispose() # The API service runs in a number of child processes. # Minimize the cost of checking for child exit by extending the # wait interval past the default of 0.01s. From 714c311871333c19c4d6203f79a948b184bf11cf Mon Sep 17 00:00:00 2001 From: Gal Sagie Date: Thu, 28 May 2015 09:17:15 +0300 Subject: [PATCH 069/292] Addressing follow up comments for OVS_LIB fail_mode setting API Review https://review.openstack.org/#/c/185659/ got merged before i could see and address the last comment. This is a follow up patch to address that change Change-Id: I19135a75cf25e03d6c1f89d7c18e8da3af2ec643 --- neutron/agent/common/ovs_lib.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/neutron/agent/common/ovs_lib.py b/neutron/agent/common/ovs_lib.py index 933ab67796c..b316584f9eb 100644 --- a/neutron/agent/common/ovs_lib.py +++ b/neutron/agent/common/ovs_lib.py @@ -157,13 +157,14 @@ class OVSBridge(BaseOVS): return self.ovsdb.get_controller(self.br_name).execute( check_error=True) + def _set_bridge_fail_mode(self, mode): + self.ovsdb.set_fail_mode(self.br_name, mode).execute(check_error=True) + def set_secure_mode(self): - self.ovsdb.set_fail_mode(self.br_name, FAILMODE_SECURE).execute( - check_error=True) + self._set_bridge_fail_mode(FAILMODE_SECURE) def set_standalone_mode(self): - self.ovsdb.set_fail_mode(self.br_name, FAILMODE_STANDALONE).execute( - check_error=True) + self._set_bridge_fail_mode(FAILMODE_STANDALONE) def set_protocols(self, protocols): self.set_db_attribute('Bridge', 
self.br_name, 'protocols', protocols, From b4363a48214f1bb21bd7934f05a6440bb5d66bb7 Mon Sep 17 00:00:00 2001 From: Cedric Brandily Date: Thu, 28 May 2015 09:07:23 +0200 Subject: [PATCH 070/292] Fix PYTHONHASHSEED bugs in test_security_groups_rpc This fixes the test_security_group_member/rule_updated unit tests[1] that breaks with a randomized PYTHONHASHSEED (see the bug report). The test assumed that several dictionaries had elements in a particular order. Found with PYTHONHASHSEED=2. The fix refactors the test case by injecting values using the same ordering[2]. [1] neutron.tests.unit.agent.test_securitygroups_rpc.\ TestSecurityGroupAgentWithOVSIptables [2] https://github.com/openstack/neutron/blob/\ e8364a72e62d83e5a76bec1d7aa76ecfe2ed53ac/\ neutron/tests/unit/agent/test_securitygroups_rpc.py#L1630-L1635 Partial-bug: #1348818 Note: There are several other unrelated unit tests that also break with a randomized PYTHONHASHSEED, but they are not addressed here. They will be addressed in separate patches. 
Change-Id: I5077764045a34d1be0e85bb4b80f3655e87692cc --- neutron/tests/unit/agent/test_securitygroups_rpc.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/neutron/tests/unit/agent/test_securitygroups_rpc.py b/neutron/tests/unit/agent/test_securitygroups_rpc.py index 5fd97ba1448..726ed3eab55 100644 --- a/neutron/tests/unit/agent/test_securitygroups_rpc.py +++ b/neutron/tests/unit/agent/test_securitygroups_rpc.py @@ -1670,10 +1670,12 @@ IPTABLES_RAW_DEVICE_2 = """# Generated by iptables_manager :%(bn)s-PREROUTING - [0:0] [0:0] -A PREROUTING -j %(bn)s-PREROUTING [0:0] -A OUTPUT -j %(bn)s-OUTPUT -[0:0] -A %(bn)s-PREROUTING -m physdev --physdev-in qvbtap_port1 -j CT --zone 1 -[0:0] -A %(bn)s-PREROUTING -m physdev --physdev-in tap_port1 -j CT --zone 1 -[0:0] -A %(bn)s-PREROUTING -m physdev --physdev-in qvbtap_port2 -j CT --zone 1 -[0:0] -A %(bn)s-PREROUTING -m physdev --physdev-in tap_port2 -j CT --zone 1 +[0:0] -A %(bn)s-PREROUTING -m physdev --physdev-in qvbtap_%(port1)s \ +-j CT --zone 1 +[0:0] -A %(bn)s-PREROUTING -m physdev --physdev-in tap_%(port1)s -j CT --zone 1 +[0:0] -A %(bn)s-PREROUTING -m physdev --physdev-in qvbtap_%(port2)s \ +-j CT --zone 1 +[0:0] -A %(bn)s-PREROUTING -m physdev --physdev-in tap_%(port2)s -j CT --zone 1 COMMIT # Completed by iptables_manager """ % IPTABLES_ARG From 2fcbbf3031fe1d326ddb2b9bb1b06e0e39422022 Mon Sep 17 00:00:00 2001 From: John Schwarz Date: Thu, 7 May 2015 09:02:59 +0300 Subject: [PATCH 071/292] Move full-stack logs post-tests Currently, it's up to the developer who wants to run full-stack on his machine to make the directory in question (/opt/stack/logs). However, this also means that the files don't get compressed at the end of a gate run. Now, each full-stack test will have each own log directory in /tmp. Once the logs are there, post_test_hook.sh can run 'gzip' on all the log files before moving them to /opt/stack/logs on its own. 
Change-Id: I5c04d0af0b9858722ae0c4baf0ee478ffb078e02 --- TESTING.rst | 5 +- neutron/tests/contrib/post_test_hook.sh | 9 ++++ neutron/tests/fullstack/base.py | 5 ++ neutron/tests/fullstack/fullstack_fixtures.py | 46 ++++++++++++++----- neutron/tests/fullstack/test_l3_agent.py | 3 +- 5 files changed, 54 insertions(+), 14 deletions(-) diff --git a/TESTING.rst b/TESTING.rst index 27289b191d1..c05018e558d 100644 --- a/TESTING.rst +++ b/TESTING.rst @@ -245,7 +245,10 @@ advise to run ./stack.sh successfully to make sure all Neutron's dependencies are met. Also note that in order to preserve resources on the gate, running the dsvm-functional suite will also run all full-stack tests (and a new worker won't be assigned specifically for -dsvm-fullstack). +dsvm-fullstack). Full-stack based Neutron daemons produce logs to a +sub-folder in /tmp/fullstack-logs (for example, a test named +"test_example" will produce logs to /tmp/fullstack-logs/test_example/), +so that will be a good place to look if your test is failing. API Tests ========= diff --git a/neutron/tests/contrib/post_test_hook.sh b/neutron/tests/contrib/post_test_hook.sh index 2c8360a47a7..62d47ce01a8 100644 --- a/neutron/tests/contrib/post_test_hook.sh +++ b/neutron/tests/contrib/post_test_hook.sh @@ -19,6 +19,15 @@ function generate_testr_results { gzip -9 ./testr_results.html sudo mv ./*.gz /opt/stack/logs/ fi + + # Compress all /tmp/fullstack-*/*.txt files and move the directories + # holding those files to /opt/stack/logs. Files with .log suffix have their + # suffix changed to .txt (so browsers will know to open the compressed + # files and not download them). 
+ if [ -d /tmp/fullstack-logs/ ]; then + sudo find /tmp/fullstack-logs -iname "*.log" -type f -exec mv {} {}.txt \; -exec gzip -9 {}.txt \; + sudo mv /tmp/fullstack-logs/* /opt/stack/logs/ + fi } if [ "$venv" == "dsvm-functional" ] diff --git a/neutron/tests/fullstack/base.py b/neutron/tests/fullstack/base.py index b783612360d..61b50d3a185 100644 --- a/neutron/tests/fullstack/base.py +++ b/neutron/tests/fullstack/base.py @@ -33,10 +33,15 @@ class BaseFullStackTestCase(test_base.MySQLOpportunisticTestCase): self.create_db_tables() if self.environment: + self.environment.test_name = self.get_name() self.useFixture(self.environment) self.client = self.environment.neutron_server.client + def get_name(self): + class_name, test_name = self.id().split(".")[-2:] + return "%s.%s" % (class_name, test_name) + def create_db_tables(self): """Populate the new database. diff --git a/neutron/tests/fullstack/fullstack_fixtures.py b/neutron/tests/fullstack/fullstack_fixtures.py index c06d450cb3d..f714273aeea 100644 --- a/neutron/tests/fullstack/fullstack_fixtures.py +++ b/neutron/tests/fullstack/fullstack_fixtures.py @@ -13,6 +13,7 @@ # under the License. 
from distutils import spawn +import errno import functools import os @@ -32,12 +33,13 @@ from neutron.tests.fullstack import config_fixtures LOG = logging.getLogger(__name__) # This should correspond the directory from which infra retrieves log files -DEFAULT_LOG_DIR = '/opt/stack/logs' +DEFAULT_LOG_DIR = '/tmp/fullstack-logs/' class ProcessFixture(fixtures.Fixture): - def __init__(self, name, exec_name, config_filenames): - self.name = name + def __init__(self, test_name, process_name, exec_name, config_filenames): + self.test_name = test_name + self.process_name = process_name self.exec_name = exec_name self.config_filenames = config_filenames self.process = None @@ -47,9 +49,19 @@ class ProcessFixture(fixtures.Fixture): self.start() def start(self): - fmt = self.name + "--%Y-%m-%d--%H%M%S.log" + fmt = self.process_name + "--%Y-%m-%d--%H%M%S.log" + log_dir = os.path.join(DEFAULT_LOG_DIR, self.test_name) + if not os.path.exists(log_dir): + try: + os.makedirs(log_dir) + except OSError as e: + # Make sure that the error was that the directory was created + # by a different (concurrent) worker. If not, raise the error. 
+ if e.errno != errno.EEXIST: + raise + cmd = [spawn.find_executable(self.exec_name), - '--log-dir', DEFAULT_LOG_DIR, + '--log-dir', log_dir, '--log-file', timeutils.strtime(fmt=fmt)] for filename in self.config_filenames: cmd += ['--config-file', filename] @@ -88,6 +100,8 @@ class RabbitmqEnvironmentFixture(fixtures.Fixture): class FullstackFixture(fixtures.Fixture): + def __init__(self): + self.test_name = None def setUp(self): super(FullstackFixture, self).setUp() @@ -96,7 +110,8 @@ class FullstackFixture(fixtures.Fixture): rabbitmq_environment = self.useFixture(RabbitmqEnvironmentFixture()) self.neutron_server = self.useFixture( - NeutronServerFixture(self.temp_dir, rabbitmq_environment)) + NeutronServerFixture( + self.test_name, self.temp_dir, rabbitmq_environment)) def wait_until_env_is_up(self, agents_count=0): utils.wait_until_true( @@ -116,7 +131,8 @@ class NeutronServerFixture(fixtures.Fixture): NEUTRON_SERVER = "neutron-server" - def __init__(self, temp_dir, rabbitmq_environment): + def __init__(self, test_name, temp_dir, rabbitmq_environment): + self.test_name = test_name self.temp_dir = temp_dir self.rabbitmq_environment = rabbitmq_environment @@ -139,7 +155,8 @@ class NeutronServerFixture(fixtures.Fixture): self.plugin_cfg_fixture.filename] self.process_fixture = self.useFixture(ProcessFixture( - name=self.NEUTRON_SERVER, + test_name=self.test_name, + process_name=self.NEUTRON_SERVER, exec_name=self.NEUTRON_SERVER, config_filenames=config_filenames)) @@ -163,7 +180,8 @@ class OVSAgentFixture(fixtures.Fixture): NEUTRON_OVS_AGENT = "neutron-openvswitch-agent" - def __init__(self, neutron_cfg_fixture, ml2_cfg_fixture): + def __init__(self, test_name, neutron_cfg_fixture, ml2_cfg_fixture): + self.test_name = test_name self.neutron_cfg_fixture = neutron_cfg_fixture self.plugin_cfg_fixture = ml2_cfg_fixture @@ -180,7 +198,8 @@ class OVSAgentFixture(fixtures.Fixture): self.plugin_cfg_fixture.filename] self.process_fixture = self.useFixture(ProcessFixture( - 
name=self.NEUTRON_OVS_AGENT, + test_name=self.test_name, + process_name=self.NEUTRON_OVS_AGENT, exec_name=self.NEUTRON_OVS_AGENT, config_filenames=config_filenames)) @@ -195,7 +214,9 @@ class L3AgentFixture(fixtures.Fixture): NEUTRON_L3_AGENT = "neutron-l3-agent" - def __init__(self, temp_dir, neutron_cfg_fixture, integration_bridge_name): + def __init__(self, test_name, temp_dir, + neutron_cfg_fixture, integration_bridge_name): + self.test_name = test_name self.temp_dir = temp_dir self.neutron_cfg_fixture = neutron_cfg_fixture self.neutron_config = self.neutron_cfg_fixture.config @@ -215,7 +236,8 @@ class L3AgentFixture(fixtures.Fixture): self.plugin_cfg_fixture.filename] self.process_fixture = self.useFixture(ProcessFixture( - name=self.NEUTRON_L3_AGENT, + test_name=self.test_name, + process_name=self.NEUTRON_L3_AGENT, exec_name=spawn.find_executable( 'l3_agent.py', path=os.path.join(base.ROOTDIR, 'common', 'agents')), diff --git a/neutron/tests/fullstack/test_l3_agent.py b/neutron/tests/fullstack/test_l3_agent.py index 9b5e2476552..4bf86308bb9 100644 --- a/neutron/tests/fullstack/test_l3_agent.py +++ b/neutron/tests/fullstack/test_l3_agent.py @@ -30,10 +30,11 @@ class SingleNodeEnvironment(f_fixtures.FullstackFixture): self.ovs_agent = self.useFixture( f_fixtures.OVSAgentFixture( - neutron_config, ml2_config)) + self.test_name, neutron_config, ml2_config)) self.l3_agent = self.useFixture( f_fixtures.L3AgentFixture( + self.test_name, self.temp_dir, neutron_config, self.ovs_agent._get_br_int_name())) From 24ec6e214bb32a1bd79e1c98cecf737ed1fc5d10 Mon Sep 17 00:00:00 2001 From: Ihar Hrachyshka Date: Thu, 28 May 2015 14:40:25 +0200 Subject: [PATCH 072/292] py34: don't run any tests except unit tests py34 job was intended for unit tests only. It's important to distinguish between different types of tests, because they all have different requirements to execution environment. E.g. 
functional tests are not expected to run in a restricted env designed for unit tests, and that's even more valid for fullstack tests. Otherwise, the job may fail or apply irrecoverable changes to test runner system, breaking the system. If we ever want to support py3 for other types of tests, we should add separate jobs just for that. Note that the neutron-python3 blueprint was not intended to introduce changes to support anything but unit test execution with the new Python version, so strictly speaking, any effort to make other test types work is out of scope. Change-Id: Ia96f03f05269c1938b51ee26f4b4075b69967bb8 --- tox.ini | 1 - 1 file changed, 1 deletion(-) diff --git a/tox.ini b/tox.ini index 8a1c5d76cf7..1c8761c7051 100644 --- a/tox.ini +++ b/tox.ini @@ -98,7 +98,6 @@ commands = sphinx-build -W -b html doc/source doc/build [testenv:py34] commands = python -m testtools.run \ - neutron.tests.fullstack.test_l3_agent \ neutron.tests.unit.common.test_rpc [flake8] From 818a797693d828d72083ac3b98e54df59574dbe7 Mon Sep 17 00:00:00 2001 From: Rich Curran Date: Thu, 28 May 2015 11:32:21 -0400 Subject: [PATCH 073/292] ML2: Incorrect commented cisco mechanism driver name The ml2_conf.ini example for the cisco mechanism driver is incorrect. Update to remove confusion. 
Change-Id: I0d3aff31a3bc78ef5ee042ff1f37dbb6e1459635 Closes-Bug: 1459723 --- etc/neutron/plugins/ml2/ml2_conf.ini | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/etc/neutron/plugins/ml2/ml2_conf.ini b/etc/neutron/plugins/ml2/ml2_conf.ini index ac9a3d0de3e..9aad25b7b8b 100644 --- a/etc/neutron/plugins/ml2/ml2_conf.ini +++ b/etc/neutron/plugins/ml2/ml2_conf.ini @@ -17,7 +17,7 @@ # mechanism_drivers = # Example: mechanism_drivers = openvswitch,mlnx # Example: mechanism_drivers = arista -# Example: mechanism_drivers = cisco,logger +# Example: mechanism_drivers = openvswitch,cisco_nexus,logger # Example: mechanism_drivers = openvswitch,brocade # Example: mechanism_drivers = linuxbridge,brocade From 36a1d14b58028d0294e67786129a2a09ce393cde Mon Sep 17 00:00:00 2001 From: Cedric Brandily Date: Wed, 27 May 2015 20:30:28 +0200 Subject: [PATCH 074/292] Do not assume order of security group rules This fixes the unit tests[1] that breaks with a randomized PYTHONHASHSEED (see the bug report). The test assumed that the security_group_rules_for_devices method from neutron.agent.securitygroups_rpc returned security group rules in a particular order. Found with PYTHONHASHSEED=2. The fix refactors the test case to handle unsorted security group rules. [1] neutron.tests.unit.plugins.ml2.test_security_group.\ TestMl2SGServerRpcCallBack.\ test_security_group_rules_for_devices_ipv4_ingress_addr_pair Partial-bug: #1348818 Note: There are several other unrelated unit tests that also break with a randomized PYTHONHASHSEED, but they are not addressed here. They will be addressed in separate patches. 
Change-Id: I1b2bd4100c19004f12822c414aefc86aae9849db --- neutron/tests/unit/agent/test_securitygroups_rpc.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/neutron/tests/unit/agent/test_securitygroups_rpc.py b/neutron/tests/unit/agent/test_securitygroups_rpc.py index 5fd97ba1448..7b6c8ee40f3 100644 --- a/neutron/tests/unit/agent/test_securitygroups_rpc.py +++ b/neutron/tests/unit/agent/test_securitygroups_rpc.py @@ -36,6 +36,7 @@ from neutron.extensions import securitygroup as ext_sg from neutron import manager from neutron.plugins.openvswitch.agent import ovs_neutron_agent from neutron.tests import base +from neutron.tests import tools from neutron.tests.unit.extensions import test_securitygroup as test_sg FAKE_PREFIX = {const.IPv4: '10.0.0.0/24', @@ -321,6 +322,7 @@ class SGServerRpcCallBackTestCase(test_sg.SecurityGroupDBTestCase): 'port_range_min': 23, 'source_ip_prefix': fake_prefix}, ] + expected = tools.UnorderedList(expected) self.assertEqual(expected, port_rpc['security_group_rules']) self.assertEqual(port['port']['allowed_address_pairs'], From a0c9745e90596d4a3207e844332e078ca7ecc5fa Mon Sep 17 00:00:00 2001 From: Kevin Benton Date: Thu, 28 May 2015 16:48:04 -0700 Subject: [PATCH 075/292] Use correct time delta function The .seconds attribute of a timedetla object cannot be taken in isolation because it can overflow into days. For example, a -1 second difference will become -1 day and 86399 seconds. This became a problem when the agent clock was slightly ahead of the server clock. When calling (server_time - agent_time).seconds in this scenario, it would go below 0 in the daily seconds and wraparound to 86399 seconds and -1 day. This patch corrects the issue by using a method in timeutils that ends up calling total_seconds(), which was designed for this usecase. 
It also restores the formatting that was removed in patch: Ibfc30444b7a167fb18ae9051a775266236d4ecce Closes-Bug: #1456760 Change-Id: Ie90249ab68bb5f8d117872d52180c7087d8fac9b --- neutron/db/agents_db.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/neutron/db/agents_db.py b/neutron/db/agents_db.py index a9ba6dfe865..52dccf5c411 100644 --- a/neutron/db/agents_db.py +++ b/neutron/db/agents_db.py @@ -294,15 +294,16 @@ class AgentExtRpcCallback(object): """ if agent_state.get('start_flag'): time_server_now = timeutils.utcnow() - diff = abs((time_server_now - agent_time).seconds) + diff = abs(timeutils.delta_seconds(time_server_now, agent_time)) if diff > cfg.CONF.agent_down_time: agent_name = agent_state['agent_type'] + time_agent = timeutils.isotime(agent_time) host = agent_state['host'] log_dict = {'host': host, 'agent_name': agent_name, - 'agent_time': agent_time, + 'agent_time': time_agent, 'threshold': cfg.CONF.agent_down_time, - 'serv_time': time_server_now, + 'serv_time': timeutils.isotime(time_server_now), 'diff': diff} LOG.error(_LE("Message received from the host: %(host)s " "during the registration of %(agent_name)s has " From c9fcbc2e7758acf08867db9a7bc89685cd916bbb Mon Sep 17 00:00:00 2001 From: shihanzhang Date: Tue, 26 May 2015 09:29:58 +0800 Subject: [PATCH 076/292] Fix ovs agent restore local_vlan_map failed when ovs agent restart, it will restore the local_vlan_map, but in some condition, if a device does not be set tag in ovsdb, the function 'db_get_val("Port", port.port_name, "tag")' will return a empty list, it does not need 'provision_local_vlan' for this device. 
Change-Id: I70ed30e6ea5d13e6f14bb41c957320cc21dbca1b Closes-bug: #1458709 --- .../openvswitch/agent/ovs_neutron_agent.py | 2 ++ .../agent/test_ovs_neutron_agent.py | 24 +++++++++++++++++++ 2 files changed, 26 insertions(+) diff --git a/neutron/plugins/openvswitch/agent/ovs_neutron_agent.py b/neutron/plugins/openvswitch/agent/ovs_neutron_agent.py index cb3d44827c1..003cbdf06d3 100644 --- a/neutron/plugins/openvswitch/agent/ovs_neutron_agent.py +++ b/neutron/plugins/openvswitch/agent/ovs_neutron_agent.py @@ -300,6 +300,8 @@ class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin, local_vlan_map = self.int_br.db_get_val("Port", port.port_name, "other_config") local_vlan = self.int_br.db_get_val("Port", port.port_name, "tag") + if not local_vlan: + continue net_uuid = local_vlan_map.get('net_uuid') if (net_uuid and net_uuid not in self.local_vlan_map and local_vlan != DEAD_VLAN_TAG): diff --git a/neutron/tests/unit/plugins/openvswitch/agent/test_ovs_neutron_agent.py b/neutron/tests/unit/plugins/openvswitch/agent/test_ovs_neutron_agent.py index fb068790c35..af4c5496b58 100644 --- a/neutron/tests/unit/plugins/openvswitch/agent/test_ovs_neutron_agent.py +++ b/neutron/tests/unit/plugins/openvswitch/agent/test_ovs_neutron_agent.py @@ -159,6 +159,30 @@ class TestOvsNeutronAgent(object): int_br.set_db_attribute.assert_called_once_with( "Port", mock.ANY, "other_config", vlan_mapping) + def _test_restore_local_vlan_maps(self, tag): + port = mock.Mock() + port.port_name = 'fake_port' + local_vlan_map = {'net_uuid': 'fake_network_id', + 'network_type': 'vlan', + 'physical_network': 'fake_network', + 'segmentation_id': 1} + with mock.patch.object(self.agent, 'int_br') as int_br, \ + mock.patch.object(self.agent, 'provision_local_vlan') as \ + provision_local_vlan: + int_br.get_vif_ports.return_value = [port] + int_br.db_get_val.side_effect = [local_vlan_map, tag] + self.agent._restore_local_vlan_map() + if tag: + self.assertTrue(provision_local_vlan.called) + else: + 
self.assertFalse(provision_local_vlan.called) + + def test_restore_local_vlan_map_with_device_has_tag(self): + self._test_restore_local_vlan_maps(2) + + def test_restore_local_vlan_map_with_device_no_tag(self): + self._test_restore_local_vlan_maps([]) + def test_check_agent_configurations_for_dvr_raises(self): self.agent.enable_distributed_routing = True self.agent.enable_tunneling = True From c23619417f51f2bbd161cc5bfe314847eb20cd19 Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Fri, 29 May 2015 06:16:46 +0000 Subject: [PATCH 077/292] Imported Translations from Transifex For more information about this automatic import see: https://wiki.openstack.org/wiki/Translations/Infrastructure Change-Id: I79fd889e390b436fd92fd07951517c50a617b961 --- .../locale/de/LC_MESSAGES/neutron-log-info.po | 392 +++++----- .../locale/es/LC_MESSAGES/neutron-log-info.po | 392 +++++----- .../locale/fr/LC_MESSAGES/neutron-log-info.po | 620 +++++++-------- .../locale/it/LC_MESSAGES/neutron-log-info.po | 382 +++++----- .../locale/ja/LC_MESSAGES/neutron-log-info.po | 354 ++++----- .../ko_KR/LC_MESSAGES/neutron-log-info.po | 408 +++++----- neutron/locale/neutron-log-error.pot | 119 +-- neutron/locale/neutron-log-info.pot | 87 ++- neutron/locale/neutron.pot | 266 +++---- .../pt_BR/LC_MESSAGES/neutron-log-info.po | 612 +++++++-------- .../zh_CN/LC_MESSAGES/neutron-log-info.po | 708 +++++++++--------- .../zh_TW/LC_MESSAGES/neutron-log-info.po | 336 ++++----- 12 files changed, 2340 insertions(+), 2336 deletions(-) diff --git a/neutron/locale/de/LC_MESSAGES/neutron-log-info.po b/neutron/locale/de/LC_MESSAGES/neutron-log-info.po index 8e69b2c67f9..44d9e4a3eb2 100644 --- a/neutron/locale/de/LC_MESSAGES/neutron-log-info.po +++ b/neutron/locale/de/LC_MESSAGES/neutron-log-info.po @@ -8,8 +8,8 @@ msgid "" msgstr "" "Project-Id-Version: Neutron\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2015-05-04 06:08+0000\n" -"PO-Revision-Date: 2015-04-28 07:35+0000\n" 
+"POT-Creation-Date: 2015-05-29 06:16+0000\n" +"PO-Revision-Date: 2015-05-28 20:54+0000\n" "Last-Translator: openstackjenkins \n" "Language-Team: German (http://www.transifex.com/projects/p/neutron/language/" "de/)\n" @@ -20,82 +20,34 @@ msgstr "" "Generated-By: Babel 1.3\n" "Plural-Forms: nplurals=2; plural=(n != 1);\n" -#, python-format -msgid "Loading Plugin: %s" -msgstr "Laden von Plug-in: %s" - #, python-format msgid "%(method)s %(url)s" msgstr "%(method)s %(url)s" #, python-format -msgid "HTTP exception thrown: %s" -msgstr "Ausgelöste HTTP-Ausnahme: %s" - -#, python-format -msgid "%(url)s returned with HTTP %(status)d" -msgstr "%(url)s mit HTTP %(status)d zurückgegeben" +msgid "%(plugin_key)s: %(function_name)s with args %(args)s ignored" +msgstr "%(plugin_key)s: %(function_name)s mit Argumenten %(args)s ignoriert" #, python-format msgid "%(url)s returned a fault: %(exception)s" msgstr "%(url)s hat einen Fehler zurückgegeben: %(exception)s" -msgid "Disabled security-group extension." -msgstr "Sicherheitsgruppenerweiterung wurde inaktiviert." +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "%(url)s mit HTTP %(status)d zurückgegeben" + +msgid "Agent initialized successfully, now running... " +msgstr "Agent erfolgreich initialisiert, läuft jetzt... " + +msgid "Agent out of sync with plugin!" +msgstr "Agent nicht synchron mit Plug-in!" + +msgid "Agent tunnel out of sync with plugin!" +msgstr "Agententunnel nicht synchron mit Plug-in!" 
#, python-format -msgid "Preparing filters for devices %s" -msgstr "Vorbereiten von Filtern für Geräte %s" - -#, python-format -msgid "Security group rule updated %r" -msgstr "Sicherheitsgruppenregel aktualisiert %r" - -#, python-format -msgid "Security group member updated %r" -msgstr "Sicherheitsgruppenmitglied aktualisiert %r" - -msgid "Provider rule updated" -msgstr "Provider-Regel aktualisiert" - -#, python-format -msgid "Remove device filter for %r" -msgstr "Gerätefilter für %r entfernen" - -msgid "Refresh firewall rules" -msgstr "Firewallregeln aktualisieren" - -msgid "DHCP agent started" -msgstr "DHCP-Agent gestartet" - -msgid "Synchronizing state" -msgstr "Synchronisation von Status" - -#, python-format -msgid "agent_updated by server side %s!" -msgstr "'agent_updated' (Agent aktualisiert) durch Serverseite %s!" - -msgid "L3 agent started" -msgstr "Agent der Ebene 3 gestartet" - -#, python-format -msgid "Device %s already exists" -msgstr "Gerät %s ist bereits vorhanden" - -#, python-format -msgid "Attempted to update port filter which is not filtered %s" -msgstr "Versuch, ungefilterten Portfilter %s zu aktualisieren" - -#, python-format -msgid "Attempted to remove port filter which is not filtered %r" -msgstr "Versuch, ungefilterten Portfilter %r zu entfernen" - -msgid "Initializing extension manager." -msgstr "Erweiterungsmanager wird initialisiert." 
- -#, python-format -msgid "Loaded extension: %s" -msgstr "Geladene Erweiterung: %s" +msgid "Allocated vlan (%d) from the pool" +msgstr "Zugeordnetes VLAN (%d) aus dem Pool" msgid "" "Allow sorting is enabled because native pagination requires native sorting" @@ -103,31 +55,187 @@ msgstr "" "Das Zulassen der Sortierung ist aktiviert, da die native Paginierung die " "native Sortierung erfordert" -msgid "OVS cleanup completed successfully" -msgstr "OVS-Bereinigungsprozedur erfolgreich abgeschlossen" +#, python-format +msgid "Assigning %(vlan_id)s as local vlan for net-id=%(net_uuid)s" +msgstr "" +"Zuweisung von %(vlan_id)s als lokale VLAN-Adresse für net-id=%(net_uuid)s" -msgid "Agent initialized successfully, now running... " -msgstr "Agent erfolgreich initialisiert, läuft jetzt... " +#, python-format +msgid "Attachment %s removed" +msgstr "Zusatzeinheit %s entfernt" -msgid "Logging enabled!" -msgstr "Protokollfunktion aktiviert!" +#, python-format +msgid "Attempted to remove port filter which is not filtered %r" +msgstr "Versuch, ungefilterten Portfilter %r zu entfernen" + +#, python-format +msgid "Attempted to update port filter which is not filtered %s" +msgstr "Versuch, ungefilterten Portfilter %s zu aktualisieren" + +#, python-format +msgid "Caught %s, exiting" +msgstr "%s abgefangen. 
Vorgang wird beendet" + +#, python-format +msgid "Caught %s, stopping children" +msgstr "%s abgefangen, untergeordnete Elemente werden gestoppt" + +#, python-format +msgid "Child %(pid)d killed by signal %(sig)d" +msgstr "Untergeordnetes Element %(pid)d durch Signal %(sig)d abgebrochen" + +#, python-format +msgid "Child %(pid)s exited with status %(code)d" +msgstr "Untergeordnete %(pid)s mit Status %(code)d beendet" + +#, python-format +msgid "Child caught %s, exiting" +msgstr "Untergeordnetes Element %s abgefangen; Vorgang wird beendet" #, python-format msgid "Config paste file: %s" msgstr "Konfigurations-Paste-Datei: %s" +msgid "DHCP agent started" +msgstr "DHCP-Agent gestartet" + #, python-format -msgid "" -"Validation for CIDR: %(new_cidr)s failed - overlaps with subnet " -"%(subnet_id)s (CIDR: %(cidr)s)" -msgstr "" -"Überprüfung für CIDR: %(new_cidr)s fehlgeschlagen - Überschneidung mit " -"Teilnetz %(subnet_id)s (CIDR: %(cidr)s)" +msgid "Device %s already exists" +msgstr "Gerät %s ist bereits vorhanden" + +#, python-format +msgid "Device %s not defined on plugin" +msgstr "Gerät %s nicht für Plug-in definiert" + +msgid "Disabled security-group extension." +msgstr "Sicherheitsgruppenerweiterung wurde inaktiviert." 
+ +#, python-format +msgid "Eventlet backdoor listening on %(port)s for process %(pid)d" +msgstr "Eventlet backdoor hört auf %(port)s für Prozess %(pid)d" + +msgid "Forking too fast, sleeping" +msgstr "Verzweigung zu schnell; im Ruhemodus" #, python-format msgid "Found invalid IP address in pool: %(start)s - %(end)s:" msgstr "Ungültige IP-Adresse in Pool gefunden: %(start)s - %(end)s:" +#, python-format +msgid "Found overlapping ranges: %(l_range)s and %(r_range)s" +msgstr "Überschneidungen bei Bereichen gefunden: %(l_range)s und %(r_range)s" + +#, python-format +msgid "Found pool larger than subnet CIDR:%(start)s - %(end)s" +msgstr "Pool gefunden, der größer ist als Teilnetz-CIDR:%(start)s - %(end)s" + +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "Ausgelöste HTTP-Ausnahme: %s" + +msgid "Initializing extension manager." +msgstr "Erweiterungsmanager wird initialisiert." + +#, python-format +msgid "Interface mappings: %s" +msgstr "Schnittstellenzuordnungen: %s" + +msgid "L3 agent started" +msgstr "Agent der Ebene 3 gestartet" + +msgid "LinuxBridge Agent RPC Daemon Started!" +msgstr "RPC-Dämon für Linux-Brückenagent gestartet!" + +#, python-format +msgid "Loaded extension: %s" +msgstr "Geladene Erweiterung: %s" + +#, python-format +msgid "Loading Plugin: %s" +msgstr "Laden von Plug-in: %s" + +msgid "Logging enabled!" +msgstr "Protokollfunktion aktiviert!" + +#, python-format +msgid "" +"Loop iteration exceeded interval (%(polling_interval)s vs. %(elapsed)s)!" +msgstr "" +"Schleifeniteration hat Intervall (%(polling_interval)s contra %(elapsed)s) " +"überschritten!" 
+ +#, python-format +msgid "Mapping physical network %(physical_network)s to bridge %(bridge)s" +msgstr "" +"Zuordnung von physischem Netz %(physical_network)s zu Brücke %(bridge)s" + +#, python-format +msgid "Network VLAN ranges: %s" +msgstr "Bereiche für Netz-VLAN: %s" + +#, python-format +msgid "No %s Plugin loaded" +msgstr "Kein %s-Plug-in geladen" + +msgid "OVS cleanup completed successfully" +msgstr "OVS-Bereinigungsprozedur erfolgreich abgeschlossen" + +msgid "Parent process has died unexpectedly, exiting" +msgstr "" +"Übergeordneter Prozess wurde unerwartet abgebrochen. Vorgang wird beendet" + +#, python-format +msgid "Port %(device)s updated. Details: %(details)s" +msgstr "Port %(device)s aktualisiert. Details: %(details)s" + +#, python-format +msgid "Port %s updated." +msgstr "Port %s aktualisiert." + +#, python-format +msgid "Preparing filters for devices %s" +msgstr "Vorbereiten von Filtern für Geräte %s" + +msgid "Provider rule updated" +msgstr "Provider-Regel aktualisiert" + +#, python-format +msgid "RPC agent_id: %s" +msgstr "RPC-'agent_id': %s" + +#, python-format +msgid "Reclaiming vlan = %(vlan_id)s from net-id = %(net_uuid)s" +msgstr "Zurückfordern von vlan = %(vlan_id)s von net-id = %(net_uuid)s" + +msgid "Refresh firewall rules" +msgstr "Firewallregeln aktualisieren" + +#, python-format +msgid "Remove device filter for %r" +msgstr "Gerätefilter für %r entfernen" + +#, python-format +msgid "Security group member updated %r" +msgstr "Sicherheitsgruppenmitglied aktualisiert %r" + +#, python-format +msgid "Security group rule updated %r" +msgstr "Sicherheitsgruppenregel aktualisiert %r" + +#, python-format +msgid "Skipping periodic task %(task)s because it is disabled" +msgstr "Überspringe periodische Aufgabe %(task)s weil sie deaktiviert ist" + +#, python-format +msgid "Skipping periodic task %(task)s because its interval is negative" +msgstr "" +"Überspringe periodische Aufgabe %(task)s weil der Intervall negativ ist" + +#, python-format +msgid 
"Skipping port %s as no IP is configure on it" +msgstr "" +"Port %s wird übersprungen, da keine IP-Adresse auf ihm konfiguriert ist" + msgid "Specified IP addresses do not match the subnet IP version" msgstr "" "Angegebene IP-Adressen stimmen nicht mit der Teilnetz-IP-Version überein" @@ -137,47 +245,6 @@ msgid "Start IP (%(start)s) is greater than end IP (%(end)s)" msgstr "" "Anfangs-IP-Adresse (%(start)s) ist größer als Ende-IP-Adresse (%(end)s)" -#, python-format -msgid "Found pool larger than subnet CIDR:%(start)s - %(end)s" -msgstr "Pool gefunden, der größer ist als Teilnetz-CIDR:%(start)s - %(end)s" - -#, python-format -msgid "Found overlapping ranges: %(l_range)s and %(r_range)s" -msgstr "Überschneidungen bei Bereichen gefunden: %(l_range)s und %(r_range)s" - -#, python-format -msgid "Skipping port %s as no IP is configure on it" -msgstr "" -"Port %s wird übersprungen, da keine IP-Adresse auf ihm konfiguriert ist" - -#, python-format -msgid "Eventlet backdoor listening on %(port)s for process %(pid)d" -msgstr "Eventlet backdoor hört auf %(port)s für Prozess %(pid)d" - -#, python-format -msgid "Skipping periodic task %(task)s because its interval is negative" -msgstr "" -"Überspringe periodische Aufgabe %(task)s weil der Intervall negativ ist" - -#, python-format -msgid "Skipping periodic task %(task)s because it is disabled" -msgstr "Überspringe periodische Aufgabe %(task)s weil sie deaktiviert ist" - -#, python-format -msgid "Caught %s, exiting" -msgstr "%s abgefangen. Vorgang wird beendet" - -msgid "Parent process has died unexpectedly, exiting" -msgstr "" -"Übergeordneter Prozess wurde unerwartet abgebrochen. 
Vorgang wird beendet" - -#, python-format -msgid "Child caught %s, exiting" -msgstr "Untergeordnetes Element %s abgefangen; Vorgang wird beendet" - -msgid "Forking too fast, sleeping" -msgstr "Verzweigung zu schnell; im Ruhemodus" - #, python-format msgid "Started child %d" msgstr "Untergeordnetes Element %d gestartet" @@ -186,88 +253,21 @@ msgstr "Untergeordnetes Element %d gestartet" msgid "Starting %d workers" msgstr "Starten von %d Workers" -#, python-format -msgid "Child %(pid)d killed by signal %(sig)d" -msgstr "Untergeordnetes Element %(pid)d durch Signal %(sig)d abgebrochen" +msgid "Synchronizing state" +msgstr "Synchronisation von Status" #, python-format -msgid "Child %(pid)s exited with status %(code)d" -msgstr "Untergeordnete %(pid)s mit Status %(code)d beendet" - -#, python-format -msgid "Caught %s, stopping children" -msgstr "%s abgefangen, untergeordnete Elemente werden gestoppt" +msgid "" +"Validation for CIDR: %(new_cidr)s failed - overlaps with subnet " +"%(subnet_id)s (CIDR: %(cidr)s)" +msgstr "" +"Überprüfung für CIDR: %(new_cidr)s fehlgeschlagen - Überschneidung mit " +"Teilnetz %(subnet_id)s (CIDR: %(cidr)s)" #, python-format msgid "Waiting on %d children to exit" msgstr "Warten auf Beenden von %d untergeordneten Elementen" #, python-format -msgid "Allocated vlan (%d) from the pool" -msgstr "Zugeordnetes VLAN (%d) aus dem Pool" - -#, python-format -msgid "No %s Plugin loaded" -msgstr "Kein %s-Plug-in geladen" - -#, python-format -msgid "%(plugin_key)s: %(function_name)s with args %(args)s ignored" -msgstr "%(plugin_key)s: %(function_name)s mit Argumenten %(args)s ignoriert" - -#, python-format -msgid "" -"Loop iteration exceeded interval (%(polling_interval)s vs. %(elapsed)s)!" -msgstr "" -"Schleifeniteration hat Intervall (%(polling_interval)s contra %(elapsed)s) " -"überschritten!" - -#, python-format -msgid "RPC agent_id: %s" -msgstr "RPC-'agent_id': %s" - -#, python-format -msgid "Port %(device)s updated. 
Details: %(details)s" -msgstr "Port %(device)s aktualisiert. Details: %(details)s" - -#, python-format -msgid "Device %s not defined on plugin" -msgstr "Gerät %s nicht für Plug-in definiert" - -#, python-format -msgid "Attachment %s removed" -msgstr "Zusatzeinheit %s entfernt" - -#, python-format -msgid "Port %s updated." -msgstr "Port %s aktualisiert." - -msgid "LinuxBridge Agent RPC Daemon Started!" -msgstr "RPC-Dämon für Linux-Brückenagent gestartet!" - -msgid "Agent out of sync with plugin!" -msgstr "Agent nicht synchron mit Plug-in!" - -#, python-format -msgid "Interface mappings: %s" -msgstr "Schnittstellenzuordnungen: %s" - -#, python-format -msgid "Network VLAN ranges: %s" -msgstr "Bereiche für Netz-VLAN: %s" - -#, python-format -msgid "Assigning %(vlan_id)s as local vlan for net-id=%(net_uuid)s" -msgstr "" -"Zuweisung von %(vlan_id)s als lokale VLAN-Adresse für net-id=%(net_uuid)s" - -#, python-format -msgid "Reclaiming vlan = %(vlan_id)s from net-id = %(net_uuid)s" -msgstr "Zurückfordern von vlan = %(vlan_id)s von net-id = %(net_uuid)s" - -#, python-format -msgid "Mapping physical network %(physical_network)s to bridge %(bridge)s" -msgstr "" -"Zuordnung von physischem Netz %(physical_network)s zu Brücke %(bridge)s" - -msgid "Agent tunnel out of sync with plugin!" -msgstr "Agententunnel nicht synchron mit Plug-in!" +msgid "agent_updated by server side %s!" +msgstr "'agent_updated' (Agent aktualisiert) durch Serverseite %s!" 
diff --git a/neutron/locale/es/LC_MESSAGES/neutron-log-info.po b/neutron/locale/es/LC_MESSAGES/neutron-log-info.po index c9492ae86f2..862493ba88c 100644 --- a/neutron/locale/es/LC_MESSAGES/neutron-log-info.po +++ b/neutron/locale/es/LC_MESSAGES/neutron-log-info.po @@ -7,8 +7,8 @@ msgid "" msgstr "" "Project-Id-Version: Neutron\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2015-05-04 06:08+0000\n" -"PO-Revision-Date: 2015-04-28 07:35+0000\n" +"POT-Creation-Date: 2015-05-29 06:16+0000\n" +"PO-Revision-Date: 2015-05-28 20:54+0000\n" "Last-Translator: openstackjenkins \n" "Language-Team: Spanish (http://www.transifex.com/projects/p/neutron/language/" "es/)\n" @@ -19,82 +19,36 @@ msgstr "" "Generated-By: Babel 1.3\n" "Plural-Forms: nplurals=2; plural=(n != 1);\n" -#, python-format -msgid "Loading Plugin: %s" -msgstr "Cargando complementos: %s" - #, python-format msgid "%(method)s %(url)s" msgstr "%(method)s %(url)s" #, python-format -msgid "HTTP exception thrown: %s" -msgstr "Excepción de HTTP emitida: %s" - -#, python-format -msgid "%(url)s returned with HTTP %(status)d" -msgstr "Se ha devuelto %(url)s con HTTP %(status)d" +msgid "%(plugin_key)s: %(function_name)s with args %(args)s ignored" +msgstr "" +"Se ha ignorado %(plugin_key)s: %(function_name)s con los argumentos %(args)s " #, python-format msgid "%(url)s returned a fault: %(exception)s" msgstr "%(url)s ha devuelto un error: %(exception)s" -msgid "Disabled security-group extension." -msgstr "La extensión security-group se ha inhabilitado." +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "Se ha devuelto %(url)s con HTTP %(status)d" + +msgid "Agent initialized successfully, now running... " +msgstr "" +"El agente se ha inicializado satisfactoriamente, ahora se está ejecutando... " + +msgid "Agent out of sync with plugin!" +msgstr "El agente está fuera de sincronización con el plug-in." + +msgid "Agent tunnel out of sync with plugin!" 
+msgstr "Túnel de agente fuera de sincronización con el plug-in. " #, python-format -msgid "Preparing filters for devices %s" -msgstr "Preparando filtros para dispositivos %s" - -#, python-format -msgid "Security group rule updated %r" -msgstr "Se ha actualizado la regla de grupo de seguridad %r" - -#, python-format -msgid "Security group member updated %r" -msgstr "Se ha actualizado el miembro de grupo de seguridad %r" - -msgid "Provider rule updated" -msgstr "Se ha actualizado regla de proveedor" - -#, python-format -msgid "Remove device filter for %r" -msgstr "Eliminar filtro de dispositivo para %r" - -msgid "Refresh firewall rules" -msgstr "Renovar reglas de cortafuegos" - -msgid "DHCP agent started" -msgstr "Se ha iniciado al agente DHCP" - -msgid "Synchronizing state" -msgstr "Sincronizando estado" - -#, python-format -msgid "agent_updated by server side %s!" -msgstr "agent_updated por el lado del servidor %s!" - -msgid "L3 agent started" -msgstr "Se ha iniciado al agente L3" - -#, python-format -msgid "Device %s already exists" -msgstr "El dispositivo %s ya existe" - -#, python-format -msgid "Attempted to update port filter which is not filtered %s" -msgstr "Se ha intentado actualizar el filtro de puerto que no está filtrado %s" - -#, python-format -msgid "Attempted to remove port filter which is not filtered %r" -msgstr "Se ha intentado eliminar el filtro de puerto que no está filtrado %r" - -msgid "Initializing extension manager." -msgstr "Inicializando gestor de ampliación." 
- -#, python-format -msgid "Loaded extension: %s" -msgstr "Ampliación cargada: %s" +msgid "Allocated vlan (%d) from the pool" +msgstr "Vlan asignada (%d) de la agrupación" msgid "" "Allow sorting is enabled because native pagination requires native sorting" @@ -102,27 +56,66 @@ msgstr "" "Permitir ordenación está habilitado porque la paginación nativa requiere " "ordenación nativa" -msgid "OVS cleanup completed successfully" -msgstr "La limpieza de OVS se ha completado satisfactoriamente" +#, python-format +msgid "Assigning %(vlan_id)s as local vlan for net-id=%(net_uuid)s" +msgstr "Asignando %(vlan_id)s como vlan local para net-id=%(net_uuid)s" -msgid "Agent initialized successfully, now running... " -msgstr "" -"El agente se ha inicializado satisfactoriamente, ahora se está ejecutando... " +#, python-format +msgid "Attachment %s removed" +msgstr "Se ha eliminado el adjunto %s" -msgid "Logging enabled!" -msgstr "Registro habilitado." +#, python-format +msgid "Attempted to remove port filter which is not filtered %r" +msgstr "Se ha intentado eliminar el filtro de puerto que no está filtrado %r" + +#, python-format +msgid "Attempted to update port filter which is not filtered %s" +msgstr "Se ha intentado actualizar el filtro de puerto que no está filtrado %s" + +#, python-format +msgid "Caught %s, exiting" +msgstr "Se ha captado %s, saliendo" + +#, python-format +msgid "Caught %s, stopping children" +msgstr "Se ha captado %s, deteniendo hijos" + +#, python-format +msgid "Child %(pid)d killed by signal %(sig)d" +msgstr "Hijo %(pid)d matado por señal %(sig)d" + +#, python-format +msgid "Child %(pid)s exited with status %(code)d" +msgstr "El hijo %(pid)s ha salido con el estado %(code)d" + +#, python-format +msgid "Child caught %s, exiting" +msgstr "Hijo captado %s, saliendo" #, python-format msgid "Config paste file: %s" msgstr "Archivo de configuración de pegar: %s" +msgid "DHCP agent started" +msgstr "Se ha iniciado al agente DHCP" + #, python-format -msgid "" 
-"Validation for CIDR: %(new_cidr)s failed - overlaps with subnet " -"%(subnet_id)s (CIDR: %(cidr)s)" -msgstr "" -"Se ha encontrado un error en validación para CIDR: %(new_cidr)s; se solapa " -"con la subred %(subnet_id)s (CIDR: %(cidr)s)" +msgid "Device %s already exists" +msgstr "El dispositivo %s ya existe" + +#, python-format +msgid "Device %s not defined on plugin" +msgstr "El dispositivo %s no está definido en el plug-in" + +msgid "Disabled security-group extension." +msgstr "La extensión security-group se ha inhabilitado." + +#, python-format +msgid "Eventlet backdoor listening on %(port)s for process %(pid)d" +msgstr "Eventlet oculto escuchando en %(port)s para el proceso %(pid)d" + +msgid "Forking too fast, sleeping" +msgstr "Bifurcación demasiado rápida, en reposo" #, python-format msgid "Found invalid IP address in pool: %(start)s - %(end)s:" @@ -130,6 +123,120 @@ msgstr "" "Se ha encontrado una dirección IP no válida en la agrupación: %(start)s - " "%(end)s:" +#, python-format +msgid "Found overlapping ranges: %(l_range)s and %(r_range)s" +msgstr "Rangos de solapamiento encontrados: %(l_range)s y %(r_range)s" + +#, python-format +msgid "Found pool larger than subnet CIDR:%(start)s - %(end)s" +msgstr "" +"Se ha encontrado una agrupación mayor que el CIDR de subred: %(start)s - " +"%(end)s" + +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "Excepción de HTTP emitida: %s" + +msgid "Initializing extension manager." +msgstr "Inicializando gestor de ampliación." + +#, python-format +msgid "Interface mappings: %s" +msgstr "Correlaciones de interfaz: %s" + +msgid "L3 agent started" +msgstr "Se ha iniciado al agente L3" + +msgid "LinuxBridge Agent RPC Daemon Started!" +msgstr "Se ha iniciado el daemon RPC de agente de LinuxBridge." + +#, python-format +msgid "Loaded extension: %s" +msgstr "Ampliación cargada: %s" + +#, python-format +msgid "Loading Plugin: %s" +msgstr "Cargando complementos: %s" + +msgid "Logging enabled!" 
+msgstr "Registro habilitado." + +#, python-format +msgid "" +"Loop iteration exceeded interval (%(polling_interval)s vs. %(elapsed)s)!" +msgstr "" +"La iteración de bucle ha superado el intervalo (%(polling_interval)s frente " +"a %(elapsed)s)." + +#, python-format +msgid "Mapping physical network %(physical_network)s to bridge %(bridge)s" +msgstr "" +"Correlacionando la red física %(physical_network)s con el puente %(bridge)s" + +#, python-format +msgid "Network VLAN ranges: %s" +msgstr "Rangos de VLAN de red: %s" + +#, python-format +msgid "No %s Plugin loaded" +msgstr "No se ha cargado ningún plug-in de %s" + +msgid "OVS cleanup completed successfully" +msgstr "La limpieza de OVS se ha completado satisfactoriamente" + +msgid "Parent process has died unexpectedly, exiting" +msgstr "El proceso padre se ha detenido inesperadamente, saliendo" + +#, python-format +msgid "Port %(device)s updated. Details: %(details)s" +msgstr "Se ha actualizado el puerto %(device)s. Detalles: %(details)s" + +#, python-format +msgid "Port %s updated." +msgstr "El puerto %s se ha actualizado." 
+ +#, python-format +msgid "Preparing filters for devices %s" +msgstr "Preparando filtros para dispositivos %s" + +msgid "Provider rule updated" +msgstr "Se ha actualizado regla de proveedor" + +#, python-format +msgid "RPC agent_id: %s" +msgstr "agent_id de RPC: %s" + +#, python-format +msgid "Reclaiming vlan = %(vlan_id)s from net-id = %(net_uuid)s" +msgstr "Reclamando vlan = %(vlan_id)s de net-id = %(net_uuid)s" + +msgid "Refresh firewall rules" +msgstr "Renovar reglas de cortafuegos" + +#, python-format +msgid "Remove device filter for %r" +msgstr "Eliminar filtro de dispositivo para %r" + +#, python-format +msgid "Security group member updated %r" +msgstr "Se ha actualizado el miembro de grupo de seguridad %r" + +#, python-format +msgid "Security group rule updated %r" +msgstr "Se ha actualizado la regla de grupo de seguridad %r" + +#, python-format +msgid "Skipping periodic task %(task)s because it is disabled" +msgstr "Omitiendo la tarea periódica %(task)s porque está inhabilitada" + +#, python-format +msgid "Skipping periodic task %(task)s because its interval is negative" +msgstr "Omitiendo la tarea periódica %(task)s porque el intervalo es negativo" + +#, python-format +msgid "Skipping port %s as no IP is configure on it" +msgstr "Saltando el puerto %s, ya que no hay ninguna IP configurada en él" + msgid "Specified IP addresses do not match the subnet IP version" msgstr "" "Las direcciones IP especificadas no coinciden con la versión de IP de subred " @@ -139,46 +246,6 @@ msgid "Start IP (%(start)s) is greater than end IP (%(end)s)" msgstr "" "La IP de inicio (%(start)s) es mayor que la IP de finalización (%(end)s)" -#, python-format -msgid "Found pool larger than subnet CIDR:%(start)s - %(end)s" -msgstr "" -"Se ha encontrado una agrupación mayor que el CIDR de subred: %(start)s - " -"%(end)s" - -#, python-format -msgid "Found overlapping ranges: %(l_range)s and %(r_range)s" -msgstr "Rangos de solapamiento encontrados: %(l_range)s y %(r_range)s" - -#, 
python-format -msgid "Skipping port %s as no IP is configure on it" -msgstr "Saltando el puerto %s, ya que no hay ninguna IP configurada en él" - -#, python-format -msgid "Eventlet backdoor listening on %(port)s for process %(pid)d" -msgstr "Eventlet oculto escuchando en %(port)s para el proceso %(pid)d" - -#, python-format -msgid "Skipping periodic task %(task)s because its interval is negative" -msgstr "Omitiendo la tarea periódica %(task)s porque el intervalo es negativo" - -#, python-format -msgid "Skipping periodic task %(task)s because it is disabled" -msgstr "Omitiendo la tarea periódica %(task)s porque está inhabilitada" - -#, python-format -msgid "Caught %s, exiting" -msgstr "Se ha captado %s, saliendo" - -msgid "Parent process has died unexpectedly, exiting" -msgstr "El proceso padre se ha detenido inesperadamente, saliendo" - -#, python-format -msgid "Child caught %s, exiting" -msgstr "Hijo captado %s, saliendo" - -msgid "Forking too fast, sleeping" -msgstr "Bifurcación demasiado rápida, en reposo" - #, python-format msgid "Started child %d" msgstr "Se ha iniciado el hijo %d" @@ -187,88 +254,21 @@ msgstr "Se ha iniciado el hijo %d" msgid "Starting %d workers" msgstr "Iniciando %d trabajadores" -#, python-format -msgid "Child %(pid)d killed by signal %(sig)d" -msgstr "Hijo %(pid)d matado por señal %(sig)d" +msgid "Synchronizing state" +msgstr "Sincronizando estado" #, python-format -msgid "Child %(pid)s exited with status %(code)d" -msgstr "El hijo %(pid)s ha salido con el estado %(code)d" - -#, python-format -msgid "Caught %s, stopping children" -msgstr "Se ha captado %s, deteniendo hijos" +msgid "" +"Validation for CIDR: %(new_cidr)s failed - overlaps with subnet " +"%(subnet_id)s (CIDR: %(cidr)s)" +msgstr "" +"Se ha encontrado un error en validación para CIDR: %(new_cidr)s; se solapa " +"con la subred %(subnet_id)s (CIDR: %(cidr)s)" #, python-format msgid "Waiting on %d children to exit" msgstr "En espera de %d hijos para salir" #, python-format -msgid 
"Allocated vlan (%d) from the pool" -msgstr "Vlan asignada (%d) de la agrupación" - -#, python-format -msgid "No %s Plugin loaded" -msgstr "No se ha cargado ningún plug-in de %s" - -#, python-format -msgid "%(plugin_key)s: %(function_name)s with args %(args)s ignored" -msgstr "" -"Se ha ignorado %(plugin_key)s: %(function_name)s con los argumentos %(args)s " - -#, python-format -msgid "" -"Loop iteration exceeded interval (%(polling_interval)s vs. %(elapsed)s)!" -msgstr "" -"La iteración de bucle ha superado el intervalo (%(polling_interval)s frente " -"a %(elapsed)s)." - -#, python-format -msgid "RPC agent_id: %s" -msgstr "agent_id de RPC: %s" - -#, python-format -msgid "Port %(device)s updated. Details: %(details)s" -msgstr "Se ha actualizado el puerto %(device)s. Detalles: %(details)s" - -#, python-format -msgid "Device %s not defined on plugin" -msgstr "El dispositivo %s no está definido en el plug-in" - -#, python-format -msgid "Attachment %s removed" -msgstr "Se ha eliminado el adjunto %s" - -#, python-format -msgid "Port %s updated." -msgstr "El puerto %s se ha actualizado." - -msgid "LinuxBridge Agent RPC Daemon Started!" -msgstr "Se ha iniciado el daemon RPC de agente de LinuxBridge." - -msgid "Agent out of sync with plugin!" -msgstr "El agente está fuera de sincronización con el plug-in." 
- -#, python-format -msgid "Interface mappings: %s" -msgstr "Correlaciones de interfaz: %s" - -#, python-format -msgid "Network VLAN ranges: %s" -msgstr "Rangos de VLAN de red: %s" - -#, python-format -msgid "Assigning %(vlan_id)s as local vlan for net-id=%(net_uuid)s" -msgstr "Asignando %(vlan_id)s como vlan local para net-id=%(net_uuid)s" - -#, python-format -msgid "Reclaiming vlan = %(vlan_id)s from net-id = %(net_uuid)s" -msgstr "Reclamando vlan = %(vlan_id)s de net-id = %(net_uuid)s" - -#, python-format -msgid "Mapping physical network %(physical_network)s to bridge %(bridge)s" -msgstr "" -"Correlacionando la red física %(physical_network)s con el puente %(bridge)s" - -msgid "Agent tunnel out of sync with plugin!" -msgstr "Túnel de agente fuera de sincronización con el plug-in. " +msgid "agent_updated by server side %s!" +msgstr "agent_updated por el lado del servidor %s!" diff --git a/neutron/locale/fr/LC_MESSAGES/neutron-log-info.po b/neutron/locale/fr/LC_MESSAGES/neutron-log-info.po index 93304dd9c74..91f486b41aa 100644 --- a/neutron/locale/fr/LC_MESSAGES/neutron-log-info.po +++ b/neutron/locale/fr/LC_MESSAGES/neutron-log-info.po @@ -9,8 +9,8 @@ msgid "" msgstr "" "Project-Id-Version: Neutron\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2015-05-10 06:14+0000\n" -"PO-Revision-Date: 2015-04-28 07:35+0000\n" +"POT-Creation-Date: 2015-05-29 06:16+0000\n" +"PO-Revision-Date: 2015-05-28 20:54+0000\n" "Last-Translator: openstackjenkins \n" "Language-Team: French (http://www.transifex.com/projects/p/neutron/language/" "fr/)\n" @@ -22,110 +22,54 @@ msgstr "" "Plural-Forms: nplurals=2; plural=(n > 1);\n" #, python-format -msgid "Loading core plugin: %s" -msgstr "Chargement du plugin core: %s" - -#, python-format -msgid "Service %s is supported by the core plugin" -msgstr "Le service %s est supporté par le core plugin" - -#, python-format -msgid "Loading Plugin: %s" -msgstr "Chargement du plug-in : %s" - -#, python-format -msgid "Loaded 
quota_driver: %s." -msgstr "Chargement quota_driver: %s." - -#, python-format -msgid "Neutron service started, listening on %(host)s:%(port)s" -msgstr "Service Neutron démarré, en écoute sur %(host)s:%(port)s" +msgid "%(action)s failed (client error): %(exc)s" +msgstr "Échec %(action)s (Erreur client): %(exc)s" #, python-format msgid "%(method)s %(url)s" msgstr "%(method)s %(url)s" #, python-format -msgid "HTTP exception thrown: %s" -msgstr "Exception HTTP générée : %s" - -#, python-format -msgid "%(url)s returned with HTTP %(status)d" -msgstr "%(url)s retourné avec HTTP %(status)d" +msgid "%(plugin_key)s: %(function_name)s with args %(args)s ignored" +msgstr "%(plugin_key)s : %(function_name)s avec les arguments %(args)s ignoré" #, python-format msgid "%(url)s returned a fault: %(exception)s" msgstr "%(url)s a retourné une erreur : %(exception)s." -msgid "Disabled security-group extension." -msgstr "Extension du groupe de sécurité désactivée." +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "%(url)s retourné avec HTTP %(status)d" + +msgid "APIC service agent started" +msgstr "service de l'agent APIC démarré" + +msgid "APIC service agent starting ..." +msgstr "Démarrage du service de l'agent APIC" #, python-format -msgid "Preparing filters for devices %s" -msgstr "Préparation des filtres pour les unités %s" +msgid "Adding %s to list of bridges." +msgstr "Ajout %s à la liste de ponts." #, python-format -msgid "Security group rule updated %r" -msgstr "Règle de groupe de sécurité mise à jour %r" +msgid "Agent %s already present" +msgstr "Agent %s déjà présent" + +msgid "Agent initialised successfully, now running... " +msgstr "Agent initialisé avec succès, en cours d'exécution..." + +msgid "Agent initialized successfully, now running... " +msgstr "Agent initialisé avec succès, en cours d'exécution... " + +msgid "Agent out of sync with plugin!" +msgstr "Agent non synchronisé avec le plug-in !" + +msgid "Agent tunnel out of sync with plugin!" 
+msgstr "Tunnel d'agent désynchronisé avec le plug-in !" #, python-format -msgid "Security group member updated %r" -msgstr "Membre de groupe de sécurité mis à jour %r" - -msgid "Provider rule updated" -msgstr "Règle de fournisseur mise à jour" - -#, python-format -msgid "Remove device filter for %r" -msgstr "Suppression du filtre d'unités pour %r" - -msgid "Refresh firewall rules" -msgstr "Régénération des règles de pare-feu" - -#, python-format -msgid "Port %(port_id)s not present in bridge %(br_name)s" -msgstr "Port %(port_id)s n'est pas présent dans le pont %(br_name)s" - -msgid "DHCP agent started" -msgstr "Agent DHCP démarré" - -msgid "Synchronizing state" -msgstr "Etat de synchronisation" - -msgid "Synchronizing state complete" -msgstr "Etat de synchronisation complet" - -#, python-format -msgid "agent_updated by server side %s!" -msgstr "agent_updated au niveau du serveur %s !" - -msgid "L3 agent started" -msgstr "Agent de niveau 3 démarré" - -#, python-format -msgid "Process runs with uid/gid: %(uid)s/%(gid)s" -msgstr "Le processus est exécuté avec uid/gid: %(uid)s/%(gid)s" - -#, python-format -msgid "Device %s already exists" -msgstr "L'unité %s existe déjà" - -#, python-format -msgid "Attempted to update port filter which is not filtered %s" -msgstr "" -"Tentative effectuée de mise à jour du filtre de ports (sans filtrage %s)" - -#, python-format -msgid "Attempted to remove port filter which is not filtered %r" -msgstr "" -"Tentative effectuée de suppression du filtre de ports (sans filtrage %r)" - -msgid "Initializing extension manager." -msgstr "Initialisation du gestionnaire d'extension." 
- -#, python-format -msgid "Loaded extension: %s" -msgstr "Extension chargée : %s" +msgid "Allocated vlan (%d) from the pool" +msgstr "Réseau VLAN alloué (%d) depuis le pool" msgid "" "Allow sorting is enabled because native pagination requires native sorting" @@ -133,37 +77,283 @@ msgstr "" "Autorisation de tri activée car la mise en page native nécessite le tri natif" #, python-format -msgid "%(action)s failed (client error): %(exc)s" -msgstr "Échec %(action)s (Erreur client): %(exc)s" +msgid "Ancillary Port %s added" +msgstr "Port auxiliaire %s ajouté" -msgid "OVS cleanup completed successfully" -msgstr "Le nettoyage d'OVS s'est terminé avec succès." +#, python-format +msgid "Assigning %(vlan_id)s as local vlan for net-id=%(net_uuid)s" +msgstr "" +"Affectation de %(vlan_id)s comme réseau local virtuel pour net-id = " +"%(net_uuid)s" -msgid "Agent initialized successfully, now running... " -msgstr "Agent initialisé avec succès, en cours d'exécution... " +#, python-format +msgid "Attachment %s removed" +msgstr "Connexion %s retirée" -msgid "Logging enabled!" -msgstr "Consignation activée !" +#, python-format +msgid "Attempt %(count)s to bind port %(port)s" +msgstr "Tentative %(count)s de liaison port %(port)s" + +#, python-format +msgid "Attempted to remove port filter which is not filtered %r" +msgstr "" +"Tentative effectuée de suppression du filtre de ports (sans filtrage %r)" + +#, python-format +msgid "Attempted to update port filter which is not filtered %s" +msgstr "" +"Tentative effectuée de mise à jour du filtre de ports (sans filtrage %s)" + +#, python-format +msgid "" +"Binding info for port %s was not found, it might have been deleted already." +msgstr "" +"L'information de liaison pour le port %s n'a pas été trouvée, elle peut déjà " +"avoir été effacée." 
+ +#, python-format +msgid "Caught %s, exiting" +msgstr "%s interceptée, sortie" + +#, python-format +msgid "Caught %s, stopping children" +msgstr "%s interceptée, arrêt de l'enfant" + +#, python-format +msgid "Child %(pid)d killed by signal %(sig)d" +msgstr "Enfant %(pid)d arrêté par le signal %(sig)d" + +#, python-format +msgid "Child %(pid)s exited with status %(code)d" +msgstr "Processus fils %(pid)s terminé avec le status %(code)d" + +#, python-format +msgid "Child caught %s, exiting" +msgstr "L'enfant a reçu %s, sortie" #, python-format msgid "Config paste file: %s" msgstr "Config du fichier de collage : %s" -msgid "IPv6 is not enabled on this system." -msgstr "IPv6 n'est pas activé sur le système." +#, python-format +msgid "Configuration for device %s completed." +msgstr "Configuration complète de l'équipement %s" + +msgid "DHCP agent started" +msgstr "Agent DHCP démarré" #, python-format -msgid "" -"Validation for CIDR: %(new_cidr)s failed - overlaps with subnet " -"%(subnet_id)s (CIDR: %(cidr)s)" +msgid "Default provider is not specified for service type %s" msgstr "" -"La validation du routage CIDR %(new_cidr)s a échoué : il chevauche le sous-" -"réseau %(subnet_id)s (CIDR : %(cidr)s) " +"Le fournisseur par défaut n'est pas spécifié pour le type de service %s" + +#, python-format +msgid "Device %s already exists" +msgstr "L'unité %s existe déjà" + +#, python-format +msgid "Device %s not defined on plugin" +msgstr "Unité %s non définie sur le plug-in" + +#, python-format +msgid "Device with MAC %s not defined on plugin" +msgstr "Appareil avec adresse MAC %s non-défini dans le plugin" + +msgid "Disabled security-group extension." +msgstr "Extension du groupe de sécurité désactivée." 
+ +#, python-format +msgid "Eventlet backdoor listening on %(port)s for process %(pid)d" +msgstr "Eventlet backdoor en écoute sur le port %(port)s for process %(pid)d" + +#, python-format +msgid "Exclude Devices: %s" +msgstr "Equipements exclus: %s" + +msgid "Forking too fast, sleeping" +msgstr "Bifurcation trop rapide, pause" #, python-format msgid "Found invalid IP address in pool: %(start)s - %(end)s:" msgstr "Adresse IP non valide trouvée dans le pool : %(start)s - %(end)s :" +#, python-format +msgid "Found overlapping ranges: %(l_range)s and %(r_range)s" +msgstr "Chevauchement d'intervalles trouvés : %(l_range)s et %(r_range)s" + +#, python-format +msgid "Found pool larger than subnet CIDR:%(start)s - %(end)s" +msgstr "" +"Un pool plus volumineux que le routage CIDR de sous-réseau %(start)s - " +"%(end)s a été trouvé." + +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "Exception HTTP générée : %s" + +msgid "IPv6 is not enabled on this system." +msgstr "IPv6 n'est pas activé sur le système." + +msgid "Initializing extension manager." +msgstr "Initialisation du gestionnaire d'extension." + +#, python-format +msgid "Interface mappings: %s" +msgstr "Mappages d'interface : %s" + +msgid "L3 agent started" +msgstr "Agent de niveau 3 démarré" + +msgid "LinuxBridge Agent RPC Daemon Started!" +msgstr "Serveur démon RPC de l'agent LinuxBridge démarré !" + +#, python-format +msgid "Loaded extension: %s" +msgstr "Extension chargée : %s" + +#, python-format +msgid "Loaded quota_driver: %s." +msgstr "Chargement quota_driver: %s." + +#, python-format +msgid "Loading Metering driver %s" +msgstr "Chargement du pilote de Mesures %s" + +#, python-format +msgid "Loading Plugin: %s" +msgstr "Chargement du plug-in : %s" + +#, python-format +msgid "Loading core plugin: %s" +msgstr "Chargement du plugin core: %s" + +#, python-format +msgid "Loading interface driver %s" +msgstr "Chargement de pilote d'interface %s" + +msgid "Logging enabled!" 
+msgstr "Consignation activée !" + +#, python-format +msgid "" +"Loop iteration exceeded interval (%(polling_interval)s vs. %(elapsed)s)!" +msgstr "" +"L'itération de boucle a dépassé l'intervalle (%(polling_interval)s contre " +"%(elapsed)s) !" + +#, python-format +msgid "Mapping physical network %(physical_network)s to bridge %(bridge)s" +msgstr "Mappage du réseau physique %(physical_network)s sur le pont %(bridge)s" + +#, python-format +msgid "Network VLAN ranges: %s" +msgstr "Plages de réseau local virtuel de réseau : %s" + +#, python-format +msgid "Network name changed to %s" +msgstr "Nom du réseau changé en %s" + +#, python-format +msgid "Neutron service started, listening on %(host)s:%(port)s" +msgstr "Service Neutron démarré, en écoute sur %(host)s:%(port)s" + +#, python-format +msgid "No %s Plugin loaded" +msgstr "Aucun plug-in %s chargé" + +#, python-format +msgid "No device with MAC %s defined on agent." +msgstr "Aucun équipement avec MAC %s défini sur l'agent." + +msgid "OVS cleanup completed successfully" +msgstr "Le nettoyage d'OVS s'est terminé avec succès." + +msgid "Parent process has died unexpectedly, exiting" +msgstr "Processus parent arrêté de manière inattendue, sortie" + +#, python-format +msgid "Physical Devices mappings: %s" +msgstr "Mappages d'Équipements Physiques: %s" + +#, python-format +msgid "Port %(device)s updated. Details: %(details)s" +msgstr "Port %(device)s mis à jour. Détails : %(details)s" + +#, python-format +msgid "Port %(port_id)s not present in bridge %(br_name)s" +msgstr "Port %(port_id)s n'est pas présent dans le pont %(br_name)s" + +#, python-format +msgid "Port %s updated." +msgstr "Port %s mis à jour." 
+ +#, python-format +msgid "Port %s was deleted concurrently" +msgstr "Le port %s a été effacé en même temps" + +#, python-format +msgid "Port name changed to %s" +msgstr "Nom de port changé en %s" + +#, python-format +msgid "Preparing filters for devices %s" +msgstr "Préparation des filtres pour les unités %s" + +#, python-format +msgid "Process runs with uid/gid: %(uid)s/%(gid)s" +msgstr "Le processus est exécuté avec uid/gid: %(uid)s/%(gid)s" + +msgid "Provider rule updated" +msgstr "Règle de fournisseur mise à jour" + +#, python-format +msgid "RPC agent_id: %s" +msgstr "agent_id RPC : %s" + +msgid "RPC was already started in parent process by plugin." +msgstr "Le plugin avait déjà lancé les RPC dans le processus parent." + +#, python-format +msgid "Reclaiming vlan = %(vlan_id)s from net-id = %(net_uuid)s" +msgstr "" +"Récupération du réseau local virtuel = %(vlan_id)s à partir de net-id = " +"%(net_uuid)s" + +msgid "Refresh firewall rules" +msgstr "Régénération des règles de pare-feu" + +#, python-format +msgid "Remove device filter for %r" +msgstr "Suppression du filtre d'unités pour %r" + +#, python-format +msgid "Removing device with mac_address %s" +msgstr "Retrait de l'appareil ayant pour mac_address %s" + +#, python-format +msgid "Security group member updated %r" +msgstr "Membre de groupe de sécurité mis à jour %r" + +#, python-format +msgid "Security group rule updated %r" +msgstr "Règle de groupe de sécurité mise à jour %r" + +#, python-format +msgid "Service %s is supported by the core plugin" +msgstr "Le service %s est supporté par le core plugin" + +#, python-format +msgid "Skipping periodic task %(task)s because it is disabled" +msgstr "Tâche périodique %(task)s car elle est désactivée" + +#, python-format +msgid "Skipping periodic task %(task)s because its interval is negative" +msgstr "Tâche périodique %(task)s ignorée car son intervalle est négatif" + +#, python-format +msgid "Skipping port %s as no IP is configure on it" +msgstr "Ignorer le 
port %s car aucune adresse IP n'est configurée" + msgid "Specified IP addresses do not match the subnet IP version" msgstr "" "Les adresses IP spécifiées ne correspondent à la version IP du sous-réseau" @@ -174,46 +364,6 @@ msgstr "" "L'adresse IP de début (%(start)s) est supérieure à l'adresse IP de fin " "(%(end)s)." -#, python-format -msgid "Found pool larger than subnet CIDR:%(start)s - %(end)s" -msgstr "" -"Un pool plus volumineux que le routage CIDR de sous-réseau %(start)s - " -"%(end)s a été trouvé." - -#, python-format -msgid "Found overlapping ranges: %(l_range)s and %(r_range)s" -msgstr "Chevauchement d'intervalles trouvés : %(l_range)s et %(r_range)s" - -#, python-format -msgid "Skipping port %s as no IP is configure on it" -msgstr "Ignorer le port %s car aucune adresse IP n'est configurée" - -#, python-format -msgid "Eventlet backdoor listening on %(port)s for process %(pid)d" -msgstr "Eventlet backdoor en écoute sur le port %(port)s for process %(pid)d" - -#, python-format -msgid "Skipping periodic task %(task)s because its interval is negative" -msgstr "Tâche périodique %(task)s ignorée car son intervalle est négatif" - -#, python-format -msgid "Skipping periodic task %(task)s because it is disabled" -msgstr "Tâche périodique %(task)s car elle est désactivée" - -#, python-format -msgid "Caught %s, exiting" -msgstr "%s interceptée, sortie" - -msgid "Parent process has died unexpectedly, exiting" -msgstr "Processus parent arrêté de manière inattendue, sortie" - -#, python-format -msgid "Child caught %s, exiting" -msgstr "L'enfant a reçu %s, sortie" - -msgid "Forking too fast, sleeping" -msgstr "Bifurcation trop rapide, pause" - #, python-format msgid "Started child %d" msgstr "Enfant démarré %d" @@ -223,16 +373,22 @@ msgid "Starting %d workers" msgstr "Démarrage des travailleurs %d" #, python-format -msgid "Child %(pid)d killed by signal %(sig)d" -msgstr "Enfant %(pid)d arrêté par le signal %(sig)d" +msgid "Subnet %s was deleted concurrently" +msgstr 
"Le sous-réseau %s a été effacé en même temps" + +msgid "Synchronizing state" +msgstr "Etat de synchronisation" + +msgid "Synchronizing state complete" +msgstr "Etat de synchronisation complet" #, python-format -msgid "Child %(pid)s exited with status %(code)d" -msgstr "Processus fils %(pid)s terminé avec le status %(code)d" - -#, python-format -msgid "Caught %s, stopping children" -msgstr "%s interceptée, arrêt de l'enfant" +msgid "" +"Validation for CIDR: %(new_cidr)s failed - overlaps with subnet " +"%(subnet_id)s (CIDR: %(cidr)s)" +msgstr "" +"La validation du routage CIDR %(new_cidr)s a échoué : il chevauche le sous-" +"réseau %(subnet_id)s (CIDR : %(cidr)s) " msgid "Wait called after thread killed. Cleaning up." msgstr "Pause demandée après suppression de thread. Nettoyage." @@ -242,161 +398,5 @@ msgid "Waiting on %d children to exit" msgstr "En attente %d enfants pour sortie" #, python-format -msgid "Allocated vlan (%d) from the pool" -msgstr "Réseau VLAN alloué (%d) depuis le pool" - -#, python-format -msgid "No %s Plugin loaded" -msgstr "Aucun plug-in %s chargé" - -#, python-format -msgid "%(plugin_key)s: %(function_name)s with args %(args)s ignored" -msgstr "%(plugin_key)s : %(function_name)s avec les arguments %(args)s ignoré" - -#, python-format -msgid "" -"Loop iteration exceeded interval (%(polling_interval)s vs. %(elapsed)s)!" -msgstr "" -"L'itération de boucle a dépassé l'intervalle (%(polling_interval)s contre " -"%(elapsed)s) !" - -#, python-format -msgid "RPC agent_id: %s" -msgstr "agent_id RPC : %s" - -#, python-format -msgid "Port %(device)s updated. Details: %(details)s" -msgstr "Port %(device)s mis à jour. Détails : %(details)s" - -#, python-format -msgid "Device %s not defined on plugin" -msgstr "Unité %s non définie sur le plug-in" - -#, python-format -msgid "Attachment %s removed" -msgstr "Connexion %s retirée" - -#, python-format -msgid "Port %s updated." -msgstr "Port %s mis à jour." - -msgid "LinuxBridge Agent RPC Daemon Started!" 
-msgstr "Serveur démon RPC de l'agent LinuxBridge démarré !" - -msgid "Agent out of sync with plugin!" -msgstr "Agent non synchronisé avec le plug-in !" - -#, python-format -msgid "Interface mappings: %s" -msgstr "Mappages d'interface : %s" - -#, python-format -msgid "Attempt %(count)s to bind port %(port)s" -msgstr "Tentative %(count)s de liaison port %(port)s" - -#, python-format -msgid "Port %s was deleted concurrently" -msgstr "Le port %s a été effacé en même temps" - -#, python-format -msgid "Subnet %s was deleted concurrently" -msgstr "Le sous-réseau %s a été effacé en même temps" - -#, python-format -msgid "" -"Binding info for port %s was not found, it might have been deleted already." -msgstr "" -"L'information de liaison pour le port %s n'a pas été trouvée, elle peut déjà " -"avoir été effacée." - -#, python-format -msgid "Network VLAN ranges: %s" -msgstr "Plages de réseau local virtuel de réseau : %s" - -#, python-format -msgid "Network name changed to %s" -msgstr "Nom du réseau changé en %s" - -#, python-format -msgid "Port name changed to %s" -msgstr "Nom de port changé en %s" - -msgid "APIC service agent starting ..." -msgstr "Démarrage du service de l'agent APIC" - -msgid "APIC service agent started" -msgstr "service de l'agent APIC démarré" - -msgid "Agent initialised successfully, now running... " -msgstr "Agent initialisé avec succès, en cours d'exécution..." - -#, python-format -msgid "Assigning %(vlan_id)s as local vlan for net-id=%(net_uuid)s" -msgstr "" -"Affectation de %(vlan_id)s comme réseau local virtuel pour net-id = " -"%(net_uuid)s" - -#, python-format -msgid "Reclaiming vlan = %(vlan_id)s from net-id = %(net_uuid)s" -msgstr "" -"Récupération du réseau local virtuel = %(vlan_id)s à partir de net-id = " -"%(net_uuid)s" - -#, python-format -msgid "Configuration for device %s completed." -msgstr "Configuration complète de l'équipement %s" - -#, python-format -msgid "Adding %s to list of bridges." -msgstr "Ajout %s à la liste de ponts." 
- -#, python-format -msgid "Mapping physical network %(physical_network)s to bridge %(bridge)s" -msgstr "Mappage du réseau physique %(physical_network)s sur le pont %(bridge)s" - -#, python-format -msgid "Ancillary Port %s added" -msgstr "Port auxiliaire %s ajouté" - -msgid "Agent tunnel out of sync with plugin!" -msgstr "Tunnel d'agent désynchronisé avec le plug-in !" - -#, python-format -msgid "No device with MAC %s defined on agent." -msgstr "Aucun équipement avec MAC %s défini sur l'agent." - -#, python-format -msgid "Device with MAC %s not defined on plugin" -msgstr "Appareil avec adresse MAC %s non-défini dans le plugin" - -#, python-format -msgid "Removing device with mac_address %s" -msgstr "Retrait de l'appareil ayant pour mac_address %s" - -#, python-format -msgid "Physical Devices mappings: %s" -msgstr "Mappages d'Équipements Physiques: %s" - -#, python-format -msgid "Exclude Devices: %s" -msgstr "Equipements exclus: %s" - -#, python-format -msgid "Agent %s already present" -msgstr "Agent %s déjà présent" - -msgid "RPC was already started in parent process by plugin." -msgstr "Le plugin avait déjà lancé les RPC dans le processus parent." - -#, python-format -msgid "Default provider is not specified for service type %s" -msgstr "" -"Le fournisseur par défaut n'est pas spécifié pour le type de service %s" - -#, python-format -msgid "Loading Metering driver %s" -msgstr "Chargement du pilote de Mesures %s" - -#, python-format -msgid "Loading interface driver %s" -msgstr "Chargement de pilote d'interface %s" +msgid "agent_updated by server side %s!" +msgstr "agent_updated au niveau du serveur %s !" 
diff --git a/neutron/locale/it/LC_MESSAGES/neutron-log-info.po b/neutron/locale/it/LC_MESSAGES/neutron-log-info.po index b50185d1097..643b7fb4716 100644 --- a/neutron/locale/it/LC_MESSAGES/neutron-log-info.po +++ b/neutron/locale/it/LC_MESSAGES/neutron-log-info.po @@ -8,8 +8,8 @@ msgid "" msgstr "" "Project-Id-Version: Neutron\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2015-05-04 06:08+0000\n" -"PO-Revision-Date: 2015-04-28 07:35+0000\n" +"POT-Creation-Date: 2015-05-29 06:16+0000\n" +"PO-Revision-Date: 2015-05-28 20:54+0000\n" "Last-Translator: openstackjenkins \n" "Language-Team: Italian (http://www.transifex.com/projects/p/neutron/language/" "it/)\n" @@ -20,82 +20,34 @@ msgstr "" "Generated-By: Babel 1.3\n" "Plural-Forms: nplurals=2; plural=(n != 1);\n" -#, python-format -msgid "Loading Plugin: %s" -msgstr "Caricamento plugin: %s" - #, python-format msgid "%(method)s %(url)s" msgstr "%(method)s %(url)s" #, python-format -msgid "HTTP exception thrown: %s" -msgstr "Generata eccezione HTTP: %s" - -#, python-format -msgid "%(url)s returned with HTTP %(status)d" -msgstr "%(url)s restituito con HTTP %(status)d" +msgid "%(plugin_key)s: %(function_name)s with args %(args)s ignored" +msgstr "%(plugin_key)s: %(function_name)s con argomenti %(args)s ignorato" #, python-format msgid "%(url)s returned a fault: %(exception)s" msgstr "%(url)s ha restituito un errore: %(exception)s" -msgid "Disabled security-group extension." -msgstr "Estensione di security-group disabilitata." +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "%(url)s restituito con HTTP %(status)d" + +msgid "Agent initialized successfully, now running... " +msgstr "Agent inizializzato correttamente, ora in esecuzione... " + +msgid "Agent out of sync with plugin!" +msgstr "Agent non sincronizzato con il plugin!" + +msgid "Agent tunnel out of sync with plugin!" +msgstr "Il tunnel agent non è sincronizzato con il plugin!" 
#, python-format -msgid "Preparing filters for devices %s" -msgstr "Preparazione filtri per i dispositivi %s" - -#, python-format -msgid "Security group rule updated %r" -msgstr "Regola gruppo di sicurezza aggiornata %r" - -#, python-format -msgid "Security group member updated %r" -msgstr "Membro gruppo di sicurezza aggiornato %r" - -msgid "Provider rule updated" -msgstr "Provider regola aggiornato" - -#, python-format -msgid "Remove device filter for %r" -msgstr "Rimuovi filtro dispositivo per %r" - -msgid "Refresh firewall rules" -msgstr "Aggiorna regole firewall" - -msgid "DHCP agent started" -msgstr "Agent DHCP avviato" - -msgid "Synchronizing state" -msgstr "Stato sincronizzazione" - -#, python-format -msgid "agent_updated by server side %s!" -msgstr "agent_updated dal lato server %s!" - -msgid "L3 agent started" -msgstr "Agent L3 avviato" - -#, python-format -msgid "Device %s already exists" -msgstr "L'unità %s già esiste" - -#, python-format -msgid "Attempted to update port filter which is not filtered %s" -msgstr "Tentativo di aggiornare il filtro della porta che non è filtrata %s" - -#, python-format -msgid "Attempted to remove port filter which is not filtered %r" -msgstr "Tentativo di rimuovere il filtro della porta che non è filtrata %r" - -msgid "Initializing extension manager." -msgstr "Inizializzazione gestore estensioni." 
- -#, python-format -msgid "Loaded extension: %s" -msgstr "Estensione caricata: %s" +msgid "Allocated vlan (%d) from the pool" +msgstr "vlan (%d) allocata dal pool" msgid "" "Allow sorting is enabled because native pagination requires native sorting" @@ -103,31 +55,182 @@ msgstr "" "Consenti ordinamento è abilitato in quanto la paginaziona nativa richiede " "l'ordinamento nativo" -msgid "OVS cleanup completed successfully" -msgstr "Ripulitura di OVS completata correttamente" +#, python-format +msgid "Assigning %(vlan_id)s as local vlan for net-id=%(net_uuid)s" +msgstr "Assegnazione %(vlan_id)s come vlan locale per net-id=%(net_uuid)s" -msgid "Agent initialized successfully, now running... " -msgstr "Agent inizializzato correttamente, ora in esecuzione... " +#, python-format +msgid "Attachment %s removed" +msgstr "Collegamento %s rimosso" -msgid "Logging enabled!" -msgstr "Accesso abilitato!" +#, python-format +msgid "Attempted to remove port filter which is not filtered %r" +msgstr "Tentativo di rimuovere il filtro della porta che non è filtrata %r" + +#, python-format +msgid "Attempted to update port filter which is not filtered %s" +msgstr "Tentativo di aggiornare il filtro della porta che non è filtrata %s" + +#, python-format +msgid "Caught %s, exiting" +msgstr "Rilevato %s, esistente" + +#, python-format +msgid "Caught %s, stopping children" +msgstr "Intercettato %s, arresto in corso dei children" + +#, python-format +msgid "Child %(pid)d killed by signal %(sig)d" +msgstr "Child %(pid)d interrotto dal segnale %(sig)d" + +#, python-format +msgid "Child %(pid)s exited with status %(code)d" +msgstr "Child %(pid)s terminato con stato %(code)d" + +#, python-format +msgid "Child caught %s, exiting" +msgstr "Cogliere Child %s, uscendo" #, python-format msgid "Config paste file: %s" msgstr "Configurazione file paste: %s" +msgid "DHCP agent started" +msgstr "Agent DHCP avviato" + #, python-format -msgid "" -"Validation for CIDR: %(new_cidr)s failed - overlaps with 
subnet " -"%(subnet_id)s (CIDR: %(cidr)s)" -msgstr "" -"Convalida per CIDR: %(new_cidr)s non riuscita - si sovrappone con la " -"sottorete %(subnet_id)s (CIDR: %(cidr)s)" +msgid "Device %s already exists" +msgstr "L'unità %s già esiste" + +#, python-format +msgid "Device %s not defined on plugin" +msgstr "Unità %s non definita nel plugin" + +msgid "Disabled security-group extension." +msgstr "Estensione di security-group disabilitata." + +#, python-format +msgid "Eventlet backdoor listening on %(port)s for process %(pid)d" +msgstr "Ascolto di eventlet backdoor su %(port)s per il processo %(pid)d" + +msgid "Forking too fast, sleeping" +msgstr "Sblocco troppo veloce, attendere" #, python-format msgid "Found invalid IP address in pool: %(start)s - %(end)s:" msgstr "Trovato un indirizzo IP invalido nel pool: %(start)s - %(end)s:" +#, python-format +msgid "Found overlapping ranges: %(l_range)s and %(r_range)s" +msgstr "Trovati gli intervalli di sovrapposizione: %(l_range)s e %(r_range)s" + +#, python-format +msgid "Found pool larger than subnet CIDR:%(start)s - %(end)s" +msgstr "Trovato un pool più grande della sottorete CIDR:%(start)s - %(end)s" + +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "Generata eccezione HTTP: %s" + +msgid "Initializing extension manager." +msgstr "Inizializzazione gestore estensioni." + +#, python-format +msgid "Interface mappings: %s" +msgstr "Associazioni interfaccia: %s" + +msgid "L3 agent started" +msgstr "Agent L3 avviato" + +msgid "LinuxBridge Agent RPC Daemon Started!" +msgstr "LinuxBridge Agent RPC Daemon avviato!" + +#, python-format +msgid "Loaded extension: %s" +msgstr "Estensione caricata: %s" + +#, python-format +msgid "Loading Plugin: %s" +msgstr "Caricamento plugin: %s" + +msgid "Logging enabled!" +msgstr "Accesso abilitato!" + +#, python-format +msgid "" +"Loop iteration exceeded interval (%(polling_interval)s vs. %(elapsed)s)!" +msgstr "" +"L'iterazione loop supera l'intervallo (%(polling_interval)s vs. 
%(elapsed)s)!" + +#, python-format +msgid "Mapping physical network %(physical_network)s to bridge %(bridge)s" +msgstr "Associazione rete fisica %(physical_network)s al bridge %(bridge)s" + +#, python-format +msgid "Network VLAN ranges: %s" +msgstr "Intervalli di rete VLAN: %s" + +#, python-format +msgid "No %s Plugin loaded" +msgstr "Nessun plugin %s caricato" + +msgid "OVS cleanup completed successfully" +msgstr "Ripulitura di OVS completata correttamente" + +msgid "Parent process has died unexpectedly, exiting" +msgstr "Il processo principale è stato interrotto inaspettatamente, uscire" + +#, python-format +msgid "Port %(device)s updated. Details: %(details)s" +msgstr "Porta %(device)s aggiornata. Dettagli: %(details)s" + +#, python-format +msgid "Port %s updated." +msgstr "Porta %s aggiornata." + +#, python-format +msgid "Preparing filters for devices %s" +msgstr "Preparazione filtri per i dispositivi %s" + +msgid "Provider rule updated" +msgstr "Provider regola aggiornato" + +#, python-format +msgid "RPC agent_id: %s" +msgstr "agent-id RPC: %s" + +#, python-format +msgid "Reclaiming vlan = %(vlan_id)s from net-id = %(net_uuid)s" +msgstr "Recupero vlan = %(vlan_id)s da net-id = %(net_uuid)s" + +msgid "Refresh firewall rules" +msgstr "Aggiorna regole firewall" + +#, python-format +msgid "Remove device filter for %r" +msgstr "Rimuovi filtro dispositivo per %r" + +#, python-format +msgid "Security group member updated %r" +msgstr "Membro gruppo di sicurezza aggiornato %r" + +#, python-format +msgid "Security group rule updated %r" +msgstr "Regola gruppo di sicurezza aggiornata %r" + +#, python-format +msgid "Skipping periodic task %(task)s because it is disabled" +msgstr "Abbadono dell'attività periodica %(task)s perché è disabilitata" + +#, python-format +msgid "Skipping periodic task %(task)s because its interval is negative" +msgstr "" +"Abbadono dell'attività periodica %(task)s perché l'intervalo è negativo" + +#, python-format +msgid "Skipping port %s as no 
IP is configure on it" +msgstr "La porta %s viene ignorata in quanto non ha nessun IP configurato" + msgid "Specified IP addresses do not match the subnet IP version" msgstr "" "Gli indirizzi IP specificati non corrispondono alla versione IP della " @@ -137,45 +240,6 @@ msgstr "" msgid "Start IP (%(start)s) is greater than end IP (%(end)s)" msgstr "L'IP iniziale (%(start)s) è superiore all'IP finale (%(end)s)" -#, python-format -msgid "Found pool larger than subnet CIDR:%(start)s - %(end)s" -msgstr "Trovato un pool più grande della sottorete CIDR:%(start)s - %(end)s" - -#, python-format -msgid "Found overlapping ranges: %(l_range)s and %(r_range)s" -msgstr "Trovati gli intervalli di sovrapposizione: %(l_range)s e %(r_range)s" - -#, python-format -msgid "Skipping port %s as no IP is configure on it" -msgstr "La porta %s viene ignorata in quanto non ha nessun IP configurato" - -#, python-format -msgid "Eventlet backdoor listening on %(port)s for process %(pid)d" -msgstr "Ascolto di eventlet backdoor su %(port)s per il processo %(pid)d" - -#, python-format -msgid "Skipping periodic task %(task)s because its interval is negative" -msgstr "" -"Abbadono dell'attività periodica %(task)s perché l'intervalo è negativo" - -#, python-format -msgid "Skipping periodic task %(task)s because it is disabled" -msgstr "Abbadono dell'attività periodica %(task)s perché è disabilitata" - -#, python-format -msgid "Caught %s, exiting" -msgstr "Rilevato %s, esistente" - -msgid "Parent process has died unexpectedly, exiting" -msgstr "Il processo principale è stato interrotto inaspettatamente, uscire" - -#, python-format -msgid "Child caught %s, exiting" -msgstr "Cogliere Child %s, uscendo" - -msgid "Forking too fast, sleeping" -msgstr "Sblocco troppo veloce, attendere" - #, python-format msgid "Started child %d" msgstr "Child avviato %d" @@ -184,85 +248,21 @@ msgstr "Child avviato %d" msgid "Starting %d workers" msgstr "Avvio %d operatori" -#, python-format -msgid "Child %(pid)d killed by 
signal %(sig)d" -msgstr "Child %(pid)d interrotto dal segnale %(sig)d" +msgid "Synchronizing state" +msgstr "Stato sincronizzazione" #, python-format -msgid "Child %(pid)s exited with status %(code)d" -msgstr "Child %(pid)s terminato con stato %(code)d" - -#, python-format -msgid "Caught %s, stopping children" -msgstr "Intercettato %s, arresto in corso dei children" +msgid "" +"Validation for CIDR: %(new_cidr)s failed - overlaps with subnet " +"%(subnet_id)s (CIDR: %(cidr)s)" +msgstr "" +"Convalida per CIDR: %(new_cidr)s non riuscita - si sovrappone con la " +"sottorete %(subnet_id)s (CIDR: %(cidr)s)" #, python-format msgid "Waiting on %d children to exit" msgstr "In attesa %d degli elementi secondari per uscire" #, python-format -msgid "Allocated vlan (%d) from the pool" -msgstr "vlan (%d) allocata dal pool" - -#, python-format -msgid "No %s Plugin loaded" -msgstr "Nessun plugin %s caricato" - -#, python-format -msgid "%(plugin_key)s: %(function_name)s with args %(args)s ignored" -msgstr "%(plugin_key)s: %(function_name)s con argomenti %(args)s ignorato" - -#, python-format -msgid "" -"Loop iteration exceeded interval (%(polling_interval)s vs. %(elapsed)s)!" -msgstr "" -"L'iterazione loop supera l'intervallo (%(polling_interval)s vs. %(elapsed)s)!" - -#, python-format -msgid "RPC agent_id: %s" -msgstr "agent-id RPC: %s" - -#, python-format -msgid "Port %(device)s updated. Details: %(details)s" -msgstr "Porta %(device)s aggiornata. Dettagli: %(details)s" - -#, python-format -msgid "Device %s not defined on plugin" -msgstr "Unità %s non definita nel plugin" - -#, python-format -msgid "Attachment %s removed" -msgstr "Collegamento %s rimosso" - -#, python-format -msgid "Port %s updated." -msgstr "Porta %s aggiornata." - -msgid "LinuxBridge Agent RPC Daemon Started!" -msgstr "LinuxBridge Agent RPC Daemon avviato!" - -msgid "Agent out of sync with plugin!" -msgstr "Agent non sincronizzato con il plugin!" 
- -#, python-format -msgid "Interface mappings: %s" -msgstr "Associazioni interfaccia: %s" - -#, python-format -msgid "Network VLAN ranges: %s" -msgstr "Intervalli di rete VLAN: %s" - -#, python-format -msgid "Assigning %(vlan_id)s as local vlan for net-id=%(net_uuid)s" -msgstr "Assegnazione %(vlan_id)s come vlan locale per net-id=%(net_uuid)s" - -#, python-format -msgid "Reclaiming vlan = %(vlan_id)s from net-id = %(net_uuid)s" -msgstr "Recupero vlan = %(vlan_id)s da net-id = %(net_uuid)s" - -#, python-format -msgid "Mapping physical network %(physical_network)s to bridge %(bridge)s" -msgstr "Associazione rete fisica %(physical_network)s al bridge %(bridge)s" - -msgid "Agent tunnel out of sync with plugin!" -msgstr "Il tunnel agent non è sincronizzato con il plugin!" +msgid "agent_updated by server side %s!" +msgstr "agent_updated dal lato server %s!" diff --git a/neutron/locale/ja/LC_MESSAGES/neutron-log-info.po b/neutron/locale/ja/LC_MESSAGES/neutron-log-info.po index f27b5bb3bfa..c3c631e35d8 100644 --- a/neutron/locale/ja/LC_MESSAGES/neutron-log-info.po +++ b/neutron/locale/ja/LC_MESSAGES/neutron-log-info.po @@ -8,8 +8,8 @@ msgid "" msgstr "" "Project-Id-Version: Neutron\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2015-05-04 06:08+0000\n" -"PO-Revision-Date: 2015-04-28 07:35+0000\n" +"POT-Creation-Date: 2015-05-29 06:16+0000\n" +"PO-Revision-Date: 2015-05-28 20:54+0000\n" "Last-Translator: openstackjenkins \n" "Language-Team: Japanese (http://www.transifex.com/projects/p/neutron/" "language/ja/)\n" @@ -20,82 +20,35 @@ msgstr "" "Generated-By: Babel 1.3\n" "Plural-Forms: nplurals=1; plural=0;\n" -#, python-format -msgid "Loading Plugin: %s" -msgstr "プラグインの読み込み中: %s" - #, python-format msgid "%(method)s %(url)s" msgstr "%(method)s %(url)s" #, python-format -msgid "HTTP exception thrown: %s" -msgstr "HTTP 例外がスローされました: %s" - -#, python-format -msgid "%(url)s returned with HTTP %(status)d" -msgstr "HTTP %(status)d の %(url)s が返されました" +msgid 
"%(plugin_key)s: %(function_name)s with args %(args)s ignored" +msgstr "" +"%(plugin_key)s: 引数 %(args)s が指定された %(function_name)s は無視されます" #, python-format msgid "%(url)s returned a fault: %(exception)s" msgstr "%(url)s が障害を返しました: %(exception)s" -msgid "Disabled security-group extension." -msgstr "security-group 拡張を無効にしました。" +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "HTTP %(status)d の %(url)s が返されました" + +msgid "Agent initialized successfully, now running... " +msgstr "エージェントが正常に初期化されました。現在実行中です... " + +msgid "Agent out of sync with plugin!" +msgstr "エージェントがプラグインと非同期です。" + +msgid "Agent tunnel out of sync with plugin!" +msgstr "エージェント・トンネルがプラグインと非同期です" #, python-format -msgid "Preparing filters for devices %s" -msgstr "デバイス %s のフィルターを準備中" - -#, python-format -msgid "Security group rule updated %r" -msgstr "セキュリティー・グループ・ルールが %r を更新しました" - -#, python-format -msgid "Security group member updated %r" -msgstr "セキュリティー・グループ・メンバーが %r を更新しました" - -msgid "Provider rule updated" -msgstr "プロバイダー・ルールが更新されました" - -#, python-format -msgid "Remove device filter for %r" -msgstr "%r のデバイス・フィルターを削除" - -msgid "Refresh firewall rules" -msgstr "ファイアウォール・ルールの最新表示" - -msgid "DHCP agent started" -msgstr "DHCP エージェントが始動しました" - -msgid "Synchronizing state" -msgstr "状態の同期中" - -#, python-format -msgid "agent_updated by server side %s!" -msgstr "サーバー・サイド %s による agent_updated!" - -msgid "L3 agent started" -msgstr "L3 エージェントが始動しました" - -#, python-format -msgid "Device %s already exists" -msgstr "デバイス %s は既に存在します" - -#, python-format -msgid "Attempted to update port filter which is not filtered %s" -msgstr "フィルター処理されていないポート・フィルター %s を更新しようとしました" - -#, python-format -msgid "Attempted to remove port filter which is not filtered %r" -msgstr "フィルター処理されていないポート・フィルター %r を削除しようとしました" - -msgid "Initializing extension manager." 
-msgstr "拡張マネージャーを初期化しています。" - -#, python-format -msgid "Loaded extension: %s" -msgstr "拡張をロードしました: %s" +msgid "Allocated vlan (%d) from the pool" +msgstr "プールからの割り振り済み VLAN (%d)" msgid "" "Allow sorting is enabled because native pagination requires native sorting" @@ -103,83 +56,30 @@ msgstr "" "ネイティブ・ページ編集にはネイティブ・ソートが必要なため、ソートの許可が有効" "になっています" -msgid "OVS cleanup completed successfully" -msgstr "OVS のクリーンアップが正常に完了しました" - -msgid "Agent initialized successfully, now running... " -msgstr "エージェントが正常に初期化されました。現在実行中です... " - -msgid "Logging enabled!" -msgstr "ロギングは有効です" - #, python-format -msgid "Config paste file: %s" -msgstr "構成貼り付けファイル: %s" - -msgid "IPv6 is not enabled on this system." -msgstr " このシステムでは、 IPv6が有効ではありません。" - -#, python-format -msgid "" -"Validation for CIDR: %(new_cidr)s failed - overlaps with subnet " -"%(subnet_id)s (CIDR: %(cidr)s)" +msgid "Assigning %(vlan_id)s as local vlan for net-id=%(net_uuid)s" msgstr "" -"CIDR %(new_cidr)s の検証が失敗しました。サブネット %(subnet_id)s (CIDR: " -"%(cidr)s) とオーバーラップしています" +"%(vlan_id)s を net-id=%(net_uuid)s のローカル VLAN として割り当てています" #, python-format -msgid "Found invalid IP address in pool: %(start)s - %(end)s:" -msgstr "プールで無効な IP アドレスが見つかりました: %(start)s から %(end)s:" - -msgid "Specified IP addresses do not match the subnet IP version" -msgstr "指定された IP アドレスが、サブネット IP バージョンと一致しません" +msgid "Attachment %s removed" +msgstr "接続機構 %s が削除されました" #, python-format -msgid "Start IP (%(start)s) is greater than end IP (%(end)s)" -msgstr "開始 IP (%(start)s) が終了 IP (%(end)s) より大きくなっています" +msgid "Attempted to remove port filter which is not filtered %r" +msgstr "フィルター処理されていないポート・フィルター %r を削除しようとしました" #, python-format -msgid "Found pool larger than subnet CIDR:%(start)s - %(end)s" -msgstr "" -"サブネット CIDR より大きいプールが見つかりました: %(start)s から %(end)s" - -#, python-format -msgid "Found overlapping ranges: %(l_range)s and %(r_range)s" -msgstr "オーバーラップする範囲が見つかりました: %(l_range)s から %(r_range)s" - -#, python-format -msgid "Skipping port %s as no IP is 
configure on it" -msgstr "ポート %s には IP が構成されていないため、このポートをスキップします" - -#, python-format -msgid "Eventlet backdoor listening on %(port)s for process %(pid)d" -msgstr "Eventlet backdoorは、プロセス%(pid)dの%(port)sをリスニングしています。" - -#, python-format -msgid "Skipping periodic task %(task)s because its interval is negative" -msgstr "タスクの間隔が負であるため、定期タスク %(task)s をスキップしています" - -#, python-format -msgid "Skipping periodic task %(task)s because it is disabled" -msgstr "タスクが使用不可であるため、定期タスク %(task)s をスキップしています" +msgid "Attempted to update port filter which is not filtered %s" +msgstr "フィルター処理されていないポート・フィルター %s を更新しようとしました" #, python-format msgid "Caught %s, exiting" msgstr "%s が見つかりました。終了しています" -msgid "Parent process has died unexpectedly, exiting" -msgstr "親プロセスが予期せずに停止しました。終了しています" - -msgid "Forking too fast, sleeping" -msgstr "fork が早すぎます。スリープ状態にしています" - #, python-format -msgid "Started child %d" -msgstr "子 %d を開始しました" - -#, python-format -msgid "Starting %d workers" -msgstr "%d ワーカーを開始しています" +msgid "Caught %s, stopping children" +msgstr "%s が見つかりました。子を停止しています" #, python-format msgid "Child %(pid)d killed by signal %(sig)d" @@ -190,25 +90,73 @@ msgid "Child %(pid)s exited with status %(code)d" msgstr "子 %(pid)s が状況 %(code)d で終了しました" #, python-format -msgid "Caught %s, stopping children" -msgstr "%s が見つかりました。子を停止しています" +msgid "Config paste file: %s" +msgstr "構成貼り付けファイル: %s" + +msgid "DHCP agent started" +msgstr "DHCP エージェントが始動しました" #, python-format -msgid "Waiting on %d children to exit" -msgstr "%d 個の子で終了を待機しています" +msgid "Device %s already exists" +msgstr "デバイス %s は既に存在します" #, python-format -msgid "Allocated vlan (%d) from the pool" -msgstr "プールからの割り振り済み VLAN (%d)" +msgid "Device %s not defined on plugin" +msgstr "デバイス %s がプラグインで定義されていません" + +msgid "Disabled security-group extension." 
+msgstr "security-group 拡張を無効にしました。" #, python-format -msgid "No %s Plugin loaded" -msgstr "%s プラグインはロードされませんでした" +msgid "Eventlet backdoor listening on %(port)s for process %(pid)d" +msgstr "Eventlet backdoorは、プロセス%(pid)dの%(port)sをリスニングしています。" + +msgid "Forking too fast, sleeping" +msgstr "fork が早すぎます。スリープ状態にしています" #, python-format -msgid "%(plugin_key)s: %(function_name)s with args %(args)s ignored" +msgid "Found invalid IP address in pool: %(start)s - %(end)s:" +msgstr "プールで無効な IP アドレスが見つかりました: %(start)s から %(end)s:" + +#, python-format +msgid "Found overlapping ranges: %(l_range)s and %(r_range)s" +msgstr "オーバーラップする範囲が見つかりました: %(l_range)s から %(r_range)s" + +#, python-format +msgid "Found pool larger than subnet CIDR:%(start)s - %(end)s" msgstr "" -"%(plugin_key)s: 引数 %(args)s が指定された %(function_name)s は無視されます" +"サブネット CIDR より大きいプールが見つかりました: %(start)s から %(end)s" + +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "HTTP 例外がスローされました: %s" + +msgid "IPv6 is not enabled on this system." +msgstr " このシステムでは、 IPv6が有効ではありません。" + +msgid "Initializing extension manager." +msgstr "拡張マネージャーを初期化しています。" + +#, python-format +msgid "Interface mappings: %s" +msgstr "インターフェース・マッピング: %s" + +msgid "L3 agent started" +msgstr "L3 エージェントが始動しました" + +msgid "LinuxBridge Agent RPC Daemon Started!" +msgstr "LinuxBridge Agent RPC デーモンが開始しました。" + +#, python-format +msgid "Loaded extension: %s" +msgstr "拡張をロードしました: %s" + +#, python-format +msgid "Loading Plugin: %s" +msgstr "プラグインの読み込み中: %s" + +msgid "Logging enabled!" +msgstr "ロギングは有効です" #, python-format msgid "" @@ -217,53 +165,105 @@ msgstr "" "ループ反復が間隔を超えました (%(polling_interval)s に対して %(elapsed)s)。" #, python-format -msgid "RPC agent_id: %s" -msgstr "RPC agent_id: %s" - -#, python-format -msgid "Port %(device)s updated. 
Details: %(details)s" -msgstr "ポート %(device)s が更新されました。詳細: %(details)s" - -#, python-format -msgid "Device %s not defined on plugin" -msgstr "デバイス %s がプラグインで定義されていません" - -#, python-format -msgid "Attachment %s removed" -msgstr "接続機構 %s が削除されました" - -#, python-format -msgid "Port %s updated." -msgstr "ポート %s が更新されました。" - -msgid "LinuxBridge Agent RPC Daemon Started!" -msgstr "LinuxBridge Agent RPC デーモンが開始しました。" - -msgid "Agent out of sync with plugin!" -msgstr "エージェントがプラグインと非同期です。" - -#, python-format -msgid "Interface mappings: %s" -msgstr "インターフェース・マッピング: %s" +msgid "Mapping physical network %(physical_network)s to bridge %(bridge)s" +msgstr "" +"物理ネットワーク %(physical_network)s をブリッジ %(bridge)s にマップしていま" +"す" #, python-format msgid "Network VLAN ranges: %s" msgstr "ネットワーク VLAN の範囲: %s" #, python-format -msgid "Assigning %(vlan_id)s as local vlan for net-id=%(net_uuid)s" -msgstr "" -"%(vlan_id)s を net-id=%(net_uuid)s のローカル VLAN として割り当てています" +msgid "No %s Plugin loaded" +msgstr "%s プラグインはロードされませんでした" + +msgid "OVS cleanup completed successfully" +msgstr "OVS のクリーンアップが正常に完了しました" + +msgid "Parent process has died unexpectedly, exiting" +msgstr "親プロセスが予期せずに停止しました。終了しています" + +#, python-format +msgid "Port %(device)s updated. Details: %(details)s" +msgstr "ポート %(device)s が更新されました。詳細: %(details)s" + +#, python-format +msgid "Port %s updated." 
+msgstr "ポート %s が更新されました。" + +#, python-format +msgid "Preparing filters for devices %s" +msgstr "デバイス %s のフィルターを準備中" + +msgid "Provider rule updated" +msgstr "プロバイダー・ルールが更新されました" + +#, python-format +msgid "RPC agent_id: %s" +msgstr "RPC agent_id: %s" #, python-format msgid "Reclaiming vlan = %(vlan_id)s from net-id = %(net_uuid)s" msgstr "VLAN = %(vlan_id)s を net-id = %(net_uuid)s から再利用中" -#, python-format -msgid "Mapping physical network %(physical_network)s to bridge %(bridge)s" -msgstr "" -"物理ネットワーク %(physical_network)s をブリッジ %(bridge)s にマップしていま" -"す" +msgid "Refresh firewall rules" +msgstr "ファイアウォール・ルールの最新表示" -msgid "Agent tunnel out of sync with plugin!" -msgstr "エージェント・トンネルがプラグインと非同期です" +#, python-format +msgid "Remove device filter for %r" +msgstr "%r のデバイス・フィルターを削除" + +#, python-format +msgid "Security group member updated %r" +msgstr "セキュリティー・グループ・メンバーが %r を更新しました" + +#, python-format +msgid "Security group rule updated %r" +msgstr "セキュリティー・グループ・ルールが %r を更新しました" + +#, python-format +msgid "Skipping periodic task %(task)s because it is disabled" +msgstr "タスクが使用不可であるため、定期タスク %(task)s をスキップしています" + +#, python-format +msgid "Skipping periodic task %(task)s because its interval is negative" +msgstr "タスクの間隔が負であるため、定期タスク %(task)s をスキップしています" + +#, python-format +msgid "Skipping port %s as no IP is configure on it" +msgstr "ポート %s には IP が構成されていないため、このポートをスキップします" + +msgid "Specified IP addresses do not match the subnet IP version" +msgstr "指定された IP アドレスが、サブネット IP バージョンと一致しません" + +#, python-format +msgid "Start IP (%(start)s) is greater than end IP (%(end)s)" +msgstr "開始 IP (%(start)s) が終了 IP (%(end)s) より大きくなっています" + +#, python-format +msgid "Started child %d" +msgstr "子 %d を開始しました" + +#, python-format +msgid "Starting %d workers" +msgstr "%d ワーカーを開始しています" + +msgid "Synchronizing state" +msgstr "状態の同期中" + +#, python-format +msgid "" +"Validation for CIDR: %(new_cidr)s failed - overlaps with subnet " +"%(subnet_id)s (CIDR: %(cidr)s)" +msgstr "" +"CIDR %(new_cidr)s 
の検証が失敗しました。サブネット %(subnet_id)s (CIDR: " +"%(cidr)s) とオーバーラップしています" + +#, python-format +msgid "Waiting on %d children to exit" +msgstr "%d 個の子で終了を待機しています" + +#, python-format +msgid "agent_updated by server side %s!" +msgstr "サーバー・サイド %s による agent_updated!" diff --git a/neutron/locale/ko_KR/LC_MESSAGES/neutron-log-info.po b/neutron/locale/ko_KR/LC_MESSAGES/neutron-log-info.po index b766a68756e..c0b6cc04be2 100644 --- a/neutron/locale/ko_KR/LC_MESSAGES/neutron-log-info.po +++ b/neutron/locale/ko_KR/LC_MESSAGES/neutron-log-info.po @@ -7,8 +7,8 @@ msgid "" msgstr "" "Project-Id-Version: Neutron\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2015-05-04 06:08+0000\n" -"PO-Revision-Date: 2015-04-28 07:35+0000\n" +"POT-Creation-Date: 2015-05-29 06:16+0000\n" +"PO-Revision-Date: 2015-05-28 20:54+0000\n" "Last-Translator: openstackjenkins \n" "Language-Team: Korean (Korea) (http://www.transifex.com/projects/p/neutron/" "language/ko_KR/)\n" @@ -19,82 +19,34 @@ msgstr "" "Generated-By: Babel 1.3\n" "Plural-Forms: nplurals=1; plural=0;\n" -#, python-format -msgid "Loading Plugin: %s" -msgstr "로딩 플러그인: %s" - #, python-format msgid "%(method)s %(url)s" msgstr "%(method)s %(url)s" #, python-format -msgid "HTTP exception thrown: %s" -msgstr "HTTP 예외 처리: %s" - -#, python-format -msgid "%(url)s returned with HTTP %(status)d" -msgstr "%(url)s이(가) HTTP %(status)d(으)로 리턴되었음" +msgid "%(plugin_key)s: %(function_name)s with args %(args)s ignored" +msgstr "%(plugin_key)s: %(args)s 인수를 갖는 %(function_name)s이(가) 무시됨" #, python-format msgid "%(url)s returned a fault: %(exception)s" msgstr "%(url)s이(가) 결함을 리턴함: %(exception)s" -msgid "Disabled security-group extension." -msgstr "보안 그룹 확장을 사용하지 않습니다. " +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "%(url)s이(가) HTTP %(status)d(으)로 리턴되었음" + +msgid "Agent initialized successfully, now running... " +msgstr "에이전트가 초기화되었으며, 지금 실행 중... " + +msgid "Agent out of sync with plugin!" 
+msgstr "에이전트가 플러그인과 동기화되지 않았습니다!" + +msgid "Agent tunnel out of sync with plugin!" +msgstr "에이전트 터널이 플러그인과 동기화되지 않았습니다!" #, python-format -msgid "Preparing filters for devices %s" -msgstr "%s 디바이스에 대한 필터 준비" - -#, python-format -msgid "Security group rule updated %r" -msgstr "보안 그룹 규칙이 %r을(를) 업데이트함" - -#, python-format -msgid "Security group member updated %r" -msgstr "보안 그룹 멤버가 %r을(를) 업데이트함" - -msgid "Provider rule updated" -msgstr "제공자 규칙이 업데이트됨" - -#, python-format -msgid "Remove device filter for %r" -msgstr "%r의 디바이스 필터 제거" - -msgid "Refresh firewall rules" -msgstr "방화벽 규칙 새로 고치기" - -msgid "DHCP agent started" -msgstr "DHCP 에이전트가 시작됨" - -msgid "Synchronizing state" -msgstr "상태 동기화 중" - -#, python-format -msgid "agent_updated by server side %s!" -msgstr "서버측 %s!에 의한 agent_updated" - -msgid "L3 agent started" -msgstr "L3 에이전트가 시작됨" - -#, python-format -msgid "Device %s already exists" -msgstr "%s 디바이스가 이미 존재함" - -#, python-format -msgid "Attempted to update port filter which is not filtered %s" -msgstr "필터링된 %s이(가) 아닌 포트 필터를 업데이트하려고 시도함" - -#, python-format -msgid "Attempted to remove port filter which is not filtered %r" -msgstr "필터링된 %r이(가) 아닌 포트 필터를 제거하려고 시도함" - -msgid "Initializing extension manager." -msgstr "확장기능 관리자를 초기화 중입니다. " - -#, python-format -msgid "Loaded extension: %s" -msgstr "로드된 확장: %s" +msgid "Allocated vlan (%d) from the pool" +msgstr "풀에서 할당된 vlan(%d)" msgid "" "Allow sorting is enabled because native pagination requires native sorting" @@ -102,83 +54,29 @@ msgstr "" "네이티브 페이지 번호 매기기에 네이티브 정렬이 필요하므로 정렬을 사용할 수 있" "음" -msgid "OVS cleanup completed successfully" -msgstr "OVS 정리가 완료됨" - -msgid "Agent initialized successfully, now running... " -msgstr "에이전트가 초기화되었으며, 지금 실행 중... " - -msgid "Logging enabled!" -msgstr "로깅 사용!" 
+#, python-format +msgid "Assigning %(vlan_id)s as local vlan for net-id=%(net_uuid)s" +msgstr "%(vlan_id)s을(를) net-id=%(net_uuid)s에 대한 로컬 vlan으로 지정 중" #, python-format -msgid "Config paste file: %s" -msgstr "구성 붙여넣기 파일: %s" +msgid "Attachment %s removed" +msgstr "첨부 %s이(가) 제거됨" #, python-format -msgid "" -"Validation for CIDR: %(new_cidr)s failed - overlaps with subnet " -"%(subnet_id)s (CIDR: %(cidr)s)" -msgstr "" -"CIDR %(new_cidr)s 유효성 검증 실패 - 서브넷 %(subnet_id)s(CIDR: %(cidr)s)과" -"(와) 겹침" +msgid "Attempted to remove port filter which is not filtered %r" +msgstr "필터링된 %r이(가) 아닌 포트 필터를 제거하려고 시도함" #, python-format -msgid "Found invalid IP address in pool: %(start)s - %(end)s:" -msgstr "풀에서 올바르지 않은 IP 주소 발견: %(start)s - %(end)s:" - -msgid "Specified IP addresses do not match the subnet IP version" -msgstr "지정된 IP 주소가 서브넷 IP 버전과 일치하지 않음" - -#, python-format -msgid "Start IP (%(start)s) is greater than end IP (%(end)s)" -msgstr "시작 IP(%(start)s)가 끝 IP(%(end)s)보다 큼" - -#, python-format -msgid "Found pool larger than subnet CIDR:%(start)s - %(end)s" -msgstr "서브넷 CIDR보다 큰 풀 발견: %(start)s - %(end)s" - -#, python-format -msgid "Found overlapping ranges: %(l_range)s and %(r_range)s" -msgstr "겹치는 범위 발견: %(l_range)s 및 %(r_range)s" - -#, python-format -msgid "Skipping port %s as no IP is configure on it" -msgstr "구성된 IP가 없어서 포트 %s을(를) 건너뜀" - -#, python-format -msgid "Eventlet backdoor listening on %(port)s for process %(pid)d" -msgstr "Eventlet 백도어는 프로세스 %(pid)d 일 동안 %(port)s에서 수신" - -#, python-format -msgid "Skipping periodic task %(task)s because its interval is negative" -msgstr "간격이 음수이기 때문에 주기적 태스크 %(task)s을(를) 건너뜀" - -#, python-format -msgid "Skipping periodic task %(task)s because it is disabled" -msgstr "사용 안하기 때문에 주기적 태스크 %(task)s을(를) 건너뜀" +msgid "Attempted to update port filter which is not filtered %s" +msgstr "필터링된 %s이(가) 아닌 포트 필터를 업데이트하려고 시도함" #, python-format msgid "Caught %s, exiting" msgstr "%s 발견, 종료 중" -msgid "Parent process has died unexpectedly, exiting" 
-msgstr "상위 프로세스가 예기치 않게 정지했습니다. 종료 중" - #, python-format -msgid "Child caught %s, exiting" -msgstr "자식으로 된 %s가 존재함." - -msgid "Forking too fast, sleeping" -msgstr "포크가 너무 빠름. 정지 중" - -#, python-format -msgid "Started child %d" -msgstr "%d 하위를 시작했음" - -#, python-format -msgid "Starting %d workers" -msgstr "%d 작업자 시작 중" +msgid "Caught %s, stopping children" +msgstr "%s 발견, 하위 중지 중" #, python-format msgid "Child %(pid)d killed by signal %(sig)d" @@ -189,24 +87,93 @@ msgid "Child %(pid)s exited with status %(code)d" msgstr "%(pid)s 하위가 %(code)d 상태와 함께 종료했음" #, python-format -msgid "Caught %s, stopping children" -msgstr "%s 발견, 하위 중지 중" +msgid "Child caught %s, exiting" +msgstr "자식으로 된 %s가 존재함." #, python-format -msgid "Waiting on %d children to exit" -msgstr "%d 하위에서 종료하기를 대기 중임" +msgid "Config paste file: %s" +msgstr "구성 붙여넣기 파일: %s" #, python-format -msgid "Allocated vlan (%d) from the pool" -msgstr "풀에서 할당된 vlan(%d)" +msgid "Configured mechanism driver names: %s" +msgstr "매커니즘 드라이버 이름을 설정했습니다: %s" #, python-format -msgid "No %s Plugin loaded" -msgstr "로드된 %s 플러그인이 없음" +msgid "Configured type driver names: %s" +msgstr "형식 드라이버 이름을 설정했습니다: %s" + +msgid "DHCP agent started" +msgstr "DHCP 에이전트가 시작됨" #, python-format -msgid "%(plugin_key)s: %(function_name)s with args %(args)s ignored" -msgstr "%(plugin_key)s: %(args)s 인수를 갖는 %(function_name)s이(가) 무시됨" +msgid "Device %s already exists" +msgstr "%s 디바이스가 이미 존재함" + +#, python-format +msgid "Device %s not defined on plugin" +msgstr "%s 디바이스가 플러그인에서 정의되지 않음" + +msgid "Disabled security-group extension." +msgstr "보안 그룹 확장을 사용하지 않습니다. " + +#, python-format +msgid "Eventlet backdoor listening on %(port)s for process %(pid)d" +msgstr "Eventlet 백도어는 프로세스 %(pid)d 일 동안 %(port)s에서 수신" + +msgid "Forking too fast, sleeping" +msgstr "포크가 너무 빠름. 
정지 중" + +#, python-format +msgid "Found invalid IP address in pool: %(start)s - %(end)s:" +msgstr "풀에서 올바르지 않은 IP 주소 발견: %(start)s - %(end)s:" + +#, python-format +msgid "Found overlapping ranges: %(l_range)s and %(r_range)s" +msgstr "겹치는 범위 발견: %(l_range)s 및 %(r_range)s" + +#, python-format +msgid "Found pool larger than subnet CIDR:%(start)s - %(end)s" +msgstr "서브넷 CIDR보다 큰 풀 발견: %(start)s - %(end)s" + +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "HTTP 예외 처리: %s" + +#, python-format +msgid "Initializing driver for type '%s'" +msgstr "'%s' 형식 드라이버 초기화중" + +msgid "Initializing extension manager." +msgstr "확장기능 관리자를 초기화 중입니다. " + +#, python-format +msgid "Interface mappings: %s" +msgstr "인터페이스 맵핑: %s" + +msgid "L3 agent started" +msgstr "L3 에이전트가 시작됨" + +msgid "LinuxBridge Agent RPC Daemon Started!" +msgstr "LinuxBridge 에이전트 RPC 디먼이 시작되었습니다!" + +#, python-format +msgid "Loaded extension: %s" +msgstr "로드된 확장: %s" + +#, python-format +msgid "Loaded mechanism driver names: %s" +msgstr "매커니즘 드라이버 이름을 불러왔습니다: %s" + +#, python-format +msgid "Loaded type driver names: %s" +msgstr "형식 드라이버 이름을 불러왔습니다: %s" + +#, python-format +msgid "Loading Plugin: %s" +msgstr "로딩 플러그인: %s" + +msgid "Logging enabled!" +msgstr "로깅 사용!" #, python-format msgid "" @@ -214,62 +181,8 @@ msgid "" msgstr "루프 반복이 간격을 초과했습니다(%(polling_interval)s 대 %(elapsed)s)!" #, python-format -msgid "RPC agent_id: %s" -msgstr "RPC agent_id: %s" - -#, python-format -msgid "Port %(device)s updated. Details: %(details)s" -msgstr "%(device)s 포트가 업데이트되었습니다. 세부사항: %(details)s" - -#, python-format -msgid "Device %s not defined on plugin" -msgstr "%s 디바이스가 플러그인에서 정의되지 않음" - -#, python-format -msgid "Attachment %s removed" -msgstr "첨부 %s이(가) 제거됨" - -#, python-format -msgid "Port %s updated." -msgstr "%s 포트가 업데이트되었습니다. " - -msgid "LinuxBridge Agent RPC Daemon Started!" -msgstr "LinuxBridge 에이전트 RPC 디먼이 시작되었습니다!" - -msgid "Agent out of sync with plugin!" -msgstr "에이전트가 플러그인과 동기화되지 않았습니다!" 
- -#, python-format -msgid "Interface mappings: %s" -msgstr "인터페이스 맵핑: %s" - -#, python-format -msgid "Configured type driver names: %s" -msgstr "형식 드라이버 이름을 설정했습니다: %s" - -#, python-format -msgid "Loaded type driver names: %s" -msgstr "형식 드라이버 이름을 불러왔습니다: %s" - -#, python-format -msgid "Registered types: %s" -msgstr "등록된 형식: %s" - -#, python-format -msgid "Tenant network_types: %s" -msgstr "network_types를 임대합니다: %s" - -#, python-format -msgid "Initializing driver for type '%s'" -msgstr "'%s' 형식 드라이버 초기화중" - -#, python-format -msgid "Configured mechanism driver names: %s" -msgstr "매커니즘 드라이버 이름을 설정했습니다: %s" - -#, python-format -msgid "Loaded mechanism driver names: %s" -msgstr "매커니즘 드라이버 이름을 불러왔습니다: %s" +msgid "Mapping physical network %(physical_network)s to bridge %(bridge)s" +msgstr "실제 네트워크 %(physical_network)s을(를) 브릿지 %(bridge)s에 맵핑 중" msgid "Modular L2 Plugin initialization complete" msgstr "모듈러 L2 플러그인 초기화를 완료했습니다" @@ -279,16 +192,103 @@ msgid "Network VLAN ranges: %s" msgstr "네트워크 VLAN 범위: %s" #, python-format -msgid "Assigning %(vlan_id)s as local vlan for net-id=%(net_uuid)s" -msgstr "%(vlan_id)s을(를) net-id=%(net_uuid)s에 대한 로컬 vlan으로 지정 중" +msgid "No %s Plugin loaded" +msgstr "로드된 %s 플러그인이 없음" + +msgid "OVS cleanup completed successfully" +msgstr "OVS 정리가 완료됨" + +msgid "Parent process has died unexpectedly, exiting" +msgstr "상위 프로세스가 예기치 않게 정지했습니다. 종료 중" + +#, python-format +msgid "Port %(device)s updated. Details: %(details)s" +msgstr "%(device)s 포트가 업데이트되었습니다. 세부사항: %(details)s" + +#, python-format +msgid "Port %s updated." +msgstr "%s 포트가 업데이트되었습니다. 
" + +#, python-format +msgid "Preparing filters for devices %s" +msgstr "%s 디바이스에 대한 필터 준비" + +msgid "Provider rule updated" +msgstr "제공자 규칙이 업데이트됨" + +#, python-format +msgid "RPC agent_id: %s" +msgstr "RPC agent_id: %s" #, python-format msgid "Reclaiming vlan = %(vlan_id)s from net-id = %(net_uuid)s" msgstr "net-id = %(net_uuid)s에서 vlan = %(vlan_id)s 재확보 중" -#, python-format -msgid "Mapping physical network %(physical_network)s to bridge %(bridge)s" -msgstr "실제 네트워크 %(physical_network)s을(를) 브릿지 %(bridge)s에 맵핑 중" +msgid "Refresh firewall rules" +msgstr "방화벽 규칙 새로 고치기" -msgid "Agent tunnel out of sync with plugin!" -msgstr "에이전트 터널이 플러그인과 동기화되지 않았습니다!" +#, python-format +msgid "Registered types: %s" +msgstr "등록된 형식: %s" + +#, python-format +msgid "Remove device filter for %r" +msgstr "%r의 디바이스 필터 제거" + +#, python-format +msgid "Security group member updated %r" +msgstr "보안 그룹 멤버가 %r을(를) 업데이트함" + +#, python-format +msgid "Security group rule updated %r" +msgstr "보안 그룹 규칙이 %r을(를) 업데이트함" + +#, python-format +msgid "Skipping periodic task %(task)s because it is disabled" +msgstr "사용 안하기 때문에 주기적 태스크 %(task)s을(를) 건너뜀" + +#, python-format +msgid "Skipping periodic task %(task)s because its interval is negative" +msgstr "간격이 음수이기 때문에 주기적 태스크 %(task)s을(를) 건너뜀" + +#, python-format +msgid "Skipping port %s as no IP is configure on it" +msgstr "구성된 IP가 없어서 포트 %s을(를) 건너뜀" + +msgid "Specified IP addresses do not match the subnet IP version" +msgstr "지정된 IP 주소가 서브넷 IP 버전과 일치하지 않음" + +#, python-format +msgid "Start IP (%(start)s) is greater than end IP (%(end)s)" +msgstr "시작 IP(%(start)s)가 끝 IP(%(end)s)보다 큼" + +#, python-format +msgid "Started child %d" +msgstr "%d 하위를 시작했음" + +#, python-format +msgid "Starting %d workers" +msgstr "%d 작업자 시작 중" + +msgid "Synchronizing state" +msgstr "상태 동기화 중" + +#, python-format +msgid "Tenant network_types: %s" +msgstr "network_types를 임대합니다: %s" + +#, python-format +msgid "" +"Validation for CIDR: %(new_cidr)s failed - overlaps with subnet " 
+"%(subnet_id)s (CIDR: %(cidr)s)" +msgstr "" +"CIDR %(new_cidr)s 유효성 검증 실패 - 서브넷 %(subnet_id)s(CIDR: %(cidr)s)과" +"(와) 겹침" + +#, python-format +msgid "Waiting on %d children to exit" +msgstr "%d 하위에서 종료하기를 대기 중임" + +#, python-format +msgid "agent_updated by server side %s!" +msgstr "서버측 %s!에 의한 agent_updated" diff --git a/neutron/locale/neutron-log-error.pot b/neutron/locale/neutron-log-error.pot index 1382968ca1d..1bf18a27974 100644 --- a/neutron/locale/neutron-log-error.pot +++ b/neutron/locale/neutron-log-error.pot @@ -6,9 +6,9 @@ #, fuzzy msgid "" msgstr "" -"Project-Id-Version: neutron 2015.2.0.dev422\n" +"Project-Id-Version: neutron 2015.2.0.dev485\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2015-05-25 06:15+0000\n" +"POT-Creation-Date: 2015-05-29 06:16+0000\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME \n" "Language-Team: LANGUAGE \n" @@ -79,17 +79,17 @@ msgstr "" msgid "Internal error" msgstr "" -#: neutron/agent/common/ovs_lib.py:212 neutron/agent/common/ovs_lib.py:307 +#: neutron/agent/common/ovs_lib.py:217 neutron/agent/common/ovs_lib.py:312 #, python-format msgid "Unable to execute %(cmd)s. Exception: %(exception)s" msgstr "" -#: neutron/agent/common/ovs_lib.py:233 +#: neutron/agent/common/ovs_lib.py:238 #, python-format msgid "Timed out retrieving ofport on port %(pname)s. 
Exception: %(exception)s" msgstr "" -#: neutron/agent/common/ovs_lib.py:526 +#: neutron/agent/common/ovs_lib.py:531 #, python-format msgid "OVS flows could not be applied on bridge %s" msgstr "" @@ -118,7 +118,7 @@ msgstr "" #: neutron/plugins/hyperv/agent/l2_agent.py:94 #: neutron/plugins/ibm/agent/sdnve_neutron_agent.py:108 #: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:787 -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:289 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:295 #: neutron/plugins/sriovnicagent/sriov_nic_agent.py:129 #: neutron/services/metering/agents/metering_agent.py:283 msgid "Failed reporting state!" @@ -129,7 +129,7 @@ msgstr "" msgid "Error importing interface driver '%s'" msgstr "" -#: neutron/agent/l3/agent.py:232 neutron/agent/linux/dhcp.py:825 +#: neutron/agent/l3/agent.py:232 neutron/agent/linux/dhcp.py:875 msgid "An interface driver must be specified" msgstr "" @@ -244,7 +244,7 @@ msgstr "" msgid "Pidfile %s already exist. Daemon already running?" msgstr "" -#: neutron/agent/linux/dhcp.py:831 +#: neutron/agent/linux/dhcp.py:881 #, python-format msgid "Error importing interface driver '%(driver)s': %(inner)s" msgstr "" @@ -318,11 +318,11 @@ msgstr "" msgid "Unable to parse route \"%s\"" msgstr "" -#: neutron/agent/linux/iptables_manager.py:402 +#: neutron/agent/linux/iptables_manager.py:403 msgid "Failure applying iptables rules" msgstr "" -#: neutron/agent/linux/iptables_manager.py:480 +#: neutron/agent/linux/iptables_manager.py:481 #, python-format msgid "" "IPTablesManager.apply failed to apply the following set of iptables " @@ -522,13 +522,13 @@ msgid "" "package is installed. Error: %s" msgstr "" -#: neutron/db/agents_db.py:307 +#: neutron/db/agents_db.py:308 #, python-format msgid "" "Message received from the host: %(host)s during the registration of " "%(agent_name)s has a timestamp: %(agent_time)s. 
This differs from the " -"current server timestamp: %(serv_time)s by more than the threshold agent " -"downtime: %(threshold)s." +"current server timestamp: %(serv_time)s by %(diff)s seconds, which is " +"more than the threshold agent downtime: %(threshold)s." msgstr "" #: neutron/db/agentschedulers_db.py:226 @@ -543,12 +543,12 @@ msgid "" "%(agent)s" msgstr "" -#: neutron/db/db_base_plugin_v2.py:935 neutron/plugins/ml2/plugin.py:571 +#: neutron/db/db_base_plugin_v2.py:953 neutron/plugins/ml2/plugin.py:570 #, python-format msgid "An exception occurred while creating the %(resource)s:%(item)s" msgstr "" -#: neutron/db/db_base_plugin_v2.py:1799 +#: neutron/db/db_base_plugin_v2.py:1821 #, python-format msgid "Unable to generate mac address after %s attempts" msgstr "" @@ -722,8 +722,7 @@ msgid "" msgstr "" #: neutron/plugins/ibm/agent/sdnve_neutron_agent.py:255 -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1770 -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1782 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1656 #, python-format msgid "%s Agent terminated!" msgstr "" @@ -779,7 +778,7 @@ msgid "Parsing physical_interface_mappings failed: %s. Agent terminated!" 
msgstr "" #: neutron/plugins/ml2/db.py:241 neutron/plugins/ml2/db.py:325 -#: neutron/plugins/ml2/plugin.py:1339 +#: neutron/plugins/ml2/plugin.py:1340 #, python-format msgid "Multiple ports have port_id starting with %s" msgstr "" @@ -838,97 +837,97 @@ msgstr "" msgid "Extension driver '%(name)s' failed in %(method)s" msgstr "" -#: neutron/plugins/ml2/plugin.py:289 +#: neutron/plugins/ml2/plugin.py:288 #, python-format msgid "Failed to commit binding results for %(port)s after %(max)s tries" msgstr "" -#: neutron/plugins/ml2/plugin.py:451 +#: neutron/plugins/ml2/plugin.py:450 #, python-format msgid "Serialized vif_details DB value '%(value)s' for port %(port)s is invalid" msgstr "" -#: neutron/plugins/ml2/plugin.py:462 +#: neutron/plugins/ml2/plugin.py:461 #, python-format msgid "Serialized profile DB value '%(value)s' for port %(port)s is invalid" msgstr "" -#: neutron/plugins/ml2/plugin.py:548 +#: neutron/plugins/ml2/plugin.py:547 #, python-format msgid "Could not find %s to delete." msgstr "" -#: neutron/plugins/ml2/plugin.py:551 +#: neutron/plugins/ml2/plugin.py:550 #, python-format msgid "Could not delete %(res)s %(id)s." msgstr "" -#: neutron/plugins/ml2/plugin.py:584 +#: neutron/plugins/ml2/plugin.py:583 #, python-format msgid "" "mechanism_manager.create_%(res)s_postcommit failed for %(res)s: " "'%(failed_id)s'. 
Deleting %(res)ss %(resource_ids)s" msgstr "" -#: neutron/plugins/ml2/plugin.py:630 +#: neutron/plugins/ml2/plugin.py:629 #, python-format msgid "mechanism_manager.create_network_postcommit failed, deleting network '%s'" msgstr "" -#: neutron/plugins/ml2/plugin.py:700 +#: neutron/plugins/ml2/plugin.py:699 #, python-format msgid "Exception auto-deleting port %s" msgstr "" -#: neutron/plugins/ml2/plugin.py:713 +#: neutron/plugins/ml2/plugin.py:712 #, python-format msgid "Exception auto-deleting subnet %s" msgstr "" -#: neutron/plugins/ml2/plugin.py:795 +#: neutron/plugins/ml2/plugin.py:794 msgid "mechanism_manager.delete_network_postcommit failed" msgstr "" -#: neutron/plugins/ml2/plugin.py:816 +#: neutron/plugins/ml2/plugin.py:815 #, python-format msgid "mechanism_manager.create_subnet_postcommit failed, deleting subnet '%s'" msgstr "" -#: neutron/plugins/ml2/plugin.py:937 +#: neutron/plugins/ml2/plugin.py:938 #, python-format msgid "Exception deleting fixed_ip from port %s" msgstr "" -#: neutron/plugins/ml2/plugin.py:946 +#: neutron/plugins/ml2/plugin.py:947 msgid "mechanism_manager.delete_subnet_postcommit failed" msgstr "" -#: neutron/plugins/ml2/plugin.py:1011 +#: neutron/plugins/ml2/plugin.py:1012 #, python-format msgid "mechanism_manager.create_port_postcommit failed, deleting port '%s'" msgstr "" -#: neutron/plugins/ml2/plugin.py:1023 +#: neutron/plugins/ml2/plugin.py:1024 #, python-format msgid "_bind_port_if_needed failed, deleting port '%s'" msgstr "" -#: neutron/plugins/ml2/plugin.py:1053 +#: neutron/plugins/ml2/plugin.py:1054 #, python-format msgid "_bind_port_if_needed failed. 
Deleting all ports from create bulk '%s'" msgstr "" -#: neutron/plugins/ml2/plugin.py:1198 +#: neutron/plugins/ml2/plugin.py:1199 #, python-format msgid "No Host supplied to bind DVR Port %s" msgstr "" -#: neutron/plugins/ml2/plugin.py:1320 +#: neutron/plugins/ml2/plugin.py:1321 #, python-format msgid "mechanism_manager.delete_port_postcommit failed for port %s" msgstr "" -#: neutron/plugins/ml2/plugin.py:1352 +#: neutron/plugins/ml2/plugin.py:1353 #, python-format msgid "Binding info for DVR port %s not found" msgstr "" @@ -1078,122 +1077,126 @@ msgid "" "in Non-DVR Mode" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_dvr_neutron_agent.py:419 +#: neutron/plugins/openvswitch/agent/ovs_dvr_neutron_agent.py:371 #, python-format msgid "DVR: Duplicate DVR router interface detected for subnet %s" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_dvr_neutron_agent.py:427 +#: neutron/plugins/openvswitch/agent/ovs_dvr_neutron_agent.py:379 #, python-format msgid "DVR: Unable to retrieve subnet information for subnet_id %s" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_dvr_neutron_agent.py:603 +#: neutron/plugins/openvswitch/agent/ovs_dvr_neutron_agent.py:512 #, python-format msgid "" "Centralized-SNAT port %(port)s on subnet %(port_subnet)s already seen on " "a different subnet %(orig_subnet)s" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:376 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:382 msgid "No tunnel_type specified, cannot create tunnels" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:379 -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:402 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:385 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:408 #, python-format msgid "tunnel_type %s not supported by agent" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:395 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:401 msgid "No tunnel_ip 
specified, cannot delete tunnels" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:399 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:405 msgid "No tunnel_type specified, cannot delete tunnels" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:541 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:551 #, python-format msgid "No local VLAN available for net-id=%s" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:580 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:582 #, python-format msgid "" "Cannot provision %(network_type)s network for net-id=%(net_uuid)s - " "tunneling disabled" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:599 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:590 #, python-format msgid "" "Cannot provision flat network for net-id=%(net_uuid)s - no bridge for " "physical_network %(physical_network)s" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:627 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:600 #, python-format msgid "" "Cannot provision VLAN network for net-id=%(net_uuid)s - no bridge for " "physical_network %(physical_network)s" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:636 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:609 #, python-format msgid "" "Cannot provision unknown network type %(network_type)s for net-" "id=%(net_uuid)s" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:692 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:669 #, python-format msgid "" "Cannot reclaim unknown network type %(network_type)s for net-" "id=%(net_uuid)s" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:911 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:866 msgid "" "Failed to create OVS patch port. 
Cannot have tunneling enabled on this " "agent, since this version of OVS does not support tunnels or patch ports." " Agent terminated!" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1038 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:925 #, python-format msgid "" "Bridge %(bridge)s for physical network %(physical_network)s does not " "exist. Agent terminated!" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1233 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1119 #, python-format msgid "Failed to set-up %(type)s tunnel port to %(ip)s" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1426 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1308 #, python-format msgid "" "process_network_ports - iteration:%d - failure while retrieving port " "details from server" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1462 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1344 #, python-format msgid "" "process_ancillary_network_ports - iteration:%d - failure while retrieving" " port details from server" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1608 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1486 msgid "Error while synchronizing tunnels" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1683 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1561 msgid "Error while processing VIF ports" msgstr "" +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1650 +msgid "Agent failed to create agent config map" +msgstr "" + #: neutron/plugins/sriovnicagent/eswitch_manager.py:48 #, python-format msgid "Failed to get devices for %s" diff --git a/neutron/locale/neutron-log-info.pot b/neutron/locale/neutron-log-info.pot index b890cee162d..056383ecbc4 100644 --- a/neutron/locale/neutron-log-info.pot +++ b/neutron/locale/neutron-log-info.pot @@ -6,9 +6,9 @@ #, fuzzy msgid "" msgstr "" 
-"Project-Id-Version: neutron 2015.2.0.dev319\n" +"Project-Id-Version: neutron 2015.2.0.dev485\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2015-05-10 06:14+0000\n" +"POT-Creation-Date: 2015-05-29 06:16+0000\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME \n" "Language-Team: LANGUAGE \n" @@ -39,13 +39,13 @@ msgid "" "policy:%(old_policy)s" msgstr "" -#: neutron/quota.py:217 +#: neutron/quota.py:221 msgid "" "ConfDriver is used as quota_driver because the loaded plugin does not " "support 'quotas' table." msgstr "" -#: neutron/quota.py:228 +#: neutron/quota.py:232 #, python-format msgid "Loaded quota_driver: %s." msgstr "" @@ -122,7 +122,7 @@ msgstr "" msgid "No ports here to refresh firewall" msgstr "" -#: neutron/agent/common/ovs_lib.py:416 +#: neutron/agent/common/ovs_lib.py:421 #, python-format msgid "Port %(port_id)s not present in bridge %(br_name)s" msgstr "" @@ -161,20 +161,19 @@ msgid "" "concurrently." msgstr "" -#: neutron/agent/linux/daemon.py:102 +#: neutron/agent/linux/daemon.py:104 #, python-format msgid "Process runs with uid/gid: %(uid)s/%(gid)s" msgstr "" -#: neutron/agent/linux/dhcp.py:659 +#: neutron/agent/linux/dhcp.py:745 #, python-format msgid "" "Cannot apply dhcp option %(opt)s because it's ip_version %(version)d is " "not in port's address IP versions" msgstr "" -#: neutron/agent/linux/interface.py:268 neutron/agent/linux/interface.py:319 -#: neutron/agent/linux/interface.py:377 neutron/agent/linux/interface.py:420 +#: neutron/agent/linux/interface.py:196 #, python-format msgid "Device %s already exists" msgstr "" @@ -224,7 +223,7 @@ msgstr "" #: neutron/cmd/eventlet/plugins/hyperv_neutron_agent.py:43 #: neutron/plugins/ibm/agent/sdnve_neutron_agent.py:261 #: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:1025 -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1788 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1570 #: 
neutron/plugins/sriovnicagent/sriov_nic_agent.py:345 msgid "Agent initialized successfully, now running... " msgstr "" @@ -267,41 +266,41 @@ msgstr "" #: neutron/db/agentschedulers_db.py:215 #, python-format -msgid "Adding network %(net)s to agent %(agent)%s on host %(host)s" +msgid "Adding network %(net)s to agent %(agent)s on host %(host)s" msgstr "" -#: neutron/db/db_base_plugin_v2.py:636 +#: neutron/db/db_base_plugin_v2.py:642 #, python-format msgid "" "Validation for CIDR: %(new_cidr)s failed - overlaps with subnet " "%(subnet_id)s (CIDR: %(cidr)s)" msgstr "" -#: neutron/db/db_base_plugin_v2.py:663 +#: neutron/db/db_base_plugin_v2.py:679 #, python-format msgid "Found invalid IP address in pool: %(start)s - %(end)s:" msgstr "" -#: neutron/db/db_base_plugin_v2.py:670 +#: neutron/db/db_base_plugin_v2.py:686 msgid "Specified IP addresses do not match the subnet IP version" msgstr "" -#: neutron/db/db_base_plugin_v2.py:674 +#: neutron/db/db_base_plugin_v2.py:690 #, python-format msgid "Start IP (%(start)s) is greater than end IP (%(end)s)" msgstr "" -#: neutron/db/db_base_plugin_v2.py:679 +#: neutron/db/db_base_plugin_v2.py:695 #, python-format msgid "Found pool larger than subnet CIDR:%(start)s - %(end)s" msgstr "" -#: neutron/db/db_base_plugin_v2.py:703 +#: neutron/db/db_base_plugin_v2.py:719 #, python-format msgid "Found overlapping ranges: %(l_range)s and %(r_range)s" msgstr "" -#: neutron/db/db_base_plugin_v2.py:1616 neutron/plugins/ml2/plugin.py:889 +#: neutron/db/db_base_plugin_v2.py:1639 neutron/plugins/ml2/plugin.py:895 #, python-format msgid "" "Found port (%(port_id)s, %(ip)s) having IP allocation on subnet " @@ -319,17 +318,17 @@ msgstr "" msgid "Skipping port %s as no IP is configure on it" msgstr "" -#: neutron/db/l3_dvr_db.py:87 +#: neutron/db/l3_dvr_db.py:88 #, python-format msgid "Centralizing distributed router %s is not supported" msgstr "" -#: neutron/db/l3_dvr_db.py:549 +#: neutron/db/l3_dvr_db.py:550 #, python-format msgid "Agent Gateway port 
does not exist, so create one: %s" msgstr "" -#: neutron/db/l3_dvr_db.py:632 +#: neutron/db/l3_dvr_db.py:633 #, python-format msgid "SNAT interface port list does not exist, so create one: %s" msgstr "" @@ -352,7 +351,7 @@ msgid "" "available: %s" msgstr "" -#: neutron/db/migration/alembic_migrations/heal_script.py:221 +#: neutron/db/migration/alembic_migrations/heal_script.py:222 #, python-format msgid "Table %(old_t)r was renamed to %(new_t)r" msgstr "" @@ -560,7 +559,7 @@ msgid "RPC agent_id: %s" msgstr "" #: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:871 -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1308 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1187 #: neutron/plugins/sriovnicagent/sriov_nic_agent.py:209 #, python-format msgid "Port %(device)s updated. Details: %(details)s" @@ -572,14 +571,14 @@ msgid "Device %s not defined on plugin" msgstr "" #: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:911 -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1355 -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1372 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1234 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1251 #, python-format msgid "Attachment %s removed" msgstr "" #: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:923 -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1384 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1263 #: neutron/plugins/sriovnicagent/sriov_nic_agent.py:235 #, python-format msgid "Port %s updated." @@ -590,7 +589,7 @@ msgid "LinuxBridge Agent RPC Daemon Started!" msgstr "" #: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:986 -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1577 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1454 #: neutron/plugins/sriovnicagent/sriov_nic_agent.py:251 msgid "Agent out of sync with plugin!" 
msgstr "" @@ -601,7 +600,7 @@ msgstr "" msgid "Interface mappings: %s" msgstr "" -#: neutron/plugins/ml2/db.py:60 +#: neutron/plugins/ml2/db.py:59 #, python-format msgid "Added segment %(id)s of type %(network_type)s for network %(network_id)s" msgstr "" @@ -691,7 +690,7 @@ msgstr "" msgid "Extended port dict for driver '%(drv)s'" msgstr "" -#: neutron/plugins/ml2/plugin.py:146 +#: neutron/plugins/ml2/plugin.py:143 msgid "Modular L2 Plugin initialization complete" msgstr "" @@ -700,17 +699,17 @@ msgstr "" msgid "Attempt %(count)s to bind port %(port)s" msgstr "" -#: neutron/plugins/ml2/plugin.py:691 +#: neutron/plugins/ml2/plugin.py:696 #, python-format msgid "Port %s was deleted concurrently" msgstr "" -#: neutron/plugins/ml2/plugin.py:702 +#: neutron/plugins/ml2/plugin.py:708 #, python-format msgid "Subnet %s was deleted concurrently" msgstr "" -#: neutron/plugins/ml2/plugin.py:1358 +#: neutron/plugins/ml2/plugin.py:1366 #, python-format msgid "" "Binding info for port %s was not found, it might have been deleted " @@ -812,66 +811,66 @@ msgstr "" msgid "NVSD Agent initialized successfully, now running... " msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_dvr_neutron_agent.py:204 +#: neutron/plugins/openvswitch/agent/ovs_dvr_neutron_agent.py:205 #, python-format msgid "L2 Agent operating in DVR Mode with MAC %s" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:550 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:560 #, python-format msgid "Assigning %(vlan_id)s as local vlan for net-id=%(net_uuid)s" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:651 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:624 #, python-format msgid "Reclaiming vlan = %(vlan_id)s from net-id = %(net_uuid)s" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:764 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:743 #, python-format msgid "Configuration for device %s completed." 
msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:774 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:750 #, python-format msgid "" "Skipping ARP spoofing rules for port '%s' because it has port security " "disabled" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:823 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:778 #, python-format msgid "port_unbound(): net_uuid %s not in local_vlan_map" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:892 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:843 #, python-format msgid "Adding %s to list of bridges." msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1035 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:919 #, python-format msgid "Mapping physical network %(physical_network)s to bridge %(bridge)s" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1184 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1067 #, python-format msgid "Port '%(port_name)s' has lost its vlan tag '%(vlan_tag)d'!" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1302 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1181 #, python-format msgid "" "Port %s was not found on the integration bridge and will therefore not be" " processed" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1343 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1222 #, python-format msgid "Ancillary Port %s added" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1605 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1482 msgid "Agent tunnel out of sync with plugin!" 
msgstr "" diff --git a/neutron/locale/neutron.pot b/neutron/locale/neutron.pot index a6c4be0300a..deaa73bb339 100644 --- a/neutron/locale/neutron.pot +++ b/neutron/locale/neutron.pot @@ -6,9 +6,9 @@ #, fuzzy msgid "" msgstr "" -"Project-Id-Version: neutron 2015.2.0.dev422\n" +"Project-Id-Version: neutron 2015.2.0.dev485\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2015-05-25 06:14+0000\n" +"POT-Creation-Date: 2015-05-29 06:16+0000\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME \n" "Language-Team: LANGUAGE \n" @@ -271,20 +271,20 @@ msgstr "" msgid "Top-level directory for maintaining dhcp state" msgstr "" -#: neutron/agent/common/ovs_lib.py:47 +#: neutron/agent/common/ovs_lib.py:48 msgid "Timeout in seconds for ovs-vsctl commands" msgstr "" -#: neutron/agent/common/ovs_lib.py:434 +#: neutron/agent/common/ovs_lib.py:439 #, python-format msgid "Unable to determine mac address for %s" msgstr "" -#: neutron/agent/common/ovs_lib.py:542 +#: neutron/agent/common/ovs_lib.py:547 msgid "Cannot match priority on flow deletion or modification" msgstr "" -#: neutron/agent/common/ovs_lib.py:547 +#: neutron/agent/common/ovs_lib.py:552 msgid "Must specify one or more actions on flow addition or modification" msgstr "" @@ -517,17 +517,17 @@ msgstr "" msgid "Unable to unlock pid file" msgstr "" -#: neutron/agent/linux/dhcp.py:241 +#: neutron/agent/linux/dhcp.py:242 #, python-format msgid "Error while reading %s" msgstr "" -#: neutron/agent/linux/dhcp.py:248 +#: neutron/agent/linux/dhcp.py:249 #, python-format msgid "Unable to convert value in %s" msgstr "" -#: neutron/agent/linux/dhcp.py:250 +#: neutron/agent/linux/dhcp.py:251 #, python-format msgid "Unable to access %s" msgstr "" @@ -537,7 +537,7 @@ msgid "Location of temporary ebtables table files." 
msgstr "" #: neutron/agent/linux/ebtables_manager.py:210 -#: neutron/agent/linux/iptables_manager.py:210 +#: neutron/agent/linux/iptables_manager.py:211 #, python-format msgid "Unknown chain: %r" msgstr "" @@ -991,9 +991,9 @@ msgid "Duplicate hostroute '%s'" msgstr "" #: neutron/api/v2/attributes.py:321 -#: neutron/tests/unit/api/v2/test_attributes.py:501 -#: neutron/tests/unit/api/v2/test_attributes.py:515 -#: neutron/tests/unit/api/v2/test_attributes.py:523 +#: neutron/tests/unit/api/v2/test_attributes.py:502 +#: neutron/tests/unit/api/v2/test_attributes.py:516 +#: neutron/tests/unit/api/v2/test_attributes.py:524 #, python-format msgid "'%(data)s' isn't a recognized IP subnet cidr, '%(cidr)s' is recommended" msgstr "" @@ -1449,361 +1449,363 @@ msgstr "" #: neutron/common/exceptions.py:121 #, python-format -msgid "" -"Unable to complete operation on subnet %(subnet_id)s. One or more ports " -"have an IP allocation from this subnet." +msgid "Unable to complete operation on subnet %(subnet_id)s. %(reason)s" msgstr "" #: neutron/common/exceptions.py:126 +msgid "One or more ports have an IP allocation from this subnet." +msgstr "" + +#: neutron/common/exceptions.py:132 #, python-format msgid "" "Unable to complete operation on port %(port_id)s for network %(net_id)s. " "Port already has an attached device %(device_id)s." msgstr "" -#: neutron/common/exceptions.py:132 +#: neutron/common/exceptions.py:138 #, python-format msgid "Port %(port_id)s cannot be deleted directly via the port API: %(reason)s" msgstr "" -#: neutron/common/exceptions.py:137 +#: neutron/common/exceptions.py:143 #, python-format msgid "" "Unable to complete operation on port %(port_id)s, port is already bound, " "port type: %(vif_type)s, old_mac %(old_mac)s, new_mac %(new_mac)s" msgstr "" -#: neutron/common/exceptions.py:143 +#: neutron/common/exceptions.py:149 #, python-format msgid "" "Unable to complete operation for network %(net_id)s. The mac address " "%(mac)s is in use." 
msgstr "" -#: neutron/common/exceptions.py:149 +#: neutron/common/exceptions.py:155 #, python-format msgid "" "Unable to complete operation for %(subnet_id)s. The number of host routes" " exceeds the limit %(quota)s." msgstr "" -#: neutron/common/exceptions.py:155 +#: neutron/common/exceptions.py:161 #, python-format msgid "" "Unable to complete operation for %(subnet_id)s. The number of DNS " "nameservers exceeds the limit %(quota)s." msgstr "" -#: neutron/common/exceptions.py:160 +#: neutron/common/exceptions.py:166 #, python-format msgid "" "IP address %(ip_address)s is not a valid IP for any of the subnets on the" " specified network." msgstr "" -#: neutron/common/exceptions.py:165 +#: neutron/common/exceptions.py:171 #, python-format msgid "IP address %(ip_address)s is not a valid IP for the specified subnet." msgstr "" -#: neutron/common/exceptions.py:170 +#: neutron/common/exceptions.py:176 #, python-format msgid "" "Unable to complete operation for network %(net_id)s. The IP address " "%(ip_address)s is in use." msgstr "" -#: neutron/common/exceptions.py:175 +#: neutron/common/exceptions.py:181 #, python-format msgid "" "Unable to create the network. The VLAN %(vlan_id)s on physical network " "%(physical_network)s is in use." msgstr "" -#: neutron/common/exceptions.py:181 +#: neutron/common/exceptions.py:187 #, python-format msgid "" "Unable to create the flat network. Physical network %(physical_network)s " "is in use." msgstr "" -#: neutron/common/exceptions.py:186 +#: neutron/common/exceptions.py:192 #, python-format msgid "Unable to create the network. The tunnel ID %(tunnel_id)s is in use." msgstr "" -#: neutron/common/exceptions.py:191 +#: neutron/common/exceptions.py:197 msgid "Tenant network creation is not enabled." msgstr "" -#: neutron/common/exceptions.py:199 +#: neutron/common/exceptions.py:205 msgid "" "Unable to create the network. No tenant network is available for " "allocation." 
msgstr "" -#: neutron/common/exceptions.py:204 +#: neutron/common/exceptions.py:210 msgid "" "Unable to create the network. No available network found in maximum " "allowed attempts." msgstr "" -#: neutron/common/exceptions.py:209 +#: neutron/common/exceptions.py:215 #, python-format msgid "" "Subnet on port %(port_id)s does not match the requested subnet " "%(subnet_id)s" msgstr "" -#: neutron/common/exceptions.py:214 +#: neutron/common/exceptions.py:220 #, python-format msgid "Malformed request body: %(reason)s" msgstr "" -#: neutron/common/exceptions.py:224 +#: neutron/common/exceptions.py:230 #, python-format msgid "Invalid input for operation: %(error_message)s." msgstr "" -#: neutron/common/exceptions.py:228 +#: neutron/common/exceptions.py:234 #, python-format msgid "The allocation pool %(pool)s is not valid." msgstr "" -#: neutron/common/exceptions.py:232 +#: neutron/common/exceptions.py:238 #, python-format msgid "" "Operation %(op)s is not supported for device_owner %(device_owner)s on " "port %(port_id)s." msgstr "" -#: neutron/common/exceptions.py:237 +#: neutron/common/exceptions.py:243 #, python-format msgid "" "Found overlapping allocation pools: %(pool_1)s %(pool_2)s for subnet " "%(subnet_cidr)s." msgstr "" -#: neutron/common/exceptions.py:242 +#: neutron/common/exceptions.py:248 #, python-format msgid "The allocation pool %(pool)s spans beyond the subnet cidr %(subnet_cidr)s." msgstr "" -#: neutron/common/exceptions.py:247 +#: neutron/common/exceptions.py:253 #, python-format msgid "Unable to generate unique mac on network %(net_id)s." msgstr "" -#: neutron/common/exceptions.py:251 +#: neutron/common/exceptions.py:257 #, python-format msgid "No more IP addresses available on network %(net_id)s." msgstr "" -#: neutron/common/exceptions.py:255 +#: neutron/common/exceptions.py:261 #, python-format msgid "Bridge %(bridge)s does not exist." 
msgstr "" -#: neutron/common/exceptions.py:259 +#: neutron/common/exceptions.py:265 #, python-format msgid "Creation failed. %(dev_name)s already exists." msgstr "" -#: neutron/common/exceptions.py:263 +#: neutron/common/exceptions.py:269 #, python-format msgid "Unknown quota resources %(unknown)s." msgstr "" -#: neutron/common/exceptions.py:267 +#: neutron/common/exceptions.py:273 #, python-format msgid "Quota exceeded for resources: %(overs)s" msgstr "" -#: neutron/common/exceptions.py:271 +#: neutron/common/exceptions.py:277 msgid "Tenant-id was missing from Quota request" msgstr "" -#: neutron/common/exceptions.py:275 +#: neutron/common/exceptions.py:281 #, python-format msgid "" "Change would make usage less than 0 for the following resources: " "%(unders)s" msgstr "" -#: neutron/common/exceptions.py:280 +#: neutron/common/exceptions.py:286 #, python-format msgid "" "Unable to reconfigure sharing settings for network %(network)s. Multiple " "tenants are using it" msgstr "" -#: neutron/common/exceptions.py:285 +#: neutron/common/exceptions.py:291 #, python-format msgid "Invalid extension environment: %(reason)s" msgstr "" -#: neutron/common/exceptions.py:289 +#: neutron/common/exceptions.py:295 #, python-format msgid "Extensions not found: %(extensions)s" msgstr "" -#: neutron/common/exceptions.py:293 +#: neutron/common/exceptions.py:299 #, python-format msgid "Invalid content type %(content_type)s" msgstr "" -#: neutron/common/exceptions.py:297 +#: neutron/common/exceptions.py:303 #, python-format msgid "Unable to find any IP address on external network %(net_id)s." 
msgstr "" -#: neutron/common/exceptions.py:302 +#: neutron/common/exceptions.py:308 msgid "More than one external network exists" msgstr "" -#: neutron/common/exceptions.py:306 +#: neutron/common/exceptions.py:312 #, python-format msgid "An invalid value was provided for %(opt_name)s: %(opt_value)s" msgstr "" -#: neutron/common/exceptions.py:311 +#: neutron/common/exceptions.py:317 #, python-format msgid "Gateway ip %(ip_address)s conflicts with allocation pool %(pool)s" msgstr "" -#: neutron/common/exceptions.py:316 +#: neutron/common/exceptions.py:322 #, python-format msgid "" "Current gateway ip %(ip_address)s already in use by port %(port_id)s. " "Unable to update." msgstr "" -#: neutron/common/exceptions.py:321 +#: neutron/common/exceptions.py:327 #, python-format msgid "Invalid network VLAN range: '%(vlan_range)s' - '%(error)s'" msgstr "" -#: neutron/common/exceptions.py:331 +#: neutron/common/exceptions.py:337 msgid "Empty physical network name." msgstr "" -#: neutron/common/exceptions.py:335 +#: neutron/common/exceptions.py:341 #, python-format msgid "Invalid network Tunnel range: '%(tunnel_range)s' - %(error)s" msgstr "" -#: neutron/common/exceptions.py:346 +#: neutron/common/exceptions.py:352 #, python-format msgid "Invalid network VXLAN port range: '%(vxlan_range)s'" msgstr "" -#: neutron/common/exceptions.py:350 +#: neutron/common/exceptions.py:356 msgid "VXLAN Network unsupported." msgstr "" -#: neutron/common/exceptions.py:354 +#: neutron/common/exceptions.py:360 #, python-format msgid "Found duplicate extension: %(alias)s" msgstr "" -#: neutron/common/exceptions.py:358 +#: neutron/common/exceptions.py:364 #, python-format msgid "" "The following device_id %(device_id)s is not owned by your tenant or " "matches another tenants router." 
msgstr "" -#: neutron/common/exceptions.py:363 +#: neutron/common/exceptions.py:369 #, python-format msgid "Invalid CIDR %(input)s given as IP prefix" msgstr "" -#: neutron/common/exceptions.py:367 +#: neutron/common/exceptions.py:373 #, python-format msgid "Router '%(router_id)s' is not compatible with this agent" msgstr "" -#: neutron/common/exceptions.py:371 +#: neutron/common/exceptions.py:377 #, python-format msgid "Router '%(router_id)s' cannot be both DVR and HA" msgstr "" -#: neutron/common/exceptions.py:392 +#: neutron/common/exceptions.py:398 msgid "network_id and router_id are None. One must be provided." msgstr "" -#: neutron/common/exceptions.py:396 +#: neutron/common/exceptions.py:402 msgid "Aborting periodic_sync_routers_task due to an error" msgstr "" -#: neutron/common/exceptions.py:408 +#: neutron/common/exceptions.py:414 #, python-format msgid "%(driver)s: Internal driver error." msgstr "" -#: neutron/common/exceptions.py:412 +#: neutron/common/exceptions.py:418 msgid "Unspecified minimum subnet pool prefix" msgstr "" -#: neutron/common/exceptions.py:416 +#: neutron/common/exceptions.py:422 msgid "Empty subnet pool prefix list" msgstr "" -#: neutron/common/exceptions.py:420 +#: neutron/common/exceptions.py:426 msgid "Cannot mix IPv4 and IPv6 prefixes in a subnet pool" msgstr "" -#: neutron/common/exceptions.py:424 +#: neutron/common/exceptions.py:430 #, python-format msgid "Prefix '%(prefix)s' not supported in IPv%(version)s pool" msgstr "" -#: neutron/common/exceptions.py:428 +#: neutron/common/exceptions.py:434 #, python-format msgid "" "Illegal prefix bounds: %(prefix_type)s=%(prefixlen)s, " "%(base_prefix_type)s=%(base_prefixlen)s" msgstr "" -#: neutron/common/exceptions.py:433 +#: neutron/common/exceptions.py:439 #, python-format msgid "Illegal update to prefixes: %(msg)s" msgstr "" -#: neutron/common/exceptions.py:437 +#: neutron/common/exceptions.py:443 #, python-format msgid "Failed to allocate subnet: %(reason)s" msgstr "" -#: 
neutron/common/exceptions.py:441 +#: neutron/common/exceptions.py:447 #, python-format msgid "" "Unable to allocate subnet with prefix length %(prefixlen)s, minimum " "allowed prefix is %(min_prefixlen)s" msgstr "" -#: neutron/common/exceptions.py:446 +#: neutron/common/exceptions.py:452 #, python-format msgid "" "Unable to allocate subnet with prefix length %(prefixlen)s, maximum " "allowed prefix is %(max_prefixlen)s" msgstr "" -#: neutron/common/exceptions.py:451 +#: neutron/common/exceptions.py:457 #, python-format msgid "Unable to delete subnet pool: %(reason)s" msgstr "" -#: neutron/common/exceptions.py:455 +#: neutron/common/exceptions.py:461 msgid "Per-tenant subnet pool prefix quota exceeded" msgstr "" -#: neutron/common/exceptions.py:459 +#: neutron/common/exceptions.py:465 #, python-format msgid "Device '%(device_name)s' does not exist" msgstr "" -#: neutron/common/exceptions.py:463 +#: neutron/common/exceptions.py:469 msgid "" "Subnets hosted on the same network must be allocated from the same subnet" " pool" @@ -1910,53 +1912,53 @@ msgstr "" msgid "Cannot create resource for another tenant" msgstr "" -#: neutron/db/db_base_plugin_v2.py:380 +#: neutron/db/db_base_plugin_v2.py:393 msgid "IP allocation requires subnet_id or ip_address" msgstr "" -#: neutron/db/db_base_plugin_v2.py:397 +#: neutron/db/db_base_plugin_v2.py:410 #, python-format msgid "" "Failed to create port on network %(network_id)s, because fixed_ips " "included invalid subnet %(subnet_id)s" msgstr "" -#: neutron/db/db_base_plugin_v2.py:423 +#: neutron/db/db_base_plugin_v2.py:436 #, python-format msgid "" "IPv6 address %(address)s can not be directly assigned to a port on subnet" " %(id)s since the subnet is configured for automatic addresses" msgstr "" -#: neutron/db/db_base_plugin_v2.py:442 neutron/db/db_base_plugin_v2.py:485 -#: neutron/plugins/opencontrail/contrail_plugin.py:388 +#: neutron/db/db_base_plugin_v2.py:455 neutron/db/db_base_plugin_v2.py:503 +#: 
neutron/plugins/opencontrail/contrail_plugin.py:390 msgid "Exceeded maximim amount of fixed ips per port" msgstr "" -#: neutron/db/db_base_plugin_v2.py:609 +#: neutron/db/db_base_plugin_v2.py:627 msgid "0 is not allowed as CIDR prefix length" msgstr "" -#: neutron/db/db_base_plugin_v2.py:619 +#: neutron/db/db_base_plugin_v2.py:637 #, python-format msgid "" "Requested subnet with cidr: %(cidr)s for network: %(network_id)s overlaps" " with another subnet" msgstr "" -#: neutron/db/db_base_plugin_v2.py:714 neutron/db/db_base_plugin_v2.py:718 +#: neutron/db/db_base_plugin_v2.py:732 neutron/db/db_base_plugin_v2.py:736 #, python-format msgid "Invalid route: %s" msgstr "" -#: neutron/db/db_base_plugin_v2.py:796 +#: neutron/db/db_base_plugin_v2.py:814 #, python-format msgid "" "Invalid CIDR %s for IPv6 address mode. OpenStack uses the EUI-64 address " "format, which requires the prefix to be /64." msgstr "" -#: neutron/db/db_base_plugin_v2.py:804 +#: neutron/db/db_base_plugin_v2.py:822 #, python-format msgid "" "ipv6_ra_mode set to '%(ra_mode)s' with ipv6_address_mode set to " @@ -1964,77 +1966,77 @@ msgid "" "the same value" msgstr "" -#: neutron/db/db_base_plugin_v2.py:812 +#: neutron/db/db_base_plugin_v2.py:830 msgid "" "ipv6_ra_mode or ipv6_address_mode cannot be set when enable_dhcp is set " "to False." msgstr "" -#: neutron/db/db_base_plugin_v2.py:818 +#: neutron/db/db_base_plugin_v2.py:836 msgid "Cannot disable enable_dhcp with ipv6 attributes set" msgstr "" -#: neutron/db/db_base_plugin_v2.py:1034 +#: neutron/db/db_base_plugin_v2.py:1052 #, python-format msgid "%(name)s '%(addr)s' does not match the ip_version '%(ip_version)s'" msgstr "" -#: neutron/db/db_base_plugin_v2.py:1061 +#: neutron/db/db_base_plugin_v2.py:1079 msgid "Subnet has a prefix length that is incompatible with DHCP service enabled." 
msgstr "" -#: neutron/db/db_base_plugin_v2.py:1082 +#: neutron/db/db_base_plugin_v2.py:1100 msgid "Gateway is not valid on subnet" msgstr "" -#: neutron/db/db_base_plugin_v2.py:1102 neutron/db/db_base_plugin_v2.py:1116 -#: neutron/plugins/opencontrail/contrail_plugin.py:312 +#: neutron/db/db_base_plugin_v2.py:1120 neutron/db/db_base_plugin_v2.py:1134 +#: neutron/plugins/opencontrail/contrail_plugin.py:313 msgid "new subnet" msgstr "" -#: neutron/db/db_base_plugin_v2.py:1109 +#: neutron/db/db_base_plugin_v2.py:1127 #, python-format msgid "Error parsing dns address %s" msgstr "" -#: neutron/db/db_base_plugin_v2.py:1125 +#: neutron/db/db_base_plugin_v2.py:1143 msgid "ipv6_ra_mode is not valid when ip_version is 4" msgstr "" -#: neutron/db/db_base_plugin_v2.py:1129 +#: neutron/db/db_base_plugin_v2.py:1147 msgid "ipv6_address_mode is not valid when ip_version is 4" msgstr "" -#: neutron/db/db_base_plugin_v2.py:1278 +#: neutron/db/db_base_plugin_v2.py:1296 msgid "allocation_pools allowed only for specific subnet requests." 
msgstr "" -#: neutron/db/db_base_plugin_v2.py:1289 +#: neutron/db/db_base_plugin_v2.py:1307 #, python-format msgid "Cannot allocate IPv%(req_ver)s subnet from IPv%(pool_ver)s subnet pool" msgstr "" -#: neutron/db/db_base_plugin_v2.py:1367 +#: neutron/db/db_base_plugin_v2.py:1385 msgid "ip_version must be specified in the absence of cidr and subnetpool_id" msgstr "" -#: neutron/db/db_base_plugin_v2.py:1384 +#: neutron/db/db_base_plugin_v2.py:1402 msgid "cidr and prefixlen must not be supplied together" msgstr "" -#: neutron/db/db_base_plugin_v2.py:1395 +#: neutron/db/db_base_plugin_v2.py:1413 msgid "A cidr must be specified in the absence of a subnet pool" msgstr "" -#: neutron/db/db_base_plugin_v2.py:1695 +#: neutron/db/db_base_plugin_v2.py:1717 msgid "Existing prefixes must be a subset of the new prefixes" msgstr "" -#: neutron/db/db_base_plugin_v2.py:1762 +#: neutron/db/db_base_plugin_v2.py:1784 msgid "Subnet pool has existing allocations" msgstr "" -#: neutron/db/db_base_plugin_v2.py:1769 +#: neutron/db/db_base_plugin_v2.py:1791 msgid "mac address update" msgstr "" @@ -2095,11 +2097,11 @@ msgid "" "of subnet %(sub_id)s" msgstr "" -#: neutron/db/l3_db.py:499 neutron/plugins/opencontrail/contrail_plugin.py:499 +#: neutron/db/l3_db.py:499 neutron/plugins/opencontrail/contrail_plugin.py:501 msgid "Either subnet_id or port_id must be specified" msgstr "" -#: neutron/db/l3_db.py:503 neutron/plugins/opencontrail/contrail_plugin.py:509 +#: neutron/db/l3_db.py:503 neutron/plugins/opencontrail/contrail_plugin.py:511 msgid "Cannot specify both subnet-id and port-id" msgstr "" @@ -2332,7 +2334,7 @@ msgid "The %s tunnel type to migrate from" msgstr "" #: neutron/db/migration/migrate_to_ml2.py:489 -#: neutron/plugins/openvswitch/common/config.py:65 +#: neutron/plugins/openvswitch/common/config.py:67 msgid "The UDP port to use for VXLAN tunnels." 
msgstr "" @@ -3708,7 +3710,7 @@ msgstr "" #: neutron/plugins/ml2/drivers/mlnx/agent/config.py:43 #: neutron/plugins/nec/config.py:27 #: neutron/plugins/oneconvergence/lib/config.py:45 -#: neutron/plugins/openvswitch/common/config.py:51 +#: neutron/plugins/openvswitch/common/config.py:53 #: neutron/plugins/sriovnicagent/common/config.py:58 msgid "" "The number of seconds the agent will wait between polling for local " @@ -4075,7 +4077,7 @@ msgstr "" msgid "network_type value '%s' not supported" msgstr "" -#: neutron/plugins/ml2/plugin.py:233 +#: neutron/plugins/ml2/plugin.py:232 msgid "binding:profile value too large" msgstr "" @@ -4682,30 +4684,30 @@ msgstr "" msgid "Unable to connect to NVSD controller. Exiting after %(retries)s attempts" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:58 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:60 #, python-format msgid "" "Unable to retrieve port details for devices: %(devices)s because of " "error: %(error)s" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1711 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1592 msgid "" "DVR deployments for VXLAN/GRE underlays require L2-pop to be enabled, in " "both the Agent and Server side." msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1729 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1606 #, python-format msgid "Parsing bridge_mappings failed: %s." msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1751 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1628 #, python-format msgid "Invalid tunnel type specified: %s" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1754 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1631 msgid "Tunneling cannot be enabled without a valid local_ip." msgstr "" @@ -4735,31 +4737,35 @@ msgid "" "to physical bridges." 
msgstr "" -#: neutron/plugins/openvswitch/common/config.py:55 +#: neutron/plugins/openvswitch/common/config.py:48 +msgid "OpenFlow interface to use." +msgstr "" + +#: neutron/plugins/openvswitch/common/config.py:57 msgid "Minimize polling by monitoring ovsdb for interface changes." msgstr "" -#: neutron/plugins/openvswitch/common/config.py:59 +#: neutron/plugins/openvswitch/common/config.py:61 msgid "" "The number of seconds to wait before respawning the ovsdb monitor after " "losing communication with it." msgstr "" -#: neutron/plugins/openvswitch/common/config.py:62 +#: neutron/plugins/openvswitch/common/config.py:64 msgid "Network types supported by the agent (gre and/or vxlan)." msgstr "" -#: neutron/plugins/openvswitch/common/config.py:67 +#: neutron/plugins/openvswitch/common/config.py:69 msgid "MTU size of veth interfaces" msgstr "" -#: neutron/plugins/openvswitch/common/config.py:69 +#: neutron/plugins/openvswitch/common/config.py:71 msgid "" "Use ML2 l2population mechanism driver to learn remote MAC and IPs and " "improve tunnel scalability." msgstr "" -#: neutron/plugins/openvswitch/common/config.py:72 +#: neutron/plugins/openvswitch/common/config.py:74 msgid "" "Enable local ARP responder if it is supported. Requires OVS 2.1 and ML2 " "l2population driver. Allows the switch (when supporting an overlay) to " @@ -4767,7 +4773,7 @@ msgid "" "broadcast into the overlay." msgstr "" -#: neutron/plugins/openvswitch/common/config.py:78 +#: neutron/plugins/openvswitch/common/config.py:80 msgid "" "Enable suppression of ARP responses that don't match an IP address that " "belongs to the port from which they originate. Note: This prevents the " @@ -4778,17 +4784,17 @@ msgid "" " a version of OVS that supports matching ARP headers." msgstr "" -#: neutron/plugins/openvswitch/common/config.py:89 +#: neutron/plugins/openvswitch/common/config.py:91 msgid "" "Set or un-set the don't fragment (DF) bit on outgoing IP packet carrying " "GRE/VXLAN tunnel." 
msgstr "" -#: neutron/plugins/openvswitch/common/config.py:92 +#: neutron/plugins/openvswitch/common/config.py:94 msgid "Make the l2 agent run in DVR mode." msgstr "" -#: neutron/plugins/openvswitch/common/config.py:94 +#: neutron/plugins/openvswitch/common/config.py:96 msgid "" "Set new timeout in seconds for new rpc calls after agent receives " "SIGTERM. If value is set to 0, rpc timeout won't be changed" @@ -5173,8 +5179,8 @@ msgstr "" msgid "Adds test attributes to core resources." msgstr "" -#: neutron/tests/unit/plugins/openvswitch/agent/test_ovs_neutron_agent.py:963 -#: neutron/tests/unit/plugins/openvswitch/agent/test_ovs_neutron_agent.py:980 +#: neutron/tests/unit/plugins/openvswitch/agent/test_ovs_neutron_agent.py:870 +#: neutron/tests/unit/plugins/openvswitch/agent/test_ovs_neutron_agent.py:887 #, python-format msgid "Failed to set-up %(type)s tunnel port to %(ip)s" msgstr "" diff --git a/neutron/locale/pt_BR/LC_MESSAGES/neutron-log-info.po b/neutron/locale/pt_BR/LC_MESSAGES/neutron-log-info.po index 7fcb0eb5cc4..81e426efea8 100644 --- a/neutron/locale/pt_BR/LC_MESSAGES/neutron-log-info.po +++ b/neutron/locale/pt_BR/LC_MESSAGES/neutron-log-info.po @@ -8,9 +8,9 @@ msgid "" msgstr "" "Project-Id-Version: Neutron\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2015-05-25 06:15+0000\n" -"PO-Revision-Date: 2015-05-22 16:09+0000\n" -"Last-Translator: Andre Campos Bezerra \n" +"POT-Creation-Date: 2015-05-29 06:16+0000\n" +"PO-Revision-Date: 2015-05-28 20:54+0000\n" +"Last-Translator: openstackjenkins \n" "Language-Team: Portuguese (Brazil) (http://www.transifex.com/projects/p/" "neutron/language/pt_BR/)\n" "Language: pt_BR\n" @@ -20,103 +20,49 @@ msgstr "" "Generated-By: Babel 1.3\n" "Plural-Forms: nplurals=2; plural=(n > 1);\n" -#, python-format -msgid "Service %s is supported by the core plugin" -msgstr "Serviço %s é suportado pelo plugin núcleo" - -#, python-format -msgid "Loading Plugin: %s" -msgstr "Carregando Plug-in: %s" - -#, 
python-format -msgid "" -"Inserting policy:%(new_policy)s in place of deprecated policy:%(old_policy)s" -msgstr "" -"Inserindo política: %(new_policy)s no lugar de política deprecada: " -"%(old_policy)s" - -#, python-format -msgid "Neutron service started, listening on %(host)s:%(port)s" -msgstr "Serviço Neutron iniciado, escutando em %(host)s:%(port)s" - #, python-format msgid "%(method)s %(url)s" msgstr "%(method)s %(url)s" #, python-format -msgid "HTTP exception thrown: %s" -msgstr "Exceção de HTTP lançada: %s" +msgid "%(plugin_key)s: %(function_name)s with args %(args)s ignored" +msgstr "%(plugin_key)s: %(function_name)s com args %(args)s ignorado" + +#, python-format +msgid "%(type)s ID ranges: %(range)s" +msgstr "%(type)s faixas de ID: %(range)s" + +#, python-format +msgid "%(url)s returned a fault: %(exception)s" +msgstr "%(url)s retornou uma falha: %(exception)s" #, python-format msgid "%(url)s returned with HTTP %(status)d" msgstr "%(url)s retornado com HTTP %(status)d" #, python-format -msgid "%(url)s returned a fault: %(exception)s" -msgstr "%(url)s retornou uma falha: %(exception)s" - -msgid "Disabled security-group extension." -msgstr "Extensão de grupo de segurança desativada." +msgid "" +"Added segment %(id)s of type %(network_type)s for network %(network_id)s" +msgstr "" +"Adicionado segmento %(id)s de tipo %(network_type)s para a rede " +"%(network_id)s" #, python-format -msgid "Preparing filters for devices %s" -msgstr "Preparando filtros para dispositivos %s" +msgid "Adding %s to list of bridges." +msgstr "Adicionando %s na lista de pontes." + +msgid "Agent initialized successfully, now running... " +msgstr "Agente inicializado com êxito; em execução agora... " + +msgid "Agent out of sync with plugin!" +msgstr "Agente fora de sincronização com o plug-in!" + +msgid "Agent tunnel out of sync with plugin!" +msgstr "Túnel do agente fora de sincronização com o plug-in!" 
#, python-format -msgid "Security group rule updated %r" -msgstr "Regra do grupo de segurança atualizada %r" - -#, python-format -msgid "Security group member updated %r" -msgstr "Membro do grupo de segurança atualizado %r" - -msgid "Provider rule updated" -msgstr "Regra do provedor atualizada" - -#, python-format -msgid "Remove device filter for %r" -msgstr "Remover filtro de dispositivo para %r" - -msgid "Refresh firewall rules" -msgstr "Atualizar regras de firewall" - -msgid "No ports here to refresh firewall" -msgstr "Nenhuma porta aqui para atualizar firewall" - -msgid "DHCP agent started" -msgstr "Agente DHCP iniciado" - -msgid "Synchronizing state" -msgstr "Sincronizando estado" - -msgid "Synchronizing state complete" -msgstr "Sincronizando estado finalizado" - -#, python-format -msgid "agent_updated by server side %s!" -msgstr "agent_updated por lado do servidor %s!" - -msgid "L3 agent started" -msgstr "Agente L3 iniciado" - -#, python-format -msgid "Device %s already exists" -msgstr "O dispositivo %s já existe" - -#, python-format -msgid "Attempted to update port filter which is not filtered %s" -msgstr "Tentou atualizar o filtro de porta que não foi filtrado %s" - -#, python-format -msgid "Attempted to remove port filter which is not filtered %r" -msgstr "Tentou remover o filtro de porta que não foi filtrado %r" - -msgid "Initializing extension manager." -msgstr "Inicializando o Extension Manager." - -#, python-format -msgid "Loaded extension: %s" -msgstr "Extensão carregada: %s" +msgid "Allocated vlan (%d) from the pool" +msgstr "alocada VLAN (%d) do pool" msgid "" "Allow sorting is enabled because native pagination requires native sorting" @@ -125,89 +71,39 @@ msgstr "" "classificação nativa" #, python-format -msgid "Deleting port: %s" -msgstr "Deletando porta: %s" - -msgid "OVS cleanup completed successfully" -msgstr "Limpeza de OVS concluída com êxito" - -msgid "Agent initialized successfully, now running... 
" -msgstr "Agente inicializado com êxito; em execução agora... " - -msgid "Logging enabled!" -msgstr "Criação de log ativada!" +msgid "Allowable flat physical_network names: %s" +msgstr "Nomes permitidos de rede flat physical_network : %s" #, python-format -msgid "Config paste file: %s" -msgstr "Arquivo de colagem configurado: %s" +msgid "Ancillary Port %s added" +msgstr "Porta auxiliar %s adicionada" + +msgid "Arbitrary flat physical_network names allowed" +msgstr "Nomes arbitrários de rede flat physical_network permitidos" #, python-format -msgid "" -"Validation for CIDR: %(new_cidr)s failed - overlaps with subnet " -"%(subnet_id)s (CIDR: %(cidr)s)" -msgstr "" -"A validação para CIDR: %(new_cidr)s falhou - se sobrepõe com a sub-rede " -"%(subnet_id)s (CIDR: %(cidr)s)" +msgid "Assigning %(vlan_id)s as local vlan for net-id=%(net_uuid)s" +msgstr "Designando %(vlan_id)s como vlan local para net-id=%(net_uuid)s" #, python-format -msgid "Found invalid IP address in pool: %(start)s - %(end)s:" -msgstr "Localizado endereço IP inválido no pool: %(start)s - %(end)s:" - -msgid "Specified IP addresses do not match the subnet IP version" -msgstr "Endereços IP especificado não correspondem à versão do IP da sub-rede" +msgid "Attachment %s removed" +msgstr "Anexo %s removido" #, python-format -msgid "Start IP (%(start)s) is greater than end IP (%(end)s)" -msgstr "IP inicial (%(start)s) é maior que IP final (%(end)s)" +msgid "Attempted to remove port filter which is not filtered %r" +msgstr "Tentou remover o filtro de porta que não foi filtrado %r" #, python-format -msgid "Found pool larger than subnet CIDR:%(start)s - %(end)s" -msgstr "Localizado pool maior que a sub-rede CIDR:%(start)s - %(end)s" - -#, python-format -msgid "Found overlapping ranges: %(l_range)s and %(r_range)s" -msgstr "Localizados intervalos de sobreposição: %(l_range)s e %(r_range)s" - -#, python-format -msgid "Skipping port %s as no IP is configure on it" -msgstr "Ignorando a porta %s porque nenhum IP está 
configurado nela" - -msgid "SNAT already bound to a service node." -msgstr "SNAT já conectado a um nó de serviço." - -#, python-format -msgid "Eventlet backdoor listening on %(port)s for process %(pid)d" -msgstr "Backdoor de Eventlet escutando na porta %(port)s pelo processo %(pid)d" - -#, python-format -msgid "Skipping periodic task %(task)s because its interval is negative" -msgstr "Ignorando tarefa periódica %(task)s porque seu intervalo é negativo" - -#, python-format -msgid "Skipping periodic task %(task)s because it is disabled" -msgstr "Ignorando tarefa periódica %(task)s porque ela está desativada" +msgid "Attempted to update port filter which is not filtered %s" +msgstr "Tentou atualizar o filtro de porta que não foi filtrado %s" #, python-format msgid "Caught %s, exiting" msgstr "%s capturadas, saindo" -msgid "Parent process has died unexpectedly, exiting" -msgstr "Processo pai saiu inesperadamente, saindo" - #, python-format -msgid "Child caught %s, exiting" -msgstr "Filho capturado %s, terminando" - -msgid "Forking too fast, sleeping" -msgstr "Bifurcação muito rápida, suspendendo" - -#, python-format -msgid "Started child %d" -msgstr "Filho %d iniciado" - -#, python-format -msgid "Starting %d workers" -msgstr "Iniciando %d trabalhadores" +msgid "Caught %s, stopping children" +msgstr "%s capturado, parando filhos" #, python-format msgid "Child %(pid)d killed by signal %(sig)d" @@ -218,8 +114,262 @@ msgid "Child %(pid)s exited with status %(code)d" msgstr "Filho %(pid)s encerrando com status %(code)d" #, python-format -msgid "Caught %s, stopping children" -msgstr "%s capturado, parando filhos" +msgid "Child caught %s, exiting" +msgstr "Filho capturado %s, terminando" + +#, python-format +msgid "Config paste file: %s" +msgstr "Arquivo de colagem configurado: %s" + +#, python-format +msgid "Configured mechanism driver names: %s" +msgstr "Configurados nomes para o driver de mecanismo: %s" + +#, python-format +msgid "Configured type driver names: %s" +msgstr 
"Configurado nomes para o driver de tipo: %s" + +msgid "DHCP agent started" +msgstr "Agente DHCP iniciado" + +#, python-format +msgid "Default provider is not specified for service type %s" +msgstr "Provedor padrão não foi especificado para o tipo de serviço %s" + +#, python-format +msgid "Deleting port: %s" +msgstr "Deletando porta: %s" + +#, python-format +msgid "Device %s already exists" +msgstr "O dispositivo %s já existe" + +#, python-format +msgid "Device %s not defined on plugin" +msgstr "Dispositivo %s não definido no plug-in" + +msgid "Disabled security-group extension." +msgstr "Extensão de grupo de segurança desativada." + +#, python-format +msgid "Eventlet backdoor listening on %(port)s for process %(pid)d" +msgstr "Backdoor de Eventlet escutando na porta %(port)s pelo processo %(pid)d" + +msgid "Forking too fast, sleeping" +msgstr "Bifurcação muito rápida, suspendendo" + +#, python-format +msgid "Found invalid IP address in pool: %(start)s - %(end)s:" +msgstr "Localizado endereço IP inválido no pool: %(start)s - %(end)s:" + +#, python-format +msgid "Found overlapping ranges: %(l_range)s and %(r_range)s" +msgstr "Localizados intervalos de sobreposição: %(l_range)s e %(r_range)s" + +#, python-format +msgid "Found pool larger than subnet CIDR:%(start)s - %(end)s" +msgstr "Localizado pool maior que a sub-rede CIDR:%(start)s - %(end)s" + +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "Exceção de HTTP lançada: %s" + +#, python-format +msgid "Initializing driver for type '%s'" +msgstr "inicializando driver para o tipo '%s'" + +msgid "Initializing extension manager." +msgstr "Inicializando o Extension Manager." 
+ +#, python-format +msgid "Initializing mechanism driver '%s'" +msgstr "Inicializando driver de mecanismo '%s'" + +#, python-format +msgid "" +"Inserting policy:%(new_policy)s in place of deprecated policy:%(old_policy)s" +msgstr "" +"Inserindo política: %(new_policy)s no lugar de política deprecada: " +"%(old_policy)s" + +#, python-format +msgid "Interface mappings: %s" +msgstr "Mapeamentos da interface: %s" + +msgid "L3 agent started" +msgstr "Agente L3 iniciado" + +msgid "LinuxBridge Agent RPC Daemon Started!" +msgstr "Daemon RPC do Agente LinuxBridge Iniciado!" + +#, python-format +msgid "Loaded extension: %s" +msgstr "Extensão carregada: %s" + +#, python-format +msgid "Loaded mechanism driver names: %s" +msgstr "Carregados nomes do driver de mecanismo: %s" + +#, python-format +msgid "Loaded type driver names: %s" +msgstr "Carregados nomes do driver de tipo: %s" + +#, python-format +msgid "Loading Metering driver %s" +msgstr "Carregando driver de medição %s" + +#, python-format +msgid "Loading Plugin: %s" +msgstr "Carregando Plug-in: %s" + +msgid "Logging enabled!" +msgstr "Criação de log ativada!" + +#, python-format +msgid "" +"Loop iteration exceeded interval (%(polling_interval)s vs. %(elapsed)s)!" +msgstr "" +"A iteração do loop excedeu o intervalo (%(polling_interval)s vs. " +"%(elapsed)s)!" 
+ +msgid "ML2 FlatTypeDriver initialization complete" +msgstr "Inicialização do ML2 FlatTypeDriver concluída" + +msgid "ML2 LocalTypeDriver initialization complete" +msgstr "Inicialização do ML2 LocalTypeDriver concluída" + +#, python-format +msgid "Mapping physical network %(physical_network)s to bridge %(bridge)s" +msgstr "Mapeamento de rede física %(physical_network)s para a ponte %(bridge)s" + +msgid "Modular L2 Plugin initialization complete" +msgstr "Inicialização de plug-in L2 modular concluída" + +#, python-format +msgid "Network VLAN ranges: %s" +msgstr "Intervalos de VLAN de rede: %s" + +#, python-format +msgid "Network name changed to %s" +msgstr "Nome da rede alterado para %s" + +#, python-format +msgid "Neutron service started, listening on %(host)s:%(port)s" +msgstr "Serviço Neutron iniciado, escutando em %(host)s:%(port)s" + +#, python-format +msgid "No %s Plugin loaded" +msgstr "Nenhum %s Plug-in carregado" + +msgid "No ports here to refresh firewall" +msgstr "Nenhuma porta aqui para atualizar firewall" + +msgid "OVS cleanup completed successfully" +msgstr "Limpeza de OVS concluída com êxito" + +msgid "Parent process has died unexpectedly, exiting" +msgstr "Processo pai saiu inesperadamente, saindo" + +#, python-format +msgid "Port %(device)s updated. Details: %(details)s" +msgstr "Porta %(device)s atualizada. Detalhes: %(details)s" + +#, python-format +msgid "Port %s updated." +msgstr "Porta %s atualizada." 
+ +#, python-format +msgid "Preparing filters for devices %s" +msgstr "Preparando filtros para dispositivos %s" + +msgid "Provider rule updated" +msgstr "Regra do provedor atualizada" + +#, python-format +msgid "RPC agent_id: %s" +msgstr "agent_id de RPC: %s" + +#, python-format +msgid "Reclaiming vlan = %(vlan_id)s from net-id = %(net_uuid)s" +msgstr "Recuperando vlan = %(vlan_id)s a partir de net-id = %(net_uuid)s" + +msgid "Refresh firewall rules" +msgstr "Atualizar regras de firewall" + +#, python-format +msgid "Registered mechanism drivers: %s" +msgstr "Registrados drivers de mecanismo : %s" + +#, python-format +msgid "Registered types: %s" +msgstr "Tipos registrados: %s" + +#, python-format +msgid "Remove device filter for %r" +msgstr "Remover filtro de dispositivo para %r" + +msgid "SNAT already bound to a service node." +msgstr "SNAT já conectado a um nó de serviço." + +#, python-format +msgid "Security group member updated %r" +msgstr "Membro do grupo de segurança atualizado %r" + +#, python-format +msgid "Security group rule updated %r" +msgstr "Regra do grupo de segurança atualizada %r" + +#, python-format +msgid "Service %s is supported by the core plugin" +msgstr "Serviço %s é suportado pelo plugin núcleo" + +#, python-format +msgid "Skipping periodic task %(task)s because it is disabled" +msgstr "Ignorando tarefa periódica %(task)s porque ela está desativada" + +#, python-format +msgid "Skipping periodic task %(task)s because its interval is negative" +msgstr "Ignorando tarefa periódica %(task)s porque seu intervalo é negativo" + +#, python-format +msgid "Skipping port %s as no IP is configure on it" +msgstr "Ignorando a porta %s porque nenhum IP está configurado nela" + +msgid "Specified IP addresses do not match the subnet IP version" +msgstr "Endereços IP especificado não correspondem à versão do IP da sub-rede" + +#, python-format +msgid "Start IP (%(start)s) is greater than end IP (%(end)s)" +msgstr "IP inicial (%(start)s) é maior que IP final 
(%(end)s)" + +#, python-format +msgid "Started child %d" +msgstr "Filho %d iniciado" + +#, python-format +msgid "Starting %d workers" +msgstr "Iniciando %d trabalhadores" + +msgid "Synchronizing state" +msgstr "Sincronizando estado" + +msgid "Synchronizing state complete" +msgstr "Sincronizando estado finalizado" + +#, python-format +msgid "Tenant network_types: %s" +msgstr "Tipos de network_types: %s" + +#, python-format +msgid "" +"Validation for CIDR: %(new_cidr)s failed - overlaps with subnet " +"%(subnet_id)s (CIDR: %(cidr)s)" +msgstr "" +"A validação para CIDR: %(new_cidr)s falhou - se sobrepõe com a sub-rede " +"%(subnet_id)s (CIDR: %(cidr)s)" + +msgid "VlanTypeDriver initialization complete" +msgstr "Inicialização do VlanTypeDriver concluída" msgid "Wait called after thread killed. Cleaning up." msgstr "Espera requisitada depois que thread foi morta. Limpando." @@ -229,155 +379,5 @@ msgid "Waiting on %d children to exit" msgstr "Aguardando em %d filhos para sair" #, python-format -msgid "Allocated vlan (%d) from the pool" -msgstr "alocada VLAN (%d) do pool" - -#, python-format -msgid "No %s Plugin loaded" -msgstr "Nenhum %s Plug-in carregado" - -#, python-format -msgid "%(plugin_key)s: %(function_name)s with args %(args)s ignored" -msgstr "%(plugin_key)s: %(function_name)s com args %(args)s ignorado" - -#, python-format -msgid "" -"Loop iteration exceeded interval (%(polling_interval)s vs. %(elapsed)s)!" -msgstr "" -"A iteração do loop excedeu o intervalo (%(polling_interval)s vs. " -"%(elapsed)s)!" - -#, python-format -msgid "RPC agent_id: %s" -msgstr "agent_id de RPC: %s" - -#, python-format -msgid "Port %(device)s updated. Details: %(details)s" -msgstr "Porta %(device)s atualizada. Detalhes: %(details)s" - -#, python-format -msgid "Device %s not defined on plugin" -msgstr "Dispositivo %s não definido no plug-in" - -#, python-format -msgid "Attachment %s removed" -msgstr "Anexo %s removido" - -#, python-format -msgid "Port %s updated." 
-msgstr "Porta %s atualizada." - -msgid "LinuxBridge Agent RPC Daemon Started!" -msgstr "Daemon RPC do Agente LinuxBridge Iniciado!" - -msgid "Agent out of sync with plugin!" -msgstr "Agente fora de sincronização com o plug-in!" - -#, python-format -msgid "Interface mappings: %s" -msgstr "Mapeamentos da interface: %s" - -#, python-format -msgid "" -"Added segment %(id)s of type %(network_type)s for network %(network_id)s" -msgstr "" -"Adicionado segmento %(id)s de tipo %(network_type)s para a rede " -"%(network_id)s" - -#, python-format -msgid "Configured type driver names: %s" -msgstr "Configurado nomes para o driver de tipo: %s" - -#, python-format -msgid "Loaded type driver names: %s" -msgstr "Carregados nomes do driver de tipo: %s" - -#, python-format -msgid "Registered types: %s" -msgstr "Tipos registrados: %s" - -#, python-format -msgid "Tenant network_types: %s" -msgstr "Tipos de network_types: %s" - -#, python-format -msgid "Initializing driver for type '%s'" -msgstr "inicializando driver para o tipo '%s'" - -#, python-format -msgid "Configured mechanism driver names: %s" -msgstr "Configurados nomes para o driver de mecanismo: %s" - -#, python-format -msgid "Loaded mechanism driver names: %s" -msgstr "Carregados nomes do driver de mecanismo: %s" - -#, python-format -msgid "Registered mechanism drivers: %s" -msgstr "Registrados drivers de mecanismo : %s" - -#, python-format -msgid "Initializing mechanism driver '%s'" -msgstr "Inicializando driver de mecanismo '%s'" - -msgid "Modular L2 Plugin initialization complete" -msgstr "Inicialização de plug-in L2 modular concluída" - -msgid "Arbitrary flat physical_network names allowed" -msgstr "Nomes arbitrários de rede flat physical_network permitidos" - -#, python-format -msgid "Allowable flat physical_network names: %s" -msgstr "Nomes permitidos de rede flat physical_network : %s" - -msgid "ML2 FlatTypeDriver initialization complete" -msgstr "Inicialização do ML2 FlatTypeDriver concluída" - -msgid "ML2 
LocalTypeDriver initialization complete" -msgstr "Inicialização do ML2 LocalTypeDriver concluída" - -#, python-format -msgid "%(type)s ID ranges: %(range)s" -msgstr "%(type)s faixas de ID: %(range)s" - -#, python-format -msgid "Network VLAN ranges: %s" -msgstr "Intervalos de VLAN de rede: %s" - -msgid "VlanTypeDriver initialization complete" -msgstr "Inicialização do VlanTypeDriver concluída" - -#, python-format -msgid "Network name changed to %s" -msgstr "Nome da rede alterado para %s" - -#, python-format -msgid "Assigning %(vlan_id)s as local vlan for net-id=%(net_uuid)s" -msgstr "Designando %(vlan_id)s como vlan local para net-id=%(net_uuid)s" - -#, python-format -msgid "Reclaiming vlan = %(vlan_id)s from net-id = %(net_uuid)s" -msgstr "Recuperando vlan = %(vlan_id)s a partir de net-id = %(net_uuid)s" - -#, python-format -msgid "Adding %s to list of bridges." -msgstr "Adicionando %s na lista de pontes." - -#, python-format -msgid "Mapping physical network %(physical_network)s to bridge %(bridge)s" -msgstr "Mapeamento de rede física %(physical_network)s para a ponte %(bridge)s" - -#, python-format -msgid "Ancillary Port %s added" -msgstr "Porta auxiliar %s adicionada" - -msgid "Agent tunnel out of sync with plugin!" -msgstr "Túnel do agente fora de sincronização com o plug-in!" - -#, python-format -msgid "Default provider is not specified for service type %s" -msgstr "Provedor padrão não foi especificado para o tipo de serviço %s" - -#, python-format -msgid "Loading Metering driver %s" -msgstr "Carregando driver de medição %s" +msgid "agent_updated by server side %s!" +msgstr "agent_updated por lado do servidor %s!" 
diff --git a/neutron/locale/zh_CN/LC_MESSAGES/neutron-log-info.po b/neutron/locale/zh_CN/LC_MESSAGES/neutron-log-info.po index 629497919f4..9d896ed214e 100644 --- a/neutron/locale/zh_CN/LC_MESSAGES/neutron-log-info.po +++ b/neutron/locale/zh_CN/LC_MESSAGES/neutron-log-info.po @@ -8,9 +8,9 @@ msgid "" msgstr "" "Project-Id-Version: Neutron\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2015-05-10 06:14+0000\n" -"PO-Revision-Date: 2015-04-28 07:50+0000\n" -"Last-Translator: 汪军 \n" +"POT-Creation-Date: 2015-05-29 06:16+0000\n" +"PO-Revision-Date: 2015-05-28 20:54+0000\n" +"Last-Translator: openstackjenkins \n" "Language-Team: Chinese (China) (http://www.transifex.com/projects/p/neutron/" "language/zh_CN/)\n" "Language: zh_CN\n" @@ -21,230 +21,103 @@ msgstr "" "Plural-Forms: nplurals=1; plural=0;\n" #, python-format -msgid "Loading core plugin: %s" -msgstr "加载核心插件: %s" - -#, python-format -msgid "Service %s is supported by the core plugin" -msgstr "服务%s由核心插件支持" - -#, python-format -msgid "Loading Plugin: %s" -msgstr "正在装入插件:%s" - -#, python-format -msgid "" -"Inserting policy:%(new_policy)s in place of deprecated policy:%(old_policy)s" -msgstr "在被废弃的策略:%(old_policy)s位置上插入策略:%(new_policy)s " - -#, python-format -msgid "Neutron service started, listening on %(host)s:%(port)s" -msgstr "Neutron服务启动,正在%(host)s:%(port)s上监听" +msgid "%(action)s failed (client error): %(exc)s" +msgstr "%(action)s 失败 (客户端错误): %(exc)s" #, python-format msgid "%(method)s %(url)s" msgstr "%(method)s %(url)s" #, python-format -msgid "HTTP exception thrown: %s" -msgstr "HTTP 异常抛出:%s" - -#, python-format -msgid "%(url)s returned with HTTP %(status)d" -msgstr "%(url)s 随HTTP %(status)d返回" - -#, python-format -msgid "%(url)s returned a fault: %(exception)s" -msgstr "%(url)s 返回了故障:%(exception)s" - -msgid "Disabled security-group extension." 
-msgstr "已禁用安全组扩展。" - -#, python-format -msgid "Preparing filters for devices %s" -msgstr "正在为设备 %s 准备过滤器" - -#, python-format -msgid "Security group rule updated %r" -msgstr "已更新安全组规则 %r" - -#, python-format -msgid "Security group member updated %r" -msgstr "已更新安全组成员 %r" - -msgid "Provider rule updated" -msgstr "已更新提供程序规则" - -#, python-format -msgid "Remove device filter for %r" -msgstr "请为 %r 除去设备过滤器" - -msgid "Refresh firewall rules" -msgstr "请刷新防火墙规则" - -#, python-format -msgid "Port %(port_id)s not present in bridge %(br_name)s" -msgstr "端口 %(port_id)s 在桥 %(br_name)s中不存在" - -msgid "DHCP agent started" -msgstr "已启动 DHCP 代理" - -msgid "Synchronizing state" -msgstr "正在使状态同步" - -msgid "Synchronizing state complete" -msgstr "同步状态完成" - -#, python-format -msgid "agent_updated by server side %s!" -msgstr "服务器端 %s 已更新代理!" - -msgid "L3 agent started" -msgstr "已启动 L3 代理" - -#, python-format -msgid "Router %(router_id)s transitioned to %(state)s" -msgstr "路由器%(router_id)s 转换为%(state)s" - -#, python-format -msgid "" -"Router %s is not managed by this agent. It was possibly deleted concurrently." -msgstr "路由器%s没有被改该代理管理。可能已经被删除。" - -#, python-format -msgid "Process runs with uid/gid: %(uid)s/%(gid)s" -msgstr "进程运行uid/gid: %(uid)s/%(gid)s" - -#, python-format -msgid "Device %s already exists" -msgstr "设备 %s 已存在" - -#, python-format -msgid "Attempted to update port filter which is not filtered %s" -msgstr "已尝试更新未过滤的端口过滤器 %s" - -#, python-format -msgid "Attempted to remove port filter which is not filtered %r" -msgstr "已尝试除去未过滤的端口过滤器 %r" - -msgid "Initializing extension manager." 
-msgstr "正在初始化扩展管理员。" - -#, python-format -msgid "Loaded extension: %s" -msgstr "加载的扩展:%s" - -msgid "" -"Allow sorting is enabled because native pagination requires native sorting" -msgstr "已启用允许排序,因为本机分页需要本机排序" - -#, python-format -msgid "%(action)s failed (client error): %(exc)s" -msgstr "%(action)s 失败 (客户端错误): %(exc)s" - -#, python-format -msgid "Deleting port: %s" -msgstr "正在删除端口: %s" - -msgid "OVS cleanup completed successfully" -msgstr "OVS 清除已成功完成" - -msgid "Agent initialized successfully, now running... " -msgstr "代理已成功初始化,现在正在运行..." - -msgid "Logging enabled!" -msgstr "已启用日志记录!" +msgid "%(plugin_key)s: %(function_name)s with args %(args)s ignored" +msgstr "%(plugin_key)s:已忽略具有自变量 %(args)s 的 %(function_name)s" #, python-format msgid "%(prog)s version %(version)s" msgstr "%(prog)s 版本 %(version)s" #, python-format -msgid "Config paste file: %s" -msgstr "配置粘贴文件:%s" - -msgid "IPv6 is not enabled on this system." -msgstr "IPv6在本系统上未使能。" +msgid "%(type)s ID ranges: %(range)s" +msgstr "%(type)s ID 范围: %(range)s" #, python-format -msgid "Adding network %(net)s to agent %(agent)%s on host %(host)s" -msgstr "在主机 %(host)s上添加网络%(net)s到代理%(agent)%s" +msgid "%(url)s returned a fault: %(exception)s" +msgstr "%(url)s 返回了故障:%(exception)s" + +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "%(url)s 随HTTP %(status)d返回" + +#, python-format +msgid "APIC host agent: agent starting on %s" +msgstr "APIC 主机代理: 代理正启动在 %s" + +#, python-format +msgid "APIC host agent: started on %s" +msgstr "APIC 主机代理: 已启动在 %s" + +msgid "APIC service agent started" +msgstr "APIC 服务代理已启动" + +msgid "APIC service agent starting ..." +msgstr "APIC 服务代理启动中 ..." 
#, python-format msgid "" -"Validation for CIDR: %(new_cidr)s failed - overlaps with subnet " -"%(subnet_id)s (CIDR: %(cidr)s)" -msgstr "" -"针对 CIDR %(new_cidr)s 的验证失败 - 与子网 %(subnet_id)s(CIDR 为 %(cidr)s)" -"重叠" +"Added segment %(id)s of type %(network_type)s for network %(network_id)s" +msgstr "增添segment%(id)s种类%(network_type)s在网络%(network_id)s" + +msgid "Agent initialized successfully, now running... " +msgstr "代理已成功初始化,现在正在运行..." + +msgid "Agent out of sync with plugin!" +msgstr "代理与插件不同步!" + +msgid "Agent tunnel out of sync with plugin!" +msgstr "代理隧道与插件不同步!" #, python-format -msgid "Found invalid IP address in pool: %(start)s - %(end)s:" -msgstr "在池中找到无效 IP 地址:%(start)s - %(end)s:" +msgid "Allocated vlan (%d) from the pool" +msgstr "已从池分配 vlan (%d)" -msgid "Specified IP addresses do not match the subnet IP version" -msgstr "指定的 IP 地址与子网 IP 版本不匹配" +msgid "" +"Allow sorting is enabled because native pagination requires native sorting" +msgstr "已启用允许排序,因为本机分页需要本机排序" #, python-format -msgid "Start IP (%(start)s) is greater than end IP (%(end)s)" -msgstr "起始 IP (%(start)s) 大于结束 IP (%(end)s)" +msgid "Allowable flat physical_network names: %s" +msgstr "可以使用的平面物理网络名字: %s" + +msgid "Arbitrary flat physical_network names allowed" +msgstr "允许平面物理网络使用任意名字" #, python-format -msgid "Found pool larger than subnet CIDR:%(start)s - %(end)s" -msgstr "找到超过子网 CIDR (%(start)s - %(end)s) 的池" +msgid "Assigning %(vlan_id)s as local vlan for net-id=%(net_uuid)s" +msgstr "对于网络标识 %(net_uuid)s,正在将 %(vlan_id)s 分配为本地 vlan" #, python-format -msgid "Found overlapping ranges: %(l_range)s and %(r_range)s" -msgstr "找到重叠范围:%(l_range)s 和 %(r_range)s" +msgid "Attachment %s removed" +msgstr "已除去附件 %s" #, python-format -msgid "Skipping port %s as no IP is configure on it" -msgstr "正在跳过端口 %s,因为没有在该端口上配置任何 IP" - -msgid "SNAT already bound to a service node." 
-msgstr "SNAT 已经绑定到服务节点。" +msgid "Attempt %(count)s to bind port %(port)s" +msgstr "尝试 %(count)s 次绑定端口 %(port)s" #, python-format -msgid "Table %(old_t)r was renamed to %(new_t)r" -msgstr "表 %(old_t)r 已经更名为 %(new_t)r" +msgid "Attempted to remove port filter which is not filtered %r" +msgstr "已尝试除去未过滤的端口过滤器 %r" #, python-format -msgid "Nova event response: %s" -msgstr "Nova 事件响应: %s" - -#, python-format -msgid "Eventlet backdoor listening on %(port)s for process %(pid)d" -msgstr "Eventlet为进程 %(pid)d 在后台监听 %(port)s " - -#, python-format -msgid "Skipping periodic task %(task)s because its interval is negative" -msgstr "正在跳过周期性任务 %(task)s,因为其时间间隔为负" - -#, python-format -msgid "Skipping periodic task %(task)s because it is disabled" -msgstr "正在跳过周期性任务 %(task)s,因为它已禁用" +msgid "Attempted to update port filter which is not filtered %s" +msgstr "已尝试更新未过滤的端口过滤器 %s" #, python-format msgid "Caught %s, exiting" msgstr "捕获到 %s,正在退出" -msgid "Parent process has died unexpectedly, exiting" -msgstr "父进程已意外终止,正在退出" - #, python-format -msgid "Child caught %s, exiting" -msgstr "子代捕获 %s,正在退出" - -msgid "Forking too fast, sleeping" -msgstr "派生速度太快,正在休眠" - -#, python-format -msgid "Started child %d" -msgstr "已启动子代 %d" - -#, python-format -msgid "Starting %d workers" -msgstr "正在启动 %d 工作程序" +msgid "Caught %s, stopping children" +msgstr "捕获到 %s,正在停止子代" #, python-format msgid "Child %(pid)d killed by signal %(sig)d" @@ -255,27 +128,295 @@ msgid "Child %(pid)s exited with status %(code)d" msgstr "子代 %(pid)s 已退出,状态为 %(code)d" #, python-format -msgid "Caught %s, stopping children" -msgstr "捕获到 %s,正在停止子代" - -msgid "Wait called after thread killed. Cleaning up." -msgstr "线程结束,正在清理" +msgid "Child caught %s, exiting" +msgstr "子代捕获 %s,正在退出" #, python-format -msgid "Waiting on %d children to exit" -msgstr "正在等待 %d 个子代退出" +msgid "Config paste file: %s" +msgstr "配置粘贴文件:%s" #, python-format -msgid "Allocated vlan (%d) from the pool" -msgstr "已从池分配 vlan (%d)" +msgid "Configuration for device %s completed." 
+msgstr "设备 %s 的配置已完成。" + +#, python-format +msgid "Configured mechanism driver names: %s" +msgstr "配置装置驱动名称: %s" + +#, python-format +msgid "Configured type driver names: %s" +msgstr "配置类型驱动名字: %s" + +#, python-format +msgid "Controller IPs: %s" +msgstr "控制器IP: %s" + +msgid "DHCP agent started" +msgstr "已启动 DHCP 代理" + +#, python-format +msgid "Deleting port: %s" +msgstr "正在删除端口: %s" + +#, python-format +msgid "Device %s already exists" +msgstr "设备 %s 已存在" + +#, python-format +msgid "Device %s not defined on plugin" +msgstr "未在插件上定义设备 %s" + +msgid "Disabled security-group extension." +msgstr "已禁用安全组扩展。" + +#, python-format +msgid "Eventlet backdoor listening on %(port)s for process %(pid)d" +msgstr "Eventlet为进程 %(pid)d 在后台监听 %(port)s " + +msgid "Forking too fast, sleeping" +msgstr "派生速度太快,正在休眠" + +#, python-format +msgid "Found invalid IP address in pool: %(start)s - %(end)s:" +msgstr "在池中找到无效 IP 地址:%(start)s - %(end)s:" + +#, python-format +msgid "Found overlapping ranges: %(l_range)s and %(r_range)s" +msgstr "找到重叠范围:%(l_range)s 和 %(r_range)s" + +#, python-format +msgid "Found pool larger than subnet CIDR:%(start)s - %(end)s" +msgstr "找到超过子网 CIDR (%(start)s - %(end)s) 的池" + +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "HTTP 异常抛出:%s" + +msgid "IPv6 is not enabled on this system." +msgstr "IPv6在本系统上未使能。" + +msgid "Initializing CRD client... " +msgstr "正在初始化CRD客户端 ..." + +#, python-format +msgid "Initializing driver for type '%s'" +msgstr "为类型 '%s'初始化驱动" + +#, python-format +msgid "Initializing extension driver '%s'" +msgstr "初始化扩展驱动 '%s'" + +msgid "Initializing extension manager." +msgstr "正在初始化扩展管理员。" + +#, python-format +msgid "" +"Inserting policy:%(new_policy)s in place of deprecated policy:%(old_policy)s" +msgstr "在被废弃的策略:%(old_policy)s位置上插入策略:%(new_policy)s " + +#, python-format +msgid "Interface mappings: %s" +msgstr "接口映射:%s" + +msgid "L3 agent started" +msgstr "已启动 L3 代理" + +msgid "LinuxBridge Agent RPC Daemon Started!" 
+msgstr "LinuxBridge 代理 RPC 守护程序已启动!" + +#, python-format +msgid "Loaded extension: %s" +msgstr "加载的扩展:%s" + +#, python-format +msgid "Loaded mechanism driver names: %s" +msgstr "已加载的装置驱动名称: %s" + +#, python-format +msgid "Loaded type driver names: %s" +msgstr "已加载驱动程序: %s" + +#, python-format +msgid "Loading Plugin: %s" +msgstr "正在装入插件:%s" + +#, python-format +msgid "Loading core plugin: %s" +msgstr "加载核心插件: %s" + +#, python-format +msgid "Loading interface driver %s" +msgstr "正在加载接口驱动 %s" + +msgid "Logging enabled!" +msgstr "已启用日志记录!" + +#, python-format +msgid "" +"Loop iteration exceeded interval (%(polling_interval)s vs. %(elapsed)s)!" +msgstr "循环迭代超过时间间隔(%(polling_interval)s 对 %(elapsed)s)!" + +msgid "ML2 FlatTypeDriver initialization complete" +msgstr "完成ML2 FlatTypeDriver的初始化" + +msgid "ML2 LocalTypeDriver initialization complete" +msgstr "完成L2插件模块初始化" + +#, python-format +msgid "Mapping physical network %(physical_network)s to bridge %(bridge)s" +msgstr "正在将物理网络 %(physical_network)s 映射至网桥 %(bridge)s" + +msgid "Modular L2 Plugin initialization complete" +msgstr "L2插件模块初始化完成" + +#, python-format +msgid "Network VLAN ranges: %s" +msgstr "网络 VLAN 范围:%s" + +#, python-format +msgid "Network name changed to %s" +msgstr "网络名改变为 %s" + +#, python-format +msgid "Neutron service started, listening on %(host)s:%(port)s" +msgstr "Neutron服务启动,正在%(host)s:%(port)s上监听" #, python-format msgid "No %s Plugin loaded" msgstr "未装入任何 %s 插件" #, python-format -msgid "%(plugin_key)s: %(function_name)s with args %(args)s ignored" -msgstr "%(plugin_key)s:已忽略具有自变量 %(args)s 的 %(function_name)s" +msgid "Nova event response: %s" +msgstr "Nova 事件响应: %s" + +msgid "OVS cleanup completed successfully" +msgstr "OVS 清除已成功完成" + +msgid "Parent process has died unexpectedly, exiting" +msgstr "父进程已意外终止,正在退出" + +#, python-format +msgid "Physical Devices mappings: %s" +msgstr "物理设备映射:%s" + +#, python-format +msgid "Port %(device)s updated. 
Details: %(details)s" +msgstr "端口 %(device)s 已更新。详细信息:%(details)s" + +#, python-format +msgid "Port %(port_id)s not present in bridge %(br_name)s" +msgstr "端口 %(port_id)s 在桥 %(br_name)s中不存在" + +#, python-format +msgid "Port %s updated." +msgstr "端口 %s 已更新。" + +#, python-format +msgid "Port %s was deleted concurrently" +msgstr "端口 %s 被同时删除" + +#, python-format +msgid "Port name changed to %s" +msgstr "端口名改变为 %s" + +#, python-format +msgid "Preparing filters for devices %s" +msgstr "正在为设备 %s 准备过滤器" + +#, python-format +msgid "Process runs with uid/gid: %(uid)s/%(gid)s" +msgstr "进程运行uid/gid: %(uid)s/%(gid)s" + +msgid "Provider rule updated" +msgstr "已更新提供程序规则" + +#, python-format +msgid "RPC agent_id: %s" +msgstr "RPC agent_id:%s" + +#, python-format +msgid "Reclaiming vlan = %(vlan_id)s from net-id = %(net_uuid)s" +msgstr "正在从网络标识 %(net_uuid)s 恢复 vlan %(vlan_id)s" + +msgid "Refresh firewall rules" +msgstr "请刷新防火墙规则" + +#, python-format +msgid "Registered types: %s" +msgstr "已注册类型: %s" + +#, python-format +msgid "Remove device filter for %r" +msgstr "请为 %r 除去设备过滤器" + +#, python-format +msgid "Router %(router_id)s transitioned to %(state)s" +msgstr "路由器%(router_id)s 转换为%(state)s" + +#, python-format +msgid "" +"Router %s is not managed by this agent. It was possibly deleted concurrently." +msgstr "路由器%s没有被改该代理管理。可能已经被删除。" + +msgid "SNAT already bound to a service node." 
+msgstr "SNAT 已经绑定到服务节点。" + +#, python-format +msgid "Security group member updated %r" +msgstr "已更新安全组成员 %r" + +#, python-format +msgid "Security group rule updated %r" +msgstr "已更新安全组规则 %r" + +#, python-format +msgid "Service %s is supported by the core plugin" +msgstr "服务%s由核心插件支持" + +#, python-format +msgid "Skipping periodic task %(task)s because it is disabled" +msgstr "正在跳过周期性任务 %(task)s,因为它已禁用" + +#, python-format +msgid "Skipping periodic task %(task)s because its interval is negative" +msgstr "正在跳过周期性任务 %(task)s,因为其时间间隔为负" + +#, python-format +msgid "Skipping port %s as no IP is configure on it" +msgstr "正在跳过端口 %s,因为没有在该端口上配置任何 IP" + +msgid "Specified IP addresses do not match the subnet IP version" +msgstr "指定的 IP 地址与子网 IP 版本不匹配" + +#, python-format +msgid "Start IP (%(start)s) is greater than end IP (%(end)s)" +msgstr "起始 IP (%(start)s) 大于结束 IP (%(end)s)" + +#, python-format +msgid "Started child %d" +msgstr "已启动子代 %d" + +#, python-format +msgid "Starting %d workers" +msgstr "正在启动 %d 工作程序" + +#, python-format +msgid "Subnet %s was deleted concurrently" +msgstr "子网 %s 同时被删除 " + +msgid "Synchronizing state" +msgstr "正在使状态同步" + +msgid "Synchronizing state complete" +msgstr "同步状态完成" + +#, python-format +msgid "Table %(old_t)r was renamed to %(new_t)r" +msgstr "表 %(old_t)r 已经更名为 %(new_t)r" + +#, python-format +msgid "Tenant network_types: %s" +msgstr "项目网络类型: %s" #, python-format msgid "The IP addr of available SDN-VE controllers: %s" @@ -287,167 +428,22 @@ msgstr "SDN-VE 控制器 IP 地址: %s" #, python-format msgid "" -"Loop iteration exceeded interval (%(polling_interval)s vs. %(elapsed)s)!" -msgstr "循环迭代超过时间间隔(%(polling_interval)s 对 %(elapsed)s)!" - -#, python-format -msgid "Controller IPs: %s" -msgstr "控制器IP: %s" - -#, python-format -msgid "RPC agent_id: %s" -msgstr "RPC agent_id:%s" - -#, python-format -msgid "Port %(device)s updated. 
Details: %(details)s" -msgstr "端口 %(device)s 已更新。详细信息:%(details)s" - -#, python-format -msgid "Device %s not defined on plugin" -msgstr "未在插件上定义设备 %s" - -#, python-format -msgid "Attachment %s removed" -msgstr "已除去附件 %s" - -#, python-format -msgid "Port %s updated." -msgstr "端口 %s 已更新。" - -msgid "LinuxBridge Agent RPC Daemon Started!" -msgstr "LinuxBridge 代理 RPC 守护程序已启动!" - -msgid "Agent out of sync with plugin!" -msgstr "代理与插件不同步!" - -#, python-format -msgid "Interface mappings: %s" -msgstr "接口映射:%s" - -#, python-format -msgid "" -"Added segment %(id)s of type %(network_type)s for network %(network_id)s" -msgstr "增添segment%(id)s种类%(network_type)s在网络%(network_id)s" - -#, python-format -msgid "Configured type driver names: %s" -msgstr "配置类型驱动名字: %s" - -#, python-format -msgid "Loaded type driver names: %s" -msgstr "已加载驱动程序: %s" - -#, python-format -msgid "Registered types: %s" -msgstr "已注册类型: %s" - -#, python-format -msgid "Tenant network_types: %s" -msgstr "项目网络类型: %s" - -#, python-format -msgid "Initializing driver for type '%s'" -msgstr "为类型 '%s'初始化驱动" - -#, python-format -msgid "Configured mechanism driver names: %s" -msgstr "配置装置驱动名称: %s" - -#, python-format -msgid "Loaded mechanism driver names: %s" -msgstr "已加载的装置驱动名称: %s" - -#, python-format -msgid "Initializing extension driver '%s'" -msgstr "初始化扩展驱动 '%s'" - -msgid "Modular L2 Plugin initialization complete" -msgstr "L2插件模块初始化完成" - -#, python-format -msgid "Attempt %(count)s to bind port %(port)s" -msgstr "尝试 %(count)s 次绑定端口 %(port)s" - -#, python-format -msgid "Port %s was deleted concurrently" -msgstr "端口 %s 被同时删除" - -#, python-format -msgid "Subnet %s was deleted concurrently" -msgstr "子网 %s 同时被删除 " - -msgid "Arbitrary flat physical_network names allowed" -msgstr "允许平面物理网络使用任意名字" - -#, python-format -msgid "Allowable flat physical_network names: %s" -msgstr "可以使用的平面物理网络名字: %s" - -msgid "ML2 FlatTypeDriver initialization complete" -msgstr "完成ML2 FlatTypeDriver的初始化" - -msgid "ML2 LocalTypeDriver 
initialization complete" -msgstr "完成L2插件模块初始化" - -#, python-format -msgid "%(type)s ID ranges: %(range)s" -msgstr "%(type)s ID 范围: %(range)s" - -#, python-format -msgid "Network VLAN ranges: %s" -msgstr "网络 VLAN 范围:%s" +"Validation for CIDR: %(new_cidr)s failed - overlaps with subnet " +"%(subnet_id)s (CIDR: %(cidr)s)" +msgstr "" +"针对 CIDR %(new_cidr)s 的验证失败 - 与子网 %(subnet_id)s(CIDR 为 %(cidr)s)" +"重叠" msgid "VlanTypeDriver initialization complete" msgstr "Vlan类型驱动初始化完成" -#, python-format -msgid "Network name changed to %s" -msgstr "网络名改变为 %s" +msgid "Wait called after thread killed. Cleaning up." +msgstr "线程结束,正在清理" #, python-format -msgid "Port name changed to %s" -msgstr "端口名改变为 %s" - -msgid "APIC service agent starting ..." -msgstr "APIC 服务代理启动中 ..." - -msgid "APIC service agent started" -msgstr "APIC 服务代理已启动" +msgid "Waiting on %d children to exit" +msgstr "正在等待 %d 个子代退出" #, python-format -msgid "APIC host agent: agent starting on %s" -msgstr "APIC 主机代理: 代理正启动在 %s" - -#, python-format -msgid "APIC host agent: started on %s" -msgstr "APIC 主机代理: 已启动在 %s" - -msgid "Initializing CRD client... " -msgstr "正在初始化CRD客户端 ..." - -#, python-format -msgid "Assigning %(vlan_id)s as local vlan for net-id=%(net_uuid)s" -msgstr "对于网络标识 %(net_uuid)s,正在将 %(vlan_id)s 分配为本地 vlan" - -#, python-format -msgid "Reclaiming vlan = %(vlan_id)s from net-id = %(net_uuid)s" -msgstr "正在从网络标识 %(net_uuid)s 恢复 vlan %(vlan_id)s" - -#, python-format -msgid "Configuration for device %s completed." -msgstr "设备 %s 的配置已完成。" - -#, python-format -msgid "Mapping physical network %(physical_network)s to bridge %(bridge)s" -msgstr "正在将物理网络 %(physical_network)s 映射至网桥 %(bridge)s" - -msgid "Agent tunnel out of sync with plugin!" -msgstr "代理隧道与插件不同步!" - -#, python-format -msgid "Physical Devices mappings: %s" -msgstr "物理设备映射:%s" - -#, python-format -msgid "Loading interface driver %s" -msgstr "正在加载接口驱动 %s" +msgid "agent_updated by server side %s!" +msgstr "服务器端 %s 已更新代理!" 
diff --git a/neutron/locale/zh_TW/LC_MESSAGES/neutron-log-info.po b/neutron/locale/zh_TW/LC_MESSAGES/neutron-log-info.po index c57e39aef2f..0dc0e89de9c 100644 --- a/neutron/locale/zh_TW/LC_MESSAGES/neutron-log-info.po +++ b/neutron/locale/zh_TW/LC_MESSAGES/neutron-log-info.po @@ -7,8 +7,8 @@ msgid "" msgstr "" "Project-Id-Version: Neutron\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2015-05-04 06:08+0000\n" -"PO-Revision-Date: 2015-04-28 07:35+0000\n" +"POT-Creation-Date: 2015-05-29 06:16+0000\n" +"PO-Revision-Date: 2015-05-28 20:54+0000\n" "Last-Translator: openstackjenkins \n" "Language-Team: Chinese (Taiwan) (http://www.transifex.com/projects/p/neutron/" "language/zh_TW/)\n" @@ -19,155 +19,62 @@ msgstr "" "Generated-By: Babel 1.3\n" "Plural-Forms: nplurals=1; plural=0;\n" -#, python-format -msgid "Loading Plugin: %s" -msgstr "正在載入外掛程式:%s" - #, python-format msgid "%(method)s %(url)s" msgstr "%(method)s %(url)s" #, python-format -msgid "HTTP exception thrown: %s" -msgstr "已擲出 HTTP 異常狀況:%s" - -#, python-format -msgid "%(url)s returned with HTTP %(status)d" -msgstr "%(url)s 傳回了 HTTP %(status)d" +msgid "%(plugin_key)s: %(function_name)s with args %(args)s ignored" +msgstr "%(plugin_key)s:已忽略帶有引數 %(args)s 的 %(function_name)s" #, python-format msgid "%(url)s returned a fault: %(exception)s" msgstr "%(url)s 傳回了錯誤:%(exception)s" -msgid "Disabled security-group extension." -msgstr "已停用安全群組延伸。" +#, python-format +msgid "%(url)s returned with HTTP %(status)d" +msgstr "%(url)s 傳回了 HTTP %(status)d" + +msgid "Agent initialized successfully, now running... " +msgstr "已順利地起始設定代理程式,現正在執行中..." + +msgid "Agent out of sync with plugin!" +msgstr "代理程式與外掛程式不同步!" + +msgid "Agent tunnel out of sync with plugin!" +msgstr "代理程式通道與外掛程式不同步!" 
#, python-format -msgid "Preparing filters for devices %s" -msgstr "正在準備裝置 %s 的過濾器" +msgid "Allocated vlan (%d) from the pool" +msgstr "已從儲存區配置 VLAN (%d)" + +msgid "" +"Allow sorting is enabled because native pagination requires native sorting" +msgstr "已啟用容許排序,因為原生分頁需要原生排序" #, python-format -msgid "Security group rule updated %r" -msgstr "安全群組規則已更新 %r" +msgid "Assigning %(vlan_id)s as local vlan for net-id=%(net_uuid)s" +msgstr "正在將 %(vlan_id)s 指派為 net-id = %(net_uuid)s 的本端 VLAN" #, python-format -msgid "Security group member updated %r" -msgstr "安全群組成員已更新 %r" - -msgid "Provider rule updated" -msgstr "已更新提供者規則" +msgid "Attachment %s removed" +msgstr "已移除連接裝置 %s" #, python-format -msgid "Remove device filter for %r" -msgstr "移除 %r 的裝置過濾器" - -msgid "Refresh firewall rules" -msgstr "重新整理防火牆規則" - -msgid "DHCP agent started" -msgstr "已啟動 DHCP 代理程式" - -msgid "Synchronizing state" -msgstr "正在同步化狀態" - -#, python-format -msgid "agent_updated by server side %s!" -msgstr "agent_updated 是由伺服器端 %s 執行!" - -msgid "L3 agent started" -msgstr "已啟動 L3 代理程式" - -#, python-format -msgid "Device %s already exists" -msgstr "裝置 %s 已存在" +msgid "Attempted to remove port filter which is not filtered %r" +msgstr "已嘗試移除未過濾的埠過濾器 %r" #, python-format msgid "Attempted to update port filter which is not filtered %s" msgstr "已嘗試更新未過濾的埠過濾器 %s" -#, python-format -msgid "Attempted to remove port filter which is not filtered %r" -msgstr "已嘗試移除未過濾的埠過濾器 %r" - -msgid "Initializing extension manager." -msgstr "正在起始設定延伸管理程式。" - -#, python-format -msgid "Loaded extension: %s" -msgstr "已載入延伸:%s" - -msgid "" -"Allow sorting is enabled because native pagination requires native sorting" -msgstr "已啟用容許排序,因為原生分頁需要原生排序" - -msgid "OVS cleanup completed successfully" -msgstr "已順利完成 OVS 清理" - -msgid "Agent initialized successfully, now running... " -msgstr "已順利地起始設定代理程式,現正在執行中..." - -msgid "Logging enabled!" -msgstr "已啟用記載!" 
- -#, python-format -msgid "Config paste file: %s" -msgstr "配置貼上檔案:%s" - -#, python-format -msgid "" -"Validation for CIDR: %(new_cidr)s failed - overlaps with subnet " -"%(subnet_id)s (CIDR: %(cidr)s)" -msgstr "" -"驗證 CIDR %(new_cidr)s 失敗 - 與子網路 %(subnet_id)s (CIDR %(cidr)s) 重疊" - -#, python-format -msgid "Found invalid IP address in pool: %(start)s - %(end)s:" -msgstr "在儲存區中發現無效的 IP 位址:%(start)s - %(end)s:" - -msgid "Specified IP addresses do not match the subnet IP version" -msgstr "指定的 IP 位址與子網路 IP 版本不符" - -#, python-format -msgid "Start IP (%(start)s) is greater than end IP (%(end)s)" -msgstr "起始 IP (%(start)s) 大於結尾 IP (%(end)s)" - -#, python-format -msgid "Found pool larger than subnet CIDR:%(start)s - %(end)s" -msgstr "找到的儲存區大於子網路 CIDR:%(start)s - %(end)s" - -#, python-format -msgid "Found overlapping ranges: %(l_range)s and %(r_range)s" -msgstr "發現重疊的範圍:%(l_range)s 及 %(r_range)s" - -#, python-format -msgid "Skipping port %s as no IP is configure on it" -msgstr "正在跳過埠 %s,因為其上沒有配置 IP" - -#, python-format -msgid "Skipping periodic task %(task)s because its interval is negative" -msgstr "正在跳過定期作業 %(task)s,因為其間隔為負數" - -#, python-format -msgid "Skipping periodic task %(task)s because it is disabled" -msgstr "正在跳過定期作業 %(task)s,因為它已停用" - #, python-format msgid "Caught %s, exiting" msgstr "已捕捉到 %s,正在結束" -msgid "Parent process has died unexpectedly, exiting" -msgstr "母程序已非預期地當掉,正在結束" - -msgid "Forking too fast, sleeping" -msgstr "分岔太快,正在休眠" - #, python-format -msgid "Started child %d" -msgstr "已開始子行程 %d" - -#, python-format -msgid "Starting %d workers" -msgstr "正在啟動 %d 個工作程式" +msgid "Caught %s, stopping children" +msgstr "已捕捉到 %s,正在停止子項" #, python-format msgid "Child %(pid)d killed by signal %(sig)d" @@ -178,24 +85,65 @@ msgid "Child %(pid)s exited with status %(code)d" msgstr "子項 %(pid)s 已結束,狀態為 %(code)d" #, python-format -msgid "Caught %s, stopping children" -msgstr "已捕捉到 %s,正在停止子項" +msgid "Config paste file: %s" +msgstr "配置貼上檔案:%s" + +msgid "DHCP agent started" 
+msgstr "已啟動 DHCP 代理程式" #, python-format -msgid "Waiting on %d children to exit" -msgstr "正在等待 %d 個子項結束" +msgid "Device %s already exists" +msgstr "裝置 %s 已存在" #, python-format -msgid "Allocated vlan (%d) from the pool" -msgstr "已從儲存區配置 VLAN (%d)" +msgid "Device %s not defined on plugin" +msgstr "外掛程式上未定義裝置 %s" + +msgid "Disabled security-group extension." +msgstr "已停用安全群組延伸。" + +msgid "Forking too fast, sleeping" +msgstr "分岔太快,正在休眠" #, python-format -msgid "No %s Plugin loaded" -msgstr "未載入 %s 外掛程式" +msgid "Found invalid IP address in pool: %(start)s - %(end)s:" +msgstr "在儲存區中發現無效的 IP 位址:%(start)s - %(end)s:" #, python-format -msgid "%(plugin_key)s: %(function_name)s with args %(args)s ignored" -msgstr "%(plugin_key)s:已忽略帶有引數 %(args)s 的 %(function_name)s" +msgid "Found overlapping ranges: %(l_range)s and %(r_range)s" +msgstr "發現重疊的範圍:%(l_range)s 及 %(r_range)s" + +#, python-format +msgid "Found pool larger than subnet CIDR:%(start)s - %(end)s" +msgstr "找到的儲存區大於子網路 CIDR:%(start)s - %(end)s" + +#, python-format +msgid "HTTP exception thrown: %s" +msgstr "已擲出 HTTP 異常狀況:%s" + +msgid "Initializing extension manager." +msgstr "正在起始設定延伸管理程式。" + +#, python-format +msgid "Interface mappings: %s" +msgstr "介面對映:%s" + +msgid "L3 agent started" +msgstr "已啟動 L3 代理程式" + +msgid "LinuxBridge Agent RPC Daemon Started!" +msgstr "已啟動「LinuxBridge 代理程式 RPC 常駐程式」!" + +#, python-format +msgid "Loaded extension: %s" +msgstr "已載入延伸:%s" + +#, python-format +msgid "Loading Plugin: %s" +msgstr "正在載入外掛程式:%s" + +msgid "Logging enabled!" +msgstr "已啟用記載!" #, python-format msgid "" @@ -203,50 +151,102 @@ msgid "" msgstr "迴圈反覆運算已超出間隔(%(polling_interval)s 與 %(elapsed)s)!" #, python-format -msgid "RPC agent_id: %s" -msgstr "RPC agent_id:%s" - -#, python-format -msgid "Port %(device)s updated. 
Details: %(details)s" -msgstr "已更新埠 %(device)s。詳細資料:%(details)s" - -#, python-format -msgid "Device %s not defined on plugin" -msgstr "外掛程式上未定義裝置 %s" - -#, python-format -msgid "Attachment %s removed" -msgstr "已移除連接裝置 %s" - -#, python-format -msgid "Port %s updated." -msgstr "已更新埠 %s。" - -msgid "LinuxBridge Agent RPC Daemon Started!" -msgstr "已啟動「LinuxBridge 代理程式 RPC 常駐程式」!" - -msgid "Agent out of sync with plugin!" -msgstr "代理程式與外掛程式不同步!" - -#, python-format -msgid "Interface mappings: %s" -msgstr "介面對映:%s" +msgid "Mapping physical network %(physical_network)s to bridge %(bridge)s" +msgstr "正在將實體網路 %(physical_network)s 對映到橋接器 %(bridge)s" #, python-format msgid "Network VLAN ranges: %s" msgstr "網路 VLAN 範圍:%s" #, python-format -msgid "Assigning %(vlan_id)s as local vlan for net-id=%(net_uuid)s" -msgstr "正在將 %(vlan_id)s 指派為 net-id = %(net_uuid)s 的本端 VLAN" +msgid "No %s Plugin loaded" +msgstr "未載入 %s 外掛程式" + +msgid "OVS cleanup completed successfully" +msgstr "已順利完成 OVS 清理" + +msgid "Parent process has died unexpectedly, exiting" +msgstr "母程序已非預期地當掉,正在結束" + +#, python-format +msgid "Port %(device)s updated. Details: %(details)s" +msgstr "已更新埠 %(device)s。詳細資料:%(details)s" + +#, python-format +msgid "Port %s updated." +msgstr "已更新埠 %s。" + +#, python-format +msgid "Preparing filters for devices %s" +msgstr "正在準備裝置 %s 的過濾器" + +msgid "Provider rule updated" +msgstr "已更新提供者規則" + +#, python-format +msgid "RPC agent_id: %s" +msgstr "RPC agent_id:%s" #, python-format msgid "Reclaiming vlan = %(vlan_id)s from net-id = %(net_uuid)s" msgstr "正在從 net-id = %(net_uuid)s 收回 VLAN = %(vlan_id)s" -#, python-format -msgid "Mapping physical network %(physical_network)s to bridge %(bridge)s" -msgstr "正在將實體網路 %(physical_network)s 對映到橋接器 %(bridge)s" +msgid "Refresh firewall rules" +msgstr "重新整理防火牆規則" -msgid "Agent tunnel out of sync with plugin!" -msgstr "代理程式通道與外掛程式不同步!" 
+#, python-format +msgid "Remove device filter for %r" +msgstr "移除 %r 的裝置過濾器" + +#, python-format +msgid "Security group member updated %r" +msgstr "安全群組成員已更新 %r" + +#, python-format +msgid "Security group rule updated %r" +msgstr "安全群組規則已更新 %r" + +#, python-format +msgid "Skipping periodic task %(task)s because it is disabled" +msgstr "正在跳過定期作業 %(task)s,因為它已停用" + +#, python-format +msgid "Skipping periodic task %(task)s because its interval is negative" +msgstr "正在跳過定期作業 %(task)s,因為其間隔為負數" + +#, python-format +msgid "Skipping port %s as no IP is configure on it" +msgstr "正在跳過埠 %s,因為其上沒有配置 IP" + +msgid "Specified IP addresses do not match the subnet IP version" +msgstr "指定的 IP 位址與子網路 IP 版本不符" + +#, python-format +msgid "Start IP (%(start)s) is greater than end IP (%(end)s)" +msgstr "起始 IP (%(start)s) 大於結尾 IP (%(end)s)" + +#, python-format +msgid "Started child %d" +msgstr "已開始子行程 %d" + +#, python-format +msgid "Starting %d workers" +msgstr "正在啟動 %d 個工作程式" + +msgid "Synchronizing state" +msgstr "正在同步化狀態" + +#, python-format +msgid "" +"Validation for CIDR: %(new_cidr)s failed - overlaps with subnet " +"%(subnet_id)s (CIDR: %(cidr)s)" +msgstr "" +"驗證 CIDR %(new_cidr)s 失敗 - 與子網路 %(subnet_id)s (CIDR %(cidr)s) 重疊" + +#, python-format +msgid "Waiting on %d children to exit" +msgstr "正在等待 %d 個子項結束" + +#, python-format +msgid "agent_updated by server side %s!" +msgstr "agent_updated 是由伺服器端 %s 執行!" From b058658780f0ccb2787c26e3e95cabcc8e2e6349 Mon Sep 17 00:00:00 2001 From: Oleg Bondarev Date: Thu, 14 May 2015 15:09:24 +0300 Subject: [PATCH 078/292] Cleanup stale metadata processes on l3 agent sync Currently l3 agent only cleans up stale namespaces. 
The fix adds checking and deleting stale metadata processes to NamespaceManager class responsible for clearing stale namespaces Change-Id: I2b081803e312589d3d8a7808d286a6c9827ef53f Closes-Bug: #1455042 --- neutron/agent/l3/agent.py | 10 ++-- neutron/agent/l3/namespace_manager.py | 13 +++++- .../agent/l3/test_namespace_manager.py | 11 ++++- .../tests/functional/agent/test_l3_agent.py | 46 ++++++++----------- neutron/tests/unit/agent/l3/test_agent.py | 20 ++++++++ 5 files changed, 67 insertions(+), 33 deletions(-) diff --git a/neutron/agent/l3/agent.py b/neutron/agent/l3/agent.py index 01395d34ccf..9959fddc113 100644 --- a/neutron/agent/l3/agent.py +++ b/neutron/agent/l3/agent.py @@ -208,10 +208,15 @@ class L3NATAgent(firewall_l3_agent.FWaaSL3AgentRpcCallback, continue break + self.metadata_driver = None + if self.conf.enable_metadata_proxy: + self.metadata_driver = metadata_driver.MetadataDriver(self) + self.namespaces_manager = namespace_manager.NamespaceManager( self.conf, self.driver, - self.conf.use_namespaces) + self.conf.use_namespaces, + self.metadata_driver) self._queue = queue.RouterProcessingQueue() super(L3NATAgent, self).__init__(conf=self.conf) @@ -219,9 +224,6 @@ class L3NATAgent(firewall_l3_agent.FWaaSL3AgentRpcCallback, self.target_ex_net_id = None self.use_ipv6 = ipv6_utils.is_enabled() - if self.conf.enable_metadata_proxy: - self.metadata_driver = metadata_driver.MetadataDriver(self) - def _check_config_params(self): """Check items in configuration files. 
diff --git a/neutron/agent/l3/namespace_manager.py b/neutron/agent/l3/namespace_manager.py index 2f35b86ba6c..e7d029fcdca 100644 --- a/neutron/agent/l3/namespace_manager.py +++ b/neutron/agent/l3/namespace_manager.py @@ -14,6 +14,7 @@ from oslo_log import log as logging from neutron.agent.l3 import dvr_snat_ns from neutron.agent.l3 import namespaces +from neutron.agent.linux import external_process from neutron.agent.linux import ip_lib from neutron.i18n import _LE @@ -41,16 +42,22 @@ class NamespaceManager(object): agent restarts gracefully. """ - def __init__(self, agent_conf, driver, clean_stale): + def __init__(self, agent_conf, driver, clean_stale, metadata_driver=None): """Initialize the NamespaceManager. :param agent_conf: configuration from l3 agent :param driver: to perform operations on devices :param clean_stale: Whether to try to clean stale namespaces + :param metadata_driver: used to cleanup stale metadata proxy processes """ self.agent_conf = agent_conf self.driver = driver self._clean_stale = clean_stale + self.metadata_driver = metadata_driver + if metadata_driver: + self.process_monitor = external_process.ProcessMonitor( + config=agent_conf, + resource_type='router') def __enter__(self): self._all_namespaces = set() @@ -85,6 +92,10 @@ class NamespaceManager(object): self.driver, use_ipv6=False) try: + if self.metadata_driver: + # cleanup stale metadata proxy processes first + self.metadata_driver.destroy_monitored_metadata_proxy( + self.process_monitor, ns_id, self.agent_conf) ns.delete() except RuntimeError: LOG.exception(_LE('Failed to destroy stale namespace %s'), ns) diff --git a/neutron/tests/functional/agent/l3/test_namespace_manager.py b/neutron/tests/functional/agent/l3/test_namespace_manager.py index 51922e6ff17..69a571858cb 100755 --- a/neutron/tests/functional/agent/l3/test_namespace_manager.py +++ b/neutron/tests/functional/agent/l3/test_namespace_manager.py @@ -31,8 +31,10 @@ class NamespaceManagerTestFramework(base.BaseSudoTestCase): 
super(NamespaceManagerTestFramework, self).setUp() self.agent_conf = mock.MagicMock() self.agent_conf.router_delete_namespaces = True + self.metadata_driver_mock = mock.Mock() self.namespace_manager = namespace_manager.NamespaceManager( - self.agent_conf, driver=None, clean_stale=True) + self.agent_conf, driver=None, clean_stale=True, + metadata_driver=self.metadata_driver_mock) def _create_namespace(self, router_id, ns_class): namespace = ns_class(router_id, self.agent_conf, driver=None, @@ -59,6 +61,7 @@ class NamespaceManagerTestCase(NamespaceManagerTestFramework): def test_namespace_manager(self): router_id = _uuid() + router_id_to_delete = _uuid() to_keep = set() to_delete = set() to_retrieve = set() @@ -66,7 +69,7 @@ class NamespaceManagerTestCase(NamespaceManagerTestFramework): namespaces.RouterNamespace)) to_keep.add(self._create_namespace(router_id, dvr_snat_ns.SnatNamespace)) - to_delete.add(self._create_namespace(_uuid(), + to_delete.add(self._create_namespace(router_id_to_delete, dvr_snat_ns.SnatNamespace)) to_retrieve = to_keep | to_delete @@ -80,4 +83,8 @@ class NamespaceManagerTestCase(NamespaceManagerTestFramework): for ns_name in to_keep: self.assertTrue(self._namespace_exists(ns_name)) for ns_name in to_delete: + (self.metadata_driver_mock.destroy_monitored_metadata_proxy. 
+ assert_called_once_with(mock.ANY, + router_id_to_delete, + self.agent_conf)) self.assertFalse(self._namespace_exists(ns_name)) diff --git a/neutron/tests/functional/agent/test_l3_agent.py b/neutron/tests/functional/agent/test_l3_agent.py index 98e5d661e11..c979c03b666 100644 --- a/neutron/tests/functional/agent/test_l3_agent.py +++ b/neutron/tests/functional/agent/test_l3_agent.py @@ -63,7 +63,8 @@ def get_ovs_bridge(br_name): class L3AgentTestFramework(base.BaseSudoTestCase): def setUp(self): super(L3AgentTestFramework, self).setUp() - mock.patch('neutron.agent.l3.agent.L3PluginApi').start() + self.mock_plugin_api = mock.patch( + 'neutron.agent.l3.agent.L3PluginApi').start().return_value mock.patch('neutron.agent.rpc.PluginReportStateAPI').start() self.agent = self._configure_agent('agent1') @@ -500,23 +501,23 @@ class L3AgentTestCase(L3AgentTestFramework): routers_to_keep = [] routers_to_delete = [] ns_names_to_retrieve = set() + routers_info_to_delete = [] for i in range(2): routers_to_keep.append(self.generate_router_info(False)) - self.manage_router(self.agent, routers_to_keep[i]) - ns_names_to_retrieve.add(namespaces.NS_PREFIX + - routers_to_keep[i]['id']) + ri = self.manage_router(self.agent, routers_to_keep[i]) + ns_names_to_retrieve.add(ri.ns_name) for i in range(2): routers_to_delete.append(self.generate_router_info(False)) - self.manage_router(self.agent, routers_to_delete[i]) - ns_names_to_retrieve.add(namespaces.NS_PREFIX + - routers_to_delete[i]['id']) + ri = self.manage_router(self.agent, routers_to_delete[i]) + routers_info_to_delete.append(ri) + ns_names_to_retrieve.add(ri.ns_name) # Mock the plugin RPC API to Simulate a situation where the agent # was handling the 4 routers created above, it went down and after # starting up again, two of the routers were deleted via the API - mocked_get_routers = ( - neutron_l3_agent.L3PluginApi.return_value.get_routers) - mocked_get_routers.return_value = routers_to_keep + 
self.mock_plugin_api.get_routers.return_value = routers_to_keep + # also clear agent router_info as it will be after restart + self.agent.router_info = {} # Synchonize the agent with the plug-in with mock.patch.object(namespace_manager.NamespaceManager, 'list_all', @@ -526,9 +527,8 @@ class L3AgentTestCase(L3AgentTestFramework): # Mock the plugin RPC API so a known external network id is returned # when the router updates are processed by the agent external_network_id = _uuid() - mocked_get_external_network_id = ( - neutron_l3_agent.L3PluginApi.return_value.get_external_network_id) - mocked_get_external_network_id.return_value = external_network_id + self.mock_plugin_api.get_external_network_id.return_value = ( + external_network_id) # Plug external_gateway_info in the routers that are not going to be # deleted by the agent when it processes the updates. Otherwise, @@ -539,7 +539,7 @@ class L3AgentTestCase(L3AgentTestFramework): # Have the agent process the update from the plug-in and verify # expected behavior - for _ in routers_to_keep + routers_to_delete: + for _ in routers_to_keep: self.agent._process_router_update() for i in range(2): @@ -547,10 +547,9 @@ class L3AgentTestCase(L3AgentTestFramework): self.assertTrue(self._namespace_exists(namespaces.NS_PREFIX + routers_to_keep[i]['id'])) for i in range(2): - self.assertNotIn(routers_to_delete[i]['id'], + self.assertNotIn(routers_info_to_delete[i].router_id, self.agent.router_info) - self.assertFalse(self._namespace_exists( - namespaces.NS_PREFIX + routers_to_delete[i]['id'])) + self._assert_router_does_not_exist(routers_info_to_delete[i]) def _router_lifecycle(self, enable_ha, ip_version=4, dual_stack=False, v6_ext_gw_with_sub=True): @@ -948,9 +947,7 @@ class TestDvrRouter(L3AgentTestFramework): self, agent_mode, **dvr_router_kwargs): self.agent.conf.agent_mode = agent_mode router_info = self.generate_dvr_router_info(**dvr_router_kwargs) - mocked_ext_net_id = ( - 
neutron_l3_agent.L3PluginApi.return_value.get_external_network_id) - mocked_ext_net_id.return_value = ( + self.mock_plugin_api.get_external_network_id.return_value = ( router_info['_floatingips'][0]['floating_network_id']) router = self.manage_router(self.agent, router_info) fip_ns = router.fip_ns.get_name() @@ -1010,15 +1007,12 @@ class TestDvrRouter(L3AgentTestFramework): # gateway_port information before the l3_agent will create it. # The port returned needs to have the same information as # router_info['gw_port'] - mocked_gw_port = ( - neutron_l3_agent.L3PluginApi.return_value.get_agent_gateway_port) - mocked_gw_port.return_value = router_info['gw_port'] + self.mock_plugin_api.get_agent_gateway_port.return_value = router_info[ + 'gw_port'] # We also need to mock the get_external_network_id method to # get the correct fip namespace. - mocked_ext_net_id = ( - neutron_l3_agent.L3PluginApi.return_value.get_external_network_id) - mocked_ext_net_id.return_value = ( + self.mock_plugin_api.get_external_network_id.return_value = ( router_info['_floatingips'][0]['floating_network_id']) # With all that set we can now ask the l3_agent to diff --git a/neutron/tests/unit/agent/l3/test_agent.py b/neutron/tests/unit/agent/l3/test_agent.py index e628cd465c7..885e21bf1b8 100644 --- a/neutron/tests/unit/agent/l3/test_agent.py +++ b/neutron/tests/unit/agent/l3/test_agent.py @@ -426,6 +426,26 @@ class TestBasicRouterOperations(BasicRouterOperationsFramework): agent.periodic_sync_routers_task(agent.context) self.assertFalse(agent.namespaces_manager._clean_stale) + def test_periodic_sync_routers_task_call_clean_stale_meta_proxies(self): + agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) + stale_router_ids = [_uuid(), _uuid()] + active_routers = [{'id': _uuid()}, {'id': _uuid()}] + self.plugin_api.get_routers.return_value = active_routers + namespace_list = [namespaces.NS_PREFIX + r_id + for r_id in stale_router_ids] + namespace_list += [namespaces.NS_PREFIX + r['id'] + for r in 
active_routers] + self.mock_ip.get_namespaces.return_value = namespace_list + driver = metadata_driver.MetadataDriver + with mock.patch.object( + driver, 'destroy_monitored_metadata_proxy') as destroy_proxy: + agent.periodic_sync_routers_task(agent.context) + + expected_calls = [mock.call(mock.ANY, r_id, agent.conf) + for r_id in stale_router_ids] + self.assertEqual(len(stale_router_ids), destroy_proxy.call_count) + destroy_proxy.assert_has_calls(expected_calls, any_order=True) + def test_router_info_create(self): id = _uuid() ri = l3router.RouterInfo(id, {}, **self.ri_kwargs) From 359b7c971a88f6dff64e8e4d558210a880f3ee0f Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Thu, 7 May 2015 14:59:38 +1000 Subject: [PATCH 079/292] Ensure netfilter is enabled for bridges Since security-groups use iptables rules on Linux bridges, we need to ensure that netfilter is enabled for bridges. Unfortunately, there seems to be a long history of distributions having differing defaults for this, best described in [1]. It seems at the moment everyone has to discover this for themselves; packstack found it in Ia8c86dcb31810a8d6b133a161388604fde9bead4, then fuel found the same thing in I8582c24706c3a7253e00569eef275f116d765bca and then finally someone else hit it and put it into documentation with I4ed3cec03a1b3a7d56dfe18394154ec1b2db6791. I just spent a long time figuring it out too when deploying with devstack. Rather than having yet another fix in devstack, I don't see why neutron shouldn't be ensuring the setting is correct when it starts up -- without these settings enabled, security-groups are silently broken. This does that, and modifies test-cases to check we make the calls. 
[1] http://wiki.libvirt.org/page/Net.bridge-nf-call_and_sysctl.conf Change-Id: If2d316eb8c422dc1e4f34b17a50b93dd72993a99 --- neutron/agent/linux/iptables_firewall.py | 29 ++++++++++++++++++- .../unit/agent/test_securitygroups_rpc.py | 10 +++++++ 2 files changed, 38 insertions(+), 1 deletion(-) diff --git a/neutron/agent/linux/iptables_firewall.py b/neutron/agent/linux/iptables_firewall.py index 840fba7f6f7..5cea8e39bab 100644 --- a/neutron/agent/linux/iptables_firewall.py +++ b/neutron/agent/linux/iptables_firewall.py @@ -22,6 +22,7 @@ from neutron.agent import firewall from neutron.agent.linux import ipset_manager from neutron.agent.linux import iptables_comments as ic from neutron.agent.linux import iptables_manager +from neutron.agent.linux import utils from neutron.common import constants from neutron.common import ipv6_utils from neutron.extensions import portsecurity as psec @@ -71,6 +72,32 @@ class IptablesFirewallDriver(firewall.FirewallDriver): lambda: collections.defaultdict(list)) self.pre_sg_members = None self.enable_ipset = cfg.CONF.SECURITYGROUP.enable_ipset + self._enabled_netfilter_for_bridges = False + + def _enable_netfilter_for_bridges(self): + # we only need to set these values once, but it has to be when + # we create a bridge; before that the bridge module might not + # be loaded and the proc values aren't there. + if self._enabled_netfilter_for_bridges: + return + else: + self._enabled_netfilter_for_bridges = True + + # These proc values ensure that netfilter is enabled on + # bridges; essential for enforcing security groups rules with + # OVS Hybrid. Distributions can differ on whether this is + # enabled by default or not (Ubuntu - yes, Redhat - no, for + # example). 
+ LOG.debug("Enabling netfilter for bridges") + utils.execute(['sysctl', '-w', + 'net.bridge.bridge-nf-call-arptables=1'], + run_as_root=True) + utils.execute(['sysctl', '-w', + 'net.bridge.bridge-nf-call-ip6tables=1'], + run_as_root=True) + utils.execute(['sysctl', '-w', + 'net.bridge.bridge-nf-call-iptables=1'], + run_as_root=True) @property def ports(self): @@ -103,7 +130,7 @@ class IptablesFirewallDriver(firewall.FirewallDriver): LOG.debug("Preparing device (%s) filter", port['device']) self._remove_chains() self._set_ports(port) - + self._enable_netfilter_for_bridges() # each security group has it own chains self._setup_chains() self.iptables.apply() diff --git a/neutron/tests/unit/agent/test_securitygroups_rpc.py b/neutron/tests/unit/agent/test_securitygroups_rpc.py index b732b6b5af5..6d050e6cc13 100644 --- a/neutron/tests/unit/agent/test_securitygroups_rpc.py +++ b/neutron/tests/unit/agent/test_securitygroups_rpc.py @@ -2483,6 +2483,9 @@ class TestSecurityGroupAgentWithIptables(base.BaseTestCase): cfg.CONF.set_override('enable_ipset', False, group='SECURITYGROUP') cfg.CONF.set_override('comment_iptables_rules', False, group='AGENT') + self.utils_exec = mock.patch( + 'neutron.agent.linux.utils.execute').start() + self.rpc = mock.Mock() self._init_agent(defer_refresh_firewall) @@ -2607,6 +2610,13 @@ class TestSecurityGroupAgentWithIptables(base.BaseTestCase): self.assertThat(kwargs['process_input'], matchers.MatchesRegex(expected_regex)) + expected = ['net.bridge.bridge-nf-call-arptables=1', + 'net.bridge.bridge-nf-call-ip6tables=1', + 'net.bridge.bridge-nf-call-iptables=1'] + for e in expected: + self.utils_exec.assert_any_call(['sysctl', '-w', e], + run_as_root=True) + def _replay_iptables(self, v4_filter, v6_filter, raw): self._register_mock_call( ['iptables-save', '-c'], From 77303fbeaa060bcee2befad65dccb457fbb6ab65 Mon Sep 17 00:00:00 2001 From: Cedric Brandily Date: Tue, 26 May 2015 14:38:26 +0000 Subject: [PATCH 080/292] Sort _get_new/deleted_set_ips 
responses in unittests This fixes the test_set_members_adding/deleting_less_than_5 unit test that breaks with a randomized PYTHONHASHSEED (see the bug report). The test assumed that the _get_new/deleted_set_ips from neutron.agent.linux.ipset_manager return elements in a particular order. Found with PYTHONHASHSEED=1. The fix refactors the test case to force sorted responses from _get_new/deleted_set_ips during unittests. Partial-bug: #1348818 Note: There are several other unrelated unit tests that also break with a randomized PYTHONHASHSEED, but they are not addressed here. They will be addressed in separate patches. Change-Id: I8408365825ec1e97a83c2181f38ec1f9468df91e --- .../unit/agent/linux/test_ipset_manager.py | 28 +++++++++++++++++-- 1 file changed, 26 insertions(+), 2 deletions(-) diff --git a/neutron/tests/unit/agent/linux/test_ipset_manager.py b/neutron/tests/unit/agent/linux/test_ipset_manager.py index 1e22c304221..2e447bcd77b 100644 --- a/neutron/tests/unit/agent/linux/test_ipset_manager.py +++ b/neutron/tests/unit/agent/linux/test_ipset_manager.py @@ -31,6 +31,30 @@ class BaseIpsetManagerTest(base.BaseTestCase): self.execute = mock.patch.object(self.ipset, "execute").start() self.expected_calls = [] self.expect_create() + self.force_sorted_get_set_ips() + + def force_sorted_get_set_ips(self): + """Force sorted responses by self.ipset._get_new/deleted_set_ips. + + _get_new/deleted_set_ips use internally sets and return randomly + ordered responses. This method ensures sorted responses from them + in order to guarantee call order in self.ipset.set_members. 
+ """ + original_get_new_set_ips = self.ipset._get_new_set_ips + original_get_deleted_set_ips = self.ipset._get_deleted_set_ips + + def sorted_get_new_set_ips(set_name, expected_ips): + unsorted = original_get_new_set_ips(set_name, expected_ips) + return sorted(unsorted) + + def sorted_get_deleted_set_ips(set_name, expected_ips): + unsorted = original_get_deleted_set_ips(set_name, expected_ips) + return sorted(unsorted) + + mock.patch.object(self.ipset, '_get_new_set_ips', + side_effect=sorted_get_new_set_ips).start() + mock.patch.object(self.ipset, '_get_deleted_set_ips', + side_effect=sorted_get_deleted_set_ips).start() def verify_mock_calls(self): self.execute.assert_has_calls(self.expected_calls, any_order=False) @@ -97,13 +121,13 @@ class IpsetManagerTestCase(BaseIpsetManagerTest): def test_set_members_adding_less_than_5(self): self.add_first_ip() - self.expect_add(reversed(FAKE_IPS[1:5])) + self.expect_add(FAKE_IPS[1:5]) self.ipset.set_members(TEST_SET_ID, ETHERTYPE, FAKE_IPS[0:5]) self.verify_mock_calls() def test_set_members_deleting_less_than_5(self): self.add_all_ips() - self.expect_del(reversed(FAKE_IPS[4:5])) + self.expect_del(FAKE_IPS[3:4]) self.ipset.set_members(TEST_SET_ID, ETHERTYPE, FAKE_IPS[0:3]) self.verify_mock_calls() From e43c037e14f0055206f16cc9549e03a3f6e43755 Mon Sep 17 00:00:00 2001 From: Cedric Brandily Date: Tue, 26 May 2015 13:24:58 +0000 Subject: [PATCH 081/292] Rename test_periodoc_resync_helper to test_periodic_resync_helper Change-Id: Iaaf57b5cdbe634af8ceda0fef4c920cdb053eed4 --- neutron/tests/unit/agent/dhcp/test_agent.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/neutron/tests/unit/agent/dhcp/test_agent.py b/neutron/tests/unit/agent/dhcp/test_agent.py index 2060474c2b2..33fa6ee3407 100644 --- a/neutron/tests/unit/agent/dhcp/test_agent.py +++ b/neutron/tests/unit/agent/dhcp/test_agent.py @@ -402,7 +402,7 @@ class TestDhcpAgent(base.BaseTestCase): dhcp.periodic_resync() 
spawn.assert_called_once_with(dhcp._periodic_resync_helper) - def test_periodoc_resync_helper(self): + def test_periodic_resync_helper(self): with mock.patch.object(dhcp_agent.eventlet, 'sleep') as sleep: dhcp = dhcp_agent.DhcpAgent(HOSTNAME) dhcp.needs_resync_reasons = collections.OrderedDict( From 9ff8cd524621daf6cd20da939238eee14b5a231f Mon Sep 17 00:00:00 2001 From: Cedric Brandily Date: Wed, 27 May 2015 08:53:00 +0200 Subject: [PATCH 082/292] Improve test_set_members_deleting_less_than_5 In test_set_members_deleting_less_than_5[1], 3 ips are deleted from ipset but test_set_members_deleting_less_than_5 checked that the first one was deleted because the call ordering was non-trivial. The test was successful because assert_has_calls(expected_calls, any_order=False) allows extra calls before and after expected_calls. A parent change[2] forces the call ordering, this allows to check that the 3 ips are deleted. [1] neutron.tests.unit.agent.linux.test_ipset_manager [2] I8408365825ec1e97a83c2181f38ec1f9468df91e Related-Bug: #1348818 Change-Id: I773e2cd19fdec634b728d0c3a78c1d66392c743f --- neutron/tests/unit/agent/linux/test_ipset_manager.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/neutron/tests/unit/agent/linux/test_ipset_manager.py b/neutron/tests/unit/agent/linux/test_ipset_manager.py index 2e447bcd77b..cedbbceeeaf 100644 --- a/neutron/tests/unit/agent/linux/test_ipset_manager.py +++ b/neutron/tests/unit/agent/linux/test_ipset_manager.py @@ -127,7 +127,7 @@ class IpsetManagerTestCase(BaseIpsetManagerTest): def test_set_members_deleting_less_than_5(self): self.add_all_ips() - self.expect_del(FAKE_IPS[3:4]) + self.expect_del(FAKE_IPS[3:]) self.ipset.set_members(TEST_SET_ID, ETHERTYPE, FAKE_IPS[0:3]) self.verify_mock_calls() From 880252868b8509125f6463e2709c2d0c4f3957ea Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adrien=20Verg=C3=A9?= Date: Mon, 25 May 2015 18:46:03 +0200 Subject: [PATCH 083/292] Fix indentation errors in tests They are some 
missing/extra indentations in tests source code. This results in variables used out their scope (which remains unnoticed as long as `with` contexts do not fail), and prevent refactoring scripts (such as the one for getting rid of `contextlib.nested` [1]) from performing well. This simple patch fixes these indentation errors. [1]: See change I8d1de09ff38ed0af9fb56f423a2c43476408e0fb Change-Id: Icef34c7755e0d96c4c5ee85982de86d0ccc196c7 Related-Blueprint: neutron-python3 --- .../rpc/handlers/test_securitygroups_rpc.py | 8 +- neutron/tests/unit/extensions/test_l3.py | 90 +++++++-------- .../agent/test_linuxbridge_neutron_agent.py | 2 +- .../agent/test_ovs_neutron_agent.py | 106 +++++++++--------- .../plugins/openvswitch/test_ovs_tunnel.py | 35 +++--- .../unit/scheduler/test_l3_agent_scheduler.py | 37 +++--- 6 files changed, 139 insertions(+), 139 deletions(-) diff --git a/neutron/tests/unit/api/rpc/handlers/test_securitygroups_rpc.py b/neutron/tests/unit/api/rpc/handlers/test_securitygroups_rpc.py index 6728062091e..4728e3de327 100644 --- a/neutron/tests/unit/api/rpc/handlers/test_securitygroups_rpc.py +++ b/neutron/tests/unit/api/rpc/handlers/test_securitygroups_rpc.py @@ -28,10 +28,10 @@ class SecurityGroupServerRpcApiTestCase(base.BaseTestCase): prepare_mock.return_value = rpcapi.client rpcapi.security_group_rules_for_devices('context', ['fake_device']) - rpc_mock.assert_called_once_with( - 'context', - 'security_group_rules_for_devices', - devices=['fake_device']) + rpc_mock.assert_called_once_with( + 'context', + 'security_group_rules_for_devices', + devices=['fake_device']) class SGAgentRpcCallBackMixinTestCase(base.BaseTestCase): diff --git a/neutron/tests/unit/extensions/test_l3.py b/neutron/tests/unit/extensions/test_l3.py index 35a632dfa22..2e67d40f037 100644 --- a/neutron/tests/unit/extensions/test_l3.py +++ b/neutron/tests/unit/extensions/test_l3.py @@ -1170,19 +1170,19 @@ class L3NatTestCaseBase(L3NatTestCaseMixin): with contextlib.nested( 
self.subnet(network=n1, cidr='10.0.0.0/24'), self.subnet(network=n2, cidr='10.1.0.0/24')) as (s1, s2): - body = self._router_interface_action( - 'add', - r['router']['id'], - s2['subnet']['id'], - None) - self.assertIn('port_id', body) - self._router_interface_action( - 'add', - r['router']['id'], - s1['subnet']['id'], - None, - tenant_id=tenant_id) - self.assertIn('port_id', body) + body = self._router_interface_action( + 'add', + r['router']['id'], + s2['subnet']['id'], + None) + self.assertIn('port_id', body) + self._router_interface_action( + 'add', + r['router']['id'], + s1['subnet']['id'], + None, + tenant_id=tenant_id) + self.assertIn('port_id', body) def test_router_add_interface_port(self): with self.router() as r: @@ -1598,45 +1598,45 @@ class L3NatTestCaseBase(L3NatTestCaseMixin): self.router(), self.subnet(), mock.patch.object(registry, 'notify')) as (r, s, notify): - errors = [ - exceptions.NotificationError( - 'foo_callback_id', n_exc.InUse()), - ] - # we fail the first time, but not the second, when - # the clean-up takes place - notify.side_effect = [ - exceptions.CallbackFailure(errors=errors), None - ] - self._router_interface_action('add', - r['router']['id'], - s['subnet']['id'], - None) - self._router_interface_action( - 'remove', - r['router']['id'], - s['subnet']['id'], - None, - exc.HTTPConflict.code) + errors = [ + exceptions.NotificationError( + 'foo_callback_id', n_exc.InUse()), + ] + # we fail the first time, but not the second, when + # the clean-up takes place + notify.side_effect = [ + exceptions.CallbackFailure(errors=errors), None + ] + self._router_interface_action('add', + r['router']['id'], + s['subnet']['id'], + None) + self._router_interface_action( + 'remove', + r['router']['id'], + s['subnet']['id'], + None, + exc.HTTPConflict.code) def test_router_clear_gateway_callback_failure_returns_409(self): with contextlib.nested( self.router(), self.subnet(), mock.patch.object(registry, 'notify')) as (r, s, notify): - errors = [ - 
exceptions.NotificationError( - 'foo_callback_id', n_exc.InUse()), - ] - notify.side_effect = exceptions.CallbackFailure(errors=errors) - self._set_net_external(s['subnet']['network_id']) - self._add_external_gateway_to_router( - r['router']['id'], - s['subnet']['network_id']) - self._remove_external_gateway_from_router( + errors = [ + exceptions.NotificationError( + 'foo_callback_id', n_exc.InUse()), + ] + notify.side_effect = exceptions.CallbackFailure(errors=errors) + self._set_net_external(s['subnet']['network_id']) + self._add_external_gateway_to_router( r['router']['id'], - s['subnet']['network_id'], - external_gw_info={}, - expected_code=exc.HTTPConflict.code) + s['subnet']['network_id']) + self._remove_external_gateway_from_router( + r['router']['id'], + s['subnet']['network_id'], + external_gw_info={}, + expected_code=exc.HTTPConflict.code) def test_router_remove_interface_wrong_subnet_returns_400(self): with self.router() as r: diff --git a/neutron/tests/unit/plugins/linuxbridge/agent/test_linuxbridge_neutron_agent.py b/neutron/tests/unit/plugins/linuxbridge/agent/test_linuxbridge_neutron_agent.py index b7adf56b821..192637f28e1 100644 --- a/neutron/tests/unit/plugins/linuxbridge/agent/test_linuxbridge_neutron_agent.py +++ b/neutron/tests/unit/plugins/linuxbridge/agent/test_linuxbridge_neutron_agent.py @@ -874,7 +874,7 @@ class TestLinuxBridgeManager(base.BaseTestCase): mock.patch.object( ip_lib, 'iproute_arg_supported', return_value=iproute_arg_supported)): - self.assertEqual(expected, self.lbm.vxlan_ucast_supported()) + self.assertEqual(expected, self.lbm.vxlan_ucast_supported()) def test_vxlan_ucast_supported(self): self._check_vxlan_ucast_supported( diff --git a/neutron/tests/unit/plugins/openvswitch/agent/test_ovs_neutron_agent.py b/neutron/tests/unit/plugins/openvswitch/agent/test_ovs_neutron_agent.py index af4c5496b58..9063df417ea 100644 --- a/neutron/tests/unit/plugins/openvswitch/agent/test_ovs_neutron_agent.py +++ 
b/neutron/tests/unit/plugins/openvswitch/agent/test_ovs_neutron_agent.py @@ -350,7 +350,7 @@ class TestOvsNeutronAgent(object): self.agent.treat_devices_added_or_updated([{}], False)) # The function should not raise self.assertFalse(skip_devs) - return func.called + return func.called def test_treat_devices_added_updated_ignores_invalid_ofport(self): port = mock.Mock() @@ -1003,19 +1003,19 @@ class TestOvsNeutronAgent(object): except Exception: pass - scan_ports.assert_has_calls([ - mock.call(set(), set()), - mock.call(set(), set()) - ]) - process_network_ports.assert_has_calls([ - mock.call(reply2, False), - mock.call(reply3, True) - ]) - self.assertTrue(update_stale.called) - # Verify the OVS restart we triggered in the loop - # re-setup the bridges - setup_int_br.assert_has_calls([mock.call()]) - setup_phys_br.assert_has_calls([mock.call({})]) + scan_ports.assert_has_calls([ + mock.call(set(), set()), + mock.call(set(), set()) + ]) + process_network_ports.assert_has_calls([ + mock.call(reply2, False), + mock.call(reply3, True) + ]) + self.assertTrue(update_stale.called) + # Verify the OVS restart we triggered in the loop + # re-setup the bridges + setup_int_br.assert_has_calls([mock.call()]) + setup_phys_br.assert_has_calls([mock.call({})]) def test_ovs_status(self): self._test_ovs_status(constants.OVS_NORMAL, @@ -1652,26 +1652,26 @@ class TestOvsDvrNeutronAgent(object): mock.patch.object(self.agent.dvr_agent, 'int_br', new=int_br), mock.patch.object(self.agent.dvr_agent, 'tun_br', new=tun_br), ) as (reclaim_vlan_fn, update_dev_down_fn, _, _, _, _): - self.agent.treat_devices_removed([self._port.vif_id]) - if ip_version == 4: - expected = [ - mock.call.delete_dvr_process_ipv4( - vlan_tag=lvid, - gateway_ip=gateway_ip), - ] - else: - expected = [ - mock.call.delete_dvr_process_ipv6( - vlan_tag=lvid, - gateway_mac=gateway_mac), - ] - expected.extend([ - mock.call.delete_dvr_process( + self.agent.treat_devices_removed([self._port.vif_id]) + if ip_version == 4: + 
expected = [ + mock.call.delete_dvr_process_ipv4( vlan_tag=lvid, - vif_mac=self._port.vif_mac), - ]) - self.assertEqual([], int_br.mock_calls) - self.assertEqual(expected, tun_br.mock_calls) + gateway_ip=gateway_ip), + ] + else: + expected = [ + mock.call.delete_dvr_process_ipv6( + vlan_tag=lvid, + gateway_mac=gateway_mac), + ] + expected.extend([ + mock.call.delete_dvr_process( + vlan_tag=lvid, + vif_mac=self._port.vif_mac), + ]) + self.assertEqual([], int_br.mock_calls) + self.assertEqual(expected, tun_br.mock_calls) def _test_treat_devices_removed_for_dvr(self, device_owner, ip_version=4): self._setup_for_dvr_test() @@ -1757,15 +1757,15 @@ class TestOvsDvrNeutronAgent(object): mock.patch.object(self.agent.dvr_agent, 'int_br', new=int_br), mock.patch.object(self.agent.dvr_agent, 'tun_br', new=tun_br), ) as (reclaim_vlan_fn, update_dev_down_fn, _, _, _, _): - self.agent.treat_devices_removed([self._compute_port.vif_id]) - int_br.assert_has_calls([ - mock.call.delete_dvr_to_src_mac( - network_type='vxlan', - vlan_tag=lvid, - dst_mac=self._compute_port.vif_mac, - ), - ]) - self.assertEqual([], tun_br.mock_calls) + self.agent.treat_devices_removed([self._compute_port.vif_id]) + int_br.assert_has_calls([ + mock.call.delete_dvr_to_src_mac( + network_type='vxlan', + vlan_tag=lvid, + dst_mac=self._compute_port.vif_mac, + ), + ]) + self.assertEqual([], tun_br.mock_calls) def test_treat_devices_removed_for_dvr_with_compute_ports(self): self._test_treat_devices_removed_for_dvr( @@ -1847,17 +1847,17 @@ class TestOvsDvrNeutronAgent(object): mock.patch.object(self.agent.dvr_agent, 'int_br', new=int_br), mock.patch.object(self.agent.dvr_agent, 'tun_br', new=tun_br), ) as (reclaim_vlan_fn, update_dev_down_fn, _, _, _, _): - self.agent.treat_devices_removed([self._port.vif_id]) - expected_on_int_br = [ - mock.call.delete_dvr_to_src_mac( - network_type='vxlan', - dst_mac=self._port.vif_mac, - vlan_tag=lvid, - ), - ] - self.assertEqual(expected_on_int_br, int_br.mock_calls) - 
expected_on_tun_br = [] - self.assertEqual(expected_on_tun_br, tun_br.mock_calls) + self.agent.treat_devices_removed([self._port.vif_id]) + expected_on_int_br = [ + mock.call.delete_dvr_to_src_mac( + network_type='vxlan', + dst_mac=self._port.vif_mac, + vlan_tag=lvid, + ), + ] + self.assertEqual(expected_on_int_br, int_br.mock_calls) + expected_on_tun_br = [] + self.assertEqual(expected_on_tun_br, tun_br.mock_calls) def test_setup_dvr_flows_on_int_br(self): self._setup_for_dvr_test() diff --git a/neutron/tests/unit/plugins/openvswitch/test_ovs_tunnel.py b/neutron/tests/unit/plugins/openvswitch/test_ovs_tunnel.py index a3f8600ed99..64d130b4bbe 100644 --- a/neutron/tests/unit/plugins/openvswitch/test_ovs_tunnel.py +++ b/neutron/tests/unit/plugins/openvswitch/test_ovs_tunnel.py @@ -524,23 +524,24 @@ class TunnelTest(object): except Exception: pass - # FIXME(salv-orlando): There should not be assertions on log messages - log_exception.assert_called_once_with( - "Error while processing VIF ports") - scan_ports.assert_has_calls([ - mock.call(set(), set()), - mock.call(set(['tap0']), set()) - ]) - process_network_ports.assert_has_calls([ - mock.call({'current': set(['tap0']), - 'removed': set([]), - 'added': set(['tap2'])}, False), - mock.call({'current': set(['tap2']), - 'removed': set(['tap0']), - 'added': set([])}, False) - ]) - self.assertTrue(update_stale.called) - self._verify_mock_calls() + # FIXME(salv-orlando): There should not be assertions on log + # messages + log_exception.assert_called_once_with( + "Error while processing VIF ports") + scan_ports.assert_has_calls([ + mock.call(set(), set()), + mock.call(set(['tap0']), set()) + ]) + process_network_ports.assert_has_calls([ + mock.call({'current': set(['tap0']), + 'removed': set([]), + 'added': set(['tap2'])}, False), + mock.call({'current': set(['tap2']), + 'removed': set(['tap0']), + 'added': set([])}, False) + ]) + self.assertTrue(update_stale.called) + self._verify_mock_calls() class 
TunnelTestOFCtl(TunnelTest, ovs_test_base.OVSOFCtlTestBase): diff --git a/neutron/tests/unit/scheduler/test_l3_agent_scheduler.py b/neutron/tests/unit/scheduler/test_l3_agent_scheduler.py index 93ceae59761..07f06db7a11 100644 --- a/neutron/tests/unit/scheduler/test_l3_agent_scheduler.py +++ b/neutron/tests/unit/scheduler/test_l3_agent_scheduler.py @@ -134,10 +134,10 @@ class L3SchedulerBaseTestCase(base.BaseTestCase): ) as (gs, gr): result = self.scheduler.auto_schedule_routers( self.plugin, mock.ANY, mock.ANY, mock.ANY) - self.assertTrue(self.plugin.get_enabled_agent_on_host.called) - self.assertTrue(result) - self.assertTrue(gs.called) - self.assertTrue(gr.called) + self.assertTrue(self.plugin.get_enabled_agent_on_host.called) + self.assertTrue(result) + self.assertTrue(gs.called) + self.assertTrue(gr.called) def test_auto_schedule_routers_no_agents(self): self.plugin.get_enabled_agent_on_host.return_value = None @@ -257,9 +257,9 @@ class L3SchedulerBaseTestCase(base.BaseTestCase): ) as ( mock_has_binding, mock_bind): self.scheduler._bind_routers(mock.ANY, mock.ANY, routers, agent) - mock_has_binding.assert_called_once_with(mock.ANY, 'foo_router', - 'foo_agent') - self.assertEqual(not has_binding, mock_bind.called) + mock_has_binding.assert_called_once_with(mock.ANY, 'foo_router', + 'foo_agent') + self.assertEqual(not has_binding, mock_bind.called) def test__bind_routers_ha_has_binding(self): self._test__bind_routers_ha(has_binding=True) @@ -473,10 +473,9 @@ class L3SchedulerTestBaseMixin(object): with contextlib.nested( mock.patch.object(scheduler, 'bind_router'), mock.patch.object( - plugin, 'get_snat_bindings', return_value=False) - ): - scheduler._schedule_router( - plugin, self.adminContext, 'foo_router_id', None) + plugin, 'get_snat_bindings', return_value=False)): + scheduler._schedule_router( + plugin, self.adminContext, 'foo_router_id', None) expected_calls = [ mock.call.get_router(mock.ANY, 'foo_router_id'), mock.call.get_l3_agents_hosting_routers( @@ 
-1308,8 +1307,8 @@ class L3DvrSchedulerTestCase(testlib_api.SqlTestCase): mock.patch.object(self.dut, 'bind_dvr_router_servicenode') ) as (mock_gl3, mock_snat_canidates, mock_bind_snat, mock_bind_dvr): self.dut.schedule_snat_router(self.adminContext, 'foo', 'bar') - self.assertTrue(mock_bind_snat.called) - self.assertFalse(mock_bind_dvr.called) + self.assertTrue(mock_bind_snat.called) + self.assertFalse(mock_bind_dvr.called) def test_schedule_snat_router_return_value(self): agent, router = self._prepare_schedule_snat_tests() @@ -1340,7 +1339,7 @@ class L3DvrSchedulerTestCase(testlib_api.SqlTestCase): mock_snat_bind.return_value = False self.dut.schedule_snat_router( self.adminContext, 'foo_router_id', router) - self.assertFalse(mock_unbind.called) + self.assertFalse(mock_unbind.called) def test_schedule_snat_router_with_snat_candidates(self): agent, router = self._prepare_schedule_snat_tests() @@ -1359,8 +1358,8 @@ class L3DvrSchedulerTestCase(testlib_api.SqlTestCase): mock_candidates.return_value = [agent] self.dut.schedule_snat_router( self.adminContext, 'foo_router_id', mock.ANY) - mock_bind.assert_called_once_with( - self.adminContext, 'foo_router_id', [agent]) + mock_bind.assert_called_once_with( + self.adminContext, 'foo_router_id', [agent]) def test_unbind_snat_servicenode(self): router_id = 'foo_router_id' @@ -1383,9 +1382,9 @@ class L3DvrSchedulerTestCase(testlib_api.SqlTestCase): mock_query.return_value = binding mock_get_subnets.return_value = ['foo_subnet_id'] self.dut.unbind_snat_servicenode(self.adminContext, router_id) - mock_get_subnets.assert_called_with(self.adminContext, router_id) - self.assertTrue(mock_session.call_count) - self.assertTrue(mock_delete.call_count) + mock_get_subnets.assert_called_with(self.adminContext, router_id) + self.assertTrue(mock_session.call_count) + self.assertTrue(mock_delete.call_count) core_plugin.assert_called_once_with() l3_notifier.assert_called_once_with() From 422588e13338dab4a5ba1973c96256690ba4adf5 Mon Sep 17 
00:00:00 2001 From: =?UTF-8?q?Adrien=20Verg=C3=A9?= Date: Fri, 29 May 2015 22:54:33 +0200 Subject: [PATCH 084/292] Get completely rid of contextlib.nested `contextlib.nested` is deprecated since Python 2.7 and incompatible with Python 3. This patch removes all its occurences by using the helper script at [1]. This is a necessary step to allow us running all unit tests with Python 3 (not just a small subset as it is done now). [1]: https://github.com/adrienverge/context_unnester Change-Id: I8d1de09ff38ed0af9fb56f423a2c43476408e0fb Blueprint: neutron-python3 Closes-Bug: #1428424 --- neutron/hacking/checks.py | 13 - .../unit/db/metering/test_metering_db.py | 60 +- .../tests/unit/db/test_db_base_plugin_v2.py | 370 ++++----- neutron/tests/unit/db/test_l3_dvr_db.py | 123 ++- .../tests/unit/db/test_securitygroups_db.py | 10 +- .../unit/extensions/test_external_net.py | 4 +- .../tests/unit/extensions/test_extraroute.py | 34 +- neutron/tests/unit/extensions/test_l3.py | 150 ++-- .../unit/extensions/test_securitygroup.py | 141 ++-- .../unit/plugins/ibm/test_sdnve_agent.py | 15 +- .../unit/plugins/ibm/test_sdnve_plugin.py | 23 +- .../agent/test_linuxbridge_neutron_agent.py | 261 +++--- .../plugins/ml2/drivers/base_type_tunnel.py | 17 +- .../ml2/drivers/l2pop/test_mech_driver.py | 22 +- .../plugins/ml2/test_extension_driver_api.py | 37 +- neutron/tests/unit/plugins/ml2/test_plugin.py | 115 ++- neutron/tests/unit/plugins/ml2/test_rpc.py | 9 +- .../unit/plugins/ml2/test_security_group.py | 18 +- .../plugins/oneconvergence/test_nvsd_agent.py | 43 +- .../oneconvergence/test_nvsd_plugin.py | 3 +- .../agent/test_ovs_neutron_agent.py | 753 +++++++++--------- .../openvswitch/test_agent_scheduler.py | 114 ++- .../plugins/openvswitch/test_ovs_tunnel.py | 26 +- .../sriovnicagent/test_eswitch_manager.py | 170 ++-- .../scheduler/test_dhcp_agent_scheduler.py | 36 +- .../unit/scheduler/test_l3_agent_scheduler.py | 277 +++---- 26 files changed, 1276 insertions(+), 1568 deletions(-) diff --git 
a/neutron/hacking/checks.py b/neutron/hacking/checks.py index 3a7d8df48d5..c6160072d58 100644 --- a/neutron/hacking/checks.py +++ b/neutron/hacking/checks.py @@ -149,19 +149,6 @@ def check_no_contextlib_nested(logical_line, filename): "docs.python.org/2/library/contextlib.html#contextlib.nested for " "more information.") - # TODO(ankit): The following check is temporary. - # A series of patches will be submitted to address - # these issues. It should be removed completely - # when bug 1428424 is closed. - ignore_dirs = [ - "neutron/tests/unit/db", - "neutron/tests/unit/extensions", - "neutron/tests/unit/plugins", - "neutron/tests/unit/scheduler"] - for directory in ignore_dirs: - if directory in filename: - return - if contextlib_nested.match(logical_line): yield(0, msg) diff --git a/neutron/tests/unit/db/metering/test_metering_db.py b/neutron/tests/unit/db/metering/test_metering_db.py index bd513696b33..c9e185163bc 100644 --- a/neutron/tests/unit/db/metering/test_metering_db.py +++ b/neutron/tests/unit/db/metering/test_metering_db.py @@ -167,9 +167,9 @@ class TestMetering(MeteringPluginDbTestCase): name = 'my label' description = 'my metering label' - with contextlib.nested( - self.metering_label(name, description), - self.metering_label(name, description)) as metering_label: + with self.metering_label(name, description) as v1,\ + self.metering_label(name, description) as v2: + metering_label = (v1, v2) self._test_list_resources('metering-label', metering_label) @@ -224,15 +224,15 @@ class TestMetering(MeteringPluginDbTestCase): remote_ip_prefix = '192.168.0.0/24' excluded = True - with contextlib.nested( - self.metering_label_rule(metering_label_id, - direction, - remote_ip_prefix, - excluded), - self.metering_label_rule(metering_label_id, - 'ingress', - remote_ip_prefix, - excluded)) as metering_label_rule: + with self.metering_label_rule(metering_label_id, + direction, + remote_ip_prefix, + excluded) as v1,\ + self.metering_label_rule(metering_label_id, + 
'ingress', + remote_ip_prefix, + excluded) as v2: + metering_label_rule = (v1, v2) self._test_list_resources('metering-label-rule', metering_label_rule) @@ -248,15 +248,15 @@ class TestMetering(MeteringPluginDbTestCase): remote_ip_prefix = '192.168.0.0/24' excluded = True - with contextlib.nested( - self.metering_label_rule(metering_label_id, - direction, - remote_ip_prefix, - excluded), - self.metering_label_rule(metering_label_id, - direction, - n_consts.IPv4_ANY, - False)) as metering_label_rule: + with self.metering_label_rule(metering_label_id, + direction, + remote_ip_prefix, + excluded) as v1,\ + self.metering_label_rule(metering_label_id, + direction, + n_consts.IPv4_ANY, + False) as v2: + metering_label_rule = (v1, v2) self._test_list_resources('metering-label-rule', metering_label_rule) @@ -299,15 +299,15 @@ class TestMetering(MeteringPluginDbTestCase): remote_ip_prefix = '192.168.0.0/24' excluded = True - with contextlib.nested( - self.metering_label_rule(metering_label_id1, - direction, - remote_ip_prefix, - excluded), - self.metering_label_rule(metering_label_id2, - direction, - remote_ip_prefix, - excluded)) as metering_label_rule: + with self.metering_label_rule(metering_label_id1, + direction, + remote_ip_prefix, + excluded) as v1,\ + self.metering_label_rule(metering_label_id2, + direction, + remote_ip_prefix, + excluded) as v2: + metering_label_rule = (v1, v2) self._test_list_resources('metering-label-rule', metering_label_rule) diff --git a/neutron/tests/unit/db/test_db_base_plugin_v2.py b/neutron/tests/unit/db/test_db_base_plugin_v2.py index f34beb85a30..21989c0bfde 100644 --- a/neutron/tests/unit/db/test_db_base_plugin_v2.py +++ b/neutron/tests/unit/db/test_db_base_plugin_v2.py @@ -1040,15 +1040,14 @@ class TestPortsV2(NeutronDbPluginV2TestCase): def test_list_ports(self): # for this test we need to enable overlapping ips cfg.CONF.set_default('allow_overlapping_ips', True) - with contextlib.nested(self.port(), - self.port(), - self.port()) as 
ports: + with self.port() as v1, self.port() as v2, self.port() as v3: + ports = (v1, v2, v3) self._test_list_resources('port', ports) def test_list_ports_filtered_by_fixed_ip(self): # for this test we need to enable overlapping ips cfg.CONF.set_default('allow_overlapping_ips', True) - with contextlib.nested(self.port(), self.port()) as (port1, port2): + with self.port() as port1, self.port(): fixed_ips = port1['port']['fixed_ips'][0] query_params = """ fixed_ips=ip_address%%3D%s&fixed_ips=ip_address%%3D%s&fixed_ips=subnet_id%%3D%s @@ -1061,9 +1060,8 @@ fixed_ips=ip_address%%3D%s&fixed_ips=ip_address%%3D%s&fixed_ips=subnet_id%%3D%s def test_list_ports_public_network(self): with self.network(shared=True) as network: with self.subnet(network) as subnet: - with contextlib.nested(self.port(subnet, tenant_id='tenant_1'), - self.port(subnet, tenant_id='tenant_2') - ) as (port1, port2): + with self.port(subnet, tenant_id='tenant_1') as port1,\ + self.port(subnet, tenant_id='tenant_2') as port2: # Admin request - must return both ports self._test_list_resources('port', [port1, port2]) # Tenant_1 request - must return single port @@ -1079,13 +1077,12 @@ fixed_ips=ip_address%%3D%s&fixed_ips=ip_address%%3D%s&fixed_ips=subnet_id%%3D%s if self._skip_native_sorting: self.skipTest("Skip test for not implemented sorting feature") cfg.CONF.set_default('allow_overlapping_ips', True) - with contextlib.nested(self.port(admin_state_up='True', - mac_address='00:00:00:00:00:01'), - self.port(admin_state_up='False', - mac_address='00:00:00:00:00:02'), - self.port(admin_state_up='False', - mac_address='00:00:00:00:00:03') - ) as (port1, port2, port3): + with self.port(admin_state_up='True', + mac_address='00:00:00:00:00:01') as port1,\ + self.port(admin_state_up='False', + mac_address='00:00:00:00:00:02') as port2,\ + self.port(admin_state_up='False', + mac_address='00:00:00:00:00:03') as port3: self._test_list_with_sort('port', (port3, port2, port1), [('admin_state_up', 'asc'), 
('mac_address', 'desc')]) @@ -1096,13 +1093,12 @@ fixed_ips=ip_address%%3D%s&fixed_ips=ip_address%%3D%s&fixed_ips=subnet_id%%3D%s new=_fake_get_sorting_helper) helper_patcher.start() cfg.CONF.set_default('allow_overlapping_ips', True) - with contextlib.nested(self.port(admin_state_up='True', - mac_address='00:00:00:00:00:01'), - self.port(admin_state_up='False', - mac_address='00:00:00:00:00:02'), - self.port(admin_state_up='False', - mac_address='00:00:00:00:00:03') - ) as (port1, port2, port3): + with self.port(admin_state_up='True', + mac_address='00:00:00:00:00:01') as port1,\ + self.port(admin_state_up='False', + mac_address='00:00:00:00:00:02') as port2,\ + self.port(admin_state_up='False', + mac_address='00:00:00:00:00:03') as port3: self._test_list_with_sort('port', (port3, port2, port1), [('admin_state_up', 'asc'), ('mac_address', 'desc')]) @@ -1111,10 +1107,9 @@ fixed_ips=ip_address%%3D%s&fixed_ips=ip_address%%3D%s&fixed_ips=subnet_id%%3D%s if self._skip_native_pagination: self.skipTest("Skip test for not implemented pagination feature") cfg.CONF.set_default('allow_overlapping_ips', True) - with contextlib.nested(self.port(mac_address='00:00:00:00:00:01'), - self.port(mac_address='00:00:00:00:00:02'), - self.port(mac_address='00:00:00:00:00:03') - ) as (port1, port2, port3): + with self.port(mac_address='00:00:00:00:00:01') as port1,\ + self.port(mac_address='00:00:00:00:00:02') as port2,\ + self.port(mac_address='00:00:00:00:00:03') as port3: self._test_list_with_pagination('port', (port1, port2, port3), ('mac_address', 'asc'), 2, 2) @@ -1125,10 +1120,9 @@ fixed_ips=ip_address%%3D%s&fixed_ips=ip_address%%3D%s&fixed_ips=subnet_id%%3D%s new=_fake_get_pagination_helper) helper_patcher.start() cfg.CONF.set_default('allow_overlapping_ips', True) - with contextlib.nested(self.port(mac_address='00:00:00:00:00:01'), - self.port(mac_address='00:00:00:00:00:02'), - self.port(mac_address='00:00:00:00:00:03') - ) as (port1, port2, port3): + with 
self.port(mac_address='00:00:00:00:00:01') as port1,\ + self.port(mac_address='00:00:00:00:00:02') as port2,\ + self.port(mac_address='00:00:00:00:00:03') as port3: self._test_list_with_pagination('port', (port1, port2, port3), ('mac_address', 'asc'), 2, 2) @@ -1137,10 +1131,9 @@ fixed_ips=ip_address%%3D%s&fixed_ips=ip_address%%3D%s&fixed_ips=subnet_id%%3D%s if self._skip_native_pagination: self.skipTest("Skip test for not implemented pagination feature") cfg.CONF.set_default('allow_overlapping_ips', True) - with contextlib.nested(self.port(mac_address='00:00:00:00:00:01'), - self.port(mac_address='00:00:00:00:00:02'), - self.port(mac_address='00:00:00:00:00:03') - ) as (port1, port2, port3): + with self.port(mac_address='00:00:00:00:00:01') as port1,\ + self.port(mac_address='00:00:00:00:00:02') as port2,\ + self.port(mac_address='00:00:00:00:00:03') as port3: self._test_list_with_pagination_reverse('port', (port1, port2, port3), ('mac_address', 'asc'), @@ -1152,10 +1145,9 @@ fixed_ips=ip_address%%3D%s&fixed_ips=ip_address%%3D%s&fixed_ips=subnet_id%%3D%s new=_fake_get_pagination_helper) helper_patcher.start() cfg.CONF.set_default('allow_overlapping_ips', True) - with contextlib.nested(self.port(mac_address='00:00:00:00:00:01'), - self.port(mac_address='00:00:00:00:00:02'), - self.port(mac_address='00:00:00:00:00:03') - ) as (port1, port2, port3): + with self.port(mac_address='00:00:00:00:00:01') as port1,\ + self.port(mac_address='00:00:00:00:00:02') as port2,\ + self.port(mac_address='00:00:00:00:00:03') as port3: self._test_list_with_pagination_reverse('port', (port1, port2, port3), ('mac_address', 'asc'), @@ -1662,14 +1654,13 @@ fixed_ips=ip_address%%3D%s&fixed_ips=ip_address%%3D%s&fixed_ips=subnet_id%%3D%s def test_requested_subnet_id_v4_and_v6_slaac(self): with self.network() as network: - with contextlib.nested( - self.subnet(network), - self.subnet(network, - cidr='2607:f0d0:1002:51::/64', - ip_version=6, - gateway_ip='fe80::1', - 
ipv6_address_mode=constants.IPV6_SLAAC) - ) as (subnet, subnet2): + with self.subnet(network) as subnet,\ + self.subnet( + network, + cidr='2607:f0d0:1002:51::/64', + ip_version=6, + gateway_ip='fe80::1', + ipv6_address_mode=constants.IPV6_SLAAC) as subnet2: with self.port( subnet, fixed_ips=[{'subnet_id': subnet['subnet']['id']}, @@ -1689,13 +1680,12 @@ fixed_ips=ip_address%%3D%s&fixed_ips=ip_address%%3D%s&fixed_ips=subnet_id%%3D%s def test_create_router_port_ipv4_and_ipv6_slaac_no_fixed_ips(self): with self.network() as network: # Create an IPv4 and an IPv6 SLAAC subnet on the network - with contextlib.nested( - self.subnet(network), - self.subnet(network, - cidr='2607:f0d0:1002:51::/64', - ip_version=6, - gateway_ip='fe80::1', - ipv6_address_mode=constants.IPV6_SLAAC)): + with self.subnet(network),\ + self.subnet(network, + cidr='2607:f0d0:1002:51::/64', + ip_version=6, + gateway_ip='fe80::1', + ipv6_address_mode=constants.IPV6_SLAAC): # Create a router port without specifying fixed_ips port = self._make_port( self.fmt, network['network']['id'], @@ -2144,11 +2134,9 @@ fixed_ips=ip_address%%3D%s&fixed_ips=ip_address%%3D%s&fixed_ips=subnet_id%%3D%s plugin = manager.NeutronManager.get_plugin() ctx = context.get_admin_context() with self.subnet() as subnet: - with contextlib.nested( - self.port(subnet=subnet, device_id='owner1'), - self.port(subnet=subnet, device_id='owner1'), - self.port(subnet=subnet, device_id='owner2'), - ) as (p1, p2, p3): + with self.port(subnet=subnet, device_id='owner1') as p1,\ + self.port(subnet=subnet, device_id='owner1') as p2,\ + self.port(subnet=subnet, device_id='owner2') as p3: network_id = subnet['subnet']['network_id'] plugin.delete_ports_by_device_id(ctx, 'owner1', network_id) @@ -2162,11 +2150,9 @@ fixed_ips=ip_address%%3D%s&fixed_ips=ip_address%%3D%s&fixed_ips=subnet_id%%3D%s def _test_delete_ports_by_device_id_second_call_failure(self, plugin): ctx = context.get_admin_context() with self.subnet() as subnet: - with 
contextlib.nested( - self.port(subnet=subnet, device_id='owner1'), - self.port(subnet=subnet, device_id='owner1'), - self.port(subnet=subnet, device_id='owner2'), - ) as (p1, p2, p3): + with self.port(subnet=subnet, device_id='owner1') as p1,\ + self.port(subnet=subnet, device_id='owner1') as p2,\ + self.port(subnet=subnet, device_id='owner2') as p3: orig = plugin.delete_port with mock.patch.object(plugin, 'delete_port') as del_port: @@ -2194,10 +2180,8 @@ fixed_ips=ip_address%%3D%s&fixed_ips=ip_address%%3D%s&fixed_ips=subnet_id%%3D%s def _test_delete_ports_ignores_port_not_found(self, plugin): ctx = context.get_admin_context() with self.subnet() as subnet: - with contextlib.nested( - self.port(subnet=subnet, device_id='owner1'), - mock.patch.object(plugin, 'delete_port') - ) as (p, del_port): + with self.port(subnet=subnet, device_id='owner1') as p,\ + mock.patch.object(plugin, 'delete_port') as del_port: del_port.side_effect = n_exc.PortNotFound( port_id=p['port']['id'] ) @@ -2507,21 +2491,16 @@ class TestNetworksV2(NeutronDbPluginV2TestCase): ) def test_list_networks(self): - with contextlib.nested(self.network(), - self.network(), - self.network()) as networks: + with self.network() as v1, self.network() as v2, self.network() as v3: + networks = (v1, v2, v3) self._test_list_resources('network', networks) def test_list_networks_with_sort_native(self): if self._skip_native_sorting: self.skipTest("Skip test for not implemented sorting feature") - with contextlib.nested(self.network(admin_status_up=True, - name='net1'), - self.network(admin_status_up=False, - name='net2'), - self.network(admin_status_up=False, - name='net3') - ) as (net1, net2, net3): + with self.network(admin_status_up=True, name='net1') as net1,\ + self.network(admin_status_up=False, name='net2') as net2,\ + self.network(admin_status_up=False, name='net3') as net3: self._test_list_with_sort('network', (net3, net2, net1), [('admin_state_up', 'asc'), ('name', 'desc')]) @@ -2529,13 +2508,9 @@ class 
TestNetworksV2(NeutronDbPluginV2TestCase): def test_list_networks_with_sort_extended_attr_native_returns_400(self): if self._skip_native_sorting: self.skipTest("Skip test for not implemented sorting feature") - with contextlib.nested(self.network(admin_status_up=True, - name='net1'), - self.network(admin_status_up=False, - name='net2'), - self.network(admin_status_up=False, - name='net3') - ): + with self.network(admin_status_up=True, name='net1'),\ + self.network(admin_status_up=False, name='net2'),\ + self.network(admin_status_up=False, name='net3'): req = self.new_list_request( 'networks', params='sort_key=provider:segmentation_id&sort_dir=asc') @@ -2545,13 +2520,9 @@ class TestNetworksV2(NeutronDbPluginV2TestCase): def test_list_networks_with_sort_remote_key_native_returns_400(self): if self._skip_native_sorting: self.skipTest("Skip test for not implemented sorting feature") - with contextlib.nested(self.network(admin_status_up=True, - name='net1'), - self.network(admin_status_up=False, - name='net2'), - self.network(admin_status_up=False, - name='net3') - ): + with self.network(admin_status_up=True, name='net1'),\ + self.network(admin_status_up=False, name='net2'),\ + self.network(admin_status_up=False, name='net3'): req = self.new_list_request( 'networks', params='sort_key=subnets&sort_dir=asc') res = req.get_response(self.api) @@ -2562,13 +2533,9 @@ class TestNetworksV2(NeutronDbPluginV2TestCase): 'neutron.api.v2.base.Controller._get_sorting_helper', new=_fake_get_sorting_helper) helper_patcher.start() - with contextlib.nested(self.network(admin_status_up=True, - name='net1'), - self.network(admin_status_up=False, - name='net2'), - self.network(admin_status_up=False, - name='net3') - ) as (net1, net2, net3): + with self.network(admin_status_up=True, name='net1') as net1,\ + self.network(admin_status_up=False, name='net2') as net2,\ + self.network(admin_status_up=False, name='net3') as net3: self._test_list_with_sort('network', (net3, net2, net1), 
[('admin_state_up', 'asc'), ('name', 'desc')]) @@ -2576,10 +2543,9 @@ class TestNetworksV2(NeutronDbPluginV2TestCase): def test_list_networks_with_pagination_native(self): if self._skip_native_pagination: self.skipTest("Skip test for not implemented pagination feature") - with contextlib.nested(self.network(name='net1'), - self.network(name='net2'), - self.network(name='net3') - ) as (net1, net2, net3): + with self.network(name='net1') as net1,\ + self.network(name='net2') as net2,\ + self.network(name='net3') as net3: self._test_list_with_pagination('network', (net1, net2, net3), ('name', 'asc'), 2, 2) @@ -2589,10 +2555,9 @@ class TestNetworksV2(NeutronDbPluginV2TestCase): 'neutron.api.v2.base.Controller._get_pagination_helper', new=_fake_get_pagination_helper) helper_patcher.start() - with contextlib.nested(self.network(name='net1'), - self.network(name='net2'), - self.network(name='net3') - ) as (net1, net2, net3): + with self.network(name='net1') as net1,\ + self.network(name='net2') as net2,\ + self.network(name='net3') as net3: self._test_list_with_pagination('network', (net1, net2, net3), ('name', 'asc'), 2, 2) @@ -2602,13 +2567,9 @@ class TestNetworksV2(NeutronDbPluginV2TestCase): 'neutron.api.v2.base.Controller._get_pagination_helper', new=_fake_get_pagination_helper) helper_patcher.start() - with contextlib.nested(self.network(name='net1', - shared=True), - self.network(name='net2', - shared=False), - self.network(name='net3', - shared=True) - ) as (net1, net2, net3): + with self.network(name='net1', shared=True) as net1,\ + self.network(name='net2', shared=False) as net2,\ + self.network(name='net3', shared=True) as net3: self._test_list_with_pagination('network', (net1, net2, net3), ('name', 'asc'), 2, 2, @@ -2618,10 +2579,9 @@ class TestNetworksV2(NeutronDbPluginV2TestCase): def test_list_networks_without_pk_in_fields_pagination_native(self): if self._skip_native_pagination: self.skipTest("Skip test for not implemented pagination feature") - with 
contextlib.nested(self.network(name='net1'), - self.network(name='net2'), - self.network(name='net3') - ) as (net1, net2, net3): + with self.network(name='net1') as net1,\ + self.network(name='net2') as net2,\ + self.network(name='net3') as net3: self._test_list_with_pagination('network', (net1, net2, net3), ('name', 'asc'), 2, 2, @@ -2631,10 +2591,9 @@ class TestNetworksV2(NeutronDbPluginV2TestCase): def test_list_networks_with_pagination_reverse_native(self): if self._skip_native_pagination: self.skipTest("Skip test for not implemented pagination feature") - with contextlib.nested(self.network(name='net1'), - self.network(name='net2'), - self.network(name='net3') - ) as (net1, net2, net3): + with self.network(name='net1') as net1,\ + self.network(name='net2') as net2,\ + self.network(name='net3') as net3: self._test_list_with_pagination_reverse('network', (net1, net2, net3), ('name', 'asc'), 2, 2) @@ -2644,18 +2603,16 @@ class TestNetworksV2(NeutronDbPluginV2TestCase): 'neutron.api.v2.base.Controller._get_pagination_helper', new=_fake_get_pagination_helper) helper_patcher.start() - with contextlib.nested(self.network(name='net1'), - self.network(name='net2'), - self.network(name='net3') - ) as (net1, net2, net3): + with self.network(name='net1') as net1,\ + self.network(name='net2') as net2,\ + self.network(name='net3') as net3: self._test_list_with_pagination_reverse('network', (net1, net2, net3), ('name', 'asc'), 2, 2) def test_list_networks_with_parameters(self): - with contextlib.nested(self.network(name='net1', - admin_state_up=False), - self.network(name='net2')) as (net1, net2): + with self.network(name='net1', admin_state_up=False) as net1,\ + self.network(name='net2') as net2: query_params = 'admin_state_up=False' self._test_list_resources('network', [net1], query_params=query_params) @@ -2674,25 +2631,23 @@ class TestNetworksV2(NeutronDbPluginV2TestCase): self.assertIsNone(res['networks'][0].get('id')) def 
test_list_networks_with_parameters_invalid_values(self): - with contextlib.nested(self.network(name='net1', - admin_state_up=False), - self.network(name='net2')) as (net1, net2): + with self.network(name='net1', admin_state_up=False),\ + self.network(name='net2'): req = self.new_list_request('networks', params='admin_state_up=fake') res = req.get_response(self.api) self.assertEqual(webob.exc.HTTPClientError.code, res.status_int) def test_list_shared_networks_with_non_admin_user(self): - with contextlib.nested(self.network(shared=False, - name='net1', - tenant_id='tenant1'), - self.network(shared=True, - name='net2', - tenant_id='another_tenant'), - self.network(shared=False, - name='net3', - tenant_id='another_tenant') - ) as (net1, net2, net3): + with self.network(shared=False, + name='net1', + tenant_id='tenant1') as net1,\ + self.network(shared=True, + name='net2', + tenant_id='another_tenant') as net2,\ + self.network(shared=False, + name='net3', + tenant_id='another_tenant'): ctx = context.Context(user_id='non_admin', tenant_id='tenant1', is_admin=False) @@ -2931,8 +2886,7 @@ class TestSubnetsV2(NeutronDbPluginV2TestCase): cidr_2 = '10.0.0.0/24' cfg.CONF.set_override('allow_overlapping_ips', True) - with contextlib.nested(self.subnet(cidr=cidr_1), - self.subnet(cidr=cidr_2)): + with self.subnet(cidr=cidr_1), self.subnet(cidr=cidr_2): pass def test_create_2_subnets_overlapping_cidr_not_allowed_returns_400(self): @@ -2941,8 +2895,7 @@ class TestSubnetsV2(NeutronDbPluginV2TestCase): cfg.CONF.set_override('allow_overlapping_ips', False) with testlib_api.ExpectedException( webob.exc.HTTPClientError) as ctx_manager: - with contextlib.nested(self.subnet(cidr=cidr_1), - self.subnet(cidr=cidr_2)): + with self.subnet(cidr=cidr_1), self.subnet(cidr=cidr_2): pass self.assertEqual(ctx_manager.exception.code, webob.exc.HTTPClientError.code) @@ -3100,10 +3053,9 @@ class TestSubnetsV2(NeutronDbPluginV2TestCase): def 
test_delete_subnet_with_other_subnet_on_network_still_in_use(self): with self.network() as network: - with contextlib.nested( - self.subnet(network=network), - self.subnet(network=network, cidr='10.0.1.0/24'), - ) as (subnet1, subnet2): + with self.subnet(network=network) as subnet1,\ + self.subnet(network=network, + cidr='10.0.1.0/24') as subnet2: subnet1_id = subnet1['subnet']['id'] subnet2_id = subnet2['subnet']['id'] with self.port( @@ -3219,16 +3171,15 @@ class TestSubnetsV2(NeutronDbPluginV2TestCase): set_context=False) def test_create_subnet_nonzero_cidr(self): - with contextlib.nested( - self.subnet(cidr='10.129.122.5/8'), - self.subnet(cidr='11.129.122.5/15'), - self.subnet(cidr='12.129.122.5/16'), - self.subnet(cidr='13.129.122.5/18'), - self.subnet(cidr='14.129.122.5/22'), - self.subnet(cidr='15.129.122.5/24'), - self.subnet(cidr='16.129.122.5/28'), - self.subnet(cidr='17.129.122.5/32', enable_dhcp=False) - ) as subs: + with self.subnet(cidr='10.129.122.5/8') as v1,\ + self.subnet(cidr='11.129.122.5/15') as v2,\ + self.subnet(cidr='12.129.122.5/16') as v3,\ + self.subnet(cidr='13.129.122.5/18') as v4,\ + self.subnet(cidr='14.129.122.5/22') as v5,\ + self.subnet(cidr='15.129.122.5/24') as v6,\ + self.subnet(cidr='16.129.122.5/28') as v7,\ + self.subnet(cidr='17.129.122.5/32', enable_dhcp=False) as v8: + subs = (v1, v2, v3, v4, v5, v6, v7, v8) # the API should accept and correct these for users self.assertEqual(subs[0]['subnet']['cidr'], '10.0.0.0/8') self.assertEqual(subs[1]['subnet']['cidr'], '11.128.0.0/15') @@ -4218,15 +4169,16 @@ class TestSubnetsV2(NeutronDbPluginV2TestCase): def test_list_subnets(self): with self.network() as network: - with contextlib.nested(self.subnet(network=network, - gateway_ip='10.0.0.1', - cidr='10.0.0.0/24'), - self.subnet(network=network, - gateway_ip='10.0.1.1', - cidr='10.0.1.0/24'), - self.subnet(network=network, - gateway_ip='10.0.2.1', - cidr='10.0.2.0/24')) as subnets: + with self.subnet(network=network, + 
gateway_ip='10.0.0.1', + cidr='10.0.0.0/24') as v1,\ + self.subnet(network=network, + gateway_ip='10.0.1.1', + cidr='10.0.1.0/24') as v2,\ + self.subnet(network=network, + gateway_ip='10.0.2.1', + cidr='10.0.2.0/24') as v3: + subnets = (v1, v2, v3) self._test_list_resources('subnet', subnets) def test_list_subnets_shared(self): @@ -4253,13 +4205,13 @@ class TestSubnetsV2(NeutronDbPluginV2TestCase): def test_list_subnets_with_parameter(self): with self.network() as network: - with contextlib.nested(self.subnet(network=network, - gateway_ip='10.0.0.1', - cidr='10.0.0.0/24'), - self.subnet(network=network, - gateway_ip='10.0.1.1', - cidr='10.0.1.0/24') - ) as subnets: + with self.subnet(network=network, + gateway_ip='10.0.0.1', + cidr='10.0.0.0/24') as v1,\ + self.subnet(network=network, + gateway_ip='10.0.1.1', + cidr='10.0.1.0/24') as v2: + subnets = (v1, v2) query_params = 'ip_version=4&ip_version=6' self._test_list_resources('subnet', subnets, query_params=query_params) @@ -4270,13 +4222,9 @@ class TestSubnetsV2(NeutronDbPluginV2TestCase): def test_list_subnets_with_sort_native(self): if self._skip_native_sorting: self.skipTest("Skip test for not implemented sorting feature") - with contextlib.nested(self.subnet(enable_dhcp=True, - cidr='10.0.0.0/24'), - self.subnet(enable_dhcp=False, - cidr='11.0.0.0/24'), - self.subnet(enable_dhcp=False, - cidr='12.0.0.0/24') - ) as (subnet1, subnet2, subnet3): + with self.subnet(enable_dhcp=True, cidr='10.0.0.0/24') as subnet1,\ + self.subnet(enable_dhcp=False, cidr='11.0.0.0/24') as subnet2,\ + self.subnet(enable_dhcp=False, cidr='12.0.0.0/24') as subnet3: self._test_list_with_sort('subnet', (subnet3, subnet2, subnet1), [('enable_dhcp', 'asc'), ('cidr', 'desc')]) @@ -4286,13 +4234,9 @@ class TestSubnetsV2(NeutronDbPluginV2TestCase): 'neutron.api.v2.base.Controller._get_sorting_helper', new=_fake_get_sorting_helper) helper_patcher.start() - with contextlib.nested(self.subnet(enable_dhcp=True, - cidr='10.0.0.0/24'), - 
self.subnet(enable_dhcp=False, - cidr='11.0.0.0/24'), - self.subnet(enable_dhcp=False, - cidr='12.0.0.0/24') - ) as (subnet1, subnet2, subnet3): + with self.subnet(enable_dhcp=True, cidr='10.0.0.0/24') as subnet1,\ + self.subnet(enable_dhcp=False, cidr='11.0.0.0/24') as subnet2,\ + self.subnet(enable_dhcp=False, cidr='12.0.0.0/24') as subnet3: self._test_list_with_sort('subnet', (subnet3, subnet2, subnet1), @@ -4302,10 +4246,9 @@ class TestSubnetsV2(NeutronDbPluginV2TestCase): def test_list_subnets_with_pagination_native(self): if self._skip_native_pagination: self.skipTest("Skip test for not implemented sorting feature") - with contextlib.nested(self.subnet(cidr='10.0.0.0/24'), - self.subnet(cidr='11.0.0.0/24'), - self.subnet(cidr='12.0.0.0/24') - ) as (subnet1, subnet2, subnet3): + with self.subnet(cidr='10.0.0.0/24') as subnet1,\ + self.subnet(cidr='11.0.0.0/24') as subnet2,\ + self.subnet(cidr='12.0.0.0/24') as subnet3: self._test_list_with_pagination('subnet', (subnet1, subnet2, subnet3), ('cidr', 'asc'), 2, 2) @@ -4315,10 +4258,9 @@ class TestSubnetsV2(NeutronDbPluginV2TestCase): 'neutron.api.v2.base.Controller._get_pagination_helper', new=_fake_get_pagination_helper) helper_patcher.start() - with contextlib.nested(self.subnet(cidr='10.0.0.0/24'), - self.subnet(cidr='11.0.0.0/24'), - self.subnet(cidr='12.0.0.0/24') - ) as (subnet1, subnet2, subnet3): + with self.subnet(cidr='10.0.0.0/24') as subnet1,\ + self.subnet(cidr='11.0.0.0/24') as subnet2,\ + self.subnet(cidr='12.0.0.0/24') as subnet3: self._test_list_with_pagination('subnet', (subnet1, subnet2, subnet3), ('cidr', 'asc'), 2, 2) @@ -4326,10 +4268,9 @@ class TestSubnetsV2(NeutronDbPluginV2TestCase): def test_list_subnets_with_pagination_reverse_native(self): if self._skip_native_sorting: self.skipTest("Skip test for not implemented sorting feature") - with contextlib.nested(self.subnet(cidr='10.0.0.0/24'), - self.subnet(cidr='11.0.0.0/24'), - self.subnet(cidr='12.0.0.0/24') - ) as (subnet1, subnet2, 
subnet3): + with self.subnet(cidr='10.0.0.0/24') as subnet1,\ + self.subnet(cidr='11.0.0.0/24') as subnet2,\ + self.subnet(cidr='12.0.0.0/24') as subnet3: self._test_list_with_pagination_reverse('subnet', (subnet1, subnet2, subnet3), @@ -4340,10 +4281,9 @@ class TestSubnetsV2(NeutronDbPluginV2TestCase): 'neutron.api.v2.base.Controller._get_pagination_helper', new=_fake_get_pagination_helper) helper_patcher.start() - with contextlib.nested(self.subnet(cidr='10.0.0.0/24'), - self.subnet(cidr='11.0.0.0/24'), - self.subnet(cidr='12.0.0.0/24') - ) as (subnet1, subnet2, subnet3): + with self.subnet(cidr='10.0.0.0/24') as subnet1,\ + self.subnet(cidr='11.0.0.0/24') as subnet2,\ + self.subnet(cidr='12.0.0.0/24') as subnet3: self._test_list_with_pagination_reverse('subnet', (subnet1, subnet2, subnet3), @@ -4599,9 +4539,8 @@ class TestSubnetsV2(NeutronDbPluginV2TestCase): self.assertEqual(res.status_int, webob.exc.HTTPNoContent.code) def test_delete_subnet_with_callback(self): - with contextlib.nested( - self.subnet(), - mock.patch.object(registry, 'notify')) as (subnet, notify): + with self.subnet() as subnet,\ + mock.patch.object(registry, 'notify') as notify: errors = [ exceptions.NotificationError( @@ -5557,18 +5496,11 @@ class NeutronDbPluginV2AsMixinTestCase(NeutronDbPluginV2TestCase, def test_get_user_allocation_for_dhcp_port_returns_none(self): plugin = manager.NeutronManager.get_plugin() - with contextlib.nested( - self.network(), - self.network() - ) as (net, net1): - with contextlib.nested( - self.subnet(network=net, cidr='10.0.0.0/24'), - self.subnet(network=net1, cidr='10.0.1.0/24') - ) as (subnet, subnet1): - with contextlib.nested( - self.port(subnet=subnet, device_owner='network:dhcp'), - self.port(subnet=subnet1) - ) as (p, p2): + with self.network() as net, self.network() as net1: + with self.subnet(network=net, cidr='10.0.0.0/24') as subnet,\ + self.subnet(network=net1, cidr='10.0.1.0/24') as subnet1: + with self.port(subnet=subnet, 
device_owner='network:dhcp'),\ + self.port(subnet=subnet1): # check that user allocations on another network don't # affect _subnet_get_user_allocation method res = plugin._subnet_get_user_allocation( diff --git a/neutron/tests/unit/db/test_l3_dvr_db.py b/neutron/tests/unit/db/test_l3_dvr_db.py index cf76942ace1..a37c5205706 100644 --- a/neutron/tests/unit/db/test_l3_dvr_db.py +++ b/neutron/tests/unit/db/test_l3_dvr_db.py @@ -13,7 +13,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -import contextlib import mock from neutron.common import constants as l3_const @@ -205,12 +204,11 @@ class L3DvrTestCase(testlib_api.SqlTestCase): router_db = self._create_router(router) router_id = router_db['id'] self.assertTrue(router_db.extra_attributes.distributed) - with contextlib.nested( - mock.patch.object(l3_dvr_db.l3_db.L3_NAT_db_mixin, - '_create_gw_port'), - mock.patch.object(self.mixin, - '_create_snat_intf_ports_if_not_exists') - ) as (cw, cs): + with mock.patch.object(l3_dvr_db.l3_db.L3_NAT_db_mixin, + '_create_gw_port'),\ + mock.patch.object( + self.mixin, + '_create_snat_intf_ports_if_not_exists') as cs: self.mixin._create_gw_port( self.ctx, router_id, router_db, mock.ANY, mock.ANY) @@ -228,16 +226,15 @@ class L3DvrTestCase(testlib_api.SqlTestCase): 'fixed_port_id': _uuid(), 'floating_network_id': _uuid() } - with contextlib.nested( - mock.patch.object(l3_dvr_db.l3_db.L3_NAT_db_mixin, - '_get_floatingip'), - mock.patch.object(self.mixin, - '_get_vm_port_hostid'), - mock.patch.object(self.mixin, - '_check_fips_availability_on_host_ext_net'), - mock.patch.object(self.mixin, - '_delete_floatingip_agent_gateway_port') - ) as (gfips, gvm, cfips, dfips): + with mock.patch.object(l3_dvr_db.l3_db.L3_NAT_db_mixin, + '_get_floatingip') as gfips,\ + mock.patch.object(self.mixin, '_get_vm_port_hostid') as gvm,\ + mock.patch.object( + self.mixin, + '_check_fips_availability_on_host_ext_net') as cfips,\ + mock.patch.object( 
+ self.mixin, + '_delete_floatingip_agent_gateway_port') as dfips: gfips.return_value = floatingip gvm.return_value = 'my-host' cfips.return_value = True @@ -254,10 +251,9 @@ class L3DvrTestCase(testlib_api.SqlTestCase): 'network_id': 'ext_network_id', 'device_owner': l3_const.DEVICE_OWNER_AGENT_GW } - with contextlib.nested( - mock.patch.object(manager.NeutronManager, 'get_plugin'), - mock.patch.object(self.mixin, - '_get_vm_port_hostid')) as (gp, vm_host): + with mock.patch.object(manager.NeutronManager, 'get_plugin') as gp,\ + mock.patch.object(self.mixin, + '_get_vm_port_hostid') as vm_host: plugin = mock.Mock() gp.return_value = plugin plugin.get_ports.return_value = [port] @@ -271,23 +267,20 @@ class L3DvrTestCase(testlib_api.SqlTestCase): def _delete_floatingip_test_setup(self, floatingip): fip_id = floatingip['id'] - with contextlib.nested( - mock.patch.object(l3_dvr_db.l3_db.L3_NAT_db_mixin, - '_get_floatingip'), - mock.patch.object(self.mixin, - '_clear_unused_fip_agent_gw_port'), - mock.patch.object(l3_dvr_db.l3_db.L3_NAT_db_mixin, - 'delete_floatingip')) as (gf, vf, df): + with mock.patch.object(l3_dvr_db.l3_db.L3_NAT_db_mixin, + '_get_floatingip') as gf,\ + mock.patch.object(self.mixin, + '_clear_unused_fip_agent_gw_port') as vf,\ + mock.patch.object(l3_dvr_db.l3_db.L3_NAT_db_mixin, + 'delete_floatingip'): gf.return_value = floatingip self.mixin.delete_floatingip(self.ctx, fip_id) return vf def _disassociate_floatingip_setup(self, port_id=None, floatingip=None): - with contextlib.nested( - mock.patch.object(self.mixin, '_get_floatingip_on_port'), - mock.patch.object(self.mixin, - '_clear_unused_fip_agent_gw_port'), - ) as (gf, vf): + with mock.patch.object(self.mixin, '_get_floatingip_on_port') as gf,\ + mock.patch.object(self.mixin, + '_clear_unused_fip_agent_gw_port') as vf: gf.return_value = floatingip self.mixin.disassociate_floatingips( self.ctx, port_id, do_notify=False) @@ -386,14 +379,11 @@ class L3DvrTestCase(testlib_api.SqlTestCase): 
'router_id': 'foo_router_id' } router = {'id': 'foo_router_id', 'distributed': True} - with contextlib.nested( - mock.patch.object(self.mixin, - 'get_router'), - mock.patch.object(self.mixin, - '_clear_unused_fip_agent_gw_port'), - mock.patch.object(l3_dvr_db.l3_db.L3_NAT_db_mixin, - '_update_fip_assoc'), - ) as (grtr, vf, cf): + with mock.patch.object(self.mixin, 'get_router') as grtr,\ + mock.patch.object(self.mixin, + '_clear_unused_fip_agent_gw_port') as vf,\ + mock.patch.object(l3_dvr_db.l3_db.L3_NAT_db_mixin, + '_update_fip_assoc'): grtr.return_value = router self.mixin._update_fip_assoc( self.ctx, fip, floatingip, mock.ANY) @@ -407,18 +397,15 @@ class L3DvrTestCase(testlib_api.SqlTestCase): 'network_id': 'external_net' } - with contextlib.nested( - mock.patch.object(self.mixin, - 'get_router'), - mock.patch.object(self.mixin, - '_get_vm_port_hostid'), - mock.patch.object(self.mixin, - '_clear_unused_fip_agent_gw_port'), - mock.patch.object(self.mixin, - 'create_fip_agent_gw_port_if_not_exists'), - mock.patch.object(l3_dvr_db.l3_db.L3_NAT_db_mixin, - '_update_fip_assoc'), - ) as (grtr, vmp, d_fip, c_fip, up_fip): + with mock.patch.object(self.mixin, 'get_router') as grtr,\ + mock.patch.object(self.mixin, '_get_vm_port_hostid') as vmp,\ + mock.patch.object(self.mixin, + '_clear_unused_fip_agent_gw_port') as d_fip,\ + mock.patch.object( + self.mixin, + 'create_fip_agent_gw_port_if_not_exists') as c_fip,\ + mock.patch.object(l3_dvr_db.l3_db.L3_NAT_db_mixin, + '_update_fip_assoc'): grtr.return_value = router_db vmp.return_value = 'my-host' self.mixin._update_fip_assoc( @@ -502,23 +489,19 @@ class L3DvrTestCase(testlib_api.SqlTestCase): return_value=False) plugin.remove_router_from_l3_agent = mock.Mock( return_value=None) - with contextlib.nested( - mock.patch.object(self.mixin, - '_get_router'), - mock.patch.object(self.mixin, - '_get_device_owner'), - mock.patch.object(self.mixin, - '_remove_interface_by_subnet'), - mock.patch.object(self.mixin, - 
'delete_csnat_router_interface_ports'), - mock.patch.object(manager.NeutronManager, - 'get_service_plugins'), - mock.patch.object(self.mixin, - '_make_router_interface_info'), - mock.patch.object(self.mixin, - 'notify_router_interface_action'), - ) as (grtr, gdev, rmintf, delintf, gplugin, - mkintf, notify): + with mock.patch.object(self.mixin, '_get_router') as grtr,\ + mock.patch.object(self.mixin, '_get_device_owner') as gdev,\ + mock.patch.object(self.mixin, + '_remove_interface_by_subnet') as rmintf,\ + mock.patch.object( + self.mixin, + 'delete_csnat_router_interface_ports') as delintf,\ + mock.patch.object(manager.NeutronManager, + 'get_service_plugins') as gplugin,\ + mock.patch.object(self.mixin, + '_make_router_interface_info') as mkintf,\ + mock.patch.object(self.mixin, + 'notify_router_interface_action') as notify: grtr.return_value = router gdev.return_value = mock.Mock() rmintf.return_value = (mock.MagicMock(), mock.MagicMock()) diff --git a/neutron/tests/unit/db/test_securitygroups_db.py b/neutron/tests/unit/db/test_securitygroups_db.py index 7f87802acbd..0626f9ca65f 100644 --- a/neutron/tests/unit/db/test_securitygroups_db.py +++ b/neutron/tests/unit/db/test_securitygroups_db.py @@ -11,7 +11,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import contextlib import mock import testtools @@ -45,11 +44,10 @@ class SecurityGroupDbMixinTestCase(testlib_api.SqlTestCase): self.mixin.create_security_group(self.ctx, secgroup) def test_delete_security_group_in_use(self): - with contextlib.nested( - mock.patch.object(self.mixin, '_get_port_security_group_bindings'), - mock.patch.object(self.mixin, '_get_security_group'), - mock.patch.object(registry, "notify"), - ) as (_, _, mock_notify): + with mock.patch.object(self.mixin, + '_get_port_security_group_bindings'),\ + mock.patch.object(self.mixin, '_get_security_group'),\ + mock.patch.object(registry, "notify") as mock_notify: mock_notify.side_effect = exceptions.CallbackFailure(Exception()) with testtools.ExpectedException( securitygroup.SecurityGroupInUse): diff --git a/neutron/tests/unit/extensions/test_external_net.py b/neutron/tests/unit/extensions/test_external_net.py index b2ccce818e3..0f68cae38f7 100644 --- a/neutron/tests/unit/extensions/test_external_net.py +++ b/neutron/tests/unit/extensions/test_external_net.py @@ -13,7 +13,6 @@ # License for the specific language governing permissions and limitations # under the License. 
-import contextlib import itertools import mock @@ -90,8 +89,7 @@ class ExtNetDBTestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase): def test_list_nets_external_pagination(self): if self._skip_native_pagination: self.skipTest("Skip test for not implemented pagination feature") - with contextlib.nested(self.network(name='net1'), - self.network(name='net3')) as (n1, n3): + with self.network(name='net1') as n1, self.network(name='net3') as n3: self._set_net_external(n1['network']['id']) self._set_net_external(n3['network']['id']) with self.network(name='net2') as n2: diff --git a/neutron/tests/unit/extensions/test_extraroute.py b/neutron/tests/unit/extensions/test_extraroute.py index 78e555f0b54..278567f4cff 100644 --- a/neutron/tests/unit/extensions/test_extraroute.py +++ b/neutron/tests/unit/extensions/test_extraroute.py @@ -13,8 +13,6 @@ # License for the specific language governing permissions and limitations # under the License. -import contextlib - from oslo_config import cfg from oslo_log import log as logging from webob import exc @@ -146,13 +144,10 @@ class ExtraRouteDBTestCaseBase(object): 'nexthop': '10.0.0.3'}] routes2 = [{'destination': '12.0.0.0/8', 'nexthop': '10.0.0.4'}] - with contextlib.nested( - self.router(), - self.router(), - self.subnet(cidr='10.0.0.0/24')) as (r1, r2, s): - with contextlib.nested( - self.port(subnet=s), - self.port(subnet=s)) as (p1, p2): + with self.router() as r1,\ + self.router() as r2,\ + self.subnet(cidr='10.0.0.0/24') as s: + with self.port(subnet=s) as p1, self.port(subnet=s) as p2: body = self._routes_update_prepare(r1['router']['id'], None, p1['port']['id'], routes1) @@ -427,27 +422,24 @@ class ExtraRouteDBTestCaseBase(object): self.assertIsNone(gw_info) def test_router_list_with_sort(self): - with contextlib.nested(self.router(name='router1'), - self.router(name='router2'), - self.router(name='router3') - ) as (router1, router2, router3): + with self.router(name='router1') as router1,\ + 
self.router(name='router2') as router2,\ + self.router(name='router3') as router3: self._test_list_with_sort('router', (router3, router2, router1), [('name', 'desc')]) def test_router_list_with_pagination(self): - with contextlib.nested(self.router(name='router1'), - self.router(name='router2'), - self.router(name='router3') - ) as (router1, router2, router3): + with self.router(name='router1') as router1,\ + self.router(name='router2') as router2,\ + self.router(name='router3') as router3: self._test_list_with_pagination('router', (router1, router2, router3), ('name', 'asc'), 2, 2) def test_router_list_with_pagination_reverse(self): - with contextlib.nested(self.router(name='router1'), - self.router(name='router2'), - self.router(name='router3') - ) as (router1, router2, router3): + with self.router(name='router1') as router1,\ + self.router(name='router2') as router2,\ + self.router(name='router3') as router3: self._test_list_with_pagination_reverse('router', (router1, router2, router3), diff --git a/neutron/tests/unit/extensions/test_l3.py b/neutron/tests/unit/extensions/test_l3.py index 2e67d40f037..2392adc03bb 100644 --- a/neutron/tests/unit/extensions/test_l3.py +++ b/neutron/tests/unit/extensions/test_l3.py @@ -620,11 +620,10 @@ class L3NatTestCaseBase(L3NatTestCaseMixin): def test_router_create_with_gwinfo_ext_ip_subnet(self): with self.network() as n: - with contextlib.nested( - self.subnet(network=n), - self.subnet(network=n, cidr='1.0.0.0/24'), - self.subnet(network=n, cidr='2.0.0.0/24'), - ) as subnets: + with self.subnet(network=n) as v1,\ + self.subnet(network=n, cidr='1.0.0.0/24') as v2,\ + self.subnet(network=n, cidr='2.0.0.0/24') as v3: + subnets = (v1, v2, v3) self._set_net_external(n['network']['id']) for s in subnets: ext_info = { @@ -658,16 +657,13 @@ class L3NatTestCaseBase(L3NatTestCaseMixin): self.assertEqual(res.status_int, exc.HTTPForbidden.code) def test_router_list(self): - with contextlib.nested(self.router(), - self.router(), - 
self.router() - ) as routers: + with self.router() as v1, self.router() as v2, self.router() as v3: + routers = (v1, v2, v3) self._test_list_resources('router', routers) def test_router_list_with_parameters(self): - with contextlib.nested(self.router(name='router1'), - self.router(name='router2'), - ) as (router1, router2): + with self.router(name='router1') as router1,\ + self.router(name='router2') as router2: query_params = 'name=router1' self._test_list_resources('router', [router1], query_params=query_params) @@ -679,27 +675,24 @@ class L3NatTestCaseBase(L3NatTestCaseMixin): query_params=query_params) def test_router_list_with_sort(self): - with contextlib.nested(self.router(name='router1'), - self.router(name='router2'), - self.router(name='router3') - ) as (router1, router2, router3): + with self.router(name='router1') as router1,\ + self.router(name='router2') as router2,\ + self.router(name='router3') as router3: self._test_list_with_sort('router', (router3, router2, router1), [('name', 'desc')]) def test_router_list_with_pagination(self): - with contextlib.nested(self.router(name='router1'), - self.router(name='router2'), - self.router(name='router3') - ) as (router1, router2, router3): + with self.router(name='router1') as router1,\ + self.router(name='router2') as router2,\ + self.router(name='router3') as router3: self._test_list_with_pagination('router', (router1, router2, router3), ('name', 'asc'), 2, 2) def test_router_list_with_pagination_reverse(self): - with contextlib.nested(self.router(name='router1'), - self.router(name='router2'), - self.router(name='router3') - ) as (router1, router2, router3): + with self.router(name='router1') as router1,\ + self.router(name='router2') as router2,\ + self.router(name='router3') as router3: self._test_list_with_pagination_reverse('router', (router1, router2, router3), @@ -767,11 +760,9 @@ class L3NatTestCaseBase(L3NatTestCaseMixin): expected_code=exc.HTTPBadRequest.code) def 
test_router_update_gateway_with_invalid_external_subnet(self): - with contextlib.nested( - self.subnet(), - self.subnet(cidr='1.0.0.0/24'), - self.router() - ) as (s1, s2, r): + with self.subnet() as s1,\ + self.subnet(cidr='1.0.0.0/24') as s2,\ + self.router() as r: self._set_net_external(s1['subnet']['network_id']) self._add_external_gateway_to_router( r['router']['id'], @@ -782,11 +773,9 @@ class L3NatTestCaseBase(L3NatTestCaseMixin): def test_router_update_gateway_with_different_external_subnet(self): with self.network() as n: - with contextlib.nested( - self.subnet(network=n), - self.subnet(network=n, cidr='1.0.0.0/24'), - self.router() - ) as (s1, s2, r): + with self.subnet(network=n) as s1,\ + self.subnet(network=n, cidr='1.0.0.0/24') as s2,\ + self.router() as r: self._set_net_external(n['network']['id']) res1 = self._add_external_gateway_to_router( r['router']['id'], @@ -998,7 +987,7 @@ class L3NatTestCaseBase(L3NatTestCaseMixin): 'address_mode': stateless}] for uc in use_cases: fake_notifier.reset() - with contextlib.nested(self.router(), self.network()) as (r, n): + with self.router() as r, self.network() as n: with self.subnet(network=n, cidr='fd00::1/64', gateway_ip='fd00::1', ip_version=6, ipv6_ra_mode=uc['ra_mode'], @@ -1106,7 +1095,7 @@ class L3NatTestCaseBase(L3NatTestCaseMixin): 'ra_mode': None, 'address_mode': l3_constants.DHCPV6_STATELESS}] for uc in use_cases: - with contextlib.nested(self.router(), self.network()) as (r, n): + with self.router() as r, self.network() as n: with self.subnet(network=n, cidr='fd00::1/64', gateway_ip='fd00::1', ip_version=6, ipv6_ra_mode=uc['ra_mode'], @@ -1163,13 +1152,11 @@ class L3NatTestCaseBase(L3NatTestCaseMixin): def test_router_add_interface_subnet_with_port_from_other_tenant(self): tenant_id = _uuid() other_tenant_id = _uuid() - with contextlib.nested( - self.router(tenant_id=tenant_id), - self.network(tenant_id=tenant_id), - self.network(tenant_id=other_tenant_id)) as (r, n1, n2): - with 
contextlib.nested( - self.subnet(network=n1, cidr='10.0.0.0/24'), - self.subnet(network=n2, cidr='10.1.0.0/24')) as (s1, s2): + with self.router(tenant_id=tenant_id) as r,\ + self.network(tenant_id=tenant_id) as n1,\ + self.network(tenant_id=other_tenant_id) as n2: + with self.subnet(network=n1, cidr='10.0.0.0/24') as s1,\ + self.subnet(network=n2, cidr='10.1.0.0/24') as s2: body = self._router_interface_action( 'add', r['router']['id'], @@ -1594,10 +1581,9 @@ class L3NatTestCaseBase(L3NatTestCaseMixin): expected_code=exc.HTTPConflict.code) def test_router_remove_interface_callback_failure_returns_409(self): - with contextlib.nested( - self.router(), - self.subnet(), - mock.patch.object(registry, 'notify')) as (r, s, notify): + with self.router() as r,\ + self.subnet() as s,\ + mock.patch.object(registry, 'notify') as notify: errors = [ exceptions.NotificationError( 'foo_callback_id', n_exc.InUse()), @@ -1619,10 +1605,9 @@ class L3NatTestCaseBase(L3NatTestCaseMixin): exc.HTTPConflict.code) def test_router_clear_gateway_callback_failure_returns_409(self): - with contextlib.nested( - self.router(), - self.subnet(), - mock.patch.object(registry, 'notify')) as (r, s, notify): + with self.router() as r,\ + self.subnet() as s,\ + mock.patch.object(registry, 'notify') as notify: errors = [ exceptions.NotificationError( 'foo_callback_id', n_exc.InUse()), @@ -1980,11 +1965,9 @@ class L3NatTestCaseBase(L3NatTestCaseMixin): def test_floatingip_update_different_router(self): # Create subnet with different CIDRs to account for plugins which # do not support overlapping IPs - with contextlib.nested(self.subnet(cidr='10.0.0.0/24'), - self.subnet(cidr='10.0.1.0/24')) as ( - s1, s2): - with contextlib.nested(self.port(subnet=s1), - self.port(subnet=s2)) as (p1, p2): + with self.subnet(cidr='10.0.0.0/24') as s1,\ + self.subnet(cidr='10.0.1.0/24') as s2: + with self.port(subnet=s1) as p1, self.port(subnet=s2) as p2: private_sub1 = {'subnet': {'id': 
p1['port']['fixed_ips'][0]['subnet_id']}} @@ -1992,12 +1975,12 @@ class L3NatTestCaseBase(L3NatTestCaseMixin): {'id': p2['port']['fixed_ips'][0]['subnet_id']}} with self.subnet(cidr='12.0.0.0/24') as public_sub: - with contextlib.nested( + with self.floatingip_no_assoc_with_public_sub( + private_sub1, + public_sub=public_sub) as (fip1, r1),\ self.floatingip_no_assoc_with_public_sub( - private_sub1, public_sub=public_sub), - self.floatingip_no_assoc_with_public_sub( - private_sub2, public_sub=public_sub)) as ( - (fip1, r1), (fip2, r2)): + private_sub2, + public_sub=public_sub) as (fip2, r2): def assert_no_assoc(fip): body = self._show('floatingips', @@ -2160,10 +2143,9 @@ class L3NatTestCaseBase(L3NatTestCaseMixin): self.assertEqual(res.status_int, 400) def test_floatingip_list_with_sort(self): - with contextlib.nested(self.subnet(cidr="10.0.0.0/24"), - self.subnet(cidr="11.0.0.0/24"), - self.subnet(cidr="12.0.0.0/24") - ) as (s1, s2, s3): + with self.subnet(cidr="10.0.0.0/24") as s1,\ + self.subnet(cidr="11.0.0.0/24") as s2,\ + self.subnet(cidr="12.0.0.0/24") as s3: network_id1 = s1['subnet']['network_id'] network_id2 = s2['subnet']['network_id'] network_id3 = s3['subnet']['network_id'] @@ -2186,10 +2168,9 @@ class L3NatTestCaseBase(L3NatTestCaseMixin): self.assertEqual(len(res['floatingips']), 0) def test_floatingip_list_with_pagination(self): - with contextlib.nested(self.subnet(cidr="10.0.0.0/24"), - self.subnet(cidr="11.0.0.0/24"), - self.subnet(cidr="12.0.0.0/24") - ) as (s1, s2, s3): + with self.subnet(cidr="10.0.0.0/24") as s1,\ + self.subnet(cidr="11.0.0.0/24") as s2,\ + self.subnet(cidr="12.0.0.0/24") as s3: network_id1 = s1['subnet']['network_id'] network_id2 = s2['subnet']['network_id'] network_id3 = s3['subnet']['network_id'] @@ -2204,10 +2185,9 @@ class L3NatTestCaseBase(L3NatTestCaseMixin): ('floating_ip_address', 'asc'), 2, 2) def test_floatingip_list_with_pagination_reverse(self): - with contextlib.nested(self.subnet(cidr="10.0.0.0/24"), - 
self.subnet(cidr="11.0.0.0/24"), - self.subnet(cidr="12.0.0.0/24") - ) as (s1, s2, s3): + with self.subnet(cidr="10.0.0.0/24") as s1,\ + self.subnet(cidr="11.0.0.0/24") as s2,\ + self.subnet(cidr="12.0.0.0/24") as s3: network_id1 = s1['subnet']['network_id'] network_id2 = s2['subnet']['network_id'] network_id3 = s3['subnet']['network_id'] @@ -2222,21 +2202,19 @@ class L3NatTestCaseBase(L3NatTestCaseMixin): ('floating_ip_address', 'asc'), 2, 2) def test_floatingip_multi_external_one_internal(self): - with contextlib.nested(self.subnet(cidr="10.0.0.0/24"), - self.subnet(cidr="11.0.0.0/24"), - self.subnet(cidr="12.0.0.0/24") - ) as (exs1, exs2, ins1): + with self.subnet(cidr="10.0.0.0/24") as exs1,\ + self.subnet(cidr="11.0.0.0/24") as exs2,\ + self.subnet(cidr="12.0.0.0/24") as ins1: network_ex_id1 = exs1['subnet']['network_id'] network_ex_id2 = exs2['subnet']['network_id'] self._set_net_external(network_ex_id1) self._set_net_external(network_ex_id2) r2i_fixed_ips = [{'ip_address': '12.0.0.2'}] - with contextlib.nested(self.router(no_delete=True), - self.router(no_delete=True), - self.port(subnet=ins1, - fixed_ips=r2i_fixed_ips) - ) as (r1, r2, r2i_port): + with self.router(no_delete=True) as r1,\ + self.router(no_delete=True) as r2,\ + self.port(subnet=ins1, + fixed_ips=r2i_fixed_ips) as r2i_port: self._add_external_gateway_to_router( r1['router']['id'], network_ex_id1) @@ -2589,9 +2567,7 @@ class L3NatDBIntAgentSchedulingTestCase(L3BaseForIntTests, self.assertEqual(agents[0]['host'], agent_host) def test_update_gateway_agent_exists_supporting_network(self): - with contextlib.nested(self.router(), - self.subnet(), - self.subnet()) as (r, s1, s2): + with self.router() as r, self.subnet() as s1, self.subnet() as s2: self._set_net_external(s1['subnet']['network_id']) l3_rpc_cb = l3_rpc.L3RpcCallback() helpers.register_l3_agent( @@ -2616,9 +2592,7 @@ class L3NatDBIntAgentSchedulingTestCase(L3BaseForIntTests, self._assert_router_on_agent(r['router']['id'], 'host2') def 
test_update_gateway_agent_exists_supporting_multiple_network(self): - with contextlib.nested(self.router(), - self.subnet(), - self.subnet()) as (r, s1, s2): + with self.router() as r, self.subnet() as s1, self.subnet() as s2: self._set_net_external(s1['subnet']['network_id']) l3_rpc_cb = l3_rpc.L3RpcCallback() helpers.register_l3_agent( diff --git a/neutron/tests/unit/extensions/test_securitygroup.py b/neutron/tests/unit/extensions/test_securitygroup.py index e21813b354e..7aff2321d32 100644 --- a/neutron/tests/unit/extensions/test_securitygroup.py +++ b/neutron/tests/unit/extensions/test_securitygroup.py @@ -373,51 +373,36 @@ class TestSecurityGroups(SecurityGroupDBTestCase): self.assertEqual(res.status_int, webob.exc.HTTPConflict.code) def test_list_security_groups(self): - with contextlib.nested(self.security_group(name='sg1', - description='sg'), - self.security_group(name='sg2', - description='sg'), - self.security_group(name='sg3', - description='sg') - ) as security_groups: + with self.security_group(name='sg1', description='sg') as v1,\ + self.security_group(name='sg2', description='sg') as v2,\ + self.security_group(name='sg3', description='sg') as v3: + security_groups = (v1, v2, v3) self._test_list_resources('security-group', security_groups, query_params='description=sg') def test_list_security_groups_with_sort(self): - with contextlib.nested(self.security_group(name='sg1', - description='sg'), - self.security_group(name='sg2', - description='sg'), - self.security_group(name='sg3', - description='sg') - ) as (sg1, sg2, sg3): + with self.security_group(name='sg1', description='sg') as sg1,\ + self.security_group(name='sg2', description='sg') as sg2,\ + self.security_group(name='sg3', description='sg') as sg3: self._test_list_with_sort('security-group', (sg3, sg2, sg1), [('name', 'desc')], query_params='description=sg') def test_list_security_groups_with_pagination(self): - with contextlib.nested(self.security_group(name='sg1', - description='sg'), - 
self.security_group(name='sg2', - description='sg'), - self.security_group(name='sg3', - description='sg') - ) as (sg1, sg2, sg3): + with self.security_group(name='sg1', description='sg') as sg1,\ + self.security_group(name='sg2', description='sg') as sg2,\ + self.security_group(name='sg3', description='sg') as sg3: self._test_list_with_pagination('security-group', (sg1, sg2, sg3), ('name', 'asc'), 2, 2, query_params='description=sg') def test_list_security_groups_with_pagination_reverse(self): - with contextlib.nested(self.security_group(name='sg1', - description='sg'), - self.security_group(name='sg2', - description='sg'), - self.security_group(name='sg3', - description='sg') - ) as (sg1, sg2, sg3): + with self.security_group(name='sg1', description='sg') as sg1,\ + self.security_group(name='sg2', description='sg') as sg2,\ + self.security_group(name='sg3', description='sg') as sg3: self._test_list_with_pagination_reverse( 'security-group', (sg1, sg2, sg3), ('name', 'asc'), 2, 2, query_params='description=sg') @@ -1069,19 +1054,18 @@ class TestSecurityGroups(SecurityGroupDBTestCase): def test_list_security_group_rules(self): with self.security_group(name='sg') as sg: security_group_id = sg['security_group']['id'] - with contextlib.nested(self.security_group_rule(security_group_id, - direction='egress', - port_range_min=22, - port_range_max=22), - self.security_group_rule(security_group_id, - direction='egress', - port_range_min=23, - port_range_max=23), - self.security_group_rule(security_group_id, - direction='egress', - port_range_min=24, - port_range_max=24) - ) as (sgr1, sgr2, sgr3): + with self.security_group_rule(security_group_id, + direction='egress', + port_range_min=22, + port_range_max=22) as sgr1,\ + self.security_group_rule(security_group_id, + direction='egress', + port_range_min=23, + port_range_max=23) as sgr2,\ + self.security_group_rule(security_group_id, + direction='egress', + port_range_min=24, + port_range_max=24) as sgr3: # Delete default 
rules as they would fail the following # assertion at the end. @@ -1096,19 +1080,18 @@ class TestSecurityGroups(SecurityGroupDBTestCase): def test_list_security_group_rules_with_sort(self): with self.security_group(name='sg') as sg: security_group_id = sg['security_group']['id'] - with contextlib.nested(self.security_group_rule(security_group_id, - direction='egress', - port_range_min=22, - port_range_max=22), - self.security_group_rule(security_group_id, - direction='egress', - port_range_min=23, - port_range_max=23), - self.security_group_rule(security_group_id, - direction='egress', - port_range_min=24, - port_range_max=24) - ) as (sgr1, sgr2, sgr3): + with self.security_group_rule(security_group_id, + direction='egress', + port_range_min=22, + port_range_max=22) as sgr1,\ + self.security_group_rule(security_group_id, + direction='egress', + port_range_min=23, + port_range_max=23) as sgr2,\ + self.security_group_rule(security_group_id, + direction='egress', + port_range_min=24, + port_range_max=24) as sgr3: # Delete default rules as they would fail the following # assertion at the end. 
@@ -1124,19 +1107,18 @@ class TestSecurityGroups(SecurityGroupDBTestCase): def test_list_security_group_rules_with_pagination(self): with self.security_group(name='sg') as sg: security_group_id = sg['security_group']['id'] - with contextlib.nested(self.security_group_rule(security_group_id, - direction='egress', - port_range_min=22, - port_range_max=22), - self.security_group_rule(security_group_id, - direction='egress', - port_range_min=23, - port_range_max=23), - self.security_group_rule(security_group_id, - direction='egress', - port_range_min=24, - port_range_max=24) - ) as (sgr1, sgr2, sgr3): + with self.security_group_rule(security_group_id, + direction='egress', + port_range_min=22, + port_range_max=22) as sgr1,\ + self.security_group_rule(security_group_id, + direction='egress', + port_range_min=23, + port_range_max=23) as sgr2,\ + self.security_group_rule(security_group_id, + direction='egress', + port_range_min=24, + port_range_max=24) as sgr3: # Delete default rules as they would fail the following # assertion at the end. 
@@ -1152,19 +1134,18 @@ class TestSecurityGroups(SecurityGroupDBTestCase): def test_list_security_group_rules_with_pagination_reverse(self): with self.security_group(name='sg') as sg: security_group_id = sg['security_group']['id'] - with contextlib.nested(self.security_group_rule(security_group_id, - direction='egress', - port_range_min=22, - port_range_max=22), - self.security_group_rule(security_group_id, - direction='egress', - port_range_min=23, - port_range_max=23), - self.security_group_rule(security_group_id, - direction='egress', - port_range_min=24, - port_range_max=24) - ) as (sgr1, sgr2, sgr3): + with self.security_group_rule(security_group_id, + direction='egress', + port_range_min=22, + port_range_max=22) as sgr1,\ + self.security_group_rule(security_group_id, + direction='egress', + port_range_min=23, + port_range_max=23) as sgr2,\ + self.security_group_rule(security_group_id, + direction='egress', + port_range_min=24, + port_range_max=24) as sgr3: self._test_list_with_pagination_reverse( 'security-group-rule', (sgr3, sgr2, sgr1), ('port_range_max', 'desc'), 2, 2, diff --git a/neutron/tests/unit/plugins/ibm/test_sdnve_agent.py b/neutron/tests/unit/plugins/ibm/test_sdnve_agent.py index a1e017fd460..a170704c826 100644 --- a/neutron/tests/unit/plugins/ibm/test_sdnve_agent.py +++ b/neutron/tests/unit/plugins/ibm/test_sdnve_agent.py @@ -14,8 +14,6 @@ # License for the specific language governing permissions and limitations # under the License. -import contextlib - import mock from oslo_config import cfg @@ -67,13 +65,12 @@ class TestSdnveNeutronAgent(base.BaseTestCase): def start(self, interval=0): self.f() - with contextlib.nested( - mock.patch('neutron.plugins.ibm.agent.sdnve_neutron_agent.' - 'SdnveNeutronAgent.setup_integration_br', - return_value=mock.Mock()), - mock.patch('neutron.openstack.common.loopingcall.' - 'FixedIntervalLoopingCall', - new=MockFixedIntervalLoopingCall)): + with mock.patch('neutron.plugins.ibm.agent.sdnve_neutron_agent.' 
+ 'SdnveNeutronAgent.setup_integration_br', + return_value=mock.Mock()),\ + mock.patch('neutron.openstack.common.loopingcall.' + 'FixedIntervalLoopingCall', + new=MockFixedIntervalLoopingCall): self.agent = sdnve_neutron_agent.SdnveNeutronAgent(**kwargs) def test_setup_physical_interfaces(self): diff --git a/neutron/tests/unit/plugins/ibm/test_sdnve_plugin.py b/neutron/tests/unit/plugins/ibm/test_sdnve_plugin.py index 525468f157e..ff79eafffbe 100644 --- a/neutron/tests/unit/plugins/ibm/test_sdnve_plugin.py +++ b/neutron/tests/unit/plugins/ibm/test_sdnve_plugin.py @@ -15,7 +15,6 @@ # under the License. -import contextlib import mock from neutron.extensions import portbindings @@ -71,13 +70,10 @@ class MockKeystoneClient(object): class IBMPluginV2TestCase(test_plugin.NeutronDbPluginV2TestCase): def setUp(self): - with contextlib.nested( - mock.patch('neutron.plugins.ibm.sdnve_api.' - 'KeystoneClient', - new=MockKeystoneClient), - mock.patch('neutron.plugins.ibm.sdnve_api.' - 'Client', - new=MockClient)): + with mock.patch('neutron.plugins.ibm.sdnve_api.' 'KeystoneClient', + new=MockKeystoneClient),\ + mock.patch('neutron.plugins.ibm.sdnve_api.' 'Client', + new=MockClient): super(IBMPluginV2TestCase, self).setUp(plugin=_plugin_name) @@ -114,13 +110,10 @@ class TestIBMPortBinding(IBMPluginV2TestCase, class IBMPluginRouterTestCase(test_l3.L3NatDBIntTestCase): def setUp(self): - with contextlib.nested( - mock.patch('neutron.plugins.ibm.sdnve_api.' - 'KeystoneClient', - new=MockKeystoneClient), - mock.patch('neutron.plugins.ibm.sdnve_api.' - 'Client', - new=MockClient)): + with mock.patch('neutron.plugins.ibm.sdnve_api.' 'KeystoneClient', + new=MockKeystoneClient),\ + mock.patch('neutron.plugins.ibm.sdnve_api.' 
'Client', + new=MockClient): super(IBMPluginRouterTestCase, self).setUp(plugin=_plugin_name) def test_floating_port_status_not_applicable(self): diff --git a/neutron/tests/unit/plugins/linuxbridge/agent/test_linuxbridge_neutron_agent.py b/neutron/tests/unit/plugins/linuxbridge/agent/test_linuxbridge_neutron_agent.py index 192637f28e1..11e923fd3c0 100644 --- a/neutron/tests/unit/plugins/linuxbridge/agent/test_linuxbridge_neutron_agent.py +++ b/neutron/tests/unit/plugins/linuxbridge/agent/test_linuxbridge_neutron_agent.py @@ -12,7 +12,6 @@ # License for the specific language governing permissions and limitations # under the License. -import contextlib import os import mock @@ -106,10 +105,10 @@ class TestLinuxBridgeAgent(base.BaseTestCase): def test_treat_devices_removed_with_existed_device(self): agent = self.agent devices = [DEVICE_1] - with contextlib.nested( - mock.patch.object(agent.plugin_rpc, "update_device_down"), - mock.patch.object(agent.sg_agent, "remove_devices_filter") - ) as (fn_udd, fn_rdf): + with mock.patch.object(agent.plugin_rpc, + "update_device_down") as fn_udd,\ + mock.patch.object(agent.sg_agent, + "remove_devices_filter") as fn_rdf: fn_udd.return_value = {'device': DEVICE_1, 'exists': True} with mock.patch.object(linuxbridge_neutron_agent.LOG, @@ -123,10 +122,10 @@ class TestLinuxBridgeAgent(base.BaseTestCase): def test_treat_devices_removed_with_not_existed_device(self): agent = self.agent devices = [DEVICE_1] - with contextlib.nested( - mock.patch.object(agent.plugin_rpc, "update_device_down"), - mock.patch.object(agent.sg_agent, "remove_devices_filter") - ) as (fn_udd, fn_rdf): + with mock.patch.object(agent.plugin_rpc, + "update_device_down") as fn_udd,\ + mock.patch.object(agent.sg_agent, + "remove_devices_filter") as fn_rdf: fn_udd.return_value = {'device': DEVICE_1, 'exists': False} with mock.patch.object(linuxbridge_neutron_agent.LOG, @@ -140,10 +139,10 @@ class TestLinuxBridgeAgent(base.BaseTestCase): def 
test_treat_devices_removed_failed(self): agent = self.agent devices = [DEVICE_1] - with contextlib.nested( - mock.patch.object(agent.plugin_rpc, "update_device_down"), - mock.patch.object(agent.sg_agent, "remove_devices_filter") - ) as (fn_udd, fn_rdf): + with mock.patch.object(agent.plugin_rpc, + "update_device_down") as fn_udd,\ + mock.patch.object(agent.sg_agent, + "remove_devices_filter") as fn_rdf: fn_udd.side_effect = Exception() with mock.patch.object(linuxbridge_neutron_agent.LOG, 'debug') as log: @@ -387,11 +386,9 @@ class TestLinuxBridgeManager(base.BaseTestCase): self.assertTrue(listdir_fn.called) def test_get_interfaces_on_bridge(self): - with contextlib.nested( - mock.patch.object(utils, 'execute'), - mock.patch.object(os, 'listdir'), - mock.patch.object(ip_lib, 'device_exists', return_value=True) - ) as (exec_fn, listdir_fn, dev_exists_fn): + with mock.patch.object(utils, 'execute'),\ + mock.patch.object(os, 'listdir') as listdir_fn,\ + mock.patch.object(ip_lib, 'device_exists', return_value=True): listdir_fn.return_value = ["qbr1"] self.assertEqual(self.lbm.get_interfaces_on_bridge("br0"), ["qbr1"]) @@ -408,10 +405,8 @@ class TestLinuxBridgeManager(base.BaseTestCase): self.assertEqual(self.lbm.get_tap_devices_count('br0'), 0) def test_get_interface_by_ip(self): - with contextlib.nested( - mock.patch.object(ip_lib.IPWrapper, 'get_devices'), - mock.patch.object(ip_lib.IpAddrCommand, 'list') - ) as (get_dev_fn, ip_list_fn): + with mock.patch.object(ip_lib.IPWrapper, 'get_devices') as get_dev_fn,\ + mock.patch.object(ip_lib.IpAddrCommand, 'list') as ip_list_fn: device = mock.Mock() device.name = 'dev_name' get_dev_fn.return_value = [device] @@ -420,10 +415,10 @@ class TestLinuxBridgeManager(base.BaseTestCase): 'dev_name') def test_get_bridge_for_tap_device(self): - with contextlib.nested( - mock.patch.object(self.lbm, "get_all_neutron_bridges"), - mock.patch.object(self.lbm, "get_interfaces_on_bridge") - ) as (get_all_qbr_fn, get_if_fn): + with 
mock.patch.object(self.lbm, + "get_all_neutron_bridges") as get_all_qbr_fn,\ + mock.patch.object(self.lbm, + "get_interfaces_on_bridge") as get_if_fn: get_all_qbr_fn.return_value = ["br-int", "br-ex"] get_if_fn.return_value = ["tap1", "tap2", "tap3"] self.assertEqual(self.lbm.get_bridge_for_tap_device("tap1"), @@ -440,10 +435,9 @@ class TestLinuxBridgeManager(base.BaseTestCase): ) def test_get_interface_details(self): - with contextlib.nested( - mock.patch.object(ip_lib.IpAddrCommand, 'list'), - mock.patch.object(ip_lib.IpRouteCommand, 'get_gateway') - ) as (list_fn, getgw_fn): + with mock.patch.object(ip_lib.IpAddrCommand, 'list') as list_fn,\ + mock.patch.object(ip_lib.IpRouteCommand, + 'get_gateway') as getgw_fn: gwdict = dict(gateway='1.1.1.1') getgw_fn.return_value = gwdict ipdict = dict(cidr='1.1.1.1/24', @@ -459,10 +453,9 @@ class TestLinuxBridgeManager(base.BaseTestCase): self.assertEqual(ret, (ipdict, gwdict)) def test_ensure_flat_bridge(self): - with contextlib.nested( - mock.patch.object(ip_lib.IpAddrCommand, 'list'), - mock.patch.object(ip_lib.IpRouteCommand, 'get_gateway') - ) as (list_fn, getgw_fn): + with mock.patch.object(ip_lib.IpAddrCommand, 'list') as list_fn,\ + mock.patch.object(ip_lib.IpRouteCommand, + 'get_gateway') as getgw_fn: gwdict = dict(gateway='1.1.1.1') getgw_fn.return_value = gwdict ipdict = dict(cidr='1.1.1.1/24', @@ -482,11 +475,10 @@ class TestLinuxBridgeManager(base.BaseTestCase): ipdict, gwdict) def test_ensure_vlan_bridge(self): - with contextlib.nested( - mock.patch.object(self.lbm, 'ensure_vlan'), - mock.patch.object(self.lbm, 'ensure_bridge'), - mock.patch.object(self.lbm, 'get_interface_details'), - ) as (ens_vl_fn, ens, get_int_det_fn): + with mock.patch.object(self.lbm, 'ensure_vlan') as ens_vl_fn,\ + mock.patch.object(self.lbm, 'ensure_bridge') as ens,\ + mock.patch.object(self.lbm, + 'get_interface_details') as get_int_det_fn: ens_vl_fn.return_value = "eth0.1" get_int_det_fn.return_value = (None, None) 
self.assertEqual(self.lbm.ensure_vlan_bridge("123", "eth0", "1"), @@ -549,19 +541,17 @@ class TestLinuxBridgeManager(base.BaseTestCase): scope='global', ip_version=4, dynamic=False) - with contextlib.nested( - mock.patch.object(ip_lib.IpAddrCommand, 'add'), - mock.patch.object(ip_lib.IpAddrCommand, 'delete') - ) as (add_fn, del_fn): + with mock.patch.object(ip_lib.IpAddrCommand, 'add') as add_fn,\ + mock.patch.object(ip_lib.IpAddrCommand, 'delete') as del_fn: self.lbm.update_interface_ip_details("br0", "eth0", [ipdict], None) self.assertTrue(add_fn.called) self.assertTrue(del_fn.called) - with contextlib.nested( - mock.patch.object(ip_lib.IpRouteCommand, 'add_gateway'), - mock.patch.object(ip_lib.IpRouteCommand, 'delete_gateway') - ) as (addgw_fn, delgw_fn): + with mock.patch.object(ip_lib.IpRouteCommand, + 'add_gateway') as addgw_fn,\ + mock.patch.object(ip_lib.IpRouteCommand, + 'delete_gateway') as delgw_fn: self.lbm.update_interface_ip_details("br0", "eth0", None, gwdict) self.assertTrue(addgw_fn.called) @@ -578,14 +568,16 @@ class TestLinuxBridgeManager(base.BaseTestCase): self.assertFalse(self.lbm._bridge_exists_and_ensure_up("br0")) def test_ensure_bridge(self): - with contextlib.nested( - mock.patch.object(self.lbm, '_bridge_exists_and_ensure_up'), - mock.patch.object(utils, 'execute'), - mock.patch.object(self.lbm, 'update_interface_ip_details'), - mock.patch.object(self.lbm, 'interface_exists_on_bridge'), - mock.patch.object(self.lbm, 'is_device_on_bridge'), - mock.patch.object(self.lbm, 'get_bridge_for_tap_device'), - ) as (de_fn, exec_fn, upd_fn, ie_fn, if_br_fn, get_if_br_fn): + with mock.patch.object(self.lbm, + '_bridge_exists_and_ensure_up') as de_fn,\ + mock.patch.object(utils, 'execute') as exec_fn,\ + mock.patch.object(self.lbm, + 'update_interface_ip_details') as upd_fn,\ + mock.patch.object(self.lbm, + 'interface_exists_on_bridge') as ie_fn,\ + mock.patch.object(self.lbm, 'is_device_on_bridge'),\ + mock.patch.object(self.lbm, + 
'get_bridge_for_tap_device') as get_if_br_fn: de_fn.return_value = False exec_fn.return_value = False self.assertEqual(self.lbm.ensure_bridge("br0", None), "br0") @@ -652,11 +644,10 @@ class TestLinuxBridgeManager(base.BaseTestCase): ) de_fn.return_value = True - with contextlib.nested( - mock.patch.object(self.lbm, "ensure_local_bridge"), - mock.patch.object(utils, "execute"), - mock.patch.object(self.lbm, "get_bridge_for_tap_device") - ) as (en_fn, exec_fn, get_br): + with mock.patch.object(self.lbm, "ensure_local_bridge") as en_fn,\ + mock.patch.object(utils, "execute") as exec_fn,\ + mock.patch.object(self.lbm, + "get_bridge_for_tap_device") as get_br: exec_fn.return_value = False get_br.return_value = True self.assertTrue(self.lbm.add_tap_interface("123", @@ -672,11 +663,12 @@ class TestLinuxBridgeManager(base.BaseTestCase): "physnet1", None, "tap1")) - with contextlib.nested( - mock.patch.object(self.lbm, "ensure_physical_in_bridge"), - mock.patch.object(self.lbm, "ensure_tap_mtu"), - mock.patch.object(self.lbm, "get_bridge_for_tap_device") - ) as (ens_fn, en_mtu_fn, get_br): + with mock.patch.object(self.lbm, + "ensure_physical_in_bridge") as ens_fn,\ + mock.patch.object(self.lbm, + "ensure_tap_mtu") as en_mtu_fn,\ + mock.patch.object(self.lbm, + "get_bridge_for_tap_device") as get_br: ens_fn.return_value = False self.assertFalse(self.lbm.add_tap_interface("123", p_const.TYPE_VLAN, @@ -697,16 +689,16 @@ class TestLinuxBridgeManager(base.BaseTestCase): "1", "tap234") def test_delete_vlan_bridge(self): - with contextlib.nested( - mock.patch.object(ip_lib, "device_exists"), - mock.patch.object(self.lbm, "get_interfaces_on_bridge"), - mock.patch.object(self.lbm, "remove_interface"), - mock.patch.object(self.lbm, "get_interface_details"), - mock.patch.object(self.lbm, "update_interface_ip_details"), - mock.patch.object(self.lbm, "delete_vxlan"), - mock.patch.object(utils, "execute") - ) as (de_fn, getif_fn, remif_fn, if_det_fn, - updif_fn, del_vxlan, exec_fn): + 
with mock.patch.object(ip_lib, "device_exists") as de_fn,\ + mock.patch.object(self.lbm, + "get_interfaces_on_bridge") as getif_fn,\ + mock.patch.object(self.lbm, "remove_interface"),\ + mock.patch.object(self.lbm, + "get_interface_details") as if_det_fn,\ + mock.patch.object(self.lbm, + "update_interface_ip_details") as updif_fn,\ + mock.patch.object(self.lbm, "delete_vxlan") as del_vxlan,\ + mock.patch.object(utils, "execute") as exec_fn: de_fn.return_value = False self.lbm.delete_vlan_bridge("br0") self.assertFalse(getif_fn.called) @@ -720,16 +712,16 @@ class TestLinuxBridgeManager(base.BaseTestCase): del_vxlan.assert_called_with("vxlan-1002") def test_delete_vlan_bridge_with_ip(self): - with contextlib.nested( - mock.patch.object(ip_lib, "device_exists"), - mock.patch.object(self.lbm, "get_interfaces_on_bridge"), - mock.patch.object(self.lbm, "remove_interface"), - mock.patch.object(self.lbm, "get_interface_details"), - mock.patch.object(self.lbm, "update_interface_ip_details"), - mock.patch.object(self.lbm, "delete_vlan"), - mock.patch.object(utils, "execute") - ) as (de_fn, getif_fn, remif_fn, if_det_fn, - updif_fn, del_vlan, exec_fn): + with mock.patch.object(ip_lib, "device_exists") as de_fn,\ + mock.patch.object(self.lbm, + "get_interfaces_on_bridge") as getif_fn,\ + mock.patch.object(self.lbm, "remove_interface"),\ + mock.patch.object(self.lbm, + "get_interface_details") as if_det_fn,\ + mock.patch.object(self.lbm, + "update_interface_ip_details") as updif_fn,\ + mock.patch.object(self.lbm, "delete_vlan") as del_vlan,\ + mock.patch.object(utils, "execute") as exec_fn: de_fn.return_value = True getif_fn.return_value = ["eth0", "eth1.1"] if_det_fn.return_value = ("ips", "gateway") @@ -739,16 +731,16 @@ class TestLinuxBridgeManager(base.BaseTestCase): self.assertFalse(del_vlan.called) def test_delete_vlan_bridge_no_ip(self): - with contextlib.nested( - mock.patch.object(ip_lib, "device_exists"), - mock.patch.object(self.lbm, "get_interfaces_on_bridge"), - 
mock.patch.object(self.lbm, "remove_interface"), - mock.patch.object(self.lbm, "get_interface_details"), - mock.patch.object(self.lbm, "update_interface_ip_details"), - mock.patch.object(self.lbm, "delete_vlan"), - mock.patch.object(utils, "execute") - ) as (de_fn, getif_fn, remif_fn, if_det_fn, - updif_fn, del_vlan, exec_fn): + with mock.patch.object(ip_lib, "device_exists") as de_fn,\ + mock.patch.object(self.lbm, + "get_interfaces_on_bridge") as getif_fn,\ + mock.patch.object(self.lbm, "remove_interface"),\ + mock.patch.object(self.lbm, + "get_interface_details") as if_det_fn,\ + mock.patch.object(self.lbm, + "update_interface_ip_details") as updif_fn,\ + mock.patch.object(self.lbm, "delete_vlan") as del_vlan,\ + mock.patch.object(utils, "execute") as exec_fn: de_fn.return_value = True getif_fn.return_value = ["eth0", "eth1.1"] exec_fn.return_value = False @@ -764,13 +756,12 @@ class TestLinuxBridgeManager(base.BaseTestCase): lbm = linuxbridge_neutron_agent.LinuxBridgeManager( interface_mappings) - with contextlib.nested( - mock.patch.object(ip_lib, "device_exists"), - mock.patch.object(lbm, "get_interfaces_on_bridge"), - mock.patch.object(lbm, "remove_interface"), - mock.patch.object(lbm, "delete_vxlan"), - mock.patch.object(utils, "execute") - ) as (de_fn, getif_fn, remif_fn, del_vxlan, exec_fn): + with mock.patch.object(ip_lib, "device_exists") as de_fn,\ + mock.patch.object(lbm, + "get_interfaces_on_bridge") as getif_fn,\ + mock.patch.object(lbm, "remove_interface"),\ + mock.patch.object(lbm, "delete_vxlan") as del_vxlan,\ + mock.patch.object(utils, "execute") as exec_fn: de_fn.return_value = False lbm.delete_vlan_bridge("br0") self.assertFalse(getif_fn.called) @@ -787,20 +778,18 @@ class TestLinuxBridgeManager(base.BaseTestCase): def tap_count_side_effect(*args): return 0 if args[0] == 'brqnet1' else 1 - with contextlib.nested( - mock.patch.object(self.lbm, "delete_vlan_bridge"), - mock.patch.object(self.lbm, "get_tap_devices_count", - 
side_effect=tap_count_side_effect), - ) as (del_br_fn, count_tap_fn): + with mock.patch.object(self.lbm, "delete_vlan_bridge") as del_br_fn,\ + mock.patch.object(self.lbm, + "get_tap_devices_count", + side_effect=tap_count_side_effect): self.lbm.remove_empty_bridges() del_br_fn.assert_called_once_with('brqnet1') def test_remove_interface(self): - with contextlib.nested( - mock.patch.object(ip_lib, "device_exists"), - mock.patch.object(self.lbm, "is_device_on_bridge"), - mock.patch.object(utils, "execute") - ) as (de_fn, isdev_fn, exec_fn): + with mock.patch.object(ip_lib, "device_exists") as de_fn,\ + mock.patch.object(self.lbm, + "is_device_on_bridge") as isdev_fn,\ + mock.patch.object(utils, "execute") as exec_fn: de_fn.return_value = False self.assertFalse(self.lbm.remove_interface("br0", "eth0")) self.assertFalse(isdev_fn.called) @@ -817,10 +806,8 @@ class TestLinuxBridgeManager(base.BaseTestCase): self.assertTrue(self.lbm.remove_interface("br0", "eth0")) def test_delete_vlan(self): - with contextlib.nested( - mock.patch.object(ip_lib, "device_exists"), - mock.patch.object(utils, "execute") - ) as (de_fn, exec_fn): + with mock.patch.object(ip_lib, "device_exists") as de_fn,\ + mock.patch.object(utils, "execute") as exec_fn: de_fn.return_value = False self.lbm.delete_vlan("eth1.1") self.assertFalse(exec_fn.called) @@ -832,11 +819,12 @@ class TestLinuxBridgeManager(base.BaseTestCase): def _check_vxlan_support(self, expected, vxlan_ucast_supported, vxlan_mcast_supported): - with contextlib.nested( - mock.patch.object(self.lbm, 'vxlan_ucast_supported', - return_value=vxlan_ucast_supported), - mock.patch.object(self.lbm, 'vxlan_mcast_supported', - return_value=vxlan_mcast_supported)): + with mock.patch.object(self.lbm, + 'vxlan_ucast_supported', + return_value=vxlan_ucast_supported),\ + mock.patch.object(self.lbm, + 'vxlan_mcast_supported', + return_value=vxlan_mcast_supported): if expected == lconst.VXLAN_NONE: self.assertRaises(exceptions.VxlanNetworkUnsupported, 
self.lbm.check_vxlan_support) @@ -863,17 +851,20 @@ class TestLinuxBridgeManager(base.BaseTestCase): def _check_vxlan_ucast_supported( self, expected, l2_population, iproute_arg_supported, fdb_append): cfg.CONF.set_override('l2_population', l2_population, 'VXLAN') - with contextlib.nested( + with mock.patch.object(ip_lib, 'device_exists', return_value=False),\ + mock.patch.object(self.lbm, + 'delete_vxlan', + return_value=None),\ + mock.patch.object(self.lbm, + 'ensure_vxlan', + return_value=None),\ mock.patch.object( - ip_lib, 'device_exists', return_value=False), - mock.patch.object(self.lbm, 'delete_vxlan', return_value=None), - mock.patch.object(self.lbm, 'ensure_vxlan', return_value=None), - mock.patch.object( - utils, 'execute', - side_effect=None if fdb_append else RuntimeError()), - mock.patch.object( - ip_lib, 'iproute_arg_supported', - return_value=iproute_arg_supported)): + utils, + 'execute', + side_effect=None if fdb_append else RuntimeError()),\ + mock.patch.object(ip_lib, + 'iproute_arg_supported', + return_value=iproute_arg_supported): self.assertEqual(expected, self.lbm.vxlan_ucast_supported()) def test_vxlan_ucast_supported(self): @@ -940,10 +931,10 @@ class TestLinuxBridgeRpcCallbacks(base.BaseTestCase): ) def test_network_delete(self): - with contextlib.nested( - mock.patch.object(self.lb_rpc.agent.br_mgr, "get_bridge_name"), - mock.patch.object(self.lb_rpc.agent.br_mgr, "delete_vlan_bridge") - ) as (get_br_fn, del_fn): + with mock.patch.object(self.lb_rpc.agent.br_mgr, + "get_bridge_name") as get_br_fn,\ + mock.patch.object(self.lb_rpc.agent.br_mgr, + "delete_vlan_bridge") as del_fn: get_br_fn.return_value = "br0" self.lb_rpc.network_delete("anycontext", network_id="123") get_br_fn.assert_called_with("123") diff --git a/neutron/tests/unit/plugins/ml2/drivers/base_type_tunnel.py b/neutron/tests/unit/plugins/ml2/drivers/base_type_tunnel.py index cd5469fd001..0cc93f02e94 100644 --- a/neutron/tests/unit/plugins/ml2/drivers/base_type_tunnel.py +++ 
b/neutron/tests/unit/plugins/ml2/drivers/base_type_tunnel.py @@ -13,7 +13,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -import contextlib import mock from six import moves import testtools @@ -237,10 +236,10 @@ class TunnelRpcCallbackTestMixin(object): self.driver = self.DRIVER_CLASS() def _test_tunnel_sync(self, kwargs, delete_tunnel=False): - with contextlib.nested( - mock.patch.object(self.notifier, 'tunnel_update'), - mock.patch.object(self.notifier, 'tunnel_delete') - ) as (tunnel_update, tunnel_delete): + with mock.patch.object(self.notifier, + 'tunnel_update') as tunnel_update,\ + mock.patch.object(self.notifier, + 'tunnel_delete') as tunnel_delete: details = self.callbacks.tunnel_sync('fake_context', **kwargs) tunnels = details['tunnels'] for tunnel in tunnels: @@ -253,10 +252,10 @@ class TunnelRpcCallbackTestMixin(object): self.assertFalse(tunnel_delete.called) def _test_tunnel_sync_raises(self, kwargs): - with contextlib.nested( - mock.patch.object(self.notifier, 'tunnel_update'), - mock.patch.object(self.notifier, 'tunnel_delete') - ) as (tunnel_update, tunnel_delete): + with mock.patch.object(self.notifier, + 'tunnel_update') as tunnel_update,\ + mock.patch.object(self.notifier, + 'tunnel_delete') as tunnel_delete: self.assertRaises(exc.InvalidInput, self.callbacks.tunnel_sync, 'fake_context', **kwargs) diff --git a/neutron/tests/unit/plugins/ml2/drivers/l2pop/test_mech_driver.py b/neutron/tests/unit/plugins/ml2/drivers/l2pop/test_mech_driver.py index 22d661f5cf1..9275689fe48 100644 --- a/neutron/tests/unit/plugins/ml2/drivers/l2pop/test_mech_driver.py +++ b/neutron/tests/unit/plugins/ml2/drivers/l2pop/test_mech_driver.py @@ -13,7 +13,6 @@ # License for the specific language governing permissions and limitations # under the License. 
-import contextlib import testtools import mock @@ -791,13 +790,11 @@ class TestL2PopulationRpcTestCase(test_plugin.Ml2PluginV2TestCase): l2pop_mech = l2pop_mech_driver.L2populationMechanismDriver() l2pop_mech.L2PopulationAgentNotify = mock.Mock() l2pop_mech.rpc_ctx = mock.Mock() - with contextlib.nested( - mock.patch.object(l2pop_mech, - '_update_port_down', - return_value=None), + with mock.patch.object(l2pop_mech, + '_update_port_down', + return_value=None) as upd_port_down,\ mock.patch.object(l2pop_mech.L2PopulationAgentNotify, - 'remove_fdb_entries')) as (upd_port_down, - rem_fdb_entries): + 'remove_fdb_entries'): l2pop_mech.delete_port_postcommit(mock.Mock()) self.assertTrue(upd_port_down.called) @@ -836,16 +833,15 @@ class TestL2PopulationMechDriver(base.BaseTestCase): def agent_ip_side_effect(agent): return agent_ips[agent] - with contextlib.nested( - mock.patch.object(l2pop_db.L2populationDbMixin, - 'get_agent_ip', - side_effect=agent_ip_side_effect), + with mock.patch.object(l2pop_db.L2populationDbMixin, + 'get_agent_ip', + side_effect=agent_ip_side_effect),\ mock.patch.object(l2pop_db.L2populationDbMixin, 'get_nondvr_active_network_ports', - new=fdb_network_ports_query), + new=fdb_network_ports_query),\ mock.patch.object(l2pop_db.L2populationDbMixin, 'get_dvr_active_network_ports', - new=tunnel_network_ports_query)): + new=tunnel_network_ports_query): session = mock.Mock() agent = mock.Mock() agent.host = HOST diff --git a/neutron/tests/unit/plugins/ml2/test_extension_driver_api.py b/neutron/tests/unit/plugins/ml2/test_extension_driver_api.py index 92e4957e99a..bff70fecb58 100644 --- a/neutron/tests/unit/plugins/ml2/test_extension_driver_api.py +++ b/neutron/tests/unit/plugins/ml2/test_extension_driver_api.py @@ -10,7 +10,6 @@ # License for the specific language governing permissions and limitations # under the License. 
-import contextlib import mock from neutron import context @@ -86,13 +85,11 @@ class ExtensionDriverTestCase(test_plugin.Ml2PluginV2TestCase): self.assertEqual('Test_Port_Extension_Update_update', val) def test_extend_network_dict(self): - with contextlib.nested( - mock.patch.object(ext_test.TestExtensionDriver, - 'process_update_network'), - mock.patch.object(ext_test.TestExtensionDriver, - 'extend_network_dict'), - self.network() - ) as (ext_update_net, ext_net_dict, network): + with mock.patch.object(ext_test.TestExtensionDriver, + 'process_update_network') as ext_update_net,\ + mock.patch.object(ext_test.TestExtensionDriver, + 'extend_network_dict') as ext_net_dict,\ + self.network() as network: net_id = network['network']['id'] net_data = {'network': {'id': net_id}} self._plugin.update_network(self._ctxt, net_id, net_data) @@ -100,13 +97,11 @@ class ExtensionDriverTestCase(test_plugin.Ml2PluginV2TestCase): self.assertTrue(ext_net_dict.called) def test_extend_subnet_dict(self): - with contextlib.nested( - mock.patch.object(ext_test.TestExtensionDriver, - 'process_update_subnet'), - mock.patch.object(ext_test.TestExtensionDriver, - 'extend_subnet_dict'), - self.subnet() - ) as (ext_update_subnet, ext_subnet_dict, subnet): + with mock.patch.object(ext_test.TestExtensionDriver, + 'process_update_subnet') as ext_update_subnet,\ + mock.patch.object(ext_test.TestExtensionDriver, + 'extend_subnet_dict') as ext_subnet_dict,\ + self.subnet() as subnet: subnet_id = subnet['subnet']['id'] subnet_data = {'subnet': {'id': subnet_id}} self._plugin.update_subnet(self._ctxt, subnet_id, subnet_data) @@ -114,13 +109,11 @@ class ExtensionDriverTestCase(test_plugin.Ml2PluginV2TestCase): self.assertTrue(ext_subnet_dict.called) def test_extend_port_dict(self): - with contextlib.nested( - mock.patch.object(ext_test.TestExtensionDriver, - 'process_update_port'), - mock.patch.object(ext_test.TestExtensionDriver, - 'extend_port_dict'), - self.port() - ) as (ext_update_port, 
ext_port_dict, port): + with mock.patch.object(ext_test.TestExtensionDriver, + 'process_update_port') as ext_update_port,\ + mock.patch.object(ext_test.TestExtensionDriver, + 'extend_port_dict') as ext_port_dict,\ + self.port() as port: port_id = port['port']['id'] port_data = {'port': {'id': port_id}} self._plugin.update_port(self._ctxt, port_id, port_data) diff --git a/neutron/tests/unit/plugins/ml2/test_plugin.py b/neutron/tests/unit/plugins/ml2/test_plugin.py index 8b49f6152f6..68b14f92f09 100644 --- a/neutron/tests/unit/plugins/ml2/test_plugin.py +++ b/neutron/tests/unit/plugins/ml2/test_plugin.py @@ -13,7 +13,6 @@ # License for the specific language governing permissions and limitations # under the License. -import contextlib import functools import mock import six @@ -461,13 +460,11 @@ class TestMl2PortsV2(test_plugin.TestPortsV2, Ml2PluginV2TestCase): def test_create_ports_bulk_with_sec_grp(self): ctx = context.get_admin_context() plugin = manager.NeutronManager.get_plugin() - with contextlib.nested( - self.network(), - mock.patch.object(plugin.notifier, - 'security_groups_member_updated'), - mock.patch.object(plugin.notifier, - 'security_groups_provider_updated') - ) as (net, m_upd, p_upd): + with self.network() as net,\ + mock.patch.object(plugin.notifier, + 'security_groups_member_updated') as m_upd,\ + mock.patch.object(plugin.notifier, + 'security_groups_provider_updated') as p_upd: res = self._create_port_bulk(self.fmt, 3, net['network']['id'], 'test', True, context=ctx) @@ -479,13 +476,11 @@ class TestMl2PortsV2(test_plugin.TestPortsV2, Ml2PluginV2TestCase): def test_create_ports_bulk_with_sec_grp_member_provider_update(self): ctx = context.get_admin_context() plugin = manager.NeutronManager.get_plugin() - with contextlib.nested( - self.network(), - mock.patch.object(plugin.notifier, - 'security_groups_member_updated'), - mock.patch.object(plugin.notifier, - 'security_groups_provider_updated') - ) as (net, m_upd, p_upd): + with self.network() as 
net,\ + mock.patch.object(plugin.notifier, + 'security_groups_member_updated') as m_upd,\ + mock.patch.object(plugin.notifier, + 'security_groups_provider_updated') as p_upd: net_id = net['network']['id'] data = [{ @@ -520,14 +515,16 @@ class TestMl2PortsV2(test_plugin.TestPortsV2, Ml2PluginV2TestCase): fake_prefix = '2001:db8::/64' fake_gateway = 'fe80::1' with self.network() as net: - with contextlib.nested( - self.subnet(net, gateway_ip=fake_gateway, - cidr=fake_prefix, ip_version=6), - mock.patch.object( - plugin.notifier, 'security_groups_member_updated'), - mock.patch.object( - plugin.notifier, 'security_groups_provider_updated') - ) as (snet_v6, m_upd, p_upd): + with self.subnet(net, + gateway_ip=fake_gateway, + cidr=fake_prefix, + ip_version=6) as snet_v6,\ + mock.patch.object( + plugin.notifier, + 'security_groups_member_updated') as m_upd,\ + mock.patch.object( + plugin.notifier, + 'security_groups_provider_updated') as p_upd: net_id = net['network']['id'] data = [{ @@ -547,11 +544,11 @@ class TestMl2PortsV2(test_plugin.TestPortsV2, Ml2PluginV2TestCase): plugin = manager.NeutronManager.get_plugin() l3plugin = manager.NeutronManager.get_service_plugins().get( p_const.L3_ROUTER_NAT) - with contextlib.nested( - self.port(), - mock.patch.object(l3plugin, 'disassociate_floatingips'), - mock.patch.object(registry, 'notify') - ) as (port, disassociate_floatingips, notify): + with self.port() as port,\ + mock.patch.object( + l3plugin, + 'disassociate_floatingips') as disassociate_floatingips,\ + mock.patch.object(registry, 'notify') as notify: port_id = port['port']['id'] plugin.delete_port(ctx, port_id) @@ -662,18 +659,18 @@ class TestMl2DvrPortsV2(TestMl2PortsV2): if floating_ip: fip_set.add(ns_to_delete['router_id']) - with contextlib.nested( - mock.patch.object(manager.NeutronManager, - 'get_service_plugins', - return_value=self.service_plugins), - self.port(device_owner=device_owner), - mock.patch.object(registry, 'notify'), - 
mock.patch.object(self.l3plugin, 'disassociate_floatingips', - return_value=fip_set), - mock.patch.object(self.l3plugin, 'dvr_deletens_if_no_port', - return_value=[ns_to_delete]), - ) as (get_service_plugin, port, notify, disassociate_floatingips, - dvr_delns_ifno_port): + with mock.patch.object(manager.NeutronManager, + 'get_service_plugins', + return_value=self.service_plugins),\ + self.port(device_owner=device_owner) as port,\ + mock.patch.object(registry, 'notify') as notify,\ + mock.patch.object(self.l3plugin, + 'disassociate_floatingips', + return_value=fip_set),\ + mock.patch.object( + self.l3plugin, + 'dvr_deletens_if_no_port', + return_value=[ns_to_delete]) as dvr_delns_ifno_port: port_id = port['port']['id'] self.plugin.delete_port(self.context, port_id) @@ -790,12 +787,11 @@ class TestMl2PortBinding(Ml2PluginV2TestCase, plugin, self.context, port['port'], plugin.get_network(self.context, port['port']['network_id']), binding, None) - with contextlib.nested( - mock.patch('neutron.plugins.ml2.plugin.' - 'db.get_locked_port_and_binding', - return_value=(None, None)), - mock.patch('neutron.plugins.ml2.plugin.Ml2Plugin._make_port_dict') - ) as (glpab_mock, mpd_mock): + with mock.patch( + 'neutron.plugins.ml2.plugin.' 'db.get_locked_port_and_binding', + return_value=(None, None)) as glpab_mock,\ + mock.patch('neutron.plugins.ml2.plugin.Ml2Plugin.' 
+ '_make_port_dict') as mpd_mock: plugin._bind_port_if_needed(mech_context) # called during deletion to get port self.assertTrue(glpab_mock.mock_calls) @@ -1489,10 +1485,9 @@ class TestMl2PluginCreateUpdateDeletePort(base.BaseTestCase): return plugin def test_create_port_rpc_outside_transaction(self): - with contextlib.nested( - mock.patch.object(ml2_plugin.Ml2Plugin, '__init__'), - mock.patch.object(base_plugin.NeutronDbPluginV2, 'create_port'), - ) as (init, super_create_port): + with mock.patch.object(ml2_plugin.Ml2Plugin, '__init__') as init,\ + mock.patch.object(base_plugin.NeutronDbPluginV2, + 'create_port'): init.return_value = None new_host_port = mock.Mock() @@ -1505,10 +1500,9 @@ class TestMl2PluginCreateUpdateDeletePort(base.BaseTestCase): plugin, **kwargs) def test_update_port_rpc_outside_transaction(self): - with contextlib.nested( - mock.patch.object(ml2_plugin.Ml2Plugin, '__init__'), - mock.patch.object(base_plugin.NeutronDbPluginV2, 'update_port'), - ) as (init, super_update_port): + with mock.patch.object(ml2_plugin.Ml2Plugin, '__init__') as init,\ + mock.patch.object(base_plugin.NeutronDbPluginV2, + 'update_port'): init.return_value = None new_host_port = mock.Mock() plugin = self._create_plugin_for_create_update_port(new_host_port) @@ -1531,13 +1525,12 @@ class TestMl2PluginCreateUpdateDeletePort(base.BaseTestCase): 'router', constants.L3_AGENT_SCHEDULER_EXT_ALIAS, constants.L3_DISTRIBUTED_EXT_ALIAS ] - with contextlib.nested( - mock.patch.object(ml2_plugin.Ml2Plugin, '__init__', - return_value=None), - mock.patch.object(manager.NeutronManager, - 'get_service_plugins', - return_value={'L3_ROUTER_NAT': l3plugin}), - ): + with mock.patch.object(ml2_plugin.Ml2Plugin, + '__init__', + return_value=None),\ + mock.patch.object(manager.NeutronManager, + 'get_service_plugins', + return_value={'L3_ROUTER_NAT': l3plugin}): plugin = self._create_plugin_for_create_update_port(mock.Mock()) # deleting the port will call registry.notify, which will # run the 
transaction balancing function defined in this test diff --git a/neutron/tests/unit/plugins/ml2/test_rpc.py b/neutron/tests/unit/plugins/ml2/test_rpc.py index 65c35aa0e36..f0e1a360322 100644 --- a/neutron/tests/unit/plugins/ml2/test_rpc.py +++ b/neutron/tests/unit/plugins/ml2/test_rpc.py @@ -18,7 +18,6 @@ Unit Tests for ml2 rpc """ import collections -import contextlib import mock from oslo_config import cfg @@ -202,12 +201,8 @@ class RpcApiTestCase(base.BaseTestCase): expected_version = kwargs.pop('version', None) fanout = kwargs.pop('fanout', False) - with contextlib.nested( - mock.patch.object(rpcapi.client, rpc_method), - mock.patch.object(rpcapi.client, 'prepare'), - ) as ( - rpc_mock, prepare_mock - ): + with mock.patch.object(rpcapi.client, rpc_method) as rpc_mock,\ + mock.patch.object(rpcapi.client, 'prepare') as prepare_mock: prepare_mock.return_value = rpcapi.client rpc_mock.return_value = expected_retval retval = getattr(rpcapi, method)(ctxt, **kwargs) diff --git a/neutron/tests/unit/plugins/ml2/test_security_group.py b/neutron/tests/unit/plugins/ml2/test_security_group.py index 772853938dd..897cadf58aa 100644 --- a/neutron/tests/unit/plugins/ml2/test_security_group.py +++ b/neutron/tests/unit/plugins/ml2/test_security_group.py @@ -14,7 +14,6 @@ # License for the specific language governing permissions and limitations # under the License. 
-import contextlib import math import mock @@ -110,12 +109,11 @@ class TestMl2SecurityGroups(Ml2SecurityGroupsTestCase, max_ports_per_query = 5 ports_to_query = 73 for max_ports_per_query in (1, 2, 5, 7, 9, 31): - with contextlib.nested( - mock.patch('neutron.plugins.ml2.db.MAX_PORTS_PER_QUERY', - new=max_ports_per_query), - mock.patch('neutron.plugins.ml2.db.get_sg_ids_grouped_by_port', - return_value={}), - ) as (max_mock, get_mock): + with mock.patch('neutron.plugins.ml2.db.MAX_PORTS_PER_QUERY', + new=max_ports_per_query),\ + mock.patch( + 'neutron.plugins.ml2.db.get_sg_ids_grouped_by_port', + return_value={}) as get_mock: plugin.get_ports_from_devices(self.ctx, ['%s%s' % (const.TAP_DEVICE_PREFIX, i) for i in range(ports_to_query)]) @@ -139,10 +137,8 @@ class TestMl2SecurityGroups(Ml2SecurityGroupsTestCase, plugin = manager.NeutronManager.get_plugin() # when full UUIDs are provided, the _or statement should only # have one matching 'IN' critiera for all of the IDs - with contextlib.nested( - mock.patch('neutron.plugins.ml2.db.or_'), - mock.patch('sqlalchemy.orm.Session.query') - ) as (or_mock, qmock): + with mock.patch('neutron.plugins.ml2.db.or_') as or_mock,\ + mock.patch('sqlalchemy.orm.Session.query') as qmock: fmock = qmock.return_value.outerjoin.return_value.filter # return no ports to exit the method early since we are mocking # the query diff --git a/neutron/tests/unit/plugins/oneconvergence/test_nvsd_agent.py b/neutron/tests/unit/plugins/oneconvergence/test_nvsd_agent.py index 5835d89a7fd..769bf4f421f 100644 --- a/neutron/tests/unit/plugins/oneconvergence/test_nvsd_agent.py +++ b/neutron/tests/unit/plugins/oneconvergence/test_nvsd_agent.py @@ -12,7 +12,6 @@ # License for the specific language governing permissions and limitations # under the License. 
-import contextlib import time import mock @@ -37,10 +36,8 @@ class TestOneConvergenceAgentBase(base.BaseTestCase): cfg.CONF.set_default('firewall_driver', 'neutron.agent.firewall.NoopFirewallDriver', group='SECURITYGROUP') - with contextlib.nested( - mock.patch('neutron.openstack.common.loopingcall.' - 'FixedIntervalLoopingCall'), - ) as (loopingcall): + with mock.patch('neutron.openstack.common.loopingcall.' + 'FixedIntervalLoopingCall') as loopingcall: kwargs = {'integ_br': 'integration_bridge', 'polling_interval': 5} context = mock.Mock() @@ -56,10 +53,10 @@ class TestOneConvergenceAgentBase(base.BaseTestCase): class TestOneConvergenceAgentCallback(TestOneConvergenceAgentBase): def test_port_update(self): - with contextlib.nested( - mock.patch.object(ovs_lib.OVSBridge, 'get_vif_port_by_id'), - mock.patch.object(self.sg_agent, 'refresh_firewall') - ) as (get_vif_port_by_id, refresh_firewall): + with mock.patch.object(ovs_lib.OVSBridge, + 'get_vif_port_by_id') as get_vif_port_by_id,\ + mock.patch.object(self.sg_agent, + 'refresh_firewall') as refresh_firewall: context = mock.Mock() vifport = ovs_lib.VifPort('port1', '1', 'id-1', 'mac-1', self.agent.int_br) @@ -129,13 +126,17 @@ class TestNVSDAgent(TestOneConvergenceAgentBase): [] for _i in moves.range(DAEMON_LOOP_COUNT - len(self.vif_ports_scenario))) - with contextlib.nested( - mock.patch.object(time, 'sleep', side_effect=sleep_mock), - mock.patch.object(ovs_lib.OVSBridge, 'get_vif_port_set'), - mock.patch.object(self.agent.sg_agent, 'prepare_devices_filter'), - mock.patch.object(self.agent.sg_agent, 'remove_devices_filter') - ) as (sleep, get_vif_port_set, prepare_devices_filter, - remove_devices_filter): + with mock.patch.object(time, + 'sleep', + side_effect=sleep_mock) as sleep,\ + mock.patch.object(ovs_lib.OVSBridge, + 'get_vif_port_set') as get_vif_port_set,\ + mock.patch.object( + self.agent.sg_agent, + 'prepare_devices_filter') as prepare_devices_filter,\ + mock.patch.object( + self.agent.sg_agent, + 
'remove_devices_filter') as remove_devices_filter: get_vif_port_set.side_effect = self.vif_ports_scenario with testtools.ExpectedException(RuntimeError): @@ -158,11 +159,11 @@ class TestNVSDAgent(TestOneConvergenceAgentBase): class TestOneConvergenceAgentMain(base.BaseTestCase): def test_main(self): - with contextlib.nested( - mock.patch.object(nvsd_neutron_agent, 'NVSDNeutronAgent'), - mock.patch.object(nvsd_neutron_agent, 'common_config'), - mock.patch.object(nvsd_neutron_agent, 'config') - ) as (agent, common_config, config): + with mock.patch.object(nvsd_neutron_agent, + 'NVSDNeutronAgent') as agent,\ + mock.patch.object(nvsd_neutron_agent, + 'common_config') as common_config,\ + mock.patch.object(nvsd_neutron_agent, 'config') as config: config.AGENT.integration_bridge = 'br-int-dummy' config.AGENT.polling_interval = 5 diff --git a/neutron/tests/unit/plugins/oneconvergence/test_nvsd_plugin.py b/neutron/tests/unit/plugins/oneconvergence/test_nvsd_plugin.py index f8ac5f48070..8058c813ec3 100644 --- a/neutron/tests/unit/plugins/oneconvergence/test_nvsd_plugin.py +++ b/neutron/tests/unit/plugins/oneconvergence/test_nvsd_plugin.py @@ -15,7 +15,6 @@ """Test Library for OneConvergencePlugin.""" -import contextlib import uuid import mock @@ -79,7 +78,7 @@ class TestOneConvergencePluginPortsV2(test_plugin.TestPortsV2, def test_ports_vif_details(self): cfg.CONF.set_default('allow_overlapping_ips', True) plugin = manager.NeutronManager.get_plugin() - with contextlib.nested(self.port(), self.port()) as (port1, port2): + with self.port(), self.port(): ctx = context.get_admin_context() ports = plugin.get_ports(ctx) self.assertEqual(len(ports), 2) diff --git a/neutron/tests/unit/plugins/openvswitch/agent/test_ovs_neutron_agent.py b/neutron/tests/unit/plugins/openvswitch/agent/test_ovs_neutron_agent.py index 9063df417ea..d29750dbaf6 100644 --- a/neutron/tests/unit/plugins/openvswitch/agent/test_ovs_neutron_agent.py +++ 
b/neutron/tests/unit/plugins/openvswitch/agent/test_ovs_neutron_agent.py @@ -12,7 +12,6 @@ # License for the specific language governing permissions and limitations # under the License. -import contextlib import sys import time @@ -114,20 +113,19 @@ class TestOvsNeutronAgent(object): def start(self, interval=0): self.f() - with contextlib.nested( - mock.patch.object(self.mod_agent.OVSNeutronAgent, - 'setup_integration_br'), - mock.patch.object(self.mod_agent.OVSNeutronAgent, - 'setup_ancillary_bridges', - return_value=[]), - mock.patch('neutron.agent.linux.utils.get_interface_mac', - return_value='00:00:00:00:00:01'), - mock.patch('neutron.agent.common.ovs_lib.BaseOVS.get_bridges'), - mock.patch('neutron.openstack.common.loopingcall.' - 'FixedIntervalLoopingCall', - new=MockFixedIntervalLoopingCall), - mock.patch('neutron.agent.common.ovs_lib.OVSBridge.' - 'get_vif_ports', return_value=[])): + with mock.patch.object(self.mod_agent.OVSNeutronAgent, + 'setup_integration_br'),\ + mock.patch.object(self.mod_agent.OVSNeutronAgent, + 'setup_ancillary_bridges', + return_value=[]),\ + mock.patch('neutron.agent.linux.utils.get_interface_mac', + return_value='00:00:00:00:00:01'),\ + mock.patch( + 'neutron.agent.common.ovs_lib.BaseOVS.get_bridges'),\ + mock.patch('neutron.openstack.common.loopingcall.' 'FixedIntervalLoopingCall', new=MockFixedIntervalLoopingCall),\ + mock.patch( + 'neutron.agent.common.ovs_lib.OVSBridge.' 'get_vif_ports', + return_value=[]): self.agent = self.mod_agent.OVSNeutronAgent(self._bridge_classes(), **kwargs) # set back to true because initial report state will succeed due @@ -237,12 +235,12 @@ class TestOvsNeutronAgent(object): updated_ports=None, port_tags_dict=None): if port_tags_dict is None: # Because empty dicts evaluate as False. 
port_tags_dict = {} - with contextlib.nested( - mock.patch.object(self.agent.int_br, 'get_vif_port_set', - return_value=vif_port_set), - mock.patch.object(self.agent.int_br, 'get_port_tag_dict', - return_value=port_tags_dict) - ): + with mock.patch.object(self.agent.int_br, + 'get_vif_port_set', + return_value=vif_port_set),\ + mock.patch.object(self.agent.int_br, + 'get_port_tag_dict', + return_value=port_tags_dict): return self.agent.scan_ports(registered_ports, updated_ports) def test_scan_ports_returns_current_only_for_unchanged_ports(self): @@ -309,21 +307,19 @@ class TestOvsNeutronAgent(object): added=set([3]), current=vif_port_set, removed=set([2]), updated=set([1]) ) - with contextlib.nested( - mock.patch.dict(self.agent.local_vlan_map, local_vlan_map), - mock.patch.object(self.agent, 'tun_br', autospec=True), - ): + with mock.patch.dict(self.agent.local_vlan_map, local_vlan_map),\ + mock.patch.object(self.agent, 'tun_br', autospec=True): actual = self.mock_scan_ports( vif_port_set, registered_ports, port_tags_dict=port_tags_dict) self.assertEqual(expected, actual) def test_treat_devices_added_returns_raises_for_missing_device(self): - with contextlib.nested( - mock.patch.object(self.agent.plugin_rpc, - 'get_devices_details_list', - side_effect=Exception()), - mock.patch.object(self.agent.int_br, 'get_vif_port_by_id', - return_value=mock.Mock())): + with mock.patch.object(self.agent.plugin_rpc, + 'get_devices_details_list', + side_effect=Exception()),\ + mock.patch.object(self.agent.int_br, + 'get_vif_port_by_id', + return_value=mock.Mock()): self.assertRaises( self.mod_agent.DeviceListRetrievalError, self.agent.treat_devices_added_or_updated, [{}], False) @@ -336,16 +332,16 @@ class TestOvsNeutronAgent(object): :param func_name: the function that should be called :returns: whether the named function was called """ - with contextlib.nested( - mock.patch.object(self.agent.plugin_rpc, - 'get_devices_details_list', - return_value=[details]), - 
mock.patch.object(self.agent.int_br, 'get_vif_port_by_id', - return_value=port), - mock.patch.object(self.agent.plugin_rpc, 'update_device_up'), - mock.patch.object(self.agent.plugin_rpc, 'update_device_down'), - mock.patch.object(self.agent, func_name) - ) as (get_dev_fn, get_vif_func, upd_dev_up, upd_dev_down, func): + with mock.patch.object(self.agent.plugin_rpc, + 'get_devices_details_list', + return_value=[details]),\ + mock.patch.object(self.agent.int_br, + 'get_vif_port_by_id', + return_value=port),\ + mock.patch.object(self.agent.plugin_rpc, 'update_device_up'),\ + mock.patch.object(self.agent.plugin_rpc, + 'update_device_down'),\ + mock.patch.object(self.agent, func_name) as func: skip_devs, need_bound_devices = ( self.agent.treat_devices_added_or_updated([{}], False)) # The function should not raise @@ -365,11 +361,11 @@ class TestOvsNeutronAgent(object): mock.MagicMock(), port, 'port_dead')) def test_treat_devices_added_does_not_process_missing_port(self): - with contextlib.nested( - mock.patch.object(self.agent.plugin_rpc, 'get_device_details'), - mock.patch.object(self.agent.int_br, 'get_vif_port_by_id', - return_value=None) - ) as (get_dev_fn, get_vif_func): + with mock.patch.object(self.agent.plugin_rpc, + 'get_device_details') as get_dev_fn,\ + mock.patch.object(self.agent.int_br, + 'get_vif_port_by_id', + return_value=None): self.assertFalse(get_dev_fn.called) def test_treat_devices_added_updated_updates_known_port(self): @@ -381,14 +377,14 @@ class TestOvsNeutronAgent(object): def test_treat_devices_added_updated_skips_if_port_not_found(self): dev_mock = mock.MagicMock() dev_mock.__getitem__.return_value = 'the_skipped_one' - with contextlib.nested( - mock.patch.object(self.agent.plugin_rpc, - 'get_devices_details_list', - return_value=[dev_mock]), - mock.patch.object(self.agent.int_br, 'get_vif_port_by_id', - return_value=None), - mock.patch.object(self.agent, 'treat_vif_port') - ) as (get_dev_fn, get_vif_func, treat_vif_port): + with 
mock.patch.object(self.agent.plugin_rpc, + 'get_devices_details_list', + return_value=[dev_mock]),\ + mock.patch.object(self.agent.int_br, + 'get_vif_port_by_id', + return_value=None),\ + mock.patch.object(self.agent, + 'treat_vif_port') as treat_vif_port: skip_devs = self.agent.treat_devices_added_or_updated([{}], False) # The function should return False for resync and no device # processed @@ -408,14 +404,14 @@ class TestOvsNeutronAgent(object): 'device_owner': 'compute:None' } - with contextlib.nested( - mock.patch.object(self.agent.plugin_rpc, - 'get_devices_details_list', - return_value=[fake_details_dict]), - mock.patch.object(self.agent.int_br, 'get_vif_port_by_id', - return_value=mock.MagicMock()), - mock.patch.object(self.agent, 'treat_vif_port') - ) as (get_dev_fn, get_vif_func, treat_vif_port): + with mock.patch.object(self.agent.plugin_rpc, + 'get_devices_details_list', + return_value=[fake_details_dict]),\ + mock.patch.object(self.agent.int_br, + 'get_vif_port_by_id', + return_value=mock.MagicMock()),\ + mock.patch.object(self.agent, + 'treat_vif_port') as treat_vif_port: skip_devs, need_bound_devices = ( self.agent.treat_devices_added_or_updated([{}], False)) # The function should return False for resync @@ -445,13 +441,15 @@ class TestOvsNeutronAgent(object): self.agent._bind_devices([{'network_id': 'non-existent'}]) def _test_process_network_ports(self, port_info): - with contextlib.nested( - mock.patch.object(self.agent.sg_agent, "setup_port_filters"), - mock.patch.object(self.agent, "treat_devices_added_or_updated", - return_value=([], [])), - mock.patch.object(self.agent, "treat_devices_removed", - return_value=False) - ) as (setup_port_filters, device_added_updated, device_removed): + with mock.patch.object(self.agent.sg_agent, + "setup_port_filters") as setup_port_filters,\ + mock.patch.object( + self.agent, + "treat_devices_added_or_updated", + return_value=([], [])) as device_added_updated,\ + mock.patch.object(self.agent, + 
"treat_devices_removed", + return_value=False) as device_removed: self.assertFalse(self.agent.process_network_ports(port_info, False)) setup_port_filters.assert_called_once_with( @@ -510,10 +508,9 @@ class TestOvsNeutronAgent(object): self.agent.agent_state, True) def test_network_delete(self): - with contextlib.nested( - mock.patch.object(self.agent, "reclaim_local_vlan"), - mock.patch.object(self.agent.tun_br, "cleanup_tunnel_port") - ) as (recl_fn, clean_tun_fn): + with mock.patch.object(self.agent, "reclaim_local_vlan") as recl_fn,\ + mock.patch.object(self.agent.tun_br, + "cleanup_tunnel_port") as clean_tun_fn: self.agent.network_delete("unused_context", network_id="123") self.assertFalse(recl_fn.called) @@ -537,26 +534,23 @@ class TestOvsNeutronAgent(object): def test_port_delete(self): port_id = "123" port_name = "foo" - with contextlib.nested( - mock.patch.object(self.agent.int_br, 'get_vif_port_by_id', - return_value=mock.MagicMock( - port_name=port_name)), - mock.patch.object(self.agent.int_br, "delete_port") - ) as (get_vif_func, del_port_func): + with mock.patch.object( + self.agent.int_br, + 'get_vif_port_by_id', + return_value=mock.MagicMock(port_name=port_name)) as get_vif_func,\ + mock.patch.object(self.agent.int_br, + "delete_port") as del_port_func: self.agent.port_delete("unused_context", port_id=port_id) self.assertTrue(get_vif_func.called) del_port_func.assert_called_once_with(port_name) def test_setup_physical_bridges(self): - with contextlib.nested( - mock.patch.object(ip_lib, "device_exists"), - mock.patch.object(sys, "exit"), - mock.patch.object(utils, "execute"), - mock.patch.object(self.agent, 'br_phys_cls'), - mock.patch.object(self.agent, 'int_br'), - ) as (devex_fn, sysexit_fn, utilsexec_fn, - phys_br_cls, int_br): + with mock.patch.object(ip_lib, "device_exists") as devex_fn,\ + mock.patch.object(sys, "exit"),\ + mock.patch.object(utils, "execute"),\ + mock.patch.object(self.agent, 'br_phys_cls') as phys_br_cls,\ + 
mock.patch.object(self.agent, 'int_br') as int_br: devex_fn.return_value = True parent = mock.MagicMock() phys_br = phys_br_cls() @@ -593,19 +587,17 @@ class TestOvsNeutronAgent(object): def test_setup_physical_bridges_using_veth_interconnection(self): self.agent.use_veth_interconnection = True - with contextlib.nested( - mock.patch.object(ip_lib, "device_exists"), - mock.patch.object(sys, "exit"), - mock.patch.object(utils, "execute"), - mock.patch.object(self.agent, 'br_phys_cls'), - mock.patch.object(self.agent, 'int_br'), - mock.patch.object(ip_lib.IPWrapper, "add_veth"), - mock.patch.object(ip_lib.IpLinkCommand, "delete"), - mock.patch.object(ip_lib.IpLinkCommand, "set_up"), - mock.patch.object(ip_lib.IpLinkCommand, "set_mtu"), - mock.patch.object(ovs_lib.BaseOVS, "get_bridges") - ) as (devex_fn, sysexit_fn, utilsexec_fn, phys_br_cls, int_br, - addveth_fn, linkdel_fn, linkset_fn, linkmtu_fn, get_br_fn): + with mock.patch.object(ip_lib, "device_exists") as devex_fn,\ + mock.patch.object(sys, "exit"),\ + mock.patch.object(utils, "execute") as utilsexec_fn,\ + mock.patch.object(self.agent, 'br_phys_cls') as phys_br_cls,\ + mock.patch.object(self.agent, 'int_br') as int_br,\ + mock.patch.object(ip_lib.IPWrapper, "add_veth") as addveth_fn,\ + mock.patch.object(ip_lib.IpLinkCommand, + "delete") as linkdel_fn,\ + mock.patch.object(ip_lib.IpLinkCommand, "set_up"),\ + mock.patch.object(ip_lib.IpLinkCommand, "set_mtu"),\ + mock.patch.object(ovs_lib.BaseOVS, "get_bridges") as get_br_fn: devex_fn.return_value = True parent = mock.MagicMock() parent.attach_mock(utilsexec_fn, 'utils_execute') @@ -643,12 +635,13 @@ class TestOvsNeutronAgent(object): def test_setup_tunnel_br(self): self.tun_br = mock.Mock() - with contextlib.nested( - mock.patch.object(self.agent.int_br, "add_patch_port", - return_value=1), - mock.patch.object(self.agent, 'tun_br', autospec=True), - mock.patch.object(sys, "exit") - ) as (intbr_patch_fn, tun_br, exit_fn): + with 
mock.patch.object(self.agent.int_br, + "add_patch_port", + return_value=1) as intbr_patch_fn,\ + mock.patch.object(self.agent, + 'tun_br', + autospec=True) as tun_br,\ + mock.patch.object(sys, "exit"): tun_br.add_patch_port.return_value = 2 self.agent.reset_tunnel_br(None) self.agent.setup_tunnel_br() @@ -659,11 +652,10 @@ class TestOvsNeutronAgent(object): self.agent.l2_pop = False self.agent.udp_vxlan_port = 8472 self.agent.tun_br_ofports['vxlan'] = {} - with contextlib.nested( - mock.patch.object(self.agent.tun_br, "add_tunnel_port", - return_value='6'), - mock.patch.object(self.agent.tun_br, "add_flow") - ) as (add_tun_port_fn, add_flow_fn): + with mock.patch.object(self.agent.tun_br, + "add_tunnel_port", + return_value='6') as add_tun_port_fn,\ + mock.patch.object(self.agent.tun_br, "add_flow"): self.agent._setup_tunnel_port(self.agent.tun_br, 'portname', '1.2.3.4', 'vxlan') self.assertTrue(add_tun_port_fn.called) @@ -706,12 +698,13 @@ class TestOvsNeutronAgent(object): def test_fdb_ignore_network(self): self._prepare_l2_pop_ofports() fdb_entry = {'net3': {}} - with contextlib.nested( - mock.patch.object(self.agent.tun_br, 'add_flow'), - mock.patch.object(self.agent.tun_br, 'delete_flows'), - mock.patch.object(self.agent, '_setup_tunnel_port'), - mock.patch.object(self.agent, 'cleanup_tunnel_port') - ) as (add_flow_fn, del_flow_fn, add_tun_fn, clean_tun_fn): + with mock.patch.object(self.agent.tun_br, 'add_flow') as add_flow_fn,\ + mock.patch.object(self.agent.tun_br, + 'delete_flows') as del_flow_fn,\ + mock.patch.object(self.agent, + '_setup_tunnel_port') as add_tun_fn,\ + mock.patch.object(self.agent, + 'cleanup_tunnel_port') as clean_tun_fn: self.agent.fdb_add(None, fdb_entry) self.assertFalse(add_flow_fn.called) self.assertFalse(add_tun_fn.called) @@ -747,10 +740,10 @@ class TestOvsNeutronAgent(object): [l2pop_rpc.PortInfo(FAKE_MAC, FAKE_IP1), n_const.FLOODING_ENTRY]}}} - with contextlib.nested( - mock.patch.object(self.agent, 'tun_br', autospec=True), - 
mock.patch.object(self.agent, '_setup_tunnel_port', autospec=True), - ) as (tun_br, add_tun_fn): + with mock.patch.object(self.agent, 'tun_br', autospec=True) as tun_br,\ + mock.patch.object(self.agent, + '_setup_tunnel_port', + autospec=True) as add_tun_fn: self.agent.fdb_add(None, fdb_entry) self.assertFalse(add_tun_fn.called) deferred_br_call = mock.call.deferred().__enter__() @@ -796,10 +789,9 @@ class TestOvsNeutronAgent(object): 'segment_id': 'tun1', 'ports': {'1.1.1.1': [l2pop_rpc.PortInfo(FAKE_MAC, FAKE_IP1)]}}} - with contextlib.nested( - mock.patch.object(self.agent, 'tun_br', autospec=True), - mock.patch.object(self.agent, '_setup_tunnel_port') - ) as (tun_br, add_tun_fn): + with mock.patch.object(self.agent, 'tun_br', autospec=True) as tun_br,\ + mock.patch.object(self.agent, + '_setup_tunnel_port') as add_tun_fn: self.agent.fdb_add(None, fdb_entry) self.assertFalse(add_tun_fn.called) fdb_entry['net1']['ports']['10.10.10.10'] = [ @@ -815,10 +807,9 @@ class TestOvsNeutronAgent(object): {'network_type': 'gre', 'segment_id': 'tun2', 'ports': {'2.2.2.2': [n_const.FLOODING_ENTRY]}}} - with contextlib.nested( - mock.patch.object(self.agent.tun_br, 'deferred'), - mock.patch.object(self.agent.tun_br, 'delete_port'), - ) as (defer_fn, delete_port_fn): + with mock.patch.object(self.agent.tun_br, 'deferred') as defer_fn,\ + mock.patch.object(self.agent.tun_br, + 'delete_port') as delete_port_fn: self.agent.fdb_remove(None, fdb_entry) deferred_br = defer_fn().__enter__() deferred_br.delete_port.assert_called_once_with('gre-02020202') @@ -845,10 +836,9 @@ class TestOvsNeutronAgent(object): lvm.vlan = 'vlan1' lvm.segmentation_id = 'seg1' lvm.tun_ofports = set(['1', '2']) - with contextlib.nested( - mock.patch.object(self.agent.tun_br, 'mod_flow'), - mock.patch.object(self.agent.tun_br, 'delete_flows') - ) as (mod_flow_fn, delete_flows_fn): + with mock.patch.object(self.agent.tun_br, 'mod_flow') as mod_flow_fn,\ + mock.patch.object(self.agent.tun_br, + 'delete_flows') 
as delete_flows_fn: self.agent.del_fdb_flow(self.agent.tun_br, n_const.FLOODING_ENTRY, '1.1.1.1', lvm, '3') self.assertFalse(mod_flow_fn.called) @@ -880,11 +870,11 @@ class TestOvsNeutronAgent(object): mock_loop.assert_called_once_with(polling_manager=mock.ANY) def test_setup_tunnel_port_invalid_ofport(self): - with contextlib.nested( - mock.patch.object(self.agent.tun_br, 'add_tunnel_port', - return_value=ovs_lib.INVALID_OFPORT), - mock.patch.object(self.mod_agent.LOG, 'error') - ) as (add_tunnel_port_fn, log_error_fn): + with mock.patch.object( + self.agent.tun_br, + 'add_tunnel_port', + return_value=ovs_lib.INVALID_OFPORT) as add_tunnel_port_fn,\ + mock.patch.object(self.mod_agent.LOG, 'error') as log_error_fn: ofport = self.agent._setup_tunnel_port( self.agent.tun_br, 'gre-1', 'remote_ip', p_const.TYPE_GRE) add_tunnel_port_fn.assert_called_once_with( @@ -896,11 +886,11 @@ class TestOvsNeutronAgent(object): self.assertEqual(ofport, 0) def test_setup_tunnel_port_error_negative_df_disabled(self): - with contextlib.nested( - mock.patch.object(self.agent.tun_br, 'add_tunnel_port', - return_value=ovs_lib.INVALID_OFPORT), - mock.patch.object(self.mod_agent.LOG, 'error') - ) as (add_tunnel_port_fn, log_error_fn): + with mock.patch.object( + self.agent.tun_br, + 'add_tunnel_port', + return_value=ovs_lib.INVALID_OFPORT) as add_tunnel_port_fn,\ + mock.patch.object(self.mod_agent.LOG, 'error') as log_error_fn: self.agent.dont_fragment = False ofport = self.agent._setup_tunnel_port( self.agent.tun_br, 'gre-1', 'remote_ip', p_const.TYPE_GRE) @@ -914,11 +904,12 @@ class TestOvsNeutronAgent(object): def test_tunnel_sync_with_ml2_plugin(self): fake_tunnel_details = {'tunnels': [{'ip_address': '100.101.31.15'}]} - with contextlib.nested( - mock.patch.object(self.agent.plugin_rpc, 'tunnel_sync', - return_value=fake_tunnel_details), - mock.patch.object(self.agent, '_setup_tunnel_port') - ) as (tunnel_sync_rpc_fn, _setup_tunnel_port_fn): + with 
mock.patch.object(self.agent.plugin_rpc, + 'tunnel_sync', + return_value=fake_tunnel_details),\ + mock.patch.object( + self.agent, + '_setup_tunnel_port') as _setup_tunnel_port_fn: self.agent.tunnel_types = ['vxlan'] self.agent.tunnel_sync() expected_calls = [mock.call(self.agent.tun_br, 'vxlan-64651f0f', @@ -928,11 +919,12 @@ class TestOvsNeutronAgent(object): def test_tunnel_sync_invalid_ip_address(self): fake_tunnel_details = {'tunnels': [{'ip_address': '300.300.300.300'}, {'ip_address': '100.100.100.100'}]} - with contextlib.nested( - mock.patch.object(self.agent.plugin_rpc, 'tunnel_sync', - return_value=fake_tunnel_details), - mock.patch.object(self.agent, '_setup_tunnel_port') - ) as (tunnel_sync_rpc_fn, _setup_tunnel_port_fn): + with mock.patch.object(self.agent.plugin_rpc, + 'tunnel_sync', + return_value=fake_tunnel_details),\ + mock.patch.object( + self.agent, + '_setup_tunnel_port') as _setup_tunnel_port_fn: self.agent.tunnel_types = ['vxlan'] self.agent.tunnel_sync() _setup_tunnel_port_fn.assert_called_once_with(self.agent.tun_br, @@ -973,25 +965,23 @@ class TestOvsNeutronAgent(object): 'added': set([]), 'removed': set(['tap0'])} - with contextlib.nested( - mock.patch.object(async_process.AsyncProcess, "_spawn"), - mock.patch.object(log.KeywordArgumentAdapter, 'exception'), - mock.patch.object(self.mod_agent.OVSNeutronAgent, - 'scan_ports'), - mock.patch.object(self.mod_agent.OVSNeutronAgent, - 'process_network_ports'), - mock.patch.object(self.mod_agent.OVSNeutronAgent, - 'check_ovs_status'), - mock.patch.object(self.mod_agent.OVSNeutronAgent, - 'setup_integration_br'), - mock.patch.object(self.mod_agent.OVSNeutronAgent, - 'setup_physical_bridges'), - mock.patch.object(time, 'sleep'), - mock.patch.object(self.mod_agent.OVSNeutronAgent, - 'update_stale_ofport_rules') - ) as (spawn_fn, log_exception, scan_ports, process_network_ports, - check_ovs_status, setup_int_br, setup_phys_br, time_sleep, - update_stale): + with 
mock.patch.object(async_process.AsyncProcess, "_spawn"),\ + mock.patch.object(log.KeywordArgumentAdapter, + 'exception') as log_exception,\ + mock.patch.object(self.mod_agent.OVSNeutronAgent, + 'scan_ports') as scan_ports,\ + mock.patch.object( + self.mod_agent.OVSNeutronAgent, + 'process_network_ports') as process_network_ports,\ + mock.patch.object(self.mod_agent.OVSNeutronAgent, + 'check_ovs_status') as check_ovs_status,\ + mock.patch.object(self.mod_agent.OVSNeutronAgent, + 'setup_integration_br') as setup_int_br,\ + mock.patch.object(self.mod_agent.OVSNeutronAgent, + 'setup_physical_bridges') as setup_phys_br,\ + mock.patch.object(time, 'sleep'),\ + mock.patch.object(self.mod_agent.OVSNeutronAgent, + 'update_stale_ofport_rules') as update_stale: log_exception.side_effect = Exception( 'Fake exception to get out of the loop') scan_ports.side_effect = [reply2, reply3] @@ -1182,18 +1172,16 @@ class AncillaryBridgesTest(object): except Exception: return None - with contextlib.nested( - mock.patch.object(self.mod_agent.OVSNeutronAgent, - 'setup_integration_br'), - mock.patch('neutron.agent.linux.utils.get_interface_mac', - return_value='00:00:00:00:00:01'), - mock.patch('neutron.agent.common.ovs_lib.BaseOVS.get_bridges', - return_value=bridges), - mock.patch('neutron.agent.common.ovs_lib.BaseOVS.' - 'get_bridge_external_bridge_id', - side_effect=pullup_side_effect), - mock.patch('neutron.agent.common.ovs_lib.OVSBridge.' - 'get_vif_ports', return_value=[])): + with mock.patch.object(self.mod_agent.OVSNeutronAgent, + 'setup_integration_br'),\ + mock.patch('neutron.agent.linux.utils.get_interface_mac', + return_value='00:00:00:00:00:01'),\ + mock.patch('neutron.agent.common.ovs_lib.BaseOVS.get_bridges', + return_value=bridges),\ + mock.patch('neutron.agent.common.ovs_lib.BaseOVS.' 'get_bridge_external_bridge_id', side_effect=pullup_side_effect),\ + mock.patch( + 'neutron.agent.common.ovs_lib.OVSBridge.' 
'get_vif_ports', + return_value=[]): self.agent = self.mod_agent.OVSNeutronAgent(self._bridge_classes(), **self.kwargs) self.assertEqual(len(ancillary), len(self.agent.ancillary_brs)) @@ -1240,20 +1228,19 @@ class TestOvsDvrNeutronAgent(object): def start(self, interval=0): self.f() - with contextlib.nested( - mock.patch.object(self.mod_agent.OVSNeutronAgent, - 'setup_integration_br'), - mock.patch.object(self.mod_agent.OVSNeutronAgent, - 'setup_ancillary_bridges', - return_value=[]), - mock.patch('neutron.agent.linux.utils.get_interface_mac', - return_value='00:00:00:00:00:01'), - mock.patch('neutron.agent.common.ovs_lib.BaseOVS.get_bridges'), - mock.patch('neutron.openstack.common.loopingcall.' - 'FixedIntervalLoopingCall', - new=MockFixedIntervalLoopingCall), - mock.patch('neutron.agent.common.ovs_lib.OVSBridge.' - 'get_vif_ports', return_value=[])): + with mock.patch.object(self.mod_agent.OVSNeutronAgent, + 'setup_integration_br'),\ + mock.patch.object(self.mod_agent.OVSNeutronAgent, + 'setup_ancillary_bridges', + return_value=[]),\ + mock.patch('neutron.agent.linux.utils.get_interface_mac', + return_value='00:00:00:00:00:01'),\ + mock.patch( + 'neutron.agent.common.ovs_lib.BaseOVS.get_bridges'),\ + mock.patch('neutron.openstack.common.loopingcall.' 'FixedIntervalLoopingCall', new=MockFixedIntervalLoopingCall),\ + mock.patch( + 'neutron.agent.common.ovs_lib.OVSBridge.' 
'get_vif_ports', + return_value=[]): self.agent = self.mod_agent.OVSNeutronAgent(self._bridge_classes(), **kwargs) # set back to true because initial report state will succeed due @@ -1346,29 +1333,26 @@ class TestOvsDvrNeutronAgent(object): phys_br = mock.create_autospec(self.br_phys_cls('br-phys')) int_br.set_db_attribute.return_value = True int_br.db_get_val.return_value = {} - with contextlib.nested( - mock.patch.object(self.agent.dvr_agent.plugin_rpc, - 'get_subnet_for_dvr', - return_value={ - 'gateway_ip': gateway_ip, - 'cidr': cidr, - 'ip_version': ip_version, - 'gateway_mac': gateway_mac}), - mock.patch.object(self.agent.dvr_agent.plugin_rpc, - 'get_ports_on_host_by_subnet', - return_value=[]), - mock.patch.object(self.agent.dvr_agent.int_br, - 'get_vif_port_by_id', - return_value=self._port), - mock.patch.object(self.agent, 'int_br', new=int_br), - mock.patch.object(self.agent, 'tun_br', new=tun_br), - mock.patch.dict(self.agent.phys_brs, - {physical_network: phys_br}), - mock.patch.object(self.agent.dvr_agent, 'int_br', new=int_br), - mock.patch.object(self.agent.dvr_agent, 'tun_br', new=tun_br), - mock.patch.dict(self.agent.dvr_agent.phys_brs, - {physical_network: phys_br}), - ) as (get_subnet_fn, get_cphost_fn, get_vif_fn, _, _, _, _, _, _): + with mock.patch.object(self.agent.dvr_agent.plugin_rpc, + 'get_subnet_for_dvr', + return_value={'gateway_ip': gateway_ip, + 'cidr': cidr, + 'ip_version': ip_version, + 'gateway_mac': gateway_mac}),\ + mock.patch.object(self.agent.dvr_agent.plugin_rpc, + 'get_ports_on_host_by_subnet', + return_value=[]),\ + mock.patch.object(self.agent.dvr_agent.int_br, + 'get_vif_port_by_id', + return_value=self._port),\ + mock.patch.object(self.agent, 'int_br', new=int_br),\ + mock.patch.object(self.agent, 'tun_br', new=tun_br),\ + mock.patch.dict(self.agent.phys_brs, + {physical_network: phys_br}),\ + mock.patch.object(self.agent.dvr_agent, 'int_br', new=int_br),\ + mock.patch.object(self.agent.dvr_agent, 'tun_br', new=tun_br),\ 
+ mock.patch.dict(self.agent.dvr_agent.phys_brs, + {physical_network: phys_br}): self.agent.port_bound( self._port, self._net_uuid, network_type, physical_network, segmentation_id, self._fixed_ips, @@ -1439,29 +1423,26 @@ class TestOvsDvrNeutronAgent(object): phys_br = mock.create_autospec(self.br_phys_cls('br-phys')) int_br.set_db_attribute.return_value = True int_br.db_get_val.return_value = {} - with contextlib.nested( - mock.patch.object(self.agent.dvr_agent.plugin_rpc, - 'get_subnet_for_dvr', - return_value={ - 'gateway_ip': gateway_ip, - 'cidr': cidr, - 'ip_version': ip_version, - 'gateway_mac': gateway_mac}), - mock.patch.object(self.agent.dvr_agent.plugin_rpc, - 'get_ports_on_host_by_subnet', - return_value=[]), - mock.patch.object(self.agent.dvr_agent.int_br, - 'get_vif_port_by_id', - return_value=self._port), - mock.patch.object(self.agent, 'int_br', new=int_br), - mock.patch.object(self.agent, 'tun_br', new=tun_br), - mock.patch.dict(self.agent.phys_brs, - {physical_network: phys_br}), - mock.patch.object(self.agent.dvr_agent, 'int_br', new=int_br), - mock.patch.object(self.agent.dvr_agent, 'tun_br', new=tun_br), - mock.patch.dict(self.agent.dvr_agent.phys_brs, - {physical_network: phys_br}), - ) as (get_subnet_fn, get_cphost_fn, get_vif_fn, _, _, _, _, _, _): + with mock.patch.object(self.agent.dvr_agent.plugin_rpc, + 'get_subnet_for_dvr', + return_value={'gateway_ip': gateway_ip, + 'cidr': cidr, + 'ip_version': ip_version, + 'gateway_mac': gateway_mac}),\ + mock.patch.object(self.agent.dvr_agent.plugin_rpc, + 'get_ports_on_host_by_subnet', + return_value=[]),\ + mock.patch.object(self.agent.dvr_agent.int_br, + 'get_vif_port_by_id', + return_value=self._port),\ + mock.patch.object(self.agent, 'int_br', new=int_br),\ + mock.patch.object(self.agent, 'tun_br', new=tun_br),\ + mock.patch.dict(self.agent.phys_brs, + {physical_network: phys_br}),\ + mock.patch.object(self.agent.dvr_agent, 'int_br', new=int_br),\ + mock.patch.object(self.agent.dvr_agent, 
'tun_br', new=tun_br),\ + mock.patch.dict(self.agent.dvr_agent.phys_brs, + {physical_network: phys_br}): self.agent.port_bound( self._port, self._net_uuid, network_type, physical_network, segmentation_id, self._fixed_ips, @@ -1541,24 +1522,22 @@ class TestOvsDvrNeutronAgent(object): tun_br = mock.create_autospec(self.agent.tun_br) int_br.set_db_attribute.return_value = True int_br.db_get_val.return_value = {} - with contextlib.nested( - mock.patch.object( - self.agent.dvr_agent.plugin_rpc, 'get_subnet_for_dvr', - return_value={'gateway_ip': '1.1.1.1', - 'cidr': '1.1.1.0/24', - 'ip_version': 4, - 'gateway_mac': 'aa:bb:cc:11:22:33'}), - mock.patch.object(self.agent.dvr_agent.plugin_rpc, - 'get_ports_on_host_by_subnet', - return_value=[]), - mock.patch.object(self.agent.dvr_agent.int_br, - 'get_vif_port_by_id', - return_value=self._port), - mock.patch.object(self.agent, 'int_br', new=int_br), - mock.patch.object(self.agent, 'tun_br', new=tun_br), - mock.patch.object(self.agent.dvr_agent, 'int_br', new=int_br), - mock.patch.object(self.agent.dvr_agent, 'tun_br', new=tun_br), - ) as (get_subnet_fn, get_cphost_fn, get_vif_fn, _, _, _, _): + with mock.patch.object(self.agent.dvr_agent.plugin_rpc, + 'get_subnet_for_dvr', + return_value={'gateway_ip': '1.1.1.1', + 'cidr': '1.1.1.0/24', + 'ip_version': 4, + 'gateway_mac': 'aa:bb:cc:11:22:33'}),\ + mock.patch.object(self.agent.dvr_agent.plugin_rpc, + 'get_ports_on_host_by_subnet', + return_value=[]),\ + mock.patch.object(self.agent.dvr_agent.int_br, + 'get_vif_port_by_id', + return_value=self._port),\ + mock.patch.object(self.agent, 'int_br', new=int_br),\ + mock.patch.object(self.agent, 'tun_br', new=tun_br),\ + mock.patch.object(self.agent.dvr_agent, 'int_br', new=int_br),\ + mock.patch.object(self.agent.dvr_agent, 'tun_br', new=tun_br): self.agent.port_bound( self._port, self._net_uuid, 'vxlan', None, None, self._fixed_ips, @@ -1604,24 +1583,22 @@ class TestOvsDvrNeutronAgent(object): tun_br = 
mock.create_autospec(self.agent.tun_br) int_br.set_db_attribute.return_value = True int_br.db_get_val.return_value = {} - with contextlib.nested( - mock.patch.object( - self.agent.dvr_agent.plugin_rpc, 'get_subnet_for_dvr', - return_value={'gateway_ip': gateway_ip, - 'cidr': cidr, - 'ip_version': ip_version, - 'gateway_mac': gateway_mac}), - mock.patch.object(self.agent.dvr_agent.plugin_rpc, - 'get_ports_on_host_by_subnet', - return_value=[]), - mock.patch.object(self.agent, 'int_br', new=int_br), - mock.patch.object(self.agent, 'tun_br', new=tun_br), - mock.patch.object(self.agent.dvr_agent, 'int_br', new=int_br), - mock.patch.object(self.agent.dvr_agent, 'tun_br', new=tun_br), - mock.patch.object(self.agent.dvr_agent.int_br, - 'get_vif_port_by_id', - return_value=self._port), - ) as (get_subnet_fn, get_cphost_fn, _, _, _, _, get_vif_fn): + with mock.patch.object(self.agent.dvr_agent.plugin_rpc, + 'get_subnet_for_dvr', + return_value={'gateway_ip': gateway_ip, + 'cidr': cidr, + 'ip_version': ip_version, + 'gateway_mac': gateway_mac}),\ + mock.patch.object(self.agent.dvr_agent.plugin_rpc, + 'get_ports_on_host_by_subnet', + return_value=[]),\ + mock.patch.object(self.agent, 'int_br', new=int_br),\ + mock.patch.object(self.agent, 'tun_br', new=tun_br),\ + mock.patch.object(self.agent.dvr_agent, 'int_br', new=int_br),\ + mock.patch.object(self.agent.dvr_agent, 'tun_br', new=tun_br),\ + mock.patch.object(self.agent.dvr_agent.int_br, + 'get_vif_port_by_id', + return_value=self._port): self.agent.port_bound( self._port, self._net_uuid, 'vxlan', None, None, self._fixed_ips, @@ -1643,15 +1620,14 @@ class TestOvsDvrNeutronAgent(object): int_br.reset_mock() tun_br.reset_mock() - with contextlib.nested( - mock.patch.object(self.agent, 'reclaim_local_vlan'), - mock.patch.object(self.agent.plugin_rpc, 'update_device_down', - return_value=None), - mock.patch.object(self.agent, 'int_br', new=int_br), - mock.patch.object(self.agent, 'tun_br', new=tun_br), - 
mock.patch.object(self.agent.dvr_agent, 'int_br', new=int_br), - mock.patch.object(self.agent.dvr_agent, 'tun_br', new=tun_br), - ) as (reclaim_vlan_fn, update_dev_down_fn, _, _, _, _): + with mock.patch.object(self.agent, 'reclaim_local_vlan'),\ + mock.patch.object(self.agent.plugin_rpc, + 'update_device_down', + return_value=None),\ + mock.patch.object(self.agent, 'int_br', new=int_br),\ + mock.patch.object(self.agent, 'tun_br', new=tun_br),\ + mock.patch.object(self.agent.dvr_agent, 'int_br', new=int_br),\ + mock.patch.object(self.agent.dvr_agent, 'tun_br', new=tun_br): self.agent.treat_devices_removed([self._port.vif_id]) if ip_version == 4: expected = [ @@ -1686,24 +1662,22 @@ class TestOvsDvrNeutronAgent(object): tun_br = mock.create_autospec(self.agent.tun_br) int_br.set_db_attribute.return_value = True int_br.db_get_val.return_value = {} - with contextlib.nested( - mock.patch.object( - self.agent.dvr_agent.plugin_rpc, 'get_subnet_for_dvr', - return_value={'gateway_ip': gateway_ip, - 'cidr': cidr, - 'ip_version': ip_version, - 'gateway_mac': gateway_mac}), - mock.patch.object(self.agent.dvr_agent.plugin_rpc, - 'get_ports_on_host_by_subnet', - return_value=[]), - mock.patch.object(self.agent.dvr_agent.int_br, - 'get_vif_port_by_id', - return_value=self._port), - mock.patch.object(self.agent, 'int_br', new=int_br), - mock.patch.object(self.agent, 'tun_br', new=tun_br), - mock.patch.object(self.agent.dvr_agent, 'int_br', new=int_br), - mock.patch.object(self.agent.dvr_agent, 'tun_br', new=tun_br), - ) as (get_subnet_fn, get_cphost_fn, get_vif_fn, _, _, _, _): + with mock.patch.object(self.agent.dvr_agent.plugin_rpc, + 'get_subnet_for_dvr', + return_value={'gateway_ip': gateway_ip, + 'cidr': cidr, + 'ip_version': ip_version, + 'gateway_mac': gateway_mac}),\ + mock.patch.object(self.agent.dvr_agent.plugin_rpc, + 'get_ports_on_host_by_subnet', + return_value=[]),\ + mock.patch.object(self.agent.dvr_agent.int_br, + 'get_vif_port_by_id', + return_value=self._port),\ 
+ mock.patch.object(self.agent, 'int_br', new=int_br),\ + mock.patch.object(self.agent, 'tun_br', new=tun_br),\ + mock.patch.object(self.agent.dvr_agent, 'int_br', new=int_br),\ + mock.patch.object(self.agent.dvr_agent, 'tun_br', new=tun_br): self.agent.port_bound( self._port, self._net_uuid, 'vxlan', None, None, self._fixed_ips, @@ -1748,15 +1722,14 @@ class TestOvsDvrNeutronAgent(object): int_br.reset_mock() tun_br.reset_mock() - with contextlib.nested( - mock.patch.object(self.agent, 'reclaim_local_vlan'), - mock.patch.object(self.agent.plugin_rpc, 'update_device_down', - return_value=None), - mock.patch.object(self.agent, 'int_br', new=int_br), - mock.patch.object(self.agent, 'tun_br', new=tun_br), - mock.patch.object(self.agent.dvr_agent, 'int_br', new=int_br), - mock.patch.object(self.agent.dvr_agent, 'tun_br', new=tun_br), - ) as (reclaim_vlan_fn, update_dev_down_fn, _, _, _, _): + with mock.patch.object(self.agent, 'reclaim_local_vlan'),\ + mock.patch.object(self.agent.plugin_rpc, + 'update_device_down', + return_value=None),\ + mock.patch.object(self.agent, 'int_br', new=int_br),\ + mock.patch.object(self.agent, 'tun_br', new=tun_br),\ + mock.patch.object(self.agent.dvr_agent, 'int_br', new=int_br),\ + mock.patch.object(self.agent.dvr_agent, 'tun_br', new=tun_br): self.agent.treat_devices_removed([self._compute_port.vif_id]) int_br.assert_has_calls([ mock.call.delete_dvr_to_src_mac( @@ -1792,24 +1765,22 @@ class TestOvsDvrNeutronAgent(object): tun_br = mock.create_autospec(self.agent.tun_br) int_br.set_db_attribute.return_value = True int_br.db_get_val.return_value = {} - with contextlib.nested( - mock.patch.object( - self.agent.dvr_agent.plugin_rpc, 'get_subnet_for_dvr', - return_value={'gateway_ip': '1.1.1.1', - 'cidr': '1.1.1.0/24', - 'ip_version': 4, - 'gateway_mac': gateway_mac}), - mock.patch.object(self.agent.dvr_agent.plugin_rpc, - 'get_ports_on_host_by_subnet', - return_value=[]), - mock.patch.object(self.agent.dvr_agent.int_br, - 
'get_vif_port_by_id', - return_value=self._port), - mock.patch.object(self.agent, 'int_br', new=int_br), - mock.patch.object(self.agent, 'tun_br', new=tun_br), - mock.patch.object(self.agent.dvr_agent, 'int_br', new=int_br), - mock.patch.object(self.agent.dvr_agent, 'tun_br', new=tun_br), - ) as (get_subnet_fn, get_cphost_fn, get_vif_fn, _, _, _, _): + with mock.patch.object(self.agent.dvr_agent.plugin_rpc, + 'get_subnet_for_dvr', + return_value={'gateway_ip': '1.1.1.1', + 'cidr': '1.1.1.0/24', + 'ip_version': 4, + 'gateway_mac': gateway_mac}),\ + mock.patch.object(self.agent.dvr_agent.plugin_rpc, + 'get_ports_on_host_by_subnet', + return_value=[]),\ + mock.patch.object(self.agent.dvr_agent.int_br, + 'get_vif_port_by_id', + return_value=self._port),\ + mock.patch.object(self.agent, 'int_br', new=int_br),\ + mock.patch.object(self.agent, 'tun_br', new=tun_br),\ + mock.patch.object(self.agent.dvr_agent, 'int_br', new=int_br),\ + mock.patch.object(self.agent.dvr_agent, 'tun_br', new=tun_br): self.agent.port_bound( self._port, self._net_uuid, 'vxlan', None, None, self._fixed_ips, @@ -1838,15 +1809,14 @@ class TestOvsDvrNeutronAgent(object): int_br.reset_mock() tun_br.reset_mock() - with contextlib.nested( - mock.patch.object(self.agent, 'reclaim_local_vlan'), - mock.patch.object(self.agent.plugin_rpc, 'update_device_down', - return_value=None), - mock.patch.object(self.agent, 'int_br', new=int_br), - mock.patch.object(self.agent, 'tun_br', new=tun_br), - mock.patch.object(self.agent.dvr_agent, 'int_br', new=int_br), - mock.patch.object(self.agent.dvr_agent, 'tun_br', new=tun_br), - ) as (reclaim_vlan_fn, update_dev_down_fn, _, _, _, _): + with mock.patch.object(self.agent, 'reclaim_local_vlan'),\ + mock.patch.object(self.agent.plugin_rpc, + 'update_device_down', + return_value=None),\ + mock.patch.object(self.agent, 'int_br', new=int_br),\ + mock.patch.object(self.agent, 'tun_br', new=tun_br),\ + mock.patch.object(self.agent.dvr_agent, 'int_br', new=int_br),\ + 
mock.patch.object(self.agent.dvr_agent, 'tun_br', new=tun_br): self.agent.treat_devices_removed([self._port.vif_id]) expected_on_int_br = [ mock.call.delete_dvr_to_src_mac( @@ -1863,19 +1833,16 @@ class TestOvsDvrNeutronAgent(object): self._setup_for_dvr_test() int_br = mock.create_autospec(self.agent.int_br) tun_br = mock.create_autospec(self.agent.tun_br) - with contextlib.nested( - mock.patch.object(self.agent, 'int_br', new=int_br), - mock.patch.object(self.agent, 'tun_br', new=tun_br), - mock.patch.object(self.agent.dvr_agent, 'int_br', new=int_br), - mock.patch.object(self.agent.dvr_agent, 'tun_br', new=tun_br), - mock.patch.object( - self.agent.dvr_agent.plugin_rpc, - 'get_dvr_mac_address_list', - return_value=[{'host': 'cn1', - 'mac_address': 'aa:bb:cc:dd:ee:ff'}, - {'host': 'cn2', - 'mac_address': '11:22:33:44:55:66'}]) - ) as (_, _, _, _, get_mac_list_fn): + with mock.patch.object(self.agent, 'int_br', new=int_br),\ + mock.patch.object(self.agent, 'tun_br', new=tun_br),\ + mock.patch.object(self.agent.dvr_agent, 'int_br', new=int_br),\ + mock.patch.object(self.agent.dvr_agent, 'tun_br', new=tun_br),\ + mock.patch.object(self.agent.dvr_agent.plugin_rpc, + 'get_dvr_mac_address_list', + return_value=[{'host': 'cn1', + 'mac_address': 'aa:bb:cc:dd:ee:ff'}, + {'host': 'cn2', + 'mac_address': '11:22:33:44:55:66'}]): self.agent.dvr_agent.setup_dvr_flows_on_integ_br() self.assertTrue(self.agent.dvr_agent.in_distributed_mode()) physical_networks = self.agent.dvr_agent.bridge_mappings.keys() @@ -1913,13 +1880,11 @@ class TestOvsDvrNeutronAgent(object): self._setup_for_dvr_test() self.agent.dvr_agent.dvr_mac_address = None int_br = mock.create_autospec(self.agent.int_br) - with contextlib.nested( - mock.patch.object(self.agent.dvr_agent.plugin_rpc, + with mock.patch.object(self.agent.dvr_agent.plugin_rpc, 'get_dvr_mac_address_by_host', - side_effect=oslo_messaging.RemoteError), - mock.patch.object(self.agent, 'int_br', new=int_br), - 
mock.patch.object(self.agent.dvr_agent, 'int_br', new=int_br), - ) as (gd_mac, _, _): + side_effect=oslo_messaging.RemoteError),\ + mock.patch.object(self.agent, 'int_br', new=int_br),\ + mock.patch.object(self.agent.dvr_agent, 'int_br', new=int_br): self.agent.dvr_agent.get_dvr_mac_address() self.assertIsNone(self.agent.dvr_agent.dvr_mac_address) self.assertFalse(self.agent.dvr_agent.in_distributed_mode()) @@ -1948,14 +1913,12 @@ class TestOvsDvrNeutronAgent(object): self._setup_for_dvr_test() self.agent.dvr_agent.dvr_mac_address = None int_br = mock.create_autospec(self.agent.int_br) - with contextlib.nested( - mock.patch.object(self.agent.dvr_agent.plugin_rpc, - 'get_dvr_mac_address_by_host', - side_effect=raise_timeout), - mock.patch.object(utils, "execute"), - mock.patch.object(self.agent, 'int_br', new=int_br), - mock.patch.object(self.agent.dvr_agent, 'int_br', new=int_br), - ) as (rpc_mock, execute_mock, _, _): + with mock.patch.object(self.agent.dvr_agent.plugin_rpc, + 'get_dvr_mac_address_by_host', + side_effect=raise_timeout),\ + mock.patch.object(utils, "execute"),\ + mock.patch.object(self.agent, 'int_br', new=int_br),\ + mock.patch.object(self.agent.dvr_agent, 'int_br', new=int_br): self.agent.dvr_agent.get_dvr_mac_address() self.assertIsNone(self.agent.dvr_agent.dvr_mac_address) self.assertFalse(self.agent.dvr_agent.in_distributed_mode()) @@ -1970,16 +1933,14 @@ class TestOvsDvrNeutronAgent(object): tun_br = mock.create_autospec(self.agent.tun_br) phys_br = mock.create_autospec(self.br_phys_cls('br-phys')) physical_network = 'physeth1' - with contextlib.nested( - mock.patch.object(self.agent, 'int_br', new=int_br), - mock.patch.object(self.agent, 'tun_br', new=tun_br), - mock.patch.dict(self.agent.phys_brs, - {physical_network: phys_br}), - mock.patch.object(self.agent.dvr_agent, 'int_br', new=int_br), - mock.patch.object(self.agent.dvr_agent, 'tun_br', new=tun_br), - mock.patch.dict(self.agent.dvr_agent.phys_brs, - {physical_network: phys_br}), - ): 
+ with mock.patch.object(self.agent, 'int_br', new=int_br),\ + mock.patch.object(self.agent, 'tun_br', new=tun_br),\ + mock.patch.dict(self.agent.phys_brs, + {physical_network: phys_br}),\ + mock.patch.object(self.agent.dvr_agent, 'int_br', new=int_br),\ + mock.patch.object(self.agent.dvr_agent, 'tun_br', new=tun_br),\ + mock.patch.dict(self.agent.dvr_agent.phys_brs, + {physical_network: phys_br}): self.agent.dvr_agent.\ dvr_mac_address_update( dvr_macs=[{'host': newhost, @@ -2008,16 +1969,14 @@ class TestOvsDvrNeutronAgent(object): int_br.reset_mock() tun_br.reset_mock() phys_br.reset_mock() - with contextlib.nested( - mock.patch.object(self.agent, 'int_br', new=int_br), - mock.patch.object(self.agent, 'tun_br', new=tun_br), - mock.patch.dict(self.agent.phys_brs, - {physical_network: phys_br}), - mock.patch.object(self.agent.dvr_agent, 'int_br', new=int_br), - mock.patch.object(self.agent.dvr_agent, 'tun_br', new=tun_br), - mock.patch.dict(self.agent.dvr_agent.phys_brs, - {physical_network: phys_br}), - ): + with mock.patch.object(self.agent, 'int_br', new=int_br),\ + mock.patch.object(self.agent, 'tun_br', new=tun_br),\ + mock.patch.dict(self.agent.phys_brs, + {physical_network: phys_br}),\ + mock.patch.object(self.agent.dvr_agent, 'int_br', new=int_br),\ + mock.patch.object(self.agent.dvr_agent, 'tun_br', new=tun_br),\ + mock.patch.dict(self.agent.dvr_agent.phys_brs, + {physical_network: phys_br}): self.agent.dvr_agent.dvr_mac_address_update(dvr_macs=[]) expected_on_int_br = [ mock.call.remove_dvr_mac_vlan( @@ -2047,13 +2006,13 @@ class TestOvsDvrNeutronAgent(object): reset_mocks = [mock.patch.object(self.agent.dvr_agent, method).start() for method in reset_methods] tun_br = mock.create_autospec(self.agent.tun_br) - with contextlib.nested( - mock.patch.object(self.agent, 'check_ovs_status', - return_value=constants.OVS_RESTARTED), - mock.patch.object(self.agent, '_agent_has_updates', - side_effect=TypeError('loop exit')), - mock.patch.object(self.agent, 
'tun_br', new=tun_br), - ): + with mock.patch.object(self.agent, + 'check_ovs_status', + return_value=constants.OVS_RESTARTED),\ + mock.patch.object(self.agent, + '_agent_has_updates', + side_effect=TypeError('loop exit')),\ + mock.patch.object(self.agent, 'tun_br', new=tun_br): # block RPC calls and bridge calls self.agent.setup_physical_bridges = mock.Mock() self.agent.setup_integration_br = mock.Mock() diff --git a/neutron/tests/unit/plugins/openvswitch/test_agent_scheduler.py b/neutron/tests/unit/plugins/openvswitch/test_agent_scheduler.py index cd6ea2f7c49..2682014d0af 100644 --- a/neutron/tests/unit/plugins/openvswitch/test_agent_scheduler.py +++ b/neutron/tests/unit/plugins/openvswitch/test_agent_scheduler.py @@ -13,7 +13,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -import contextlib import datetime import mock @@ -272,8 +271,7 @@ class OvsAgentSchedulerTestCase(OvsAgentSchedulerTestCaseBase): def test_network_auto_schedule_with_disabled(self): cfg.CONF.set_override('allow_overlapping_ips', True) - with contextlib.nested(self.subnet(), - self.subnet()): + with self.subnet(), self.subnet(): dhcp_rpc_cb = dhcp_rpc.DhcpRpcCallback() self._register_agent_states() hosta_id = self._get_agent_id(constants.AGENT_TYPE_DHCP, @@ -293,8 +291,7 @@ class OvsAgentSchedulerTestCase(OvsAgentSchedulerTestCaseBase): def test_network_auto_schedule_with_no_dhcp(self): cfg.CONF.set_override('allow_overlapping_ips', True) - with contextlib.nested(self.subnet(enable_dhcp=False), - self.subnet(enable_dhcp=False)): + with self.subnet(enable_dhcp=False), self.subnet(enable_dhcp=False): dhcp_rpc_cb = dhcp_rpc.DhcpRpcCallback() self._register_agent_states() hosta_id = self._get_agent_id(constants.AGENT_TYPE_DHCP, @@ -314,8 +311,7 @@ class OvsAgentSchedulerTestCase(OvsAgentSchedulerTestCaseBase): def test_network_auto_schedule_with_multiple_agents(self): cfg.CONF.set_override('dhcp_agents_per_network', 2) 
cfg.CONF.set_override('allow_overlapping_ips', True) - with contextlib.nested(self.subnet(), - self.subnet()): + with self.subnet(), self.subnet(): dhcp_rpc_cb = dhcp_rpc.DhcpRpcCallback() self._register_agent_states() hosta_id = self._get_agent_id(constants.AGENT_TYPE_DHCP, @@ -345,8 +341,7 @@ class OvsAgentSchedulerTestCase(OvsAgentSchedulerTestCaseBase): def test_network_auto_schedule_with_hosted(self): # one agent hosts all the networks, other hosts none cfg.CONF.set_override('allow_overlapping_ips', True) - with contextlib.nested(self.subnet(), - self.subnet()) as (sub1, sub2): + with self.subnet() as sub1, self.subnet(): dhcp_rpc_cb = dhcp_rpc.DhcpRpcCallback() self._register_agent_states() dhcp_rpc_cb.get_active_networks(self.adminContext, host=DHCP_HOSTA) @@ -474,10 +469,8 @@ class OvsAgentSchedulerTestCase(OvsAgentSchedulerTestCaseBase): is_eligible_agent = ('neutron.db.agentschedulers_db.' 'AgentSchedulerDbMixin.is_eligible_agent') dhcp_mixin = agentschedulers_db.DhcpAgentSchedulerDbMixin() - with contextlib.nested( - mock.patch(agent_startup), - mock.patch(is_eligible_agent) - ) as (startup, elig): + with mock.patch(agent_startup) as startup,\ + mock.patch(is_eligible_agent) as elig: tests = [(True, True), (True, False), (False, True), @@ -698,7 +691,7 @@ class OvsAgentSchedulerTestCase(OvsAgentSchedulerTestCaseBase): service_constants.L3_ROUTER_NAT) l3_rpc_cb = l3_rpc.L3RpcCallback() self._register_agent_states() - with contextlib.nested(self.router(), self.router()) as (r1, r2): + with self.router() as r1, self.router() as r2: # schedule the routers to host A l3_rpc_cb.sync_routers(self.adminContext, host=L3_HOSTA) @@ -823,8 +816,7 @@ class OvsAgentSchedulerTestCase(OvsAgentSchedulerTestCaseBase): self.assertEqual(L3_HOSTB, l3_agents_2['agents'][0]['host']) def test_router_auto_schedule_with_disabled(self): - with contextlib.nested(self.router(), - self.router()): + with self.router(), self.router(): l3_rpc_cb = l3_rpc.L3RpcCallback() 
self._register_agent_states() hosta_id = self._get_agent_id(constants.AGENT_TYPE_L3, @@ -844,8 +836,7 @@ class OvsAgentSchedulerTestCase(OvsAgentSchedulerTestCaseBase): self.assertEqual(0, num_hosta_routers) def test_router_auto_schedule_with_candidates(self): - with contextlib.nested(self.router(), - self.router()) as (router1, router2): + with self.router() as router1, self.router() as router2: l3_rpc_cb = l3_rpc.L3RpcCallback() agent = helpers.register_l3_agent( host=L3_HOSTA, router_id=router1['router']['id']) @@ -869,9 +860,8 @@ class OvsAgentSchedulerTestCase(OvsAgentSchedulerTestCaseBase): ret_a = l3_rpc_cb.sync_routers(self.adminContext, host=L3_HOSTA) self.assertEqual(0, len(ret_a)) - with contextlib.nested(self.router(), - self.router(), - self.router()) as routers: + with self.router() as v1, self.router() as v2, self.router() as v3: + routers = (v1, v2, v3) router_ids = [r['router']['id'] for r in routers] # Get all routers @@ -915,8 +905,11 @@ class OvsAgentSchedulerTestCase(OvsAgentSchedulerTestCaseBase): self._register_agent_states() hosta_id = self._get_agent_id(constants.AGENT_TYPE_L3, L3_HOSTA) - with contextlib.nested(self.router(), self.router(), - self.router(), self.router()) as routers: + with self.router() as v1,\ + self.router() as v2,\ + self.router() as v3,\ + self.router() as v4: + routers = (v1, v2, v3, v4) router_ids = [r['router']['id'] for r in routers] # Sync router1 (router1 is scheduled) _sync_router_with_ids([router_ids[0]], 1, 1, hosta_id) @@ -931,13 +924,10 @@ class OvsAgentSchedulerTestCase(OvsAgentSchedulerTestCaseBase): _sync_router_with_ids(router_ids, 4, 4, hosta_id) def test_router_schedule_with_candidates(self): - with contextlib.nested(self.router(), - self.router(), - self.subnet(), - self.subnet(cidr='10.0.3.0/24')) as (router1, - router2, - subnet1, - subnet2): + with self.router() as router1,\ + self.router() as router2,\ + self.subnet() as subnet1,\ + self.subnet(cidr='10.0.3.0/24') as subnet2: agent = 
helpers.register_l3_agent( host=L3_HOSTA, router_id=router1['router']['id']) self._router_interface_action('add', @@ -986,11 +976,9 @@ class OvsAgentSchedulerTestCase(OvsAgentSchedulerTestCaseBase): self.assertEqual(0, len(l3agents)) def test_router_sync_data(self): - with contextlib.nested( - self.subnet(), - self.subnet(cidr='10.0.2.0/24'), - self.subnet(cidr='10.0.3.0/24') - ) as (s1, s2, s3): + with self.subnet() as s1,\ + self.subnet(cidr='10.0.2.0/24') as s2,\ + self.subnet(cidr='10.0.3.0/24') as s3: self._register_agent_states() self._set_net_external(s1['subnet']['network_id']) data = {'router': {'tenant_id': uuidutils.generate_uuid()}} @@ -1287,15 +1275,13 @@ class OvsDhcpAgentNotifierTestCase(test_l3.L3NatTestCaseMixin, def _is_schedule_network_called(self, device_id): plugin = manager.NeutronManager.get_plugin() notifier = plugin.agent_notifiers[constants.AGENT_TYPE_DHCP] - with contextlib.nested( - self.subnet(), - mock.patch.object(plugin, - 'get_dhcp_agents_hosting_networks', - return_value=[]), - mock.patch.object(notifier, - '_schedule_network', - return_value=[]) - ) as (subnet, _, mock_sched): + with self.subnet() as subnet,\ + mock.patch.object(plugin, + 'get_dhcp_agents_hosting_networks', + return_value=[]),\ + mock.patch.object(notifier, + '_schedule_network', + return_value=[]) as mock_sched: with self.port(subnet=subnet, device_id=device_id): return mock_sched.called @@ -1347,14 +1333,12 @@ class OvsL3AgentNotifierTestCase(test_l3.L3NatTestCaseMixin, l3_plugin = (manager.NeutronManager.get_service_plugins() [service_constants.L3_ROUTER_NAT]) l3_notifier = l3_plugin.agent_notifiers[constants.AGENT_TYPE_L3] - with contextlib.nested( - mock.patch.object(l3_notifier.client, 'prepare', - return_value=l3_notifier.client), - mock.patch.object(l3_notifier.client, 'cast'), - self.router(), - ) as ( - mock_prepare, mock_cast, router1 - ): + with mock.patch.object( + l3_notifier.client, + 'prepare', + return_value=l3_notifier.client) as mock_prepare,\ + 
mock.patch.object(l3_notifier.client, 'cast') as mock_cast,\ + self.router() as router1: self._register_agent_states() hosta_id = self._get_agent_id(constants.AGENT_TYPE_L3, L3_HOSTA) @@ -1372,14 +1356,12 @@ class OvsL3AgentNotifierTestCase(test_l3.L3NatTestCaseMixin, l3_plugin = (manager.NeutronManager.get_service_plugins() [service_constants.L3_ROUTER_NAT]) l3_notifier = l3_plugin.agent_notifiers[constants.AGENT_TYPE_L3] - with contextlib.nested( - mock.patch.object(l3_notifier.client, 'prepare', - return_value=l3_notifier.client), - mock.patch.object(l3_notifier.client, 'cast'), - self.router(), - ) as ( - mock_prepare, mock_cast, router1 - ): + with mock.patch.object( + l3_notifier.client, + 'prepare', + return_value=l3_notifier.client) as mock_prepare,\ + mock.patch.object(l3_notifier.client, 'cast') as mock_cast,\ + self.router() as router1: self._register_agent_states() hosta_id = self._get_agent_id(constants.AGENT_TYPE_L3, L3_HOSTA) @@ -1399,13 +1381,11 @@ class OvsL3AgentNotifierTestCase(test_l3.L3NatTestCaseMixin, l3_plugin = (manager.NeutronManager.get_service_plugins() [service_constants.L3_ROUTER_NAT]) l3_notifier = l3_plugin.agent_notifiers[constants.AGENT_TYPE_L3] - with contextlib.nested( - mock.patch.object(l3_notifier.client, 'prepare', - return_value=l3_notifier.client), - mock.patch.object(l3_notifier.client, 'cast'), - ) as ( - mock_prepare, mock_cast - ): + with mock.patch.object( + l3_notifier.client, + 'prepare', + return_value=l3_notifier.client) as mock_prepare,\ + mock.patch.object(l3_notifier.client, 'cast') as mock_cast: agent_id = helpers.register_l3_agent(L3_HOSTA).id self._disable_agent(agent_id, admin_state_up=False) diff --git a/neutron/tests/unit/plugins/openvswitch/test_ovs_tunnel.py b/neutron/tests/unit/plugins/openvswitch/test_ovs_tunnel.py index 64d130b4bbe..fe8d9a00e25 100644 --- a/neutron/tests/unit/plugins/openvswitch/test_ovs_tunnel.py +++ b/neutron/tests/unit/plugins/openvswitch/test_ovs_tunnel.py @@ -14,7 +14,6 @@ # 
under the License. # -import contextlib import time import mock @@ -495,19 +494,18 @@ class TunnelTest(object): self.ovs_bridges[self.INT_BRIDGE].check_canary_table.return_value = \ constants.OVS_NORMAL - with contextlib.nested( - mock.patch.object(log.KeywordArgumentAdapter, 'exception'), - mock.patch.object(self.mod_agent.OVSNeutronAgent, - 'scan_ports'), - mock.patch.object(self.mod_agent.OVSNeutronAgent, - 'process_network_ports'), - mock.patch.object(self.mod_agent.OVSNeutronAgent, - 'tunnel_sync'), - mock.patch.object(time, 'sleep'), - mock.patch.object(self.mod_agent.OVSNeutronAgent, - 'update_stale_ofport_rules') - ) as (log_exception, scan_ports, process_network_ports, - ts, time_sleep, update_stale): + with mock.patch.object(log.KeywordArgumentAdapter, + 'exception') as log_exception,\ + mock.patch.object(self.mod_agent.OVSNeutronAgent, + 'scan_ports') as scan_ports,\ + mock.patch.object( + self.mod_agent.OVSNeutronAgent, + 'process_network_ports') as process_network_ports,\ + mock.patch.object(self.mod_agent.OVSNeutronAgent, + 'tunnel_sync'),\ + mock.patch.object(time, 'sleep'),\ + mock.patch.object(self.mod_agent.OVSNeutronAgent, + 'update_stale_ofport_rules') as update_stale: log_exception.side_effect = Exception( 'Fake exception to get out of the loop') scan_ports.side_effect = [reply2, reply3] diff --git a/neutron/tests/unit/plugins/sriovnicagent/test_eswitch_manager.py b/neutron/tests/unit/plugins/sriovnicagent/test_eswitch_manager.py index 3aacb722c75..3cc1b25d81c 100644 --- a/neutron/tests/unit/plugins/sriovnicagent/test_eswitch_manager.py +++ b/neutron/tests/unit/plugins/sriovnicagent/test_eswitch_manager.py @@ -14,7 +14,6 @@ # limitations under the License. -import contextlib import os import mock @@ -33,28 +32,23 @@ class TestCreateESwitchManager(base.BaseTestCase): def test_create_eswitch_mgr_fail(self): device_mappings = {'physnet1': 'p6p1'} - with contextlib.nested( - mock.patch("neutron.plugins.sriovnicagent.eswitch_manager." 
- "PciOsWrapper.scan_vf_devices", - side_effect=exc.InvalidDeviceError(dev_name="p6p1", - reason="device" - " not found")), - mock.patch("neutron.plugins.sriovnicagent.eswitch_manager." - "PciOsWrapper.is_assigned_vf", - return_value=True)): + with mock.patch("neutron.plugins.sriovnicagent.eswitch_manager." + "PciOsWrapper.scan_vf_devices", + side_effect=exc.InvalidDeviceError( + dev_name="p6p1", reason="device" " not found")),\ + mock.patch("neutron.plugins.sriovnicagent.eswitch_manager." + "PciOsWrapper.is_assigned_vf", return_value=True): with testtools.ExpectedException(exc.InvalidDeviceError): esm.ESwitchManager(device_mappings, None) def test_create_eswitch_mgr_ok(self): device_mappings = {'physnet1': 'p6p1'} - with contextlib.nested( - mock.patch("neutron.plugins.sriovnicagent.eswitch_manager." - "PciOsWrapper.scan_vf_devices", - return_value=self.SCANNED_DEVICES), - mock.patch("neutron.plugins.sriovnicagent.eswitch_manager." - "PciOsWrapper.is_assigned_vf", - return_value=True)): + with mock.patch("neutron.plugins.sriovnicagent.eswitch_manager." + "PciOsWrapper.scan_vf_devices", + return_value=self.SCANNED_DEVICES),\ + mock.patch("neutron.plugins.sriovnicagent.eswitch_manager." + "PciOsWrapper.is_assigned_vf", return_value=True): esm.ESwitchManager(device_mappings, None) @@ -72,13 +66,11 @@ class TestESwitchManagerApi(base.BaseTestCase): def setUp(self): super(TestESwitchManagerApi, self).setUp() device_mappings = {'physnet1': 'p6p1'} - with contextlib.nested( - mock.patch("neutron.plugins.sriovnicagent.eswitch_manager." - "PciOsWrapper.scan_vf_devices", - return_value=self.SCANNED_DEVICES), - mock.patch("neutron.plugins.sriovnicagent.eswitch_manager." - "PciOsWrapper.is_assigned_vf", - return_value=True)): + with mock.patch("neutron.plugins.sriovnicagent.eswitch_manager." + "PciOsWrapper.scan_vf_devices", + return_value=self.SCANNED_DEVICES),\ + mock.patch("neutron.plugins.sriovnicagent.eswitch_manager." 
+ "PciOsWrapper.is_assigned_vf", return_value=True): self.eswitch_mgr = esm.ESwitchManager(device_mappings, None) def test_get_assigned_devices(self): @@ -89,37 +81,32 @@ class TestESwitchManagerApi(base.BaseTestCase): self.assertEqual(set([self.ASSIGNED_MAC]), result) def test_get_device_status_true(self): - with contextlib.nested( - mock.patch("neutron.plugins.sriovnicagent.eswitch_manager." - "EmbSwitch.get_pci_device", - return_value=self.ASSIGNED_MAC), - mock.patch("neutron.plugins.sriovnicagent.eswitch_manager." - "EmbSwitch.get_device_state", - return_value=True)): + with mock.patch("neutron.plugins.sriovnicagent.eswitch_manager." + "EmbSwitch.get_pci_device", + return_value=self.ASSIGNED_MAC),\ + mock.patch("neutron.plugins.sriovnicagent.eswitch_manager." + "EmbSwitch.get_device_state", return_value=True): result = self.eswitch_mgr.get_device_state(self.ASSIGNED_MAC, self.PCI_SLOT) self.assertTrue(result) def test_get_device_status_false(self): - with contextlib.nested( - mock.patch("neutron.plugins.sriovnicagent.eswitch_manager." - "EmbSwitch.get_pci_device", - return_value=self.ASSIGNED_MAC), - mock.patch("neutron.plugins.sriovnicagent.eswitch_manager." - "EmbSwitch.get_device_state", - return_value=False)): + with mock.patch("neutron.plugins.sriovnicagent.eswitch_manager." + "EmbSwitch.get_pci_device", + return_value=self.ASSIGNED_MAC),\ + mock.patch("neutron.plugins.sriovnicagent.eswitch_manager." + "EmbSwitch.get_device_state", + return_value=False): result = self.eswitch_mgr.get_device_state(self.ASSIGNED_MAC, self.PCI_SLOT) self.assertFalse(result) def test_get_device_status_mismatch(self): - with contextlib.nested( - mock.patch("neutron.plugins.sriovnicagent.eswitch_manager." - "EmbSwitch.get_pci_device", - return_value=self.ASSIGNED_MAC), - mock.patch("neutron.plugins.sriovnicagent.eswitch_manager." - "EmbSwitch.get_device_state", - return_value=True)): + with mock.patch("neutron.plugins.sriovnicagent.eswitch_manager." 
+ "EmbSwitch.get_pci_device", + return_value=self.ASSIGNED_MAC),\ + mock.patch("neutron.plugins.sriovnicagent.eswitch_manager." + "EmbSwitch.get_device_state", return_value=True): with mock.patch("neutron.plugins.sriovnicagent.eswitch_manager." "LOG.warning") as log_mock: result = self.eswitch_mgr.get_device_state(self.WRONG_MAC, @@ -131,22 +118,20 @@ class TestESwitchManagerApi(base.BaseTestCase): self.assertFalse(result) def test_set_device_status(self): - with contextlib.nested( - mock.patch("neutron.plugins.sriovnicagent.eswitch_manager." - "EmbSwitch.get_pci_device", - return_value=self.ASSIGNED_MAC), - mock.patch("neutron.plugins.sriovnicagent.eswitch_manager." - "EmbSwitch.set_device_state")): + with mock.patch("neutron.plugins.sriovnicagent.eswitch_manager." + "EmbSwitch.get_pci_device", + return_value=self.ASSIGNED_MAC),\ + mock.patch("neutron.plugins.sriovnicagent.eswitch_manager." + "EmbSwitch.set_device_state"): self.eswitch_mgr.set_device_state(self.ASSIGNED_MAC, self.PCI_SLOT, True) def test_set_device_status_mismatch(self): - with contextlib.nested( - mock.patch("neutron.plugins.sriovnicagent.eswitch_manager." - "EmbSwitch.get_pci_device", - return_value=self.ASSIGNED_MAC), - mock.patch("neutron.plugins.sriovnicagent.eswitch_manager." - "EmbSwitch.set_device_state")): + with mock.patch("neutron.plugins.sriovnicagent.eswitch_manager." + "EmbSwitch.get_pci_device", + return_value=self.ASSIGNED_MAC),\ + mock.patch("neutron.plugins.sriovnicagent.eswitch_manager." + "EmbSwitch.set_device_state"): with mock.patch("neutron.plugins.sriovnicagent.eswitch_manager." "LOG.warning") as log_mock: self.eswitch_mgr.set_device_state(self.WRONG_MAC, @@ -209,13 +194,11 @@ class TestEmbSwitch(base.BaseTestCase): exclude_devices) def test_get_assigned_devices(self): - with contextlib.nested( - mock.patch("neutron.plugins.sriovnicagent.pci_lib." 
- "PciDeviceIPWrapper.get_assigned_macs", - return_value=[self.ASSIGNED_MAC]), - mock.patch("neutron.plugins.sriovnicagent.eswitch_manager." - "PciOsWrapper.is_assigned_vf", - return_value=True)): + with mock.patch("neutron.plugins.sriovnicagent.pci_lib." + "PciDeviceIPWrapper.get_assigned_macs", + return_value=[self.ASSIGNED_MAC]),\ + mock.patch("neutron.plugins.sriovnicagent.eswitch_manager." + "PciOsWrapper.is_assigned_vf", return_value=True): result = self.emb_switch.get_assigned_devices() self.assertEqual([self.ASSIGNED_MAC], result) @@ -257,24 +240,20 @@ class TestEmbSwitch(base.BaseTestCase): self.WRONG_PCI_SLOT, True) def test_get_pci_device(self): - with contextlib.nested( - mock.patch("neutron.plugins.sriovnicagent.pci_lib." - "PciDeviceIPWrapper.get_assigned_macs", - return_value=[self.ASSIGNED_MAC]), - mock.patch("neutron.plugins.sriovnicagent.eswitch_manager." - "PciOsWrapper.is_assigned_vf", - return_value=True)): + with mock.patch("neutron.plugins.sriovnicagent.pci_lib." + "PciDeviceIPWrapper.get_assigned_macs", + return_value=[self.ASSIGNED_MAC]),\ + mock.patch("neutron.plugins.sriovnicagent.eswitch_manager." + "PciOsWrapper.is_assigned_vf", return_value=True): result = self.emb_switch.get_pci_device(self.PCI_SLOT) self.assertEqual(self.ASSIGNED_MAC, result) def test_get_pci_device_fail(self): - with contextlib.nested( - mock.patch("neutron.plugins.sriovnicagent.pci_lib." - "PciDeviceIPWrapper.get_assigned_macs", - return_value=[self.ASSIGNED_MAC]), - mock.patch("neutron.plugins.sriovnicagent.eswitch_manager." - "PciOsWrapper.is_assigned_vf", - return_value=True)): + with mock.patch("neutron.plugins.sriovnicagent.pci_lib." + "PciDeviceIPWrapper.get_assigned_macs", + return_value=[self.ASSIGNED_MAC]),\ + mock.patch("neutron.plugins.sriovnicagent.eswitch_manager." 
+ "PciOsWrapper.is_assigned_vf", return_value=True): result = self.emb_switch.get_pci_device(self.WRONG_PCI_SLOT) self.assertIsNone(result) @@ -313,15 +292,10 @@ class TestPciOsWrapper(base.BaseTestCase): file_name = os.path.basename(file_path) return self.LINKS[file_name] - with contextlib.nested( - mock.patch("os.path.isdir", - return_value=True), - mock.patch("os.listdir", - return_value=self.DIR_CONTENTS), - mock.patch("os.path.islink", - return_value=True), - mock.patch("os.readlink", - side_effect=_get_link),): + with mock.patch("os.path.isdir", return_value=True),\ + mock.patch("os.listdir", return_value=self.DIR_CONTENTS),\ + mock.patch("os.path.islink", return_value=True),\ + mock.patch("os.readlink", side_effect=_get_link): result = esm.PciOsWrapper.scan_vf_devices(self.DEV_NAME) self.assertEqual(self.PCI_SLOTS, result) @@ -332,21 +306,16 @@ class TestPciOsWrapper(base.BaseTestCase): self.DEV_NAME) def test_scan_vf_devices_no_content(self): - with contextlib.nested( - mock.patch("os.path.isdir", - return_value=True), - mock.patch("os.listdir", - return_value=[])): + with mock.patch("os.path.isdir", return_value=True),\ + mock.patch("os.listdir", return_value=[]): self.assertRaises(exc.InvalidDeviceError, esm.PciOsWrapper.scan_vf_devices, self.DEV_NAME) def test_scan_vf_devices_no_match(self): - with contextlib.nested( - mock.patch("os.path.isdir", - return_value=True), - mock.patch("os.listdir", - return_value=self.DIR_CONTENTS_NO_MATCH)): + with mock.patch("os.path.isdir", return_value=True),\ + mock.patch("os.listdir", + return_value=self.DIR_CONTENTS_NO_MATCH): self.assertRaises(exc.InvalidDeviceError, esm.PciOsWrapper.scan_vf_devices, self.DEV_NAME) @@ -368,11 +337,8 @@ class TestPciOsWrapper(base.BaseTestCase): def _glob(file_path): return ["upper_macvtap0"] if macvtap_exists else [] - with contextlib.nested( - mock.patch("os.path.isdir", - return_value=True), - mock.patch("glob.glob", - side_effect=_glob)): + with mock.patch("os.path.isdir", 
return_value=True),\ + mock.patch("glob.glob", side_effect=_glob): result = esm.PciOsWrapper.is_assigned_vf(self.DEV_NAME, self.VF_INDEX) self.assertEqual(macvtap_exists, result) diff --git a/neutron/tests/unit/scheduler/test_dhcp_agent_scheduler.py b/neutron/tests/unit/scheduler/test_dhcp_agent_scheduler.py index c43e2a22055..9faa8ab5bfb 100644 --- a/neutron/tests/unit/scheduler/test_dhcp_agent_scheduler.py +++ b/neutron/tests/unit/scheduler/test_dhcp_agent_scheduler.py @@ -13,8 +13,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -import contextlib - import mock from oslo_config import cfg from oslo_utils import importutils @@ -185,13 +183,14 @@ class TestNetworksFailover(TestDhcpSchedulerBaseTestCase, self._test_schedule_bind_network([agents[0]], self.network_id) self._save_networks(["foo-network-2"]) self._test_schedule_bind_network([agents[1]], "foo-network-2") - with contextlib.nested( - mock.patch.object(self, 'remove_network_from_dhcp_agent'), - mock.patch.object(self, 'schedule_network', - return_value=[agents[1]]), - mock.patch.object(self, 'get_network', create=True, - return_value={'id': self.network_id}) - ) as (rn, sch, getn): + with mock.patch.object(self, 'remove_network_from_dhcp_agent') as rn,\ + mock.patch.object(self, + 'schedule_network', + return_value=[agents[1]]) as sch,\ + mock.patch.object(self, + 'get_network', + create=True, + return_value={'id': self.network_id}): notifier = mock.MagicMock() self.agent_notifiers[constants.AGENT_TYPE_DHCP] = notifier self.remove_networks_from_down_agents() @@ -204,15 +203,16 @@ class TestNetworksFailover(TestDhcpSchedulerBaseTestCase, def _test_failed_rescheduling(self, rn_side_effect=None): agents = self._create_and_set_agents_down(['host-a'], 1) self._test_schedule_bind_network([agents[0]], self.network_id) - with contextlib.nested( - mock.patch.object( - self, 'remove_network_from_dhcp_agent', - side_effect=rn_side_effect), - 
mock.patch.object(self, 'schedule_network', - return_value=None), - mock.patch.object(self, 'get_network', create=True, - return_value={'id': self.network_id}) - ) as (rn, sch, getn): + with mock.patch.object(self, + 'remove_network_from_dhcp_agent', + side_effect=rn_side_effect) as rn,\ + mock.patch.object(self, + 'schedule_network', + return_value=None) as sch,\ + mock.patch.object(self, + 'get_network', + create=True, + return_value={'id': self.network_id}): notifier = mock.MagicMock() self.agent_notifiers[constants.AGENT_TYPE_DHCP] = notifier self.remove_networks_from_down_agents() diff --git a/neutron/tests/unit/scheduler/test_l3_agent_scheduler.py b/neutron/tests/unit/scheduler/test_l3_agent_scheduler.py index 07f06db7a11..87bbc9e0306 100644 --- a/neutron/tests/unit/scheduler/test_l3_agent_scheduler.py +++ b/neutron/tests/unit/scheduler/test_l3_agent_scheduler.py @@ -128,10 +128,10 @@ class L3SchedulerBaseTestCase(base.BaseTestCase): def test_auto_schedule_routers(self): self.plugin.get_enabled_agent_on_host.return_value = [mock.ANY] - with contextlib.nested( - mock.patch.object(self.scheduler, '_get_routers_to_schedule'), - mock.patch.object(self.scheduler, '_get_routers_can_schedule') - ) as (gs, gr): + with mock.patch.object(self.scheduler, + '_get_routers_to_schedule') as gs,\ + mock.patch.object(self.scheduler, + '_get_routers_can_schedule') as gr: result = self.scheduler.auto_schedule_routers( self.plugin, mock.ANY, mock.ANY, mock.ANY) self.assertTrue(self.plugin.get_enabled_agent_on_host.called) @@ -159,10 +159,12 @@ class L3SchedulerBaseTestCase(base.BaseTestCase): def test_auto_schedule_routers_no_target_routers(self): self.plugin.get_enabled_agent_on_host.return_value = [mock.ANY] - with contextlib.nested( - mock.patch.object(self.scheduler, '_get_routers_to_schedule'), - mock.patch.object(self.scheduler, '_get_routers_can_schedule') - ) as (mock_unscheduled_routers, mock_target_routers): + with mock.patch.object( + self.scheduler, + 
'_get_routers_to_schedule') as mock_unscheduled_routers,\ + mock.patch.object( + self.scheduler, + '_get_routers_can_schedule') as mock_target_routers: mock_unscheduled_routers.return_value = mock.ANY mock_target_routers.return_value = None result = self.scheduler.auto_schedule_routers( @@ -250,12 +252,11 @@ class L3SchedulerBaseTestCase(base.BaseTestCase): def _test__bind_routers_ha(self, has_binding): routers = [{'id': 'foo_router', 'ha': True, 'tenant_id': '42'}] agent = agents_db.Agent(id='foo_agent') - with contextlib.nested( - mock.patch.object(self.scheduler, '_router_has_binding', - return_value=has_binding), - mock.patch.object(self.scheduler, '_create_ha_router_binding') - ) as ( - mock_has_binding, mock_bind): + with mock.patch.object(self.scheduler, + '_router_has_binding', + return_value=has_binding) as mock_has_binding,\ + mock.patch.object(self.scheduler, + '_create_ha_router_binding') as mock_bind: self.scheduler._bind_routers(mock.ANY, mock.ANY, routers, agent) mock_has_binding.assert_called_once_with(mock.ANY, 'foo_router', 'foo_agent') @@ -336,12 +337,11 @@ class L3SchedulerTestBaseMixin(object): router['router']['external_gateway_info'] = external_gw if already_scheduled: self._test_schedule_bind_router(agent, router) - with contextlib.nested( - mock.patch.object(self, "validate_agent_router_combination"), - mock.patch.object(self, "create_router_to_agent_binding"), - mock.patch('neutron.db.l3_db.L3_NAT_db_mixin.get_router', - return_value=router['router']) - ) as (valid, auto_s, gr): + with mock.patch.object(self, "validate_agent_router_combination"),\ + mock.patch.object(self, + "create_router_to_agent_binding") as auto_s,\ + mock.patch('neutron.db.l3_db.L3_NAT_db_mixin.get_router', + return_value=router['router']): self.add_router_to_l3_agent(self.adminContext, agent_id, router['router']['id']) self.assertNotEqual(already_scheduled, auto_s.called) @@ -377,10 +377,9 @@ class L3SchedulerTestBaseMixin(object): expected_exception=None): router = 
self._create_router_for_l3_agent_dvr_test( distributed=distributed, external_gw=external_gw) - with contextlib.nested( - mock.patch.object(self, "create_router_to_agent_binding"), - mock.patch('neutron.db.l3_db.L3_NAT_db_mixin.get_router', - return_value=router['router'])): + with mock.patch.object(self, "create_router_to_agent_binding"),\ + mock.patch('neutron.db.l3_db.L3_NAT_db_mixin.get_router', + return_value=router['router']): self.assertRaises(expected_exception, self.add_router_to_l3_agent, self.adminContext, agent_id, @@ -416,12 +415,12 @@ class L3SchedulerTestBaseMixin(object): router = self._create_router_for_l3_agent_dvr_test( distributed=True, external_gw=external_gw_info) - with contextlib.nested( - mock.patch.object(self, "validate_agent_router_combination"), - mock.patch.object(self, "create_router_to_agent_binding"), - mock.patch('neutron.db.l3_db.L3_NAT_db_mixin.get_router', - return_value=router['router']) - ) as (valid_agent_rtr, rtr_agent_binding, get_rtr): + with mock.patch.object(self, "validate_agent_router_combination"),\ + mock.patch.object( + self, + "create_router_to_agent_binding") as rtr_agent_binding,\ + mock.patch('neutron.db.l3_db.L3_NAT_db_mixin.get_router', + return_value=router['router']): self.add_router_to_l3_agent(self.adminContext, agent_id, router['router']['id']) @@ -470,10 +469,10 @@ class L3SchedulerTestBaseMixin(object): 'distributed': True } plugin.get_router.return_value = sync_router - with contextlib.nested( - mock.patch.object(scheduler, 'bind_router'), - mock.patch.object( - plugin, 'get_snat_bindings', return_value=False)): + with mock.patch.object(scheduler, 'bind_router'),\ + mock.patch.object(plugin, + 'get_snat_bindings', + return_value=False): scheduler._schedule_router( plugin, self.adminContext, 'foo_router_id', None) expected_calls = [ @@ -954,15 +953,16 @@ class L3DvrSchedulerTestCase(testlib_api.SqlTestCase): 'distributed': True, } - with contextlib.nested( - 
mock.patch('neutron.db.db_base_plugin_v2.NeutronDbPluginV2' - '.get_ports', return_value=[dvr_port]), - mock.patch('neutron.manager.NeutronManager.get_service_plugins', - return_value=mock.Mock()), - mock.patch('neutron.db.l3_db.L3_NAT_db_mixin.get_router', - return_value=r1), - mock.patch('neutron.api.rpc.agentnotifiers.l3_rpc_agent_api' - '.L3AgentNotifyAPI')): + with mock.patch( + 'neutron.db.db_base_plugin_v2.NeutronDbPluginV2' '.get_ports', + return_value=[dvr_port]),\ + mock.patch( + 'neutron.manager.NeutronManager.get_service_plugins', + return_value=mock.Mock()),\ + mock.patch('neutron.db.l3_db.L3_NAT_db_mixin.get_router', + return_value=r1),\ + mock.patch('neutron.api.rpc.agentnotifiers.l3_rpc_agent_api' + '.L3AgentNotifyAPI'): self.dut.dvr_update_router_addvm(self.adminContext, port) def test_get_dvr_routers_by_portid(self): @@ -982,11 +982,11 @@ class L3DvrSchedulerTestCase(testlib_api.SqlTestCase): 'distributed': True, } - with contextlib.nested( - mock.patch('neutron.db.db_base_plugin_v2.NeutronDbPluginV2' - '.get_port', return_value=dvr_port), - mock.patch('neutron.db.db_base_plugin_v2.NeutronDbPluginV2' - '.get_ports', return_value=[dvr_port])): + with mock.patch( + 'neutron.db.db_base_plugin_v2.NeutronDbPluginV2' '.get_port', + return_value=dvr_port),\ + mock.patch('neutron.db.db_base_plugin_v2.NeutronDbPluginV2' + '.get_ports', return_value=[dvr_port]): router_id = self.dut.get_dvr_routers_by_portid(self.adminContext, dvr_port['id']) self.assertEqual(router_id.pop(), r1['id']) @@ -1008,9 +1008,9 @@ class L3DvrSchedulerTestCase(testlib_api.SqlTestCase): 'distributed': True, } - with contextlib.nested( - mock.patch('neutron.db.db_base_plugin_v2.NeutronDbPluginV2' - '.get_ports', return_value=[dvr_port])): + with mock.patch( + 'neutron.db.db_base_plugin_v2.NeutronDbPluginV2' '.get_ports', + return_value=[dvr_port]): sub_ids = self.dut.get_subnet_ids_on_router(self.adminContext, r1['id']) self.assertEqual(sub_ids.pop(), @@ -1034,15 +1034,16 @@ class 
L3DvrSchedulerTestCase(testlib_api.SqlTestCase): 'id': 'r1', 'distributed': True, } - with contextlib.nested( - mock.patch('neutron.db.db_base_plugin_v2.NeutronDbPluginV2' - '.get_ports', return_value=[dvr_port]), - mock.patch('neutron.manager.NeutronManager.get_service_plugins', - return_value=mock.Mock()), - mock.patch('neutron.db.l3_db.L3_NAT_db_mixin.get_router', - return_value=r1), - mock.patch('neutron.api.rpc.agentnotifiers.l3_rpc_agent_api' - '.L3AgentNotifyAPI')): + with mock.patch( + 'neutron.db.db_base_plugin_v2.NeutronDbPluginV2' '.get_ports', + return_value=[dvr_port]),\ + mock.patch( + 'neutron.manager.NeutronManager.get_service_plugins', + return_value=mock.Mock()),\ + mock.patch('neutron.db.l3_db.L3_NAT_db_mixin.get_router', + return_value=r1),\ + mock.patch('neutron.api.rpc.agentnotifiers.l3_rpc_agent_api' + '.L3AgentNotifyAPI'): sub_ids = self.dut.get_subnet_ids_on_router(self.adminContext, r1['id']) result = self.dut.check_ports_active_on_host_and_subnet( @@ -1109,16 +1110,16 @@ class L3DvrSchedulerTestCase(testlib_api.SqlTestCase): vm_port_id = vm_port['id'] fakePortDB = FakePortDB([vm_port]) - with contextlib.nested( - mock.patch.object(my_context, 'elevated', - return_value=self.adminContext), - mock.patch('neutron.plugins.ml2.db.' - 'get_port_binding_host', return_value=vm_port_host), - mock.patch('neutron.db.db_base_plugin_v2.NeutronDbPluginV2.' - 'get_ports', side_effect=fakePortDB.get_ports), - mock.patch('neutron.db.db_base_plugin_v2.NeutronDbPluginV2.' - 'get_port', return_value=vm_port)) as ( - _, mock_get_port_binding_host, _, _): + with mock.patch.object(my_context, + 'elevated', + return_value=self.adminContext),\ + mock.patch( + 'neutron.plugins.ml2.db.get_port_binding_host', + return_value=vm_port_host) as mock_get_port_binding_host,\ + mock.patch('neutron.db.db_base_plugin_v2.NeutronDbPluginV2.' + 'get_ports', side_effect=fakePortDB.get_ports),\ + mock.patch('neutron.db.db_base_plugin_v2.NeutronDbPluginV2.' 
+ 'get_port', return_value=vm_port): routers = self.dut.dvr_deletens_if_no_port(my_context, vm_port_id) self.assertEqual([], routers) @@ -1155,20 +1156,21 @@ class L3DvrSchedulerTestCase(testlib_api.SqlTestCase): 'host': vm_port_host } - with contextlib.nested( - mock.patch.object(my_context, 'elevated', - return_value=self.adminContext), - mock.patch('neutron.plugins.ml2.db.get_port_binding_host', - return_value=vm_port_host), - mock.patch('neutron.db.db_base_plugin_v2.NeutronDbPluginV2.' - 'get_port', side_effect=fakePortDB.get_port), - mock.patch('neutron.db.db_base_plugin_v2.NeutronDbPluginV2.' - 'get_ports', side_effect=fakePortDB.get_ports), - mock.patch('neutron.plugins.ml2.db.get_dvr_port_binding_by_host', - return_value=vm_port_binding)) as (_, - mock_get_port_binding_host, _, - mock_get_ports, - mock_get_dvr_port_binding_by_host): + with mock.patch.object(my_context, + 'elevated', + return_value=self.adminContext),\ + mock.patch( + 'neutron.plugins.ml2.db.get_port_binding_host', + return_value=vm_port_host) as mock_get_port_binding_host,\ + mock.patch('neutron.db.db_base_plugin_v2.NeutronDbPluginV2.' + 'get_port', side_effect=fakePortDB.get_port),\ + mock.patch('neutron.db.db_base_plugin_v2.NeutronDbPluginV2.' + 'get_ports', side_effect=fakePortDB.get_ports) as\ + mock_get_ports,\ + mock.patch('neutron.plugins.ml2.db.' + 'get_dvr_port_binding_by_host', + return_value=vm_port_binding) as\ + mock_get_dvr_port_binding_by_host: routers = self.dut.dvr_deletens_if_no_port( my_context, deleted_vm_port_id) @@ -1220,24 +1222,24 @@ class L3DvrSchedulerTestCase(testlib_api.SqlTestCase): vm_port_host, constants.AGENT_TYPE_L3) - with contextlib.nested( - mock.patch.object(my_context, 'elevated', - return_value=self.adminContext), - mock.patch('neutron.plugins.ml2.db.get_port_binding_host', - return_value=vm_port_host), - mock.patch('neutron.db.db_base_plugin_v2.NeutronDbPluginV2.' 
- 'get_port', side_effect=fakePortDB.get_port), - mock.patch('neutron.db.db_base_plugin_v2.NeutronDbPluginV2.' - 'get_ports', side_effect=fakePortDB.get_ports), - mock.patch('neutron.plugins.ml2.db.get_dvr_port_binding_by_host', - return_value=dvr_port_binding), - mock.patch('neutron.db.agents_db.AgentDbMixin.' - '_get_agent_by_type_and_host', - return_value=l3_agent_on_vm_host)) as (_, - mock_get_port_binding_host, _, - mock_get_ports, - mock_get_dvr_port_binding_by_host, - mock__get_agent_by_type_and_host): + with mock.patch.object(my_context, + 'elevated', + return_value=self.adminContext),\ + mock.patch( + 'neutron.plugins.ml2.db.get_port_binding_host', + return_value=vm_port_host) as mock_get_port_binding_host,\ + mock.patch('neutron.db.db_base_plugin_v2.NeutronDbPluginV2.' + 'get_port', side_effect=fakePortDB.get_port),\ + mock.patch('neutron.db.db_base_plugin_v2.NeutronDbPluginV2.' + 'get_ports', side_effect=fakePortDB.get_ports) as\ + mock_get_ports,\ + mock.patch('neutron.plugins.ml2.db.' + 'get_dvr_port_binding_by_host', + return_value=dvr_port_binding) as\ + mock_get_dvr_port_binding_by_host,\ + mock.patch('neutron.db.agents_db.AgentDbMixin.' 
+ '_get_agent_by_type_and_host', + return_value=l3_agent_on_vm_host): routers = self.dut.dvr_deletens_if_no_port( my_context, deleted_vm_port_id) @@ -1299,25 +1301,30 @@ class L3DvrSchedulerTestCase(testlib_api.SqlTestCase): def test_schedule_snat_router_duplicate_entry(self): self._prepare_schedule_snat_tests() - with contextlib.nested( - mock.patch.object(self.dut, 'get_l3_agents'), - mock.patch.object(self.dut, 'get_snat_candidates'), - mock.patch.object(self.dut, 'bind_snat_servicenode', - side_effect=db_exc.DBDuplicateEntry()), - mock.patch.object(self.dut, 'bind_dvr_router_servicenode') - ) as (mock_gl3, mock_snat_canidates, mock_bind_snat, mock_bind_dvr): + with mock.patch.object(self.dut, 'get_l3_agents'),\ + mock.patch.object(self.dut, 'get_snat_candidates'),\ + mock.patch.object( + self.dut, + 'bind_snat_servicenode', + side_effect=db_exc.DBDuplicateEntry()) as mock_bind_snat,\ + mock.patch.object( + self.dut, + 'bind_dvr_router_servicenode') as mock_bind_dvr: self.dut.schedule_snat_router(self.adminContext, 'foo', 'bar') self.assertTrue(mock_bind_snat.called) self.assertFalse(mock_bind_dvr.called) def test_schedule_snat_router_return_value(self): agent, router = self._prepare_schedule_snat_tests() - with contextlib.nested( - mock.patch.object(self.dut, 'get_l3_agents'), - mock.patch.object(self.dut, 'get_snat_candidates'), - mock.patch.object(self.dut, 'bind_snat_servicenode'), - mock.patch.object(self.dut, 'bind_dvr_router_servicenode') - ) as (mock_gl3, mock_snat_canidates, mock_bind_snat, mock_bind_dvr): + with mock.patch.object(self.dut, 'get_l3_agents'),\ + mock.patch.object( + self.dut, + 'get_snat_candidates') as mock_snat_canidates,\ + mock.patch.object(self.dut, + 'bind_snat_servicenode') as mock_bind_snat,\ + mock.patch.object( + self.dut, + 'bind_dvr_router_servicenode') as mock_bind_dvr: mock_snat_canidates.return_value = [agent] mock_bind_snat.return_value = [agent] mock_bind_dvr.return_value = [agent] @@ -1330,11 +1337,11 @@ class 
L3DvrSchedulerTestCase(testlib_api.SqlTestCase): 'id': 'foo_router_id', 'distributed': True } - with contextlib.nested( - mock.patch.object(self.dut, 'get_router'), - mock.patch.object(self.dut, 'get_snat_bindings'), - mock.patch.object(self.dut, 'unbind_snat_servicenode') - ) as (mock_rd, mock_snat_bind, mock_unbind): + with mock.patch.object(self.dut, 'get_router') as mock_rd,\ + mock.patch.object(self.dut, + 'get_snat_bindings') as mock_snat_bind,\ + mock.patch.object(self.dut, + 'unbind_snat_servicenode') as mock_unbind: mock_rd.return_value = router mock_snat_bind.return_value = False self.dut.schedule_snat_router( @@ -1343,15 +1350,14 @@ class L3DvrSchedulerTestCase(testlib_api.SqlTestCase): def test_schedule_snat_router_with_snat_candidates(self): agent, router = self._prepare_schedule_snat_tests() - with contextlib.nested( - mock.patch.object(query.Query, 'first'), - mock.patch.object(self.dut, 'get_l3_agents'), - mock.patch.object(self.dut, 'get_snat_candidates'), - mock.patch.object(self.dut, 'get_router'), - mock.patch.object(self.dut, 'bind_dvr_router_servicenode'), - mock.patch.object(self.dut, 'bind_snat_servicenode')) as ( - mock_query, mock_agents, - mock_candidates, mock_rd, mock_dvr, mock_bind): + with mock.patch.object(query.Query, 'first') as mock_query,\ + mock.patch.object(self.dut, 'get_l3_agents') as mock_agents,\ + mock.patch.object(self.dut, + 'get_snat_candidates') as mock_candidates,\ + mock.patch.object(self.dut, 'get_router') as mock_rd,\ + mock.patch.object(self.dut, 'bind_dvr_router_servicenode'),\ + mock.patch.object(self.dut, + 'bind_snat_servicenode') as mock_bind: mock_rd.return_value = router mock_query.return_value = [] mock_agents.return_value = [agent] @@ -1373,12 +1379,13 @@ class L3DvrSchedulerTestCase(testlib_api.SqlTestCase): binding = l3_dvrscheduler_db.CentralizedSnatL3AgentBinding( router_id=router_id, l3_agent_id='foo_l3_agent_id', l3_agent=agents_db.Agent()) - with contextlib.nested( - mock.patch.object(query.Query, 
'one'), - mock.patch.object(self.adminContext.session, 'delete'), - mock.patch.object(query.Query, 'delete'), - mock.patch.object(self.dut, 'get_subnet_ids_on_router')) as ( - mock_query, mock_session, mock_delete, mock_get_subnets): + with mock.patch.object(query.Query, 'one') as mock_query,\ + mock.patch.object(self.adminContext.session, + 'delete') as mock_session,\ + mock.patch.object(query.Query, 'delete') as mock_delete,\ + mock.patch.object( + self.dut, + 'get_subnet_ids_on_router') as mock_get_subnets: mock_query.return_value = binding mock_get_subnets.return_value = ['foo_subnet_id'] self.dut.unbind_snat_servicenode(self.adminContext, router_id) From 5c7e7c0ca7229ac7bb32964378a8bbef142bcdbf Mon Sep 17 00:00:00 2001 From: Joe Gordon Date: Fri, 29 May 2015 14:28:34 -0700 Subject: [PATCH 085/292] Fix formatting of core-reviewers doc Fix some RST formatting issues with the core-reviewers policy document. When reading the RST rendered version of that document at http://docs.openstack.org/developer/neutron/policies/core-reviewers.html I noticed a few rendering issues where were bothering me, so I fixed them. Change-Id: Ic6eedc2bb18f6bbb6424542dbf4a88ed52ebea1d --- doc/source/policies/core-reviewers.rst | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/doc/source/policies/core-reviewers.rst b/doc/source/policies/core-reviewers.rst index 6ac4f25d8a0..808f97de9f0 100644 --- a/doc/source/policies/core-reviewers.rst +++ b/doc/source/policies/core-reviewers.rst @@ -81,6 +81,7 @@ The following are the current Neutron Lieutenants. +------------------------+---------------------------+----------------------+ Some notes on the above: + * "Built-In Control Plane" means the L2 agents, DHCP agents, SGs, metadata agents and the portion of ML2 which communicates with the agents. * The client includes commands installed server side. 
@@ -124,6 +125,7 @@ responsibility over the areas of code listed below: Neutron Core Reviewer Team -------------------------- Neutron core reviewers have merge rights to the following git repositories: + * `openstack/neutron `_ * `openstack/python-neutronclient `_ @@ -135,18 +137,21 @@ Neutron FWaaS Core Reviewer Team -------------------------------- Neutron FWaaS core reviewers have merge rights to the following git repositories: + * `openstack/neutron-fwaas `_ Neutron LBaaS Core Reviewer Team -------------------------------- Neutron LBaaS core reviewers have merge rights to the following git repositories: + * `openstack/neutron-lbaas `_ Neutron VPNaaS Core Reviewer Team --------------------------------- Neutron VPNaaS core reviewers have merge rights to the following git repositories: + * `openstack/neutron-vpnaas `_ Neutron Core Reviewer Teams for Plugins and Drivers @@ -155,6 +160,7 @@ The plugin decomposition effort has led to having many drivers with code in separate repositories with their own core reviewer teams. For each one of these repositories in the following repository list, there is a core team associated with it: + * `Neutron project team `_ These teams are also responsible for handling their own specs/RFEs/features if @@ -166,6 +172,7 @@ Neutron Specs Core Reviewer Team -------------------------------- Neutron specs core reviewers have merge rights to the following git repositories: + * `openstack/neutron-specs `_ The Neutron specs core reviewer team is responsible for reviewing and merging From ef5b4f52749b246d8165058ff320399c9c13f5ac Mon Sep 17 00:00:00 2001 From: Cedric Brandily Date: Tue, 26 May 2015 14:29:15 +0200 Subject: [PATCH 086/292] Enable random hash seeds Neutron tests have been updated in order to support random hash seed. It allows to remove PYTHONHASHSEED=0 in tox.ini and remove hashtest tox environment. 
Closes-Bug: #1348818 Change-Id: I1063304dda887eb82c0de8516d5a483fa8943fc1 Depends-On: I8408365825ec1e97a83c2181f38ec1f9468df91e Depends-On: I1b2bd4100c19004f12822c414aefc86aae9849db Depends-On: I5077764045a34d1be0e85bb4b80f3655e87692cc --- tox.ini | 7 ------- 1 file changed, 7 deletions(-) diff --git a/tox.ini b/tox.ini index 1c8761c7051..5d9710a9b7a 100644 --- a/tox.ini +++ b/tox.ini @@ -7,7 +7,6 @@ skipsdist = True # Note the hash seed is set to 0 until neutron can be tested with a # random hash seed successfully. setenv = VIRTUAL_ENV={envdir} - PYTHONHASHSEED=0 usedevelop = True install_command = pip install -U {opts} {packages} deps = -r{toxinidir}/requirements.txt @@ -19,12 +18,6 @@ commands = # there is also secret magic in pretty_tox.sh which lets you run in a fail only # mode. To do this define the TRACE_FAILONLY environmental variable. -[testenv:hashtest] -# This is the same as default environment, but with a random PYTHONHASHSEED. -# You can also specify a specific hashseed (for test repeatability) as follows: -# tox --hashseed 1235130571 -e hashtest -setenv = VIRTUAL_ENV={envdir} - [testenv:api] setenv = OS_TEST_PATH=./neutron/tests/api TEMPEST_CONFIG_DIR={env:TEMPEST_CONFIG_DIR:/opt/stack/tempest/etc} From e007167a700aa5b80ecb48adff0ac36bb330591d Mon Sep 17 00:00:00 2001 From: Kevin Benton Date: Thu, 30 Apr 2015 17:14:44 -0700 Subject: [PATCH 087/292] Don't delete port from bridge on delete_port event Commit d6a55c17360d1aa8ca91849199987ae71e8600ee added logic to the OVS agent to delete a port from the integration bridge when a port was deleted on the Neutron side. However, this led to several races where whoever created the initial port (e.g. Nova, L3 agent, DHCP agent) would be trying to remove the port from the bridge at the same time. These would result in ugly exceptions on one side or the other. The original commit was trying to address the problem where the port would maintain connectivity even though it was removed from the integration bridge. 
This patch addresses both cases by removing the iptables rules for the deleted port and putting it in the dead VLAN so it loses connectivity. However, it still leaves the port attached to the integration bridge so the original creator can delete it. Related-Bug: #1333365 Closes-Bug: #1448148 Change-Id: I7ae7750b7ac7d15325ed9f2d517ca171543b53be --- neutron/agent/common/ovs_lib.py | 9 +++--- .../openvswitch/agent/ovs_neutron_agent.py | 32 +++++++++++++++---- .../agent/test_ovs_neutron_agent.py | 26 ++++++++------- .../plugins/openvswitch/test_ovs_tunnel.py | 6 ++-- 4 files changed, 48 insertions(+), 25 deletions(-) diff --git a/neutron/agent/common/ovs_lib.py b/neutron/agent/common/ovs_lib.py index b316584f9eb..513dbc53674 100644 --- a/neutron/agent/common/ovs_lib.py +++ b/neutron/agent/common/ovs_lib.py @@ -129,16 +129,17 @@ class BaseOVS(object): return self.ovsdb.br_get_external_id(bridge, 'bridge-id').execute() def set_db_attribute(self, table_name, record, column, value, - check_error=False): + check_error=False, log_errors=True): self.ovsdb.db_set(table_name, record, (column, value)).execute( - check_error=check_error) + check_error=check_error, log_errors=log_errors) def clear_db_attribute(self, table_name, record, column): self.ovsdb.db_clear(table_name, record, column).execute() - def db_get_val(self, table, record, column, check_error=False): + def db_get_val(self, table, record, column, check_error=False, + log_errors=True): return self.ovsdb.db_get(table, record, column).execute( - check_error=check_error) + check_error=check_error, log_errors=log_errors) class OVSBridge(BaseOVS): diff --git a/neutron/plugins/openvswitch/agent/ovs_neutron_agent.py b/neutron/plugins/openvswitch/agent/ovs_neutron_agent.py index 003cbdf06d3..dfe156bfcfd 100644 --- a/neutron/plugins/openvswitch/agent/ovs_neutron_agent.py +++ b/neutron/plugins/openvswitch/agent/ovs_neutron_agent.py @@ -207,6 +207,8 @@ class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin, 
self.setup_integration_br() # Stores port update notifications for processing in main rpc loop self.updated_ports = set() + # Stores port delete notifications + self.deleted_ports = set() # keeps association between ports and ofports to detect ofport change self.vifname_to_ofport_map = {} self.setup_rpc() @@ -366,10 +368,21 @@ class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin, def port_delete(self, context, **kwargs): port_id = kwargs.get('port_id') - port = self.int_br.get_vif_port_by_id(port_id) - # If port exists, delete it - if port: - self.int_br.delete_port(port.port_name) + self.deleted_ports.add(port_id) + LOG.debug("port_delete message processed for port %s", port_id) + + def process_deleted_ports(self): + while self.deleted_ports: + port_id = self.deleted_ports.pop() + # Flush firewall rules and move to dead VLAN so deleted ports no + # longer have access to the network + self.sg_agent.remove_devices_filter([port_id]) + port = self.int_br.get_vif_port_by_id(port_id) + if port: + # don't log errors since there is a chance someone will be + # removing the port from the bridge at the same time + self.port_dead(port, log_errors=False) + self.port_unbound(port_id) def tunnel_update(self, context, **kwargs): LOG.debug("tunnel_update received") @@ -791,16 +804,17 @@ class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin, if not lvm.vif_ports: self.reclaim_local_vlan(net_uuid) - def port_dead(self, port): + def port_dead(self, port, log_errors=True): '''Once a port has no binding, put it on the "dead vlan". :param port: a ovs_lib.VifPort object. 
''' # Don't kill a port if it's already dead - cur_tag = self.int_br.db_get_val("Port", port.port_name, "tag") + cur_tag = self.int_br.db_get_val("Port", port.port_name, "tag", + log_errors=log_errors) if cur_tag != DEAD_VLAN_TAG: self.int_br.set_db_attribute("Port", port.port_name, "tag", - DEAD_VLAN_TAG) + DEAD_VLAN_TAG, log_errors=log_errors) self.int_br.drop_port(in_port=port.ofport) def setup_integration_br(self): @@ -1502,6 +1516,10 @@ class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin, self.updated_ports = set() reg_ports = (set() if ovs_restarted else ports) port_info = self.scan_ports(reg_ports, updated_ports_copy) + # don't try to process removed ports as deleted ports since + # they are already gone + self.deleted_ports -= port_info['removed'] + self.process_deleted_ports() self.update_stale_ofport_rules() LOG.debug("Agent rpc_loop - iteration:%(iter_num)d - " "port information retrieved. " diff --git a/neutron/tests/unit/plugins/openvswitch/agent/test_ovs_neutron_agent.py b/neutron/tests/unit/plugins/openvswitch/agent/test_ovs_neutron_agent.py index d29750dbaf6..7ac299e4a1a 100644 --- a/neutron/tests/unit/plugins/openvswitch/agent/test_ovs_neutron_agent.py +++ b/neutron/tests/unit/plugins/openvswitch/agent/test_ovs_neutron_agent.py @@ -221,7 +221,8 @@ class TestOvsNeutronAgent(object): else: int_br.assert_has_calls([ mock.call.set_db_attribute("Port", mock.ANY, "tag", - self.mod_agent.DEAD_VLAN_TAG), + self.mod_agent.DEAD_VLAN_TAG, + log_errors=True), mock.call.drop_port(in_port=port.ofport), ]) @@ -532,18 +533,19 @@ class TestOvsNeutronAgent(object): self.assertEqual(set(['123']), self.agent.updated_ports) def test_port_delete(self): - port_id = "123" - port_name = "foo" - with mock.patch.object( - self.agent.int_br, - 'get_vif_port_by_id', - return_value=mock.MagicMock(port_name=port_name)) as get_vif_func,\ - mock.patch.object(self.agent.int_br, - "delete_port") as del_port_func: + vif = FakeVif() + with mock.patch.object(self.agent, 
'int_br') as int_br: + int_br.get_vif_by_port_id.return_value = vif.port_name + int_br.get_vif_port_by_id.return_value = vif self.agent.port_delete("unused_context", - port_id=port_id) - self.assertTrue(get_vif_func.called) - del_port_func.assert_called_once_with(port_name) + port_id='id') + self.agent.process_deleted_ports() + # the main things we care about are that it gets put in the + # dead vlan and gets blocked + int_br.set_db_attribute.assert_any_call( + 'Port', vif.port_name, 'tag', self.mod_agent.DEAD_VLAN_TAG, + log_errors=False) + int_br.drop_port.assert_called_once_with(in_port=vif.ofport) def test_setup_physical_bridges(self): with mock.patch.object(ip_lib, "device_exists") as devex_fn,\ diff --git a/neutron/tests/unit/plugins/openvswitch/test_ovs_tunnel.py b/neutron/tests/unit/plugins/openvswitch/test_ovs_tunnel.py index fe8d9a00e25..1828e394e46 100644 --- a/neutron/tests/unit/plugins/openvswitch/test_ovs_tunnel.py +++ b/neutron/tests/unit/plugins/openvswitch/test_ovs_tunnel.py @@ -443,10 +443,12 @@ class TunnelTest(object): def test_port_dead(self): self.mock_int_bridge_expected += [ - mock.call.db_get_val('Port', VIF_PORT.port_name, 'tag'), + mock.call.db_get_val('Port', VIF_PORT.port_name, 'tag', + log_errors=True), mock.call.set_db_attribute( 'Port', VIF_PORT.port_name, - 'tag', self.mod_agent.DEAD_VLAN_TAG), + 'tag', self.mod_agent.DEAD_VLAN_TAG, + log_errors=True), mock.call.drop_port(in_port=VIF_PORT.ofport), ] From c44506bfd60b2dd6036e113464f1ea682cfaeb6c Mon Sep 17 00:00:00 2001 From: Kevin Benton Date: Fri, 17 Apr 2015 04:03:38 -0700 Subject: [PATCH 088/292] Don't update floating IP status if no change The floating IP status was going through all of the status update code every time the L3 agent sent in an update, even if the status didn't change. This patch skips sending updates to the server if the agent doesn't change the status. 
Change-Id: Ic3736bed3dc3e4ccb91f4acfabbf033949e09ce0 Partial-Bug: #1445412 --- neutron/agent/l3/agent.py | 6 +++++ neutron/agent/l3/router_info.py | 5 ++++ neutron/tests/unit/agent/l3/test_agent.py | 24 +++++++++++++++++++ .../tests/unit/agent/l3/test_router_info.py | 3 ++- 4 files changed, 37 insertions(+), 1 deletion(-) diff --git a/neutron/agent/l3/agent.py b/neutron/agent/l3/agent.py index 2c1c1696ee8..03f40d61c94 100644 --- a/neutron/agent/l3/agent.py +++ b/neutron/agent/l3/agent.py @@ -29,6 +29,7 @@ from neutron.agent.l3 import ha_router from neutron.agent.l3 import legacy_router from neutron.agent.l3 import namespace_manager from neutron.agent.l3 import namespaces +from neutron.agent.l3 import router_info as rinf from neutron.agent.l3 import router_processing_queue as queue from neutron.agent.linux import external_process from neutron.agent.linux import ip_lib @@ -348,6 +349,11 @@ class L3NATAgent(firewall_l3_agent.FWaaSL3AgentRpcCallback, ri.floating_ips = set(fip_statuses.keys()) for fip_id in existing_floating_ips - ri.floating_ips: fip_statuses[fip_id] = l3_constants.FLOATINGIP_STATUS_DOWN + # filter out statuses that didn't change + fip_statuses = {f: stat for f, stat in fip_statuses.items() + if stat != rinf.FLOATINGIP_STATUS_NOCHANGE} + if not fip_statuses: + return LOG.debug('Sending floating ip statuses: %s', fip_statuses) # Update floating IP status on the neutron server self.plugin_rpc.update_floatingip_statuses( diff --git a/neutron/agent/l3/router_info.py b/neutron/agent/l3/router_info.py index 3f0d801a660..adb668bc3db 100644 --- a/neutron/agent/l3/router_info.py +++ b/neutron/agent/l3/router_info.py @@ -30,6 +30,7 @@ INTERNAL_DEV_PREFIX = namespaces.INTERNAL_DEV_PREFIX EXTERNAL_DEV_PREFIX = namespaces.EXTERNAL_DEV_PREFIX EXTERNAL_INGRESS_MARK_MASK = '0xffffffff' +FLOATINGIP_STATUS_NOCHANGE = object() class RouterInfo(object): @@ -247,6 +248,10 @@ class RouterInfo(object): {'id': fip['id'], 'status': fip_statuses.get(fip['id'])}) + # mark the 
status as not changed. we can't remove it because + # that's how the caller determines that it was removed + if fip_statuses[fip['id']] == fip['status']: + fip_statuses[fip['id']] = FLOATINGIP_STATUS_NOCHANGE fips_to_remove = ( ip_cidr for ip_cidr in existing_cidrs - new_cidrs if common_utils.is_cidr_host(ip_cidr)) diff --git a/neutron/tests/unit/agent/l3/test_agent.py b/neutron/tests/unit/agent/l3/test_agent.py index bdf62fbbd99..6c1057c9010 100644 --- a/neutron/tests/unit/agent/l3/test_agent.py +++ b/neutron/tests/unit/agent/l3/test_agent.py @@ -231,6 +231,7 @@ def prepare_router_data(ip_version=4, enable_snat=None, num_internal_ports=1, router[l3_constants.FLOATINGIP_KEY] = [{ 'id': _uuid(), 'port_id': _uuid(), + 'status': 'DOWN', 'floating_ip_address': '19.4.4.2', 'fixed_ip_address': '10.0.0.1'}] @@ -1274,6 +1275,7 @@ class TestBasicRouterOperations(BasicRouterOperationsFramework): {'id': _uuid(), 'floating_ip_address': '15.1.2.3', 'fixed_ip_address': '192.168.0.1', + 'status': 'DOWN', 'floating_network_id': _uuid(), 'port_id': _uuid(), 'host': HOSTNAME}]} @@ -1690,6 +1692,27 @@ class TestBasicRouterOperations(BasicRouterOperationsFramework): self.assertNotIn( router[l3_constants.INTERFACE_KEY][0], ri.internal_ports) + def test_process_router_floatingip_nochange(self): + agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) + with mock.patch.object( + agent.plugin_rpc, 'update_floatingip_statuses' + ) as mock_update_fip_status: + router = prepare_router_data(num_internal_ports=1) + fip1 = {'id': _uuid(), 'floating_ip_address': '8.8.8.8', + 'fixed_ip_address': '7.7.7.7', 'status': 'ACTIVE', + 'port_id': router[l3_constants.INTERFACE_KEY][0]['id']} + fip2 = copy.copy(fip1) + fip2.update({'id': _uuid(), 'status': 'DOWN'}) + router[l3_constants.FLOATINGIP_KEY] = [fip1, fip2] + + ri = legacy_router.LegacyRouter(router['id'], router, + **self.ri_kwargs) + ri.external_gateway_added = mock.Mock() + ri.process(agent) + # make sure only the one that went from DOWN->ACTIVE was 
sent + mock_update_fip_status.assert_called_once_with( + mock.ANY, ri.router_id, {fip2['id']: 'ACTIVE'}) + def test_process_router_floatingip_disabled(self): agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) with mock.patch.object( @@ -1701,6 +1724,7 @@ class TestBasicRouterOperations(BasicRouterOperationsFramework): {'id': fip_id, 'floating_ip_address': '8.8.8.8', 'fixed_ip_address': '7.7.7.7', + 'status': 'DOWN', 'port_id': router[l3_constants.INTERFACE_KEY][0]['id']}] ri = legacy_router.LegacyRouter(router['id'], diff --git a/neutron/tests/unit/agent/l3/test_router_info.py b/neutron/tests/unit/agent/l3/test_router_info.py index 04aa55748c3..5e60aa12c8f 100644 --- a/neutron/tests/unit/agent/l3/test_router_info.py +++ b/neutron/tests/unit/agent/l3/test_router_info.py @@ -283,7 +283,8 @@ class TestFloatingIpWithMockDevice(BasicRouterTestCaseFramework): fip = { 'id': fip_id, 'port_id': _uuid(), 'floating_ip_address': '15.1.2.3', - 'fixed_ip_address': '192.168.0.2' + 'fixed_ip_address': '192.168.0.2', + 'status': 'DOWN' } ri = self._create_router() ri.add_floating_ip = mock.Mock( From 9a73722992cc302127472a5a98ca05348db52a5f Mon Sep 17 00:00:00 2001 From: Alexander Ignatov Date: Sat, 30 May 2015 00:49:31 +0300 Subject: [PATCH 089/292] Removed duplicate keys in dicts in test Test test_create_security_group_rule_invalid_ethertype_for_prefix contained dict with duplicate keys, remote ip prefixes. Test was successful because incorrect items for tests were overwritten: '192.168.1.1/24': 'ipv4' by '192.168.1.1/24': 'IPv6' '2001:db8:1234::/48': 'ipv6' by '2001:db8:1234::/48': 'IPv4' This patch removes incorrect and useless items for tests. Also added additional item with invalid ethertype.
Change-Id: I29cd2b843a7905986de13a1ecfba0cb5797ccaf8 --- neutron/tests/unit/extensions/test_securitygroup.py | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/neutron/tests/unit/extensions/test_securitygroup.py b/neutron/tests/unit/extensions/test_securitygroup.py index e21813b354e..38c60777ed3 100644 --- a/neutron/tests/unit/extensions/test_securitygroup.py +++ b/neutron/tests/unit/extensions/test_securitygroup.py @@ -79,8 +79,7 @@ class SecurityGroupsTestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase): 'direction': direction, 'protocol': proto, 'ethertype': ethertype, - 'tenant_id': tenant_id, - 'ethertype': ethertype}} + 'tenant_id': tenant_id}} if port_range_min: data['security_group_rule']['port_range_min'] = port_range_min @@ -454,14 +453,12 @@ class TestSecurityGroups(SecurityGroupDBTestCase): def test_create_security_group_rule_invalid_ethertype_for_prefix(self): name = 'webservers' description = 'my webservers' - test_addr = {'192.168.1.1/24': 'ipv4', '192.168.1.1/24': 'IPv6', - '2001:db8:1234::/48': 'ipv6', - '2001:db8:1234::/48': 'IPv4'} - for prefix, ether in test_addr.iteritems(): + test_addr = {'192.168.1.1/24': 'IPv6', + '2001:db8:1234::/48': 'IPv4', + '192.168.2.1/24': 'BadEthertype'} + for remote_ip_prefix, ethertype in test_addr.iteritems(): with self.security_group(name, description) as sg: sg_id = sg['security_group']['id'] - ethertype = ether - remote_ip_prefix = prefix rule = self._build_security_group_rule( sg_id, 'ingress', From b7ffbf96ff414ce615c10c83fea85d7f1fdcce70 Mon Sep 17 00:00:00 2001 From: Cedric Brandily Date: Sat, 30 May 2015 22:04:59 +0200 Subject: [PATCH 090/292] Refactor type_gre.vxlan tests to reduce duplicate code gre and vxlan type drivers unittests have similar tests. This change abstracts these tests and moves them to TunnelTypeTestMixin[1]. 
[1] neutron.tests.unit.plugins.ml2.drivers.base_type_tunnel Change-Id: I13b0507991e840ff34de7ed7ffd31d359691b0ca --- .../plugins/ml2/drivers/base_type_tunnel.py | 48 +++++++++++++ .../unit/plugins/ml2/drivers/test_type_gre.py | 54 +++----------- .../plugins/ml2/drivers/test_type_vxlan.py | 71 ++++++------------- 3 files changed, 77 insertions(+), 96 deletions(-) diff --git a/neutron/tests/unit/plugins/ml2/drivers/base_type_tunnel.py b/neutron/tests/unit/plugins/ml2/drivers/base_type_tunnel.py index cd5469fd001..f239f9eafcb 100644 --- a/neutron/tests/unit/plugins/ml2/drivers/base_type_tunnel.py +++ b/neutron/tests/unit/plugins/ml2/drivers/base_type_tunnel.py @@ -34,6 +34,7 @@ UPDATED_TUNNEL_RANGES = [(TUN_MIN + 5, TUN_MAX + 5)] class TunnelTypeTestMixin(object): + DRIVER_MODULE = None DRIVER_CLASS = None TYPE = None @@ -196,6 +197,53 @@ class TunnelTypeTestMixin(object): segment[api.SEGMENTATION_ID] = tunnel_id self.driver.release_segment(self.session, segment) + def add_endpoint(self, ip=TUNNEL_IP_ONE, host=HOST_ONE): + return self.driver.add_endpoint(ip, host) + + def test_add_endpoint(self): + endpoint = self.add_endpoint() + self.assertEqual(TUNNEL_IP_ONE, endpoint.ip_address) + self.assertEqual(HOST_ONE, endpoint.host) + return endpoint + + def test_add_endpoint_for_existing_tunnel_ip(self): + self.add_endpoint() + + log = getattr(self.DRIVER_MODULE, 'LOG') + with mock.patch.object(log, 'warning') as log_warn: + self.add_endpoint() + log_warn.assert_called_once_with(mock.ANY, TUNNEL_IP_ONE) + + def test_get_endpoint_by_host(self): + self.add_endpoint() + + host_endpoint = self.driver.get_endpoint_by_host(HOST_ONE) + self.assertEqual(TUNNEL_IP_ONE, host_endpoint.ip_address) + return host_endpoint + + def test_get_endpoint_by_host_for_not_existing_host(self): + ip_endpoint = self.driver.get_endpoint_by_host(HOST_TWO) + self.assertIsNone(ip_endpoint) + + def test_get_endpoint_by_ip(self): + self.add_endpoint() + + ip_endpoint = 
self.driver.get_endpoint_by_ip(TUNNEL_IP_ONE) + self.assertEqual(HOST_ONE, ip_endpoint.host) + return ip_endpoint + + def test_get_endpoint_by_ip_for_not_existing_tunnel_ip(self): + ip_endpoint = self.driver.get_endpoint_by_ip(TUNNEL_IP_TWO) + self.assertIsNone(ip_endpoint) + + def test_delete_endpoint(self): + self.add_endpoint() + + self.assertIsNone(self.driver.delete_endpoint(TUNNEL_IP_ONE)) + # Get all the endpoints and verify its empty + endpoints = self.driver.get_endpoints() + self.assertNotIn(TUNNEL_IP_ONE, endpoints) + class TunnelTypeMultiRangeTestMixin(object): DRIVER_CLASS = None diff --git a/neutron/tests/unit/plugins/ml2/drivers/test_type_gre.py b/neutron/tests/unit/plugins/ml2/drivers/test_type_gre.py index 01f36ab464e..c61f07b0e63 100644 --- a/neutron/tests/unit/plugins/ml2/drivers/test_type_gre.py +++ b/neutron/tests/unit/plugins/ml2/drivers/test_type_gre.py @@ -46,59 +46,21 @@ def _get_allocation(session, gre_id): class GreTypeTest(base_type_tunnel.TunnelTypeTestMixin, testlib_api.SqlTestCase): + DRIVER_MODULE = type_gre DRIVER_CLASS = type_gre.GreTypeDriver TYPE = p_const.TYPE_GRE - def test_add_endpoint(self): - endpoint = self.driver.add_endpoint(TUNNEL_IP_ONE, HOST_ONE) - self.assertEqual(TUNNEL_IP_ONE, endpoint.ip_address) - self.assertEqual(HOST_ONE, endpoint.host) - - def test_add_endpoint_for_existing_tunnel_ip(self): - self.driver.add_endpoint(TUNNEL_IP_ONE, HOST_ONE) - - with mock.patch.object(type_gre.LOG, 'warning') as log_warn: - self.driver.add_endpoint(TUNNEL_IP_ONE, HOST_ONE) - log_warn.assert_called_once_with(mock.ANY, TUNNEL_IP_ONE) - - def test_get_endpoint_by_host(self): - self.driver.add_endpoint(TUNNEL_IP_ONE, HOST_ONE) - - host_endpoint = self.driver.get_endpoint_by_host(HOST_ONE) - self.assertEqual(TUNNEL_IP_ONE, host_endpoint.ip_address) - - def test_get_endpoint_by_host_for_not_existing_host(self): - ip_endpoint = self.driver.get_endpoint_by_host(HOST_TWO) - self.assertIsNone(ip_endpoint) - - def 
test_get_endpoint_by_ip(self): - self.driver.add_endpoint(TUNNEL_IP_ONE, HOST_ONE) - - ip_endpoint = self.driver.get_endpoint_by_ip(TUNNEL_IP_ONE) - self.assertEqual(HOST_ONE, ip_endpoint.host) - - def test_get_endpoint_by_ip_for_not_existing_tunnel_ip(self): - ip_endpoint = self.driver.get_endpoint_by_ip(TUNNEL_IP_TWO) - self.assertIsNone(ip_endpoint) - def test_get_endpoints(self): - self.driver.add_endpoint(TUNNEL_IP_ONE, HOST_ONE) - self.driver.add_endpoint(TUNNEL_IP_TWO, HOST_TWO) + self.add_endpoint() + self.add_endpoint( + base_type_tunnel.TUNNEL_IP_TWO, base_type_tunnel.HOST_TWO) endpoints = self.driver.get_endpoints() for endpoint in endpoints: - if endpoint['ip_address'] == TUNNEL_IP_ONE: - self.assertEqual(HOST_ONE, endpoint['host']) - elif endpoint['ip_address'] == TUNNEL_IP_TWO: - self.assertEqual(HOST_TWO, endpoint['host']) - - def test_delete_endpoint(self): - self.driver.add_endpoint(TUNNEL_IP_ONE, HOST_ONE) - - self.assertIsNone(self.driver.delete_endpoint(TUNNEL_IP_ONE)) - # Get all the endpoints and verify its empty - endpoints = self.driver.get_endpoints() - self.assertNotIn(TUNNEL_IP_ONE, endpoints) + if endpoint['ip_address'] == base_type_tunnel.TUNNEL_IP_ONE: + self.assertEqual(base_type_tunnel.HOST_ONE, endpoint['host']) + elif endpoint['ip_address'] == base_type_tunnel.TUNNEL_IP_TWO: + self.assertEqual(base_type_tunnel.HOST_TWO, endpoint['host']) def test_sync_allocations_entry_added_during_session(self): with mock.patch.object(self.driver, '_add_allocation', diff --git a/neutron/tests/unit/plugins/ml2/drivers/test_type_vxlan.py b/neutron/tests/unit/plugins/ml2/drivers/test_type_vxlan.py index b59f0118080..8827fedb4d8 100644 --- a/neutron/tests/unit/plugins/ml2/drivers/test_type_vxlan.py +++ b/neutron/tests/unit/plugins/ml2/drivers/test_type_vxlan.py @@ -13,8 +13,6 @@ # License for the specific language governing permissions and limitations # under the License. 
-import mock - from neutron.plugins.common import constants as p_const from neutron.plugins.ml2 import config from neutron.plugins.ml2.drivers import type_vxlan @@ -23,76 +21,49 @@ from neutron.tests.unit.plugins.ml2 import test_rpc from neutron.tests.unit import testlib_api -TUNNEL_IP_ONE = "10.10.10.10" -TUNNEL_IP_TWO = "10.10.10.20" -HOST_ONE = 'fake_host_one' -HOST_TWO = 'fake_host_two' VXLAN_UDP_PORT_ONE = 9999 VXLAN_UDP_PORT_TWO = 8888 class VxlanTypeTest(base_type_tunnel.TunnelTypeTestMixin, testlib_api.SqlTestCase): + DRIVER_MODULE = type_vxlan DRIVER_CLASS = type_vxlan.VxlanTypeDriver TYPE = p_const.TYPE_VXLAN + def add_endpoint(self, ip=base_type_tunnel.TUNNEL_IP_ONE, + host=base_type_tunnel.HOST_ONE): + if ip == base_type_tunnel.TUNNEL_IP_ONE: + port = VXLAN_UDP_PORT_ONE + else: + port = VXLAN_UDP_PORT_TWO + return self.driver.add_endpoint(ip, host, port) + def test_add_endpoint(self): - endpoint = self.driver.add_endpoint(TUNNEL_IP_ONE, HOST_ONE, - VXLAN_UDP_PORT_ONE) - self.assertEqual(TUNNEL_IP_ONE, endpoint.ip_address) + endpoint = super(VxlanTypeTest, self).test_add_endpoint() self.assertEqual(VXLAN_UDP_PORT_ONE, endpoint.udp_port) - self.assertEqual(HOST_ONE, endpoint.host) - - def test_add_endpoint_for_existing_tunnel_ip(self): - self.driver.add_endpoint(TUNNEL_IP_ONE, HOST_ONE, VXLAN_UDP_PORT_ONE) - - with mock.patch.object(type_vxlan.LOG, 'warning') as log_warn: - self.driver.add_endpoint(TUNNEL_IP_ONE, HOST_ONE, - VXLAN_UDP_PORT_ONE) - log_warn.assert_called_once_with(mock.ANY, TUNNEL_IP_ONE) def test_get_endpoint_by_host(self): - self.driver.add_endpoint(TUNNEL_IP_ONE, HOST_ONE, VXLAN_UDP_PORT_ONE) - - host_endpoint = self.driver.get_endpoint_by_host(HOST_ONE) - self.assertEqual(TUNNEL_IP_ONE, host_endpoint.ip_address) - self.assertEqual(VXLAN_UDP_PORT_ONE, host_endpoint.udp_port) - - def test_get_endpoint_by_host_for_not_existing_host(self): - ip_endpoint = self.driver.get_endpoint_by_host(HOST_TWO) - self.assertIsNone(ip_endpoint) + endpoint 
= super(VxlanTypeTest, self).test_get_endpoint_by_host() + self.assertEqual(VXLAN_UDP_PORT_ONE, endpoint.udp_port) def test_get_endpoint_by_ip(self): - self.driver.add_endpoint(TUNNEL_IP_ONE, HOST_ONE, VXLAN_UDP_PORT_ONE) - - ip_endpoint = self.driver.get_endpoint_by_ip(TUNNEL_IP_ONE) - self.assertEqual(HOST_ONE, ip_endpoint.host) - self.assertEqual(VXLAN_UDP_PORT_ONE, ip_endpoint.udp_port) - - def test_get_endpoint_by_ip_for_not_existing_tunnel_ip(self): - ip_endpoint = self.driver.get_endpoint_by_ip(TUNNEL_IP_TWO) - self.assertIsNone(ip_endpoint) + endpoint = super(VxlanTypeTest, self).test_get_endpoint_by_ip() + self.assertEqual(VXLAN_UDP_PORT_ONE, endpoint.udp_port) def test_get_endpoints(self): - self.driver.add_endpoint(TUNNEL_IP_ONE, HOST_ONE, VXLAN_UDP_PORT_ONE) - self.driver.add_endpoint(TUNNEL_IP_TWO, HOST_TWO, VXLAN_UDP_PORT_TWO) + self.add_endpoint() + self.add_endpoint(base_type_tunnel.TUNNEL_IP_TWO, + base_type_tunnel.HOST_TWO) endpoints = self.driver.get_endpoints() for endpoint in endpoints: - if endpoint['ip_address'] == TUNNEL_IP_ONE: + if endpoint['ip_address'] == base_type_tunnel.TUNNEL_IP_ONE: self.assertEqual(VXLAN_UDP_PORT_ONE, endpoint['udp_port']) - self.assertEqual(HOST_ONE, endpoint['host']) - elif endpoint['ip_address'] == TUNNEL_IP_TWO: + self.assertEqual(base_type_tunnel.HOST_ONE, endpoint['host']) + elif endpoint['ip_address'] == base_type_tunnel.TUNNEL_IP_TWO: self.assertEqual(VXLAN_UDP_PORT_TWO, endpoint['udp_port']) - self.assertEqual(HOST_TWO, endpoint['host']) - - def test_delete_endpoint(self): - self.driver.add_endpoint(TUNNEL_IP_ONE, HOST_ONE, VXLAN_UDP_PORT_ONE) - - self.assertIsNone(self.driver.delete_endpoint(TUNNEL_IP_ONE)) - # Get all the endpoints and verify its empty - endpoints = self.driver.get_endpoints() - self.assertNotIn(TUNNEL_IP_ONE, endpoints) + self.assertEqual(base_type_tunnel.HOST_TWO, endpoint['host']) def test_get_mtu(self): config.cfg.CONF.set_override('segment_mtu', 1500, group='ml2') From 
193e745ac63d3a404b577ab61dd331779beb6a4b Mon Sep 17 00:00:00 2001 From: YAMAMOTO Takashi Date: Fri, 29 May 2015 15:00:45 +0900 Subject: [PATCH 091/292] test_ovs_neutron_agent: Remove unnecessary mocking Leftover from change I90b4d2485e3e491f496dfb7bdee03d57f393be35. Change-Id: Ia2596718678d3102ad99acbdf30be7ef0f52ad27 --- .../unit/plugins/openvswitch/agent/test_ovs_neutron_agent.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/neutron/tests/unit/plugins/openvswitch/agent/test_ovs_neutron_agent.py b/neutron/tests/unit/plugins/openvswitch/agent/test_ovs_neutron_agent.py index d29750dbaf6..a88c41d5d8f 100644 --- a/neutron/tests/unit/plugins/openvswitch/agent/test_ovs_neutron_agent.py +++ b/neutron/tests/unit/plugins/openvswitch/agent/test_ovs_neutron_agent.py @@ -338,9 +338,6 @@ class TestOvsNeutronAgent(object): mock.patch.object(self.agent.int_br, 'get_vif_port_by_id', return_value=port),\ - mock.patch.object(self.agent.plugin_rpc, 'update_device_up'),\ - mock.patch.object(self.agent.plugin_rpc, - 'update_device_down'),\ mock.patch.object(self.agent, func_name) as func: skip_devs, need_bound_devices = ( self.agent.treat_devices_added_or_updated([{}], False)) From c60c3e4f88aa28b9d2e6b6dcc7bc2a858433952d Mon Sep 17 00:00:00 2001 From: Henry Gessau Date: Sun, 31 May 2015 14:51:50 -0400 Subject: [PATCH 092/292] Remove unused _uuid function alias from test_iptables.py Change-Id: If80de5ee6588ffd2338387e6592cf2812b106e1f --- neutron/tests/unit/services/metering/drivers/test_iptables.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/neutron/tests/unit/services/metering/drivers/test_iptables.py b/neutron/tests/unit/services/metering/drivers/test_iptables.py index 5818a9775d3..91434653cc4 100644 --- a/neutron/tests/unit/services/metering/drivers/test_iptables.py +++ b/neutron/tests/unit/services/metering/drivers/test_iptables.py @@ -19,9 +19,6 @@ from oslo_config import cfg from neutron.services.metering.drivers.iptables import iptables_driver from neutron.tests 
import base -from neutron.tests.unit.api.v2 import test_base - -_uuid = test_base._uuid TEST_ROUTERS = [ From 9b29f62491ec0e1cb27601e6ad53cf152de5eb66 Mon Sep 17 00:00:00 2001 From: Moshe Levi Date: Sun, 31 May 2015 13:27:56 +0300 Subject: [PATCH 093/292] Refactor mlnx mechanism driver to support infiniband only The old mlnx mechanism driver was used for SR-IOV with Ethernet and infiniband support but the PCI allocation wasn't done by nova. Juno introduced sriovnicswitch mechanism driver for SR-IOV with Ethernet support. Mellanox recommends on using it. The updated mlnx mechanism driver supports SR-IOV infiniband. * support only port vnic_type 'direct' * update vif_type name to ib_hostdev (the ib_hostdev is generic vif_type for SR-IOV infiniband see https://review.openstack.org/#/c/187052/) Closes-Bug: #1460430 DocImpact Change-Id: Ia822b492afdfeb1aecf373d5a8cdb72174512884 --- neutron/extensions/portbindings.py | 4 +-- neutron/plugins/ml2/drivers/mlnx/mech_mlnx.py | 11 ++------ .../unit/plugins/ml2/_test_mech_agent.py | 22 ++++++++++------ .../ml2/drivers/mlnx/test_mech_mlnx.py | 25 ++++--------------- 4 files changed, 24 insertions(+), 38 deletions(-) diff --git a/neutron/extensions/portbindings.py b/neutron/extensions/portbindings.py index 81dde03ffeb..63fca88ec09 100644 --- a/neutron/extensions/portbindings.py +++ b/neutron/extensions/portbindings.py @@ -59,14 +59,14 @@ VIF_TYPE_802_QBG = '802.1qbg' VIF_TYPE_802_QBH = '802.1qbh' VIF_TYPE_HYPERV = 'hyperv' VIF_TYPE_MIDONET = 'midonet' -VIF_TYPE_MLNX_HOSTDEV = 'hostdev' +VIF_TYPE_IB_HOSTDEV = 'ib_hostdev' VIF_TYPE_HW_VEB = 'hw_veb' VIF_TYPE_VROUTER = 'vrouter' VIF_TYPE_OTHER = 'other' VIF_TYPES = [VIF_TYPE_UNBOUND, VIF_TYPE_BINDING_FAILED, VIF_TYPE_OVS, VIF_TYPE_IVS, VIF_TYPE_BRIDGE, VIF_TYPE_802_QBG, VIF_TYPE_802_QBH, VIF_TYPE_HYPERV, VIF_TYPE_MIDONET, - VIF_TYPE_MLNX_HOSTDEV, VIF_TYPE_HW_VEB, + VIF_TYPE_IB_HOSTDEV, VIF_TYPE_HW_VEB, VIF_TYPE_DVS, VIF_TYPE_OTHER, VIF_TYPE_DISTRIBUTED, VIF_TYPE_VROUTER] diff --git 
a/neutron/plugins/ml2/drivers/mlnx/mech_mlnx.py b/neutron/plugins/ml2/drivers/mlnx/mech_mlnx.py index 6205b7fcd0b..9484a61e870 100644 --- a/neutron/plugins/ml2/drivers/mlnx/mech_mlnx.py +++ b/neutron/plugins/ml2/drivers/mlnx/mech_mlnx.py @@ -36,18 +36,11 @@ class MlnxMechanismDriver(mech_agent.SimpleAgentMechanismDriverBase): """ def __init__(self): - # REVISIT(irenab): update supported_vnic_types to contain - # only VNIC_DIRECT once its possible to specify - # vnic_type via nova API/GUI. Currently VNIC_NORMAL is included - # to enable VM creation via GUI. It should be noted, that if - # several MDs are capable to bring bind port on chosen host, the - # first listed MD will bind the port for VNIC_NORMAL super(MlnxMechanismDriver, self).__init__( agent_type=n_const.AGENT_TYPE_MLNX, - vif_type=portbindings.VIF_TYPE_MLNX_HOSTDEV, + vif_type=portbindings.VIF_TYPE_IB_HOSTDEV, vif_details={portbindings.CAP_PORT_FILTER: False}, - supported_vnic_types=[portbindings.VNIC_DIRECT, - portbindings.VNIC_NORMAL]) + supported_vnic_types=[portbindings.VNIC_DIRECT]) def get_allowed_network_types(self, agent=None): return [p_constants.TYPE_LOCAL, p_constants.TYPE_FLAT, diff --git a/neutron/tests/unit/plugins/ml2/_test_mech_agent.py b/neutron/tests/unit/plugins/ml2/_test_mech_agent.py index c69d837f9b9..c6186c4a92b 100644 --- a/neutron/tests/unit/plugins/ml2/_test_mech_agent.py +++ b/neutron/tests/unit/plugins/ml2/_test_mech_agent.py @@ -146,6 +146,7 @@ class AgentMechanismBaseTestCase(base.BaseTestCase): AGENTS = None AGENTS_DEAD = None AGENTS_BAD = None + VNIC_TYPE = portbindings.VNIC_NORMAL def _check_unbound(self, context): self.assertIsNone(context._bound_segment_id) @@ -177,7 +178,8 @@ class AgentMechanismGenericTestCase(AgentMechanismBaseTestCase): def test_unknown_type(self): context = FakePortContext(self.AGENT_TYPE, self.AGENTS, - self.UNKNOWN_TYPE_SEGMENTS) + self.UNKNOWN_TYPE_SEGMENTS, + vnic_type=self.VNIC_TYPE) self.driver.bind_port(context) self._check_unbound(context) 
@@ -191,14 +193,16 @@ class AgentMechanismLocalTestCase(AgentMechanismBaseTestCase): def test_type_local(self): context = FakePortContext(self.AGENT_TYPE, self.AGENTS, - self.LOCAL_SEGMENTS) + self.LOCAL_SEGMENTS, + vnic_type=self.VNIC_TYPE) self.driver.bind_port(context) self._check_bound(context, self.LOCAL_SEGMENTS[1]) def test_type_local_dead(self): context = FakePortContext(self.AGENT_TYPE, self.AGENTS_DEAD, - self.LOCAL_SEGMENTS) + self.LOCAL_SEGMENTS, + vnic_type=self.VNIC_TYPE) self.driver.bind_port(context) self._check_unbound(context) @@ -213,14 +217,16 @@ class AgentMechanismFlatTestCase(AgentMechanismBaseTestCase): def test_type_flat(self): context = FakePortContext(self.AGENT_TYPE, self.AGENTS, - self.FLAT_SEGMENTS) + self.FLAT_SEGMENTS, + vnic_type=self.VNIC_TYPE) self.driver.bind_port(context) self._check_bound(context, self.FLAT_SEGMENTS[1]) def test_type_flat_bad(self): context = FakePortContext(self.AGENT_TYPE, self.AGENTS_BAD, - self.FLAT_SEGMENTS) + self.FLAT_SEGMENTS, + vnic_type=self.VNIC_TYPE) self.driver.bind_port(context) self._check_unbound(context) @@ -236,14 +242,16 @@ class AgentMechanismVlanTestCase(AgentMechanismBaseTestCase): def test_type_vlan(self): context = FakePortContext(self.AGENT_TYPE, self.AGENTS, - self.VLAN_SEGMENTS) + self.VLAN_SEGMENTS, + vnic_type=self.VNIC_TYPE) self.driver.bind_port(context) self._check_bound(context, self.VLAN_SEGMENTS[1]) def test_type_vlan_bad(self): context = FakePortContext(self.AGENT_TYPE, self.AGENTS_BAD, - self.VLAN_SEGMENTS) + self.VLAN_SEGMENTS, + vnic_type=self.VNIC_TYPE) self.driver.bind_port(context) self._check_unbound(context) diff --git a/neutron/tests/unit/plugins/ml2/drivers/mlnx/test_mech_mlnx.py b/neutron/tests/unit/plugins/ml2/drivers/mlnx/test_mech_mlnx.py index 4aee7a4cb56..1237b8444bb 100644 --- a/neutron/tests/unit/plugins/ml2/drivers/mlnx/test_mech_mlnx.py +++ b/neutron/tests/unit/plugins/ml2/drivers/mlnx/test_mech_mlnx.py @@ -33,9 +33,10 @@ with mock.patch.dict(sys.modules, 
class MlnxMechanismBaseTestCase(base.AgentMechanismBaseTestCase): - VIF_TYPE = portbindings.VIF_TYPE_MLNX_HOSTDEV + VIF_TYPE = portbindings.VIF_TYPE_IB_HOSTDEV CAP_PORT_FILTER = False AGENT_TYPE = constants.AGENT_TYPE_MLNX + VNIC_TYPE = portbindings.VNIC_DIRECT GOOD_MAPPINGS = {'fake_physical_network': 'fake_bridge'} GOOD_CONFIGS = {'interface_mappings': GOOD_MAPPINGS} @@ -77,25 +78,9 @@ class MlnxMechanismFlatTestCase(MlnxMechanismBaseTestCase, pass -class MlnxMechanismVnicTypeTestCase(MlnxMechanismBaseTestCase, - base.AgentMechanismVlanTestCase): - - def _check_vif_type_for_vnic_type(self, vnic_type, - expected_vif_type): - context = base.FakePortContext(self.AGENT_TYPE, - self.AGENTS, - self.VLAN_SEGMENTS, - vnic_type) - self.driver.bind_port(context) - self.assertEqual(expected_vif_type, context._bound_vif_type) - - def test_vnic_type_direct(self): - self._check_vif_type_for_vnic_type(portbindings.VNIC_DIRECT, - portbindings.VIF_TYPE_MLNX_HOSTDEV) - - def test_vnic_type_normal(self): - self._check_vif_type_for_vnic_type(portbindings.VNIC_NORMAL, - self.VIF_TYPE) +class MlnxMechanismVlanTestCase(MlnxMechanismBaseTestCase, + base.AgentMechanismVlanTestCase): + pass class MlnxMechanismVifDetailsTestCase(MlnxMechanismBaseTestCase): From 4a234ab1a35708bf85fe57d9290056ac74227eab Mon Sep 17 00:00:00 2001 From: Elena Ezhova Date: Mon, 1 Jun 2015 12:58:32 +0300 Subject: [PATCH 094/292] Remove comment about hash seed in tox.ini Enable random hash seeds change [1] has merged so the comment can be safely removed. [1] https://review.openstack.org/#/c/185572/ Change-Id: I93f64841792f9c38f5121ca47c130ad769c11dc4 --- tox.ini | 2 -- 1 file changed, 2 deletions(-) diff --git a/tox.ini b/tox.ini index 5d9710a9b7a..bd28ed3e269 100644 --- a/tox.ini +++ b/tox.ini @@ -4,8 +4,6 @@ minversion = 1.8 skipsdist = True [testenv] -# Note the hash seed is set to 0 until neutron can be tested with a -# random hash seed successfully. 
setenv = VIRTUAL_ENV={envdir} usedevelop = True install_command = pip install -U {opts} {packages} From bacd69386d9df2abb41f62d1fa90f9cfaa215c62 Mon Sep 17 00:00:00 2001 From: Pavel Bondar Date: Thu, 15 Jan 2015 15:00:02 +0300 Subject: [PATCH 095/292] Implement IPAM Driver loader IPAM Driver is loaded based on value of 'ipam_driver'. Added new variable 'ipam_driver' in config. DocImpact Partially-Implements: blueprint neutron-ipam Change-Id: Ia52ad70ef4f0b02cf82cfefcf50b9f1e30b05b79 --- etc/neutron.conf | 8 ++++ neutron/common/config.py | 2 + neutron/ipam/driver.py | 11 ++++- neutron/manager.py | 10 ++++- neutron/tests/unit/ipam/fake_driver.py | 35 +++++++++++++++ neutron/tests/unit/test_ipam.py | 61 ++++++++++++++++++++++++++ setup.cfg | 3 ++ 7 files changed, 127 insertions(+), 3 deletions(-) mode change 100644 => 100755 etc/neutron.conf create mode 100755 neutron/tests/unit/ipam/fake_driver.py mode change 100644 => 100755 neutron/tests/unit/test_ipam.py mode change 100644 => 100755 setup.cfg diff --git a/etc/neutron.conf b/etc/neutron.conf old mode 100644 new mode 100755 index ee42954bfe9..5b58519d0ea --- a/etc/neutron.conf +++ b/etc/neutron.conf @@ -60,6 +60,14 @@ # core_plugin = # Example: core_plugin = ml2 +# (StrOpt) Neutron IPAM (IP address management) driver to be loaded from the +# neutron.ipam_drivers namespace. See setup.cfg for the entry point names. +# If ipam_driver is not set (default behavior), no ipam driver is used. +# Example: ipam_driver = +# In order to use the reference implementation of neutron ipam driver, use +# 'internal'. +# Example: ipam_driver = internal + # (ListOpt) List of service plugin entrypoints to be loaded from the # neutron.service_plugins namespace. See setup.cfg for the entrypoint names of # the plugins included in the neutron source distribution. 
For compatibility diff --git a/neutron/common/config.py b/neutron/common/config.py index 2837b4ca695..93f57159f3e 100644 --- a/neutron/common/config.py +++ b/neutron/common/config.py @@ -131,6 +131,8 @@ core_opts = [ help=_('If True, effort is made to advertise MTU settings ' 'to VMs via network methods (DHCP and RA MTU options) ' 'when the network\'s preferred MTU is known.')), + cfg.StrOpt('ipam_driver', default=None, + help=_('IPAM driver to use.')), cfg.BoolOpt('vlan_transparent', default=False, help=_('If True, then allow plugins that support it to ' 'create VLAN transparent networks.')), diff --git a/neutron/ipam/driver.py b/neutron/ipam/driver.py index ed40b5eee8d..0e54e8856da 100644 --- a/neutron/ipam/driver.py +++ b/neutron/ipam/driver.py @@ -12,9 +12,11 @@ import abc +from oslo_config import cfg +from oslo_log import log import six -from oslo_log import log +from neutron import manager LOG = log.getLogger(__name__) @@ -43,7 +45,12 @@ class Pool(object): :type subnet_pool: dict :returns: An instance of Driver for the given subnet pool """ - raise NotImplementedError + ipam_driver_name = cfg.CONF.ipam_driver + mgr = manager.NeutronManager + LOG.debug("Loading ipam driver: %s", ipam_driver_name) + driver_class = mgr.load_class_for_provider('neutron.ipam_drivers', + ipam_driver_name) + return driver_class(subnet_pool, context) @abc.abstractmethod def allocate_subnet(self, request): diff --git a/neutron/manager.py b/neutron/manager.py index 503d79448e7..19e50047ea3 100644 --- a/neutron/manager.py +++ b/neutron/manager.py @@ -127,7 +127,11 @@ class NeutronManager(object): self.service_plugins = {constants.CORE: self.plugin} self._load_service_plugins() - def _get_plugin_instance(self, namespace, plugin_provider): + @staticmethod + def load_class_for_provider(namespace, plugin_provider): + if not plugin_provider: + LOG.exception(_LE("Error, plugin is not set")) + raise ImportError(_("Plugin not found.")) try: # Try to resolve plugin by name mgr = 
driver.DriverManager(namespace, plugin_provider) @@ -140,6 +144,10 @@ class NeutronManager(object): LOG.exception(_LE("Error loading plugin by name, %s"), e1) LOG.exception(_LE("Error loading plugin by class, %s"), e2) raise ImportError(_("Plugin not found.")) + return plugin_class + + def _get_plugin_instance(self, namespace, plugin_provider): + plugin_class = self.load_class_for_provider(namespace, plugin_provider) return plugin_class() def _load_services_from_core_plugin(self): diff --git a/neutron/tests/unit/ipam/fake_driver.py b/neutron/tests/unit/ipam/fake_driver.py new file mode 100755 index 00000000000..3236a4c2e98 --- /dev/null +++ b/neutron/tests/unit/ipam/fake_driver.py @@ -0,0 +1,35 @@ +# Copyright (c) 2015 Infoblox Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from neutron.ipam import driver + + +class FakeDriver(driver.Pool): + """Fake IPAM driver for tests only + + Just implement IPAM Driver interface without any functionality inside + """ + + def allocate_subnet(self, subnet): + return driver.Subnet() + + def get_subnet(self, cidr): + return driver.Subnet() + + def update_subnet(self, request): + return driver.Subnet() + + def remove_subnet(self, cidr): + pass diff --git a/neutron/tests/unit/test_ipam.py b/neutron/tests/unit/test_ipam.py old mode 100644 new mode 100755 index aeec959a5da..bb8759f63d0 --- a/neutron/tests/unit/test_ipam.py +++ b/neutron/tests/unit/test_ipam.py @@ -10,14 +10,24 @@ # License for the specific language governing permissions and limitations # under the License. +import types + +import mock import netaddr +from oslo_config import cfg from neutron.common import constants from neutron.common import ipv6_utils +from neutron import context from neutron import ipam +from neutron.ipam import driver from neutron.ipam import exceptions as ipam_exc +from neutron import manager from neutron.openstack.common import uuidutils from neutron.tests import base +from neutron.tests.unit.ipam import fake_driver + +FAKE_IPAM_CLASS = 'neutron.tests.unit.ipam.fake_driver.FakeDriver' class IpamSubnetRequestTestCase(base.BaseTestCase): @@ -224,3 +234,54 @@ class TestAddressRequest(base.BaseTestCase): mac='meh', alien='et', prefix='meh') + + +class TestIpamDriverLoader(base.BaseTestCase): + + def setUp(self): + super(TestIpamDriverLoader, self).setUp() + self.ctx = context.get_admin_context() + + def _verify_fake_ipam_driver_is_loaded(self, driver_name): + mgr = manager.NeutronManager + ipam_driver = mgr.load_class_for_provider('neutron.ipam_drivers', + driver_name) + + self.assertEqual( + fake_driver.FakeDriver, ipam_driver, + "loaded ipam driver should be FakeDriver") + + def _verify_import_error_is_generated(self, driver_name): + mgr = manager.NeutronManager + self.assertRaises(ImportError, 
mgr.load_class_for_provider, + 'neutron.ipam_drivers', + driver_name) + + def test_ipam_driver_is_loaded_by_class(self): + self._verify_fake_ipam_driver_is_loaded(FAKE_IPAM_CLASS) + + def test_ipam_driver_is_loaded_by_name(self): + self._verify_fake_ipam_driver_is_loaded('fake') + + def test_ipam_driver_raises_import_error(self): + self._verify_import_error_is_generated( + 'neutron.tests.unit.ipam.SomeNonExistentClass') + + def test_ipam_driver_raises_import_error_for_none(self): + self._verify_import_error_is_generated(None) + + def _load_ipam_driver(self, driver_name, subnet_pool_id): + cfg.CONF.set_override("ipam_driver", driver_name) + return driver.Pool.get_instance(subnet_pool_id, self.ctx) + + def test_ipam_driver_is_loaded_from_ipam_driver_config_value(self): + ipam_driver = self._load_ipam_driver('fake', None) + self.assertIsInstance( + ipam_driver, (fake_driver.FakeDriver, types.ClassType), + "loaded ipam driver should be of type FakeDriver") + + @mock.patch(FAKE_IPAM_CLASS) + def test_ipam_driver_is_loaded_with_subnet_pool_id(self, ipam_mock): + subnet_pool_id = 'SomePoolID' + self._load_ipam_driver('fake', subnet_pool_id) + ipam_mock.assert_called_once_with(subnet_pool_id, self.ctx) diff --git a/setup.cfg b/setup.cfg old mode 100644 new mode 100755 index 3934f54a42a..593760c5c67 --- a/setup.cfg +++ b/setup.cfg @@ -198,6 +198,9 @@ neutron.ml2.extension_drivers = cisco_n1kv_ext = neutron.plugins.ml2.drivers.cisco.n1kv.n1kv_ext_driver:CiscoN1kvExtensionDriver neutron.openstack.common.cache.backends = memory = neutron.openstack.common.cache._backends.memory:MemoryBackend +neutron.ipam_drivers = + fake = neutron.tests.unit.ipam.fake_driver:FakeDriver + internal = neutron.ipam.drivers.neutrondb_ipam.driver:NeutronDbPool # These are for backwards compat with Icehouse notification_driver configuration values oslo.messaging.notify.drivers = neutron.openstack.common.notifier.log_notifier = oslo_messaging.notify._impl_log:LogDriver From 
70e07629bf263370102350d922d62970c60d5aa4 Mon Sep 17 00:00:00 2001 From: Salvatore Orlando Date: Tue, 19 May 2015 09:27:14 -0700 Subject: [PATCH 096/292] Add a non-mixin function for model queries This patch simply adds a version of model_query in neutron.db.common_db_mixin which can be invoked without having to declare a class which inherits the mixin. To this aim, model_query_scope has been refactored as well. As the model query function being introduced in this patch cannot use model query hooks (and does not need to), the method was re-implemented rather than bringing out of the mixin as it has been done for model_query_scope. This change will allow for developing DB APIs without having to use the baseDB/mixin classes models used so far. Related-Blueprint: better-quotas Change-Id: I7a79980f626e9eaf2775711c8a25f508067e5716 --- neutron/db/common_db_mixin.py | 25 ++++++++++++++++++++----- 1 file changed, 20 insertions(+), 5 deletions(-) diff --git a/neutron/db/common_db_mixin.py b/neutron/db/common_db_mixin.py index 143c8f0e416..edd4039a1ac 100644 --- a/neutron/db/common_db_mixin.py +++ b/neutron/db/common_db_mixin.py @@ -22,6 +22,25 @@ from neutron.common import exceptions as n_exc from neutron.db import sqlalchemyutils +def model_query_scope(context, model): + # Unless a context has 'admin' or 'advanced-service' rights the + # query will be scoped to a single tenant_id + return ((not context.is_admin and hasattr(model, 'tenant_id')) and + (not context.is_advsvc and hasattr(model, 'tenant_id'))) + + +def model_query(context, model): + query = context.session.query(model) + # define basic filter condition for model query + query_filter = None + if model_query_scope(context, model): + query_filter = (model.tenant_id == context.tenant_id) + + if query_filter is not None: + query = query.filter(query_filter) + return query + + class CommonDbMixin(object): """Common methods used in core and service plugins.""" # Plugins, mixin classes implementing extension will 
register @@ -72,11 +91,7 @@ class CommonDbMixin(object): return weakref.proxy(self) def model_query_scope(self, context, model): - # NOTE(jkoelker) non-admin queries are scoped to their tenant_id - # NOTE(salvatore-orlando): unless the model allows for shared objects - # NOTE(mestery): Or the user has the advsvc role - return ((not context.is_admin and hasattr(model, 'tenant_id')) and - (not context.is_advsvc and hasattr(model, 'tenant_id'))) + return model_query_scope(context, model) def _model_query(self, context, model): query = context.session.query(model) From 96e2314c8c0f06967b53828b0a771973f0031243 Mon Sep 17 00:00:00 2001 From: Assaf Muller Date: Fri, 29 May 2015 19:17:34 -0400 Subject: [PATCH 097/292] Modify ipset functional tests to pass on older machines Production code uses ipset exclusively in the root namespace, however functional testing uses ipset in namespace for isolation. This poses an issue as ipset is not supported in namespaces on all kernels and distributions (I'm looking at you CentOS/RHEL 7.1). This patch changes the ipset functional tests to work in the root namespace while taking care of cleanups. Change-Id: I08b2f59197ed76e59b2e58b5a10820653e857cda Closes-Bug: #1460220 --- .../functional/agent/linux/test_ipset.py | 59 +++++++++++-------- 1 file changed, 33 insertions(+), 26 deletions(-) diff --git a/neutron/tests/functional/agent/linux/test_ipset.py b/neutron/tests/functional/agent/linux/test_ipset.py index 703db22449b..a575ad6318b 100644 --- a/neutron/tests/functional/agent/linux/test_ipset.py +++ b/neutron/tests/functional/agent/linux/test_ipset.py @@ -1,4 +1,4 @@ -# Copyright (c) 2014 Red Hat, Inc. +# Copyright (c) 2015 Red Hat, Inc. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -21,9 +21,8 @@ from neutron.tests.common import net_helpers from neutron.tests.functional.agent.linux import base from neutron.tests.functional import base as functional_base -IPSET_SET = 'test-set' +MAX_IPSET_NAME_LENGTH = 28 IPSET_ETHERTYPE = 'IPv4' -ICMP_ACCEPT_RULE = '-p icmp -m set --match-set %s src -j ACCEPT' % IPSET_SET UNRELATED_IP = '1.1.1.1' @@ -36,13 +35,17 @@ class IpsetBase(functional_base.BaseSudoTestCase): self.source, self.destination = self.useFixture( machine_fixtures.PeerMachines(bridge)).machines + self.ipset_name = base.get_rand_name(MAX_IPSET_NAME_LENGTH, 'set-') + self.icmp_accept_rule = ('-p icmp -m set --match-set %s src -j ACCEPT' + % self.ipset_name) self.ipset = self._create_ipset_manager_and_set( - ip_lib.IPWrapper(self.destination.namespace), IPSET_SET) - + ip_lib.IPWrapper(self.destination.namespace), self.ipset_name) + self.addCleanup(self.ipset._destroy, self.ipset_name) self.dst_iptables = iptables_manager.IptablesManager( namespace=self.destination.namespace) - self._add_iptables_ipset_rules(self.dst_iptables) + self._add_iptables_ipset_rules() + self.addCleanup(self._remove_iptables_ipset_rules) def _create_ipset_manager_and_set(self, dst_ns, set_name): ipset = ipset_manager.IpsetManager( @@ -51,45 +54,49 @@ class IpsetBase(functional_base.BaseSudoTestCase): ipset._create_set(set_name, IPSET_ETHERTYPE) return ipset - @staticmethod - def _remove_iptables_ipset_rules(iptables_manager): - iptables_manager.ipv4['filter'].remove_rule('INPUT', ICMP_ACCEPT_RULE) - iptables_manager.apply() + def _remove_iptables_ipset_rules(self): + self.dst_iptables.ipv4['filter'].remove_rule( + 'INPUT', base.ICMP_BLOCK_RULE) + self.dst_iptables.ipv4['filter'].remove_rule( + 'INPUT', self.icmp_accept_rule) + self.dst_iptables.apply() - @staticmethod - def _add_iptables_ipset_rules(iptables_manager): - iptables_manager.ipv4['filter'].add_rule('INPUT', ICMP_ACCEPT_RULE) - 
iptables_manager.ipv4['filter'].add_rule('INPUT', base.ICMP_BLOCK_RULE) - iptables_manager.apply() + def _add_iptables_ipset_rules(self): + self.dst_iptables.ipv4['filter'].add_rule( + 'INPUT', self.icmp_accept_rule) + self.dst_iptables.ipv4['filter'].add_rule( + 'INPUT', base.ICMP_BLOCK_RULE) + self.dst_iptables.apply() class IpsetManagerTestCase(IpsetBase): def test_add_member_allows_ping(self): self.source.assert_no_ping(self.destination.ip) - self.ipset._add_member_to_set(IPSET_SET, self.source.ip) + self.ipset._add_member_to_set(self.ipset_name, self.source.ip) self.source.assert_ping(self.destination.ip) def test_del_member_denies_ping(self): - self.ipset._add_member_to_set(IPSET_SET, self.source.ip) + self.ipset._add_member_to_set(self.ipset_name, self.source.ip) self.source.assert_ping(self.destination.ip) - self.ipset._del_member_from_set(IPSET_SET, self.source.ip) + self.ipset._del_member_from_set(self.ipset_name, self.source.ip) self.source.assert_no_ping(self.destination.ip) def test_refresh_ipset_allows_ping(self): - self.ipset._refresh_set(IPSET_SET, [UNRELATED_IP], IPSET_ETHERTYPE) + self.ipset._refresh_set( + self.ipset_name, [UNRELATED_IP], IPSET_ETHERTYPE) self.source.assert_no_ping(self.destination.ip) - self.ipset._refresh_set(IPSET_SET, [UNRELATED_IP, self.source.ip], - IPSET_ETHERTYPE) + self.ipset._refresh_set( + self.ipset_name, [UNRELATED_IP, self.source.ip], IPSET_ETHERTYPE) self.source.assert_ping(self.destination.ip) - self.ipset._refresh_set(IPSET_SET, [self.source.ip, UNRELATED_IP], - IPSET_ETHERTYPE) + self.ipset._refresh_set( + self.ipset_name, [self.source.ip, UNRELATED_IP], IPSET_ETHERTYPE) self.source.assert_ping(self.destination.ip) def test_destroy_ipset_set(self): - self.assertRaises(RuntimeError, self.ipset._destroy, IPSET_SET) - self._remove_iptables_ipset_rules(self.dst_iptables) - self.ipset._destroy(IPSET_SET) + self.assertRaises(RuntimeError, self.ipset._destroy, self.ipset_name) + self._remove_iptables_ipset_rules() + 
self.ipset._destroy(self.ipset_name) From 3751f9ab349ea5b4c3d9836592e7928c37211091 Mon Sep 17 00:00:00 2001 From: Cyril Roelandt Date: Mon, 1 Jun 2015 20:53:46 +0000 Subject: [PATCH 098/292] Python3: use six.iteritems() instead of dict.iteritems() This also adds a check to neutron/hacking/checks.py that should catch this error in the future. Blueprint: neutron-python3 Change-Id: Ie7b833ffa173772d39b85ee3ecaddace18e1274f --- HACKING.rst | 1 + neutron/agent/common/ovs_lib.py | 2 +- neutron/agent/l3/router_info.py | 3 ++- neutron/agent/linux/dhcp.py | 2 +- neutron/agent/linux/iptables_firewall.py | 9 +++++---- neutron/agent/linux/iptables_manager.py | 3 ++- neutron/api/api_common.py | 3 ++- neutron/api/extensions.py | 16 ++++++++-------- .../rpc/agentnotifiers/metering_rpc_agent_api.py | 3 ++- neutron/api/rpc/handlers/l3_rpc.py | 3 ++- neutron/api/v2/attributes.py | 8 ++++---- neutron/api/v2/base.py | 13 +++++++------ neutron/api/v2/router.py | 3 ++- neutron/common/utils.py | 3 ++- neutron/db/common_db_mixin.py | 12 ++++++------ neutron/db/l3_agentschedulers_db.py | 3 ++- neutron/db/l3_db.py | 3 ++- .../14be42f3d0a5_default_sec_group_table.py | 3 ++- neutron/hacking/checks.py | 10 +++++++++- neutron/manager.py | 4 +++- neutron/plugins/ibm/agent/sdnve_neutron_agent.py | 3 ++- neutron/plugins/ml2/db.py | 3 ++- neutron/plugins/ml2/drivers/cisco/ncs/driver.py | 3 ++- neutron/plugins/ml2/drivers/type_flat.py | 3 ++- neutron/plugins/ml2/drivers/type_local.py | 3 ++- neutron/plugins/ml2/managers.py | 3 ++- .../openvswitch/agent/ovs_neutron_agent.py | 5 +++-- neutron/plugins/sriovnicagent/eswitch_manager.py | 3 ++- neutron/policy.py | 3 ++- neutron/tests/api/admin/test_quotas.py | 5 +++-- neutron/tests/api/test_dhcp_ipv6.py | 9 +++++---- neutron/tests/api/test_fwaas_extensions.py | 7 ++++--- neutron/tests/api/test_load_balancer.py | 11 ++++++----- neutron/tests/api/test_networks.py | 3 ++- neutron/tests/api/test_routers.py | 3 ++- neutron/tests/api/test_vpnaas_extensions.py | 
5 +++-- neutron/tests/base.py | 5 +++-- neutron/tests/fullstack/config_fixtures.py | 2 +- neutron/tests/functional/agent/test_l3_agent.py | 3 ++- neutron/tests/tempest/common/custom_matchers.py | 3 ++- .../tempest/common/generator/base_generator.py | 3 ++- .../tempest/common/generator/valid_generator.py | 3 ++- neutron/tests/tools.py | 5 +++-- .../unit/agent/linux/test_iptables_firewall.py | 3 ++- neutron/tests/unit/api/test_extensions.py | 5 +++-- neutron/tests/unit/api/v2/test_base.py | 3 ++- neutron/tests/unit/db/test_db_base_plugin_v2.py | 3 ++- .../tests/unit/extensions/test_securitygroup.py | 7 ++++--- .../unit/extensions/test_vlantransparent.py | 5 +++-- neutron/tests/unit/hacking/test_checks.py | 5 +++++ neutron/wsgi.py | 3 ++- 51 files changed, 147 insertions(+), 90 deletions(-) diff --git a/HACKING.rst b/HACKING.rst index 101726231dc..8db85525d38 100644 --- a/HACKING.rst +++ b/HACKING.rst @@ -16,6 +16,7 @@ Neutron Specific Commandments - [N324] Prevent use of deprecated contextlib.nested. - [N325] Python 3: Do not use xrange. - [N326] Python 3: do not use basestring. +- [N327] Python 3: do not use dict.iteritems. 
Creating Unit Tests ------------------- diff --git a/neutron/agent/common/ovs_lib.py b/neutron/agent/common/ovs_lib.py index b316584f9eb..4fa5236ce65 100644 --- a/neutron/agent/common/ovs_lib.py +++ b/neutron/agent/common/ovs_lib.py @@ -555,7 +555,7 @@ def _build_flow_expr_str(flow_dict, cmd): raise exceptions.InvalidInput(error_message=msg) actions = "actions=%s" % flow_dict.pop('actions') - for key, value in flow_dict.iteritems(): + for key, value in six.iteritems(flow_dict): if key == 'proto': flow_expr_arr.append(value) else: diff --git a/neutron/agent/l3/router_info.py b/neutron/agent/l3/router_info.py index c1336355e6e..fa3d186e40a 100644 --- a/neutron/agent/l3/router_info.py +++ b/neutron/agent/l3/router_info.py @@ -15,6 +15,7 @@ import netaddr from oslo_log import log as logging +import six from neutron.agent.l3 import namespaces from neutron.agent.linux import ip_lib @@ -499,7 +500,7 @@ class RouterInfo(object): if ex_gw_port: def _gateway_ports_equal(port1, port2): def _get_filtered_dict(d, ignore): - return dict((k, v) for k, v in d.iteritems() + return dict((k, v) for k, v in six.iteritems(d) if k not in ignore) keys_to_ignore = set(['binding:host_id']) diff --git a/neutron/agent/linux/dhcp.py b/neutron/agent/linux/dhcp.py index ba3431b8d4a..a6f0d6d6f03 100644 --- a/neutron/agent/linux/dhcp.py +++ b/neutron/agent/linux/dhcp.py @@ -74,7 +74,7 @@ class DictModel(dict): else: return item - for key, value in self.iteritems(): + for key, value in six.iteritems(self): if isinstance(value, (list, tuple)): # Keep the same type but convert dicts to DictModels self[key] = type(value)( diff --git a/neutron/agent/linux/iptables_firewall.py b/neutron/agent/linux/iptables_firewall.py index 840fba7f6f7..4dd988fde8b 100644 --- a/neutron/agent/linux/iptables_firewall.py +++ b/neutron/agent/linux/iptables_firewall.py @@ -17,6 +17,7 @@ import collections import netaddr from oslo_config import cfg from oslo_log import log as logging +import six from neutron.agent import 
firewall from neutron.agent.linux import ipset_manager @@ -591,7 +592,7 @@ class IptablesFirewallDriver(firewall.FirewallDriver): remote_sgs_to_remove = self._determine_remote_sgs_to_remove( filtered_ports) - for ip_version, remote_sg_ids in remote_sgs_to_remove.iteritems(): + for ip_version, remote_sg_ids in six.iteritems(remote_sgs_to_remove): self._clear_sg_members(ip_version, remote_sg_ids) if self.enable_ipset: self._remove_ipsets_for_remote_sgs(ip_version, remote_sg_ids) @@ -613,7 +614,7 @@ class IptablesFirewallDriver(firewall.FirewallDriver): remote_group_id_sets = self._get_remote_sg_ids_sets_by_ipversion( filtered_ports) for ip_version, remote_group_id_set in ( - remote_group_id_sets.iteritems()): + six.iteritems(remote_group_id_sets)): sgs_to_remove_per_ipversion[ip_version].update( set(self.pre_sg_members) - remote_group_id_set) return sgs_to_remove_per_ipversion @@ -623,8 +624,8 @@ class IptablesFirewallDriver(firewall.FirewallDriver): remote_group_id_sets = {constants.IPv4: set(), constants.IPv6: set()} for port in filtered_ports: - for ip_version, sg_ids in self._get_remote_sg_ids( - port).iteritems(): + remote_sg_ids = self._get_remote_sg_ids(port) + for ip_version, sg_ids in six.iteritems(remote_sg_ids): remote_group_id_sets[ip_version].update(sg_ids) return remote_group_id_sets diff --git a/neutron/agent/linux/iptables_manager.py b/neutron/agent/linux/iptables_manager.py index 38e40f0c735..20d38a3f12b 100644 --- a/neutron/agent/linux/iptables_manager.py +++ b/neutron/agent/linux/iptables_manager.py @@ -28,6 +28,7 @@ from oslo_concurrency import lockutils from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils +import six from neutron.agent.common import config from neutron.agent.linux import iptables_comments as ic @@ -347,7 +348,7 @@ class IptablesManager(object): elif ip_version == 6: tables = self.ipv6 - for table, chains in builtin_chains[ip_version].iteritems(): + for table, chains in 
six.iteritems(builtin_chains[ip_version]): for chain in chains: tables[table].add_chain(chain) tables[table].add_rule(chain, '-j $%s' % diff --git a/neutron/api/api_common.py b/neutron/api/api_common.py index b97e3a39130..e8a310247ae 100644 --- a/neutron/api/api_common.py +++ b/neutron/api/api_common.py @@ -17,6 +17,7 @@ import urllib from oslo_config import cfg from oslo_log import log as logging +import six from webob import exc from neutron.common import constants @@ -36,7 +37,7 @@ def get_filters(request, attr_info, skips=[]): {'check': [u'a', u'b'], 'name': [u'Bob']} """ res = {} - for key, values in request.GET.dict_of_lists().iteritems(): + for key, values in six.iteritems(request.GET.dict_of_lists()): if key in skips: continue values = [v for v in values if v] diff --git a/neutron/api/extensions.py b/neutron/api/extensions.py index aa4e2b9572d..fa275bfe02f 100644 --- a/neutron/api/extensions.py +++ b/neutron/api/extensions.py @@ -170,7 +170,7 @@ class ExtensionDescriptor(object): if not extension_attrs_map: return - for resource, attrs in extension_attrs_map.iteritems(): + for resource, attrs in six.iteritems(extension_attrs_map): extended_attrs = extended_attributes.get(resource) if extended_attrs: attrs.update(extended_attrs) @@ -200,7 +200,7 @@ class ActionExtensionController(wsgi.Controller): def action(self, request, id): input_dict = self._deserialize(request.body, request.get_content_type()) - for action_name, handler in self.action_handlers.iteritems(): + for action_name, handler in six.iteritems(self.action_handlers): if action_name in input_dict: return handler(input_dict, request, id) # no action handler found (bump to downstream application) @@ -242,7 +242,7 @@ class ExtensionController(wsgi.Controller): def index(self, request): extensions = [] - for _alias, ext in self.extension_manager.extensions.iteritems(): + for _alias, ext in six.iteritems(self.extension_manager.extensions): extensions.append(self._translate(ext)) return 
dict(extensions=extensions) @@ -283,7 +283,7 @@ class ExtensionMiddleware(wsgi.Middleware): LOG.debug('Extended resource: %s', resource.collection) - for action, method in resource.collection_actions.iteritems(): + for action, method in six.iteritems(resource.collection_actions): conditions = dict(method=[method]) path = "/%s/%s" % (resource.collection, action) with mapper.submapper(controller=resource.controller, @@ -474,11 +474,11 @@ class ExtensionManager(object): continue try: extended_attrs = ext.get_extended_resources(version) - for resource, resource_attrs in extended_attrs.iteritems(): - if attr_map.get(resource, None): - attr_map[resource].update(resource_attrs) + for res, resource_attrs in six.iteritems(extended_attrs): + if attr_map.get(res, None): + attr_map[res].update(resource_attrs) else: - attr_map[resource] = resource_attrs + attr_map[res] = resource_attrs except AttributeError: LOG.exception(_LE("Error fetching extended attributes for " "extension '%s'"), ext.get_name()) diff --git a/neutron/api/rpc/agentnotifiers/metering_rpc_agent_api.py b/neutron/api/rpc/agentnotifiers/metering_rpc_agent_api.py index 7ce721c8a18..6787781b866 100644 --- a/neutron/api/rpc/agentnotifiers/metering_rpc_agent_api.py +++ b/neutron/api/rpc/agentnotifiers/metering_rpc_agent_api.py @@ -14,6 +14,7 @@ from oslo_log import log as logging import oslo_messaging +import six from neutron.common import constants from neutron.common import rpc as n_rpc @@ -58,7 +59,7 @@ class MeteringAgentNotifyAPI(object): l3_router.append(router) l3_routers[l3_agent.host] = l3_router - for host, routers in l3_routers.iteritems(): + for host, routers in six.iteritems(l3_routers): cctxt = self.client.prepare(server=host) cctxt.cast(context, method, routers=routers) diff --git a/neutron/api/rpc/handlers/l3_rpc.py b/neutron/api/rpc/handlers/l3_rpc.py index 936bf0b27e5..3cc50a5dbff 100644 --- a/neutron/api/rpc/handlers/l3_rpc.py +++ b/neutron/api/rpc/handlers/l3_rpc.py @@ -17,6 +17,7 @@ from 
oslo_config import cfg from oslo_log import log as logging import oslo_messaging from oslo_serialization import jsonutils +import six from neutron.common import constants from neutron.common import exceptions @@ -162,7 +163,7 @@ class L3RpcCallback(object): def update_floatingip_statuses(self, context, router_id, fip_statuses): """Update operational status for a floating IP.""" with context.session.begin(subtransactions=True): - for (floatingip_id, status) in fip_statuses.iteritems(): + for (floatingip_id, status) in six.iteritems(fip_statuses): LOG.debug("New status for floating IP %(floatingip_id)s: " "%(status)s", {'floatingip_id': floatingip_id, 'status': status}) diff --git a/neutron/api/v2/attributes.py b/neutron/api/v2/attributes.py index 6bf9e94d2cd..8adecc1ed38 100644 --- a/neutron/api/v2/attributes.py +++ b/neutron/api/v2/attributes.py @@ -409,7 +409,7 @@ def _validate_dict_item(key, key_validator, data): # TODO(salv-orlando): Structure of dict attributes should be improved # to avoid iterating over items val_func = val_params = None - for (k, v) in key_validator.iteritems(): + for (k, v) in six.iteritems(key_validator): if k.startswith('type:'): # ask forgiveness, not permission try: @@ -435,7 +435,7 @@ def _validate_dict(data, key_specs=None): return # Check whether all required keys are present - required_keys = [key for key, spec in key_specs.iteritems() + required_keys = [key for key, spec in six.iteritems(key_specs) if spec.get('required')] if required_keys: @@ -445,7 +445,7 @@ def _validate_dict(data, key_specs=None): # Perform validation and conversion of all values # according to the specifications. 
- for key, key_validator in [(k, v) for k, v in key_specs.iteritems() + for key, key_validator in [(k, v) for k, v in six.iteritems(key_specs) if k in data]: msg = _validate_dict_item(key, key_validator, data) if msg: @@ -546,7 +546,7 @@ def convert_kvp_list_to_dict(kvp_list): key, value = convert_kvp_str_to_list(kvp_str) kvp_map.setdefault(key, set()) kvp_map[key].add(value) - return dict((x, list(y)) for x, y in kvp_map.iteritems()) + return dict((x, list(y)) for x, y in six.iteritems(kvp_map)) def convert_none_to_empty_list(value): diff --git a/neutron/api/v2/base.py b/neutron/api/v2/base.py index 4e795cf2a75..ea4d45b2cb4 100644 --- a/neutron/api/v2/base.py +++ b/neutron/api/v2/base.py @@ -19,6 +19,7 @@ import netaddr from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils +import six import webob.exc from neutron.api import api_common @@ -109,7 +110,7 @@ class Controller(object): self._resource) def _get_primary_key(self, default_primary_key='id'): - for key, value in self._attr_info.iteritems(): + for key, value in six.iteritems(self._attr_info): if value.get('primary_key', False): return key return default_primary_key @@ -170,7 +171,7 @@ class Controller(object): def _filter_attributes(self, context, data, fields_to_strip=None): if not fields_to_strip: return data - return dict(item for item in data.iteritems() + return dict(item for item in six.iteritems(data) if (item[0] not in fields_to_strip)) def _do_field_list(self, original_fields): @@ -517,7 +518,7 @@ class Controller(object): # Load object to check authz # but pass only attributes in the original body and required # by the policy engine to the policy 'brain' - field_list = [name for (name, value) in self._attr_info.iteritems() + field_list = [name for (name, value) in six.iteritems(self._attr_info) if (value.get('required_by_policy') or value.get('primary_key') or 'default' not in value)] @@ -621,7 +622,7 @@ class Controller(object): 
Controller._verify_attributes(res_dict, attr_info) if is_create: # POST - for attr, attr_vals in attr_info.iteritems(): + for attr, attr_vals in six.iteritems(attr_info): if attr_vals['allow_post']: if ('default' not in attr_vals and attr not in res_dict): @@ -635,12 +636,12 @@ class Controller(object): msg = _("Attribute '%s' not allowed in POST") % attr raise webob.exc.HTTPBadRequest(msg) else: # PUT - for attr, attr_vals in attr_info.iteritems(): + for attr, attr_vals in six.iteritems(attr_info): if attr in res_dict and not attr_vals['allow_put']: msg = _("Cannot update read-only attribute %s") % attr raise webob.exc.HTTPBadRequest(msg) - for attr, attr_vals in attr_info.iteritems(): + for attr, attr_vals in six.iteritems(attr_info): if (attr not in res_dict or res_dict[attr] is attributes.ATTR_NOT_SPECIFIED): continue diff --git a/neutron/api/v2/router.py b/neutron/api/v2/router.py index 1ae7852936a..c76f2d02ac5 100644 --- a/neutron/api/v2/router.py +++ b/neutron/api/v2/router.py @@ -16,6 +16,7 @@ from oslo_config import cfg from oslo_log import log as logging import routes as routes_mapper +import six import six.moves.urllib.parse as urlparse import webob import webob.dec @@ -51,7 +52,7 @@ class Index(wsgi.Application): metadata = {} layout = [] - for name, collection in self.resources.iteritems(): + for name, collection in six.iteritems(self.resources): href = urlparse.urljoin(req.path_url, collection) resource = {'name': name, 'collection': collection, diff --git a/neutron/common/utils.py b/neutron/common/utils.py index a88e8d63408..53308908b30 100644 --- a/neutron/common/utils.py +++ b/neutron/common/utils.py @@ -35,6 +35,7 @@ from oslo_concurrency import lockutils from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils +import six from neutron.common import constants as q_const @@ -235,7 +236,7 @@ def compare_elements(a, b): def dict2str(dic): return ','.join("%s=%s" % (key, val) - for key, val in 
sorted(dic.iteritems())) + for key, val in sorted(six.iteritems(dic))) def str2dict(string): diff --git a/neutron/db/common_db_mixin.py b/neutron/db/common_db_mixin.py index 143c8f0e416..ca8ca740e2d 100644 --- a/neutron/db/common_db_mixin.py +++ b/neutron/db/common_db_mixin.py @@ -89,8 +89,8 @@ class CommonDbMixin(object): else: query_filter = (model.tenant_id == context.tenant_id) # Execute query hooks registered from mixins and plugins - for _name, hooks in self._model_query_hooks.get(model, - {}).iteritems(): + for _name, hooks in six.iteritems(self._model_query_hooks.get(model, + {})): query_hook = hooks.get('query') if isinstance(query_hook, six.string_types): query_hook = getattr(self, query_hook, None) @@ -132,15 +132,15 @@ class CommonDbMixin(object): def _apply_filters_to_query(self, query, model, filters): if filters: - for key, value in filters.iteritems(): + for key, value in six.iteritems(filters): column = getattr(model, key, None) if column: if not value: query = query.filter(sql.false()) return query query = query.filter(column.in_(value)) - for _name, hooks in self._model_query_hooks.get(model, - {}).iteritems(): + for _nam, hooks in six.iteritems(self._model_query_hooks.get(model, + {})): result_filter = hooks.get('result_filters', None) if isinstance(result_filter, six.string_types): result_filter = getattr(self, result_filter, None) @@ -201,4 +201,4 @@ class CommonDbMixin(object): """ columns = [c.name for c in model.__table__.columns] return dict((k, v) for (k, v) in - data.iteritems() if k in columns) + six.iteritems(data) if k in columns) diff --git a/neutron/db/l3_agentschedulers_db.py b/neutron/db/l3_agentschedulers_db.py index f661dcc6221..29edd45d5ee 100644 --- a/neutron/db/l3_agentschedulers_db.py +++ b/neutron/db/l3_agentschedulers_db.py @@ -17,6 +17,7 @@ from oslo_config import cfg from oslo_db import exception as db_exc from oslo_log import log as logging import oslo_messaging +import six import sqlalchemy as sa from sqlalchemy import 
func from sqlalchemy import or_ @@ -348,7 +349,7 @@ class L3AgentSchedulerDbMixin(l3agentscheduler.L3AgentSchedulerPluginBase, if active is not None: query = (query.filter(agents_db.Agent.admin_state_up == active)) if filters: - for key, value in filters.iteritems(): + for key, value in six.iteritems(filters): column = getattr(agents_db.Agent, key, None) if column: if not value: diff --git a/neutron/db/l3_db.py b/neutron/db/l3_db.py index a3aadd9d754..8f8f70b86e9 100644 --- a/neutron/db/l3_db.py +++ b/neutron/db/l3_db.py @@ -19,6 +19,7 @@ from sqlalchemy import orm from sqlalchemy.orm import exc from oslo_utils import excutils +import six from neutron.api.rpc.agentnotifiers import l3_rpc_agent_api from neutron.api.v2 import attributes @@ -1013,7 +1014,7 @@ class L3_NAT_dbonly_mixin(l3.RouterPluginBase): marker_obj = self._get_marker_obj(context, 'floatingip', limit, marker) if filters is not None: - for key, val in API_TO_DB_COLUMN_MAP.iteritems(): + for key, val in six.iteritems(API_TO_DB_COLUMN_MAP): if key in filters: filters[val] = filters.pop(key) diff --git a/neutron/db/migration/alembic_migrations/versions/14be42f3d0a5_default_sec_group_table.py b/neutron/db/migration/alembic_migrations/versions/14be42f3d0a5_default_sec_group_table.py index d403f6c699b..ca5c6aafb47 100644 --- a/neutron/db/migration/alembic_migrations/versions/14be42f3d0a5_default_sec_group_table.py +++ b/neutron/db/migration/alembic_migrations/versions/14be42f3d0a5_default_sec_group_table.py @@ -26,6 +26,7 @@ revision = '14be42f3d0a5' down_revision = '26b54cf9024d' from alembic import op +import six import sqlalchemy as sa from neutron.common import exceptions @@ -69,7 +70,7 @@ def check_sanity(connection): raise DuplicateSecurityGroupsNamedDefault( duplicates='; '.join('tenant %s: %s' % (tenant_id, ', '.join(groups)) - for tenant_id, groups in res.iteritems())) + for tenant_id, groups in six.iteritems(res))) def get_duplicate_default_security_groups(connection): diff --git 
a/neutron/hacking/checks.py b/neutron/hacking/checks.py index c6160072d58..4b56325f9c8 100644 --- a/neutron/hacking/checks.py +++ b/neutron/hacking/checks.py @@ -15,6 +15,7 @@ import re import pep8 +import six # Guidelines for writing new hacking checks # @@ -48,7 +49,7 @@ def _regex_for_level(level, hint): log_translation_hint = re.compile( '|'.join('(?:%s)' % _regex_for_level(level, hint) - for level, hint in _all_log_levels.iteritems())) + for level, hint in six.iteritems(_all_log_levels))) oslo_namespace_imports_dot = re.compile(r"import[\s]+oslo[.][^\s]+") oslo_namespace_imports_from_dot = re.compile(r"from[\s]+oslo[.]") @@ -166,6 +167,12 @@ def check_no_basestring(logical_line): yield(0, msg) +def check_python3_no_iteritems(logical_line): + if re.search(r".*\.iteritems\(\)", logical_line): + msg = ("N327: Use six.iteritems() instead of dict.iteritems().") + yield(0, msg) + + def factory(register): register(validate_log_translations) register(use_jsonutils) @@ -175,3 +182,4 @@ def factory(register): register(check_no_contextlib_nested) register(check_python3_xrange) register(check_no_basestring) + register(check_python3_no_iteritems) diff --git a/neutron/manager.py b/neutron/manager.py index 503d79448e7..293861f46d7 100644 --- a/neutron/manager.py +++ b/neutron/manager.py @@ -19,6 +19,7 @@ from oslo_config import cfg from oslo_log import log as logging import oslo_messaging from oslo_utils import importutils +import six from neutron.common import utils from neutron.i18n import _LE, _LI @@ -224,5 +225,6 @@ class NeutronManager(object): @classmethod def get_service_plugins(cls): # Return weakrefs to minimize gc-preventing references. 
+ service_plugins = cls.get_instance().service_plugins return dict((x, weakref.proxy(y)) - for x, y in cls.get_instance().service_plugins.iteritems()) + for x, y in six.iteritems(service_plugins)) diff --git a/neutron/plugins/ibm/agent/sdnve_neutron_agent.py b/neutron/plugins/ibm/agent/sdnve_neutron_agent.py index 71c4a2a7875..f00c9aca484 100644 --- a/neutron/plugins/ibm/agent/sdnve_neutron_agent.py +++ b/neutron/plugins/ibm/agent/sdnve_neutron_agent.py @@ -25,6 +25,7 @@ eventlet.monkey_patch() from oslo_config import cfg from oslo_log import log as logging import oslo_messaging +import six from neutron.agent.common import ovs_lib from neutron.agent.linux import ip_lib @@ -185,7 +186,7 @@ class SdnveNeutronAgent(object): :param interface_mappings: map physical net names to interface names. ''' - for physical_network, interface in interface_mappings.iteritems(): + for physical_network, interface in six.iteritems(interface_mappings): LOG.info(_LI("Mapping physical network %(physical_network)s to " "interface %(interface)s"), {'physical_network': physical_network, diff --git a/neutron/plugins/ml2/db.py b/neutron/plugins/ml2/db.py index c6aef07d682..fc4b50434de 100644 --- a/neutron/plugins/ml2/db.py +++ b/neutron/plugins/ml2/db.py @@ -15,6 +15,7 @@ from oslo_db import exception as db_exc from oslo_log import log +import six from sqlalchemy import or_ from sqlalchemy.orm import exc @@ -268,7 +269,7 @@ def get_ports_and_sgs(context, port_ids): return [] ports_to_sg_ids = get_sg_ids_grouped_by_port(context, port_ids) return [make_port_dict_with_security_groups(port, sec_groups) - for port, sec_groups in ports_to_sg_ids.iteritems()] + for port, sec_groups in six.iteritems(ports_to_sg_ids)] def get_sg_ids_grouped_by_port(context, port_ids): diff --git a/neutron/plugins/ml2/drivers/cisco/ncs/driver.py b/neutron/plugins/ml2/drivers/cisco/ncs/driver.py index 8fcfa721c87..df79db083d3 100644 --- a/neutron/plugins/ml2/drivers/cisco/ncs/driver.py +++ 
b/neutron/plugins/ml2/drivers/cisco/ncs/driver.py @@ -19,6 +19,7 @@ from oslo_config import cfg from oslo_log import log from oslo_serialization import jsonutils import requests +import six from neutron.plugins.ml2 import driver_api as api @@ -173,7 +174,7 @@ class NCSMechanismDriver(api.MechanismDriver): """ if isinstance(obj, dict): obj = dict((self.escape(k), self.escape_keys(v)) - for k, v in obj.iteritems()) + for k, v in six.iteritems(obj)) if isinstance(obj, list): obj = [self.escape_keys(x) for x in obj] return obj diff --git a/neutron/plugins/ml2/drivers/type_flat.py b/neutron/plugins/ml2/drivers/type_flat.py index ed055f56083..ad51bf0f37c 100644 --- a/neutron/plugins/ml2/drivers/type_flat.py +++ b/neutron/plugins/ml2/drivers/type_flat.py @@ -16,6 +16,7 @@ from oslo_config import cfg from oslo_db import exception as db_exc from oslo_log import log +import six import sqlalchemy as sa from neutron.common import exceptions as exc @@ -97,7 +98,7 @@ class FlatTypeDriver(helpers.BaseTypeDriver): % physical_network) raise exc.InvalidInput(error_message=msg) - for key, value in segment.iteritems(): + for key, value in six.iteritems(segment): if value and key not in [api.NETWORK_TYPE, api.PHYSICAL_NETWORK]: msg = _("%s prohibited for flat provider network") % key diff --git a/neutron/plugins/ml2/drivers/type_local.py b/neutron/plugins/ml2/drivers/type_local.py index 3bb7e2493f3..791df111dc7 100644 --- a/neutron/plugins/ml2/drivers/type_local.py +++ b/neutron/plugins/ml2/drivers/type_local.py @@ -14,6 +14,7 @@ # under the License. 
from oslo_log import log +import six from neutron.common import exceptions as exc from neutron.i18n import _LI @@ -46,7 +47,7 @@ class LocalTypeDriver(api.TypeDriver): return False def validate_provider_segment(self, segment): - for key, value in segment.iteritems(): + for key, value in six.iteritems(segment): if value and key != api.NETWORK_TYPE: msg = _("%s prohibited for local provider network") % key raise exc.InvalidInput(error_message=msg) diff --git a/neutron/plugins/ml2/managers.py b/neutron/plugins/ml2/managers.py index 889455f88cc..1d1d204a0c5 100644 --- a/neutron/plugins/ml2/managers.py +++ b/neutron/plugins/ml2/managers.py @@ -15,6 +15,7 @@ from oslo_config import cfg from oslo_log import log +import six import stevedore from neutron.api.v2 import attributes @@ -164,7 +165,7 @@ class TypeManager(stevedore.named.NamedExtensionManager): network[provider.SEGMENTATION_ID] = segment[api.SEGMENTATION_ID] def initialize(self): - for network_type, driver in self.drivers.iteritems(): + for network_type, driver in six.iteritems(self.drivers): LOG.info(_LI("Initializing driver for type '%s'"), network_type) driver.obj.initialize() diff --git a/neutron/plugins/openvswitch/agent/ovs_neutron_agent.py b/neutron/plugins/openvswitch/agent/ovs_neutron_agent.py index 003cbdf06d3..927502590b2 100644 --- a/neutron/plugins/openvswitch/agent/ovs_neutron_agent.py +++ b/neutron/plugins/openvswitch/agent/ovs_neutron_agent.py @@ -22,6 +22,7 @@ import netaddr from oslo_config import cfg from oslo_log import log as logging import oslo_messaging +import six from six import moves from neutron.agent.common import ovs_lib @@ -340,7 +341,7 @@ class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin, start_listening=False) def get_net_uuid(self, vif_id): - for network_id, vlan_mapping in self.local_vlan_map.iteritems(): + for network_id, vlan_mapping in six.iteritems(self.local_vlan_map): if vif_id in vlan_mapping.vif_ports: return network_id @@ -917,7 +918,7 @@ class 
OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin, ip_wrapper = ip_lib.IPWrapper() ovs = ovs_lib.BaseOVS() ovs_bridges = ovs.get_bridges() - for physical_network, bridge in bridge_mappings.iteritems(): + for physical_network, bridge in six.iteritems(bridge_mappings): LOG.info(_LI("Mapping physical network %(physical_network)s to " "bridge %(bridge)s"), {'physical_network': physical_network, diff --git a/neutron/plugins/sriovnicagent/eswitch_manager.py b/neutron/plugins/sriovnicagent/eswitch_manager.py index 82b8a72c769..760a0e52ca4 100644 --- a/neutron/plugins/sriovnicagent/eswitch_manager.py +++ b/neutron/plugins/sriovnicagent/eswitch_manager.py @@ -18,6 +18,7 @@ import os import re from oslo_log import log as logging +import six from neutron.i18n import _LE, _LW from neutron.plugins.sriovnicagent.common import exceptions as exc @@ -259,7 +260,7 @@ class ESwitchManager(object): """ if exclude_devices is None: exclude_devices = {} - for phys_net, dev_name in device_mappings.iteritems(): + for phys_net, dev_name in six.iteritems(device_mappings): self._create_emb_switch(phys_net, dev_name, exclude_devices.get(dev_name, set())) diff --git a/neutron/policy.py b/neutron/policy.py index ab25fb54e93..9352a00a1b9 100644 --- a/neutron/policy.py +++ b/neutron/policy.py @@ -25,6 +25,7 @@ import re from oslo_log import log as logging from oslo_utils import excutils from oslo_utils import importutils +import six from neutron.api.v2 import attributes from neutron.common import constants as const @@ -146,7 +147,7 @@ def _should_validate_sub_attributes(attribute, sub_attr): validate = attribute.get('validate') return (validate and isinstance(sub_attr, collections.Iterable) and any([k.startswith('type:dict') and - v for (k, v) in validate.iteritems()])) + v for (k, v) in six.iteritems(validate)])) def _build_subattr_match_rule(attr_name, attr, action, target): diff --git a/neutron/tests/api/admin/test_quotas.py b/neutron/tests/api/admin/test_quotas.py index 
7e04e0306ac..0dfe7987584 100644 --- a/neutron/tests/api/admin/test_quotas.py +++ b/neutron/tests/api/admin/test_quotas.py @@ -13,6 +13,7 @@ # License for the specific language governing permissions and limitations # under the License. +import six from tempest_lib.common.utils import data_utils from neutron.tests.api import base @@ -59,7 +60,7 @@ class QuotasTest(base.BaseAdminNetworkTest): quota_set = self.admin_client.update_quotas(tenant_id, **new_quotas) self.addCleanup(self.admin_client.reset_quotas, tenant_id) - for key, value in new_quotas.iteritems(): + for key, value in six.iteritems(new_quotas): self.assertEqual(value, quota_set[key]) # Confirm our tenant is listed among tenants with non default quotas @@ -73,7 +74,7 @@ class QuotasTest(base.BaseAdminNetworkTest): # Confirm from API quotas were changed as requested for tenant quota_set = self.admin_client.show_quotas(tenant_id) quota_set = quota_set['quota'] - for key, value in new_quotas.iteritems(): + for key, value in six.iteritems(new_quotas): self.assertEqual(value, quota_set[key]) # Reset quotas to default and confirm diff --git a/neutron/tests/api/test_dhcp_ipv6.py b/neutron/tests/api/test_dhcp_ipv6.py index 389970d8ad1..0adfc3f6149 100644 --- a/neutron/tests/api/test_dhcp_ipv6.py +++ b/neutron/tests/api/test_dhcp_ipv6.py @@ -16,6 +16,7 @@ import netaddr import random +import six from tempest_lib.common.utils import data_utils from tempest_lib import exceptions as lib_exc @@ -126,7 +127,7 @@ class NetworksTestDHCPv6(base.BaseNetworkTest): ): kwargs = {'ipv6_ra_mode': ra_mode, 'ipv6_address_mode': add_mode} - kwargs = {k: v for k, v in kwargs.iteritems() if v} + kwargs = {k: v for k, v in six.iteritems(kwargs) if v} real_ip, eui_ip = self._get_ips_from_subnet(**kwargs) self._clean_network() self.assertEqual(eui_ip, real_ip, @@ -286,7 +287,7 @@ class NetworksTestDHCPv6(base.BaseNetworkTest): ): kwargs = {'ipv6_ra_mode': ra_mode, 'ipv6_address_mode': add_mode} - kwargs = {k: v for k, v in 
kwargs.iteritems() if v} + kwargs = {k: v for k, v in six.iteritems(kwargs) if v} subnet = self.create_subnet(self.network, **kwargs) port = self.create_port(self.network) port_ip = next(iter(port['fixed_ips']), None)['ip_address'] @@ -313,7 +314,7 @@ class NetworksTestDHCPv6(base.BaseNetworkTest): ): kwargs = {'ipv6_ra_mode': ra_mode, 'ipv6_address_mode': add_mode} - kwargs = {k: v for k, v in kwargs.iteritems() if v} + kwargs = {k: v for k, v in six.iteritems(kwargs) if v} subnet = self.create_subnet(self.network, **kwargs) ip_range = netaddr.IPRange(subnet["allocation_pools"][0]["start"], subnet["allocation_pools"][0]["end"]) @@ -391,7 +392,7 @@ class NetworksTestDHCPv6(base.BaseNetworkTest): ): kwargs = {'ipv6_ra_mode': ra_mode, 'ipv6_address_mode': add_mode} - kwargs = {k: v for k, v in kwargs.iteritems() if v} + kwargs = {k: v for k, v in six.iteritems(kwargs) if v} subnet, port = self._create_subnet_router(kwargs) port_ip = next(iter(port['fixed_ips']), None)['ip_address'] self._clean_network() diff --git a/neutron/tests/api/test_fwaas_extensions.py b/neutron/tests/api/test_fwaas_extensions.py index 3471f7c1b51..3755196fd98 100644 --- a/neutron/tests/api/test_fwaas_extensions.py +++ b/neutron/tests/api/test_fwaas_extensions.py @@ -12,6 +12,7 @@ # License for the specific language governing permissions and limitations # under the License. 
+import six from tempest_lib.common.utils import data_utils from tempest_lib import exceptions as lib_exc @@ -142,7 +143,7 @@ class FWaaSExtensionTestJSON(base.BaseNetworkTest): def test_show_firewall_rule(self): # show a created firewall rule fw_rule = self.client.show_firewall_rule(self.fw_rule['id']) - for key, value in fw_rule['firewall_rule'].iteritems(): + for key, value in six.iteritems(fw_rule['firewall_rule']): self.assertEqual(self.fw_rule[key], value) @test.idempotent_id('1086dd93-a4c0-4bbb-a1bd-6d4bc62c199f') @@ -184,7 +185,7 @@ class FWaaSExtensionTestJSON(base.BaseNetworkTest): # show a created firewall policy fw_policy = self.client.show_firewall_policy(self.fw_policy['id']) fw_policy = fw_policy['firewall_policy'] - for key, value in fw_policy.iteritems(): + for key, value in six.iteritems(fw_policy): self.assertEqual(self.fw_policy[key], value) @test.idempotent_id('02082a03-3cdd-4789-986a-1327dd80bfb7') @@ -213,7 +214,7 @@ class FWaaSExtensionTestJSON(base.BaseNetworkTest): firewall = self.client.show_firewall(firewall_id) firewall = firewall['firewall'] - for key, value in firewall.iteritems(): + for key, value in six.iteritems(firewall): if key == 'status': continue self.assertEqual(created_firewall[key], value) diff --git a/neutron/tests/api/test_load_balancer.py b/neutron/tests/api/test_load_balancer.py index fde60dc00ab..fc8ed260de9 100644 --- a/neutron/tests/api/test_load_balancer.py +++ b/neutron/tests/api/test_load_balancer.py @@ -13,6 +13,7 @@ # License for the specific language governing permissions and limitations # under the License. +import six from tempest_lib.common.utils import data_utils from tempest_lib import decorators @@ -72,7 +73,7 @@ class LoadBalancerTestJSON(base.BaseNetworkTest): body = create_obj(**kwargs) obj = body[obj_name] self.addCleanup(delete_obj, obj['id']) - for key, value in obj.iteritems(): + for key, value in six.iteritems(obj): # It is not relevant to filter by all arguments. 
That is why # there is a list of attr to except if key not in attr_exceptions: @@ -169,7 +170,7 @@ class LoadBalancerTestJSON(base.BaseNetworkTest): # Verifies the details of a vip body = self.client.show_vip(self.vip['id']) vip = body['vip'] - for key, value in vip.iteritems(): + for key, value in six.iteritems(vip): # 'status' should not be confirmed in api tests if key != 'status': self.assertEqual(self.vip[key], value) @@ -187,7 +188,7 @@ class LoadBalancerTestJSON(base.BaseNetworkTest): # Verifies the details of a pool body = self.client.show_pool(pool['id']) shown_pool = body['pool'] - for key, value in pool.iteritems(): + for key, value in six.iteritems(pool): # 'status' should not be confirmed in api tests if key != 'status': self.assertEqual(value, shown_pool[key]) @@ -251,7 +252,7 @@ class LoadBalancerTestJSON(base.BaseNetworkTest): # Verifies the details of a member body = self.client.show_member(self.member['id']) member = body['member'] - for key, value in member.iteritems(): + for key, value in six.iteritems(member): # 'status' should not be confirmed in api tests if key != 'status': self.assertEqual(self.member[key], value) @@ -330,7 +331,7 @@ class LoadBalancerTestJSON(base.BaseNetworkTest): # Verifies the details of a health_monitor body = self.client.show_health_monitor(self.health_monitor['id']) health_monitor = body['health_monitor'] - for key, value in health_monitor.iteritems(): + for key, value in six.iteritems(health_monitor): # 'status' should not be confirmed in api tests if key != 'status': self.assertEqual(self.health_monitor[key], value) diff --git a/neutron/tests/api/test_networks.py b/neutron/tests/api/test_networks.py index 6300c0797c2..868c08d7c67 100644 --- a/neutron/tests/api/test_networks.py +++ b/neutron/tests/api/test_networks.py @@ -15,6 +15,7 @@ import itertools import netaddr +import six from tempest_lib.common.utils import data_utils from tempest_lib import exceptions as lib_exc @@ -163,7 +164,7 @@ class 
NetworksTestJSON(base.BaseNetworkTest): **kwargs) compare_args_full = dict(gateway_ip=gateway, cidr=cidr, mask_bits=mask_bits, **kwargs) - compare_args = dict((k, v) for k, v in compare_args_full.iteritems() + compare_args = dict((k, v) for k, v in six.iteritems(compare_args_full) if v is not None) if 'dns_nameservers' in set(subnet).intersection(compare_args): diff --git a/neutron/tests/api/test_routers.py b/neutron/tests/api/test_routers.py index 4b759f06315..6593f979962 100644 --- a/neutron/tests/api/test_routers.py +++ b/neutron/tests/api/test_routers.py @@ -14,6 +14,7 @@ # under the License. import netaddr +import six from tempest_lib.common.utils import data_utils from neutron.tests.api import base_routers as base @@ -173,7 +174,7 @@ class RoutersTest(base.BaseRouterTest): self.assertIsNone(actual_ext_gw_info) return # Verify only keys passed in exp_ext_gw_info - for k, v in exp_ext_gw_info.iteritems(): + for k, v in six.iteritems(exp_ext_gw_info): self.assertEqual(v, actual_ext_gw_info[k]) def _verify_gateway_port(self, router_id): diff --git a/neutron/tests/api/test_vpnaas_extensions.py b/neutron/tests/api/test_vpnaas_extensions.py index 03b5cc1e147..6076e52bfb6 100644 --- a/neutron/tests/api/test_vpnaas_extensions.py +++ b/neutron/tests/api/test_vpnaas_extensions.py @@ -13,6 +13,7 @@ # License for the specific language governing permissions and limitations # under the License. 
+import six from tempest_lib.common.utils import data_utils from tempest_lib import exceptions as lib_exc @@ -79,7 +80,7 @@ class VPNaaSTestJSON(base.BaseAdminNetworkTest): def _assertExpected(self, expected, actual): # Check if not expected keys/values exists in actual response body - for key, value in expected.iteritems(): + for key, value in six.iteritems(expected): self.assertIn(key, actual) self.assertEqual(value, actual[key]) @@ -250,7 +251,7 @@ class VPNaaSTestJSON(base.BaseAdminNetworkTest): # Confirm that update was successful by verifying using 'show' body = self.client.show_ikepolicy(ikepolicy['id']) ike_policy = body['ikepolicy'] - for key, value in new_ike.iteritems(): + for key, value in six.iteritems(new_ike): self.assertIn(key, ike_policy) self.assertEqual(value, ike_policy[key]) diff --git a/neutron/tests/base.py b/neutron/tests/base.py index 87f820cc423..7b18901044b 100644 --- a/neutron/tests/base.py +++ b/neutron/tests/base.py @@ -32,6 +32,7 @@ from oslo_concurrency.fixture import lockutils from oslo_config import cfg from oslo_messaging import conffixture as messaging_conffixture from oslo_utils import strutils +import six import testtools from neutron.agent.linux import external_process @@ -182,7 +183,7 @@ class DietTestCase(testtools.TestCase): self.assertEqual(expect_val, actual_val) def sort_dict_lists(self, dic): - for key, value in dic.iteritems(): + for key, value in six.iteritems(dic): if isinstance(value, list): dic[key] = sorted(value) elif isinstance(value, dict): @@ -361,7 +362,7 @@ class BaseTestCase(DietTestCase): test by the fixtures cleanup process. 
""" group = kw.pop('group', None) - for k, v in kw.iteritems(): + for k, v in six.iteritems(kw): CONF.set_override(k, v, group) def setup_coreplugin(self, core_plugin=None): diff --git a/neutron/tests/fullstack/config_fixtures.py b/neutron/tests/fullstack/config_fixtures.py index 77756878fc9..65b3695d287 100644 --- a/neutron/tests/fullstack/config_fixtures.py +++ b/neutron/tests/fullstack/config_fixtures.py @@ -34,7 +34,7 @@ class ConfigDict(base.AttributeDict): :param other: dictionary to be directly modified. """ - for key, value in other.iteritems(): + for key, value in six.iteritems(other): if isinstance(value, dict): if not isinstance(value, base.AttributeDict): other[key] = base.AttributeDict(value) diff --git a/neutron/tests/functional/agent/test_l3_agent.py b/neutron/tests/functional/agent/test_l3_agent.py index c979c03b666..bd461164f7e 100644 --- a/neutron/tests/functional/agent/test_l3_agent.py +++ b/neutron/tests/functional/agent/test_l3_agent.py @@ -21,6 +21,7 @@ import mock import netaddr from oslo_config import cfg from oslo_log import log as logging +import six import testtools import webob import webob.dec @@ -326,7 +327,7 @@ class L3AgentTestCase(L3AgentTestFramework): # Get the last state reported for each router actual_router_states = {} for call in calls: - for router_id, state in call.iteritems(): + for router_id, state in six.iteritems(call): actual_router_states[router_id] = state return actual_router_states == expected diff --git a/neutron/tests/tempest/common/custom_matchers.py b/neutron/tests/tempest/common/custom_matchers.py index 298a94ec2de..839088c6544 100644 --- a/neutron/tests/tempest/common/custom_matchers.py +++ b/neutron/tests/tempest/common/custom_matchers.py @@ -14,6 +14,7 @@ import re +import six from testtools import helpers @@ -121,7 +122,7 @@ class AreAllWellFormatted(object): """ def match(self, actual): - for key, value in actual.iteritems(): + for key, value in six.iteritems(actual): if key in ('content-length', 
'x-account-bytes-used', 'x-account-container-count', 'x-account-object-count', 'x-container-bytes-used', 'x-container-object-count')\ diff --git a/neutron/tests/tempest/common/generator/base_generator.py b/neutron/tests/tempest/common/generator/base_generator.py index f81f4055d6f..5ac927c497f 100644 --- a/neutron/tests/tempest/common/generator/base_generator.py +++ b/neutron/tests/tempest/common/generator/base_generator.py @@ -17,6 +17,7 @@ import copy import functools import jsonschema +import six from oslo_log import log as logging @@ -122,7 +123,7 @@ class BasicGeneratorSet(object): if schema_type == 'object': properties = schema["properties"] - for attribute, definition in properties.iteritems(): + for attribute, definition in six.iteritems(properties): current_path = copy.copy(path) if path is not None: current_path.append(attribute) diff --git a/neutron/tests/tempest/common/generator/valid_generator.py b/neutron/tests/tempest/common/generator/valid_generator.py index 8ff9259bdce..269820052f2 100644 --- a/neutron/tests/tempest/common/generator/valid_generator.py +++ b/neutron/tests/tempest/common/generator/valid_generator.py @@ -14,6 +14,7 @@ # under the License. from oslo_log import log as logging +import six import neutron.tests.tempest.common.generator.base_generator as base @@ -51,7 +52,7 @@ class ValidTestGenerator(base.BasicGeneratorSet): @base.simple_generator def generate_valid_object(self, schema): obj = {} - for k, v in schema["properties"].iteritems(): + for k, v in six.iteritems(schema["properties"]): obj[k] = self.generate_valid(v) return obj diff --git a/neutron/tests/tools.py b/neutron/tests/tools.py index c9b80b70ee9..fd53793fee6 100644 --- a/neutron/tests/tools.py +++ b/neutron/tests/tools.py @@ -14,6 +14,7 @@ # under the License. import fixtures +import six from neutron.api.v2 import attributes @@ -40,8 +41,8 @@ class AttributeMapMemento(fixtures.Fixture): # deeper than a shallow copy. 
super(AttributeMapMemento, self).setUp() self.contents_backup = {} - for resource, attrs in attributes.RESOURCE_ATTRIBUTE_MAP.iteritems(): - self.contents_backup[resource] = attrs.copy() + for res, attrs in six.iteritems(attributes.RESOURCE_ATTRIBUTE_MAP): + self.contents_backup[res] = attrs.copy() self.addCleanup(self.restore) def restore(self): diff --git a/neutron/tests/unit/agent/linux/test_iptables_firewall.py b/neutron/tests/unit/agent/linux/test_iptables_firewall.py index 0eab3eefd3f..53726f81c73 100644 --- a/neutron/tests/unit/agent/linux/test_iptables_firewall.py +++ b/neutron/tests/unit/agent/linux/test_iptables_firewall.py @@ -17,6 +17,7 @@ import copy import mock from oslo_config import cfg +import six from neutron.agent.common import config as a_cfg from neutron.agent.linux import ipset_manager @@ -1443,7 +1444,7 @@ class IptablesFirewallEnhancedIpsetTestCase(BaseIptablesFirewallTestCase): remote_groups = remote_groups or {_IPv4: [FAKE_SGID], _IPv6: [FAKE_SGID]} rules = [] - for ip_version, remote_group_list in remote_groups.iteritems(): + for ip_version, remote_group_list in six.iteritems(remote_groups): for remote_group in remote_group_list: rules.append(self._fake_sg_rule_for_ethertype(ip_version, remote_group)) diff --git a/neutron/tests/unit/api/test_extensions.py b/neutron/tests/unit/api/test_extensions.py index cbde52239aa..aabece09d4c 100644 --- a/neutron/tests/unit/api/test_extensions.py +++ b/neutron/tests/unit/api/test_extensions.py @@ -20,6 +20,7 @@ from oslo_config import cfg from oslo_log import log as logging from oslo_serialization import jsonutils import routes +import six import webob import webob.exc as webexc import webtest @@ -743,8 +744,8 @@ class ExtensionExtendedAttributeTestCase(base.BaseTestCase): self._tenant_id = "8c70909f-b081-452d-872b-df48e6c355d1" # Save the global RESOURCE_ATTRIBUTE_MAP self.saved_attr_map = {} - for resource, attrs in attributes.RESOURCE_ATTRIBUTE_MAP.iteritems(): - self.saved_attr_map[resource] = 
attrs.copy() + for res, attrs in six.iteritems(attributes.RESOURCE_ATTRIBUTE_MAP): + self.saved_attr_map[res] = attrs.copy() # Add the resources to the global attribute map # This is done here as the setup process won't # initialize the main API router which extends diff --git a/neutron/tests/unit/api/v2/test_base.py b/neutron/tests/unit/api/v2/test_base.py index 65ea1b2a0ac..ab05215e3f9 100644 --- a/neutron/tests/unit/api/v2/test_base.py +++ b/neutron/tests/unit/api/v2/test_base.py @@ -17,6 +17,7 @@ import os import mock from oslo_config import cfg +import six from six import moves import six.moves.urllib.parse as urlparse import webob @@ -547,7 +548,7 @@ class JSONV2TestCase(APIv2TestBase, testlib_api.WebTestCase): output_dict = res['networks'][0] input_dict['shared'] = False self.assertEqual(len(input_dict), len(output_dict)) - for k, v in input_dict.iteritems(): + for k, v in six.iteritems(input_dict): self.assertEqual(v, output_dict[k]) else: # expect no results diff --git a/neutron/tests/unit/db/test_db_base_plugin_v2.py b/neutron/tests/unit/db/test_db_base_plugin_v2.py index 21989c0bfde..cd381d7874b 100644 --- a/neutron/tests/unit/db/test_db_base_plugin_v2.py +++ b/neutron/tests/unit/db/test_db_base_plugin_v2.py @@ -22,6 +22,7 @@ import netaddr from oslo_config import cfg from oslo_db import exception as db_exc from oslo_utils import importutils +import six from sqlalchemy import orm from testtools import matchers import webob.exc @@ -107,7 +108,7 @@ class NeutronDbPluginV2TestCase(testlib_api.WebTestCase): cfg.CONF.set_override( 'service_plugins', [test_lib.test_config.get(key, default) - for key, default in (service_plugins or {}).iteritems()] + for key, default in six.iteritems(service_plugins or {})] ) cfg.CONF.set_override('base_mac', "12:34:56:78:90:ab") diff --git a/neutron/tests/unit/extensions/test_securitygroup.py b/neutron/tests/unit/extensions/test_securitygroup.py index 5c90f381194..cf616cc45d6 100644 --- 
a/neutron/tests/unit/extensions/test_securitygroup.py +++ b/neutron/tests/unit/extensions/test_securitygroup.py @@ -17,6 +17,7 @@ import contextlib import mock import oslo_db.exception as exc +import six import testtools import webob.exc @@ -160,7 +161,7 @@ class SecurityGroupsTestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase): """Asserts that the sg rule has expected key/value pairs passed in as expected_kvs dictionary """ - for k, v in expected_kvs.iteritems(): + for k, v in six.iteritems(expected_kvs): self.assertEqual(security_group_rule[k], v) @@ -441,7 +442,7 @@ class TestSecurityGroups(SecurityGroupDBTestCase): test_addr = {'192.168.1.1/24': 'IPv6', '2001:db8:1234::/48': 'IPv4', '192.168.2.1/24': 'BadEthertype'} - for remote_ip_prefix, ethertype in test_addr.iteritems(): + for remote_ip_prefix, ethertype in six.iteritems(test_addr): with self.security_group(name, description) as sg: sg_id = sg['security_group']['id'] rule = self._build_security_group_rule( @@ -1501,7 +1502,7 @@ class TestConvertIPPrefixToCIDR(base.BaseTestCase): def test_convert_ip_prefix_no_netmask_to_cidr(self): addr = {'10.1.2.3': '32', 'fe80::2677:3ff:fe7d:4c': '128'} - for k, v in addr.iteritems(): + for k, v in six.iteritems(addr): self.assertEqual(ext_sg.convert_ip_prefix_to_cidr(k), '%s/%s' % (k, v)) diff --git a/neutron/tests/unit/extensions/test_vlantransparent.py b/neutron/tests/unit/extensions/test_vlantransparent.py index 38ffbf68839..7d6d99a05a2 100644 --- a/neutron/tests/unit/extensions/test_vlantransparent.py +++ b/neutron/tests/unit/extensions/test_vlantransparent.py @@ -13,6 +13,7 @@ # under the License. 
from oslo_config import cfg +import six from webob import exc as web_exc from neutron.api.v2 import attributes @@ -55,8 +56,8 @@ class VlanTransparentExtensionTestCase(test_db_base_plugin_v2.TestNetworksV2): # Save the global RESOURCE_ATTRIBUTE_MAP self.saved_attr_map = {} - for resource, attrs in attributes.RESOURCE_ATTRIBUTE_MAP.iteritems(): - self.saved_attr_map[resource] = attrs.copy() + for res, attrs in six.iteritems(attributes.RESOURCE_ATTRIBUTE_MAP): + self.saved_attr_map[res] = attrs.copy() # Update the plugin and extensions path self.setup_coreplugin(plugin) diff --git a/neutron/tests/unit/hacking/test_checks.py b/neutron/tests/unit/hacking/test_checks.py index 99305008d98..7e2d81c0c13 100644 --- a/neutron/tests/unit/hacking/test_checks.py +++ b/neutron/tests/unit/hacking/test_checks.py @@ -149,3 +149,8 @@ class HackingTestCase(base.BaseTestCase): def test_no_basestring(self): self.assertEqual(1, len(list(checks.check_no_basestring("isinstance(x, basestring)")))) + + def test_check_python3_iteritems(self): + f = checks.check_python3_no_iteritems + self.assertLineFails(f, "d.iteritems()") + self.assertLinePasses(f, "six.iteritems(d)") diff --git a/neutron/wsgi.py b/neutron/wsgi.py index a31367ac6a6..86c1c94ff6b 100644 --- a/neutron/wsgi.py +++ b/neutron/wsgi.py @@ -33,6 +33,7 @@ from oslo_log import loggers from oslo_serialization import jsonutils from oslo_utils import excutils import routes.middleware +import six import webob.dec import webob.exc @@ -666,7 +667,7 @@ class Debug(Middleware): resp = req.get_response(self.application) print(("*" * 40) + " RESPONSE HEADERS") - for (key, value) in resp.headers.iteritems(): + for (key, value) in six.iteritems(resp.headers): print(key, "=", value) print() From cda0b14112484d4776c7ccb6be78b7e23fdd5424 Mon Sep 17 00:00:00 2001 From: Eugene Nikanorov Date: Tue, 2 Jun 2015 06:15:39 +0400 Subject: [PATCH 099/292] Add route to metadata IP by default Windows VMs try to resolve metadata ip 169.254.169.254 as local 
address by default, which results in very slow access to metadata url during boot. Injecting direct route to metadata ip through a subnet's default gateway helps Windows to avoid wasting time on mac resolution. So this patch injects host route for metadata ip for networks plugged into a router. Closes-Bug: #1460793 Change-Id: Ic991f9d7f9a8cf942e3e29f2da03d73c0f26470a --- neutron/agent/linux/dhcp.py | 4 ++ neutron/tests/unit/agent/linux/test_dhcp.py | 63 ++++++++++++++------- 2 files changed, 48 insertions(+), 19 deletions(-) diff --git a/neutron/agent/linux/dhcp.py b/neutron/agent/linux/dhcp.py index ba3431b8d4a..155872331ec 100644 --- a/neutron/agent/linux/dhcp.py +++ b/neutron/agent/linux/dhcp.py @@ -696,6 +696,10 @@ class Dnsmasq(DhcpLocalProcess): host_routes.append( '%s/32,%s' % (METADATA_DEFAULT_IP, subnet_dhcp_ip) ) + elif not isolated_subnets[subnet.id] and gateway: + host_routes.append( + '%s/32,%s' % (METADATA_DEFAULT_IP, gateway) + ) if subnet.ip_version == 4: host_routes.extend(["%s,0.0.0.0" % (s.cidr) for s in diff --git a/neutron/tests/unit/agent/linux/test_dhcp.py b/neutron/tests/unit/agent/linux/test_dhcp.py index 380e8af804a..483680326a4 100644 --- a/neutron/tests/unit/agent/linux/test_dhcp.py +++ b/neutron/tests/unit/agent/linux/test_dhcp.py @@ -1042,8 +1042,9 @@ class TestDnsmasq(TestBase): expected = ( 'tag:tag0,option:dns-server,8.8.8.8\n' 'tag:tag0,option:classless-static-route,20.0.0.1/24,20.0.0.1,' - '0.0.0.0/0,192.168.0.1\n' - 'tag:tag0,249,20.0.0.1/24,20.0.0.1,0.0.0.0/0,192.168.0.1\n' + '169.254.169.254/32,192.168.0.1,0.0.0.0/0,192.168.0.1\n' + 'tag:tag0,249,20.0.0.1/24,20.0.0.1,' + '169.254.169.254/32,192.168.0.1,0.0.0.0/0,192.168.0.1\n' 'tag:tag0,option:router,192.168.0.1\n' 'tag:tag1,option6:dns-server,%s\n' 'tag:tag1,option6:domain-search,openstacklocal').lstrip() % ( @@ -1054,6 +1055,10 @@ class TestDnsmasq(TestBase): def test_output_opts_file_gateway_route(self): fake_v6 = '2001:0200:feed:7ac0::1' expected = 
('tag:tag0,option:dns-server,8.8.8.8\n' + 'tag:tag0,option:classless-static-route,' + '169.254.169.254/32,192.168.0.1,0.0.0.0/0,' + '192.168.0.1\ntag:tag0,249,169.254.169.254/32,' + '192.168.0.1,0.0.0.0/0,192.168.0.1\n' 'tag:tag0,option:router,192.168.0.1\n' 'tag:tag1,option6:dns-server,%s\n' 'tag:tag1,option6:domain-search,' @@ -1062,7 +1067,10 @@ class TestDnsmasq(TestBase): self._test_output_opts_file(expected, FakeDualNetworkGatewayRoute()) def test_output_opts_file_multiple_agents_without_dns_provided(self): - expected = ('tag:tag0,option:router,192.168.0.1\n' + expected = ('tag:tag0,option:classless-static-route,' + '169.254.169.254/32,192.168.0.1,0.0.0.0/0,192.168.0.1\n' + 'tag:tag0,249,169.254.169.254/32,192.168.0.1,0.0.0.0/0,' + '192.168.0.1\ntag:tag0,option:router,192.168.0.1\n' 'tag:tag0,option:dns-server,192.168.0.5,' '192.168.0.6').lstrip() @@ -1071,6 +1079,10 @@ class TestDnsmasq(TestBase): def test_output_opts_file_multiple_agents_with_dns_provided(self): expected = ('tag:tag0,option:dns-server,8.8.8.8\n' + 'tag:tag0,option:classless-static-route,' + '169.254.169.254/32,192.168.0.1,0.0.0.0/0,192.168.0.1\n' + 'tag:tag0,249,169.254.169.254/32,192.168.0.1,0.0.0.0/0,' + '192.168.0.1\n' 'tag:tag0,option:router,192.168.0.1').lstrip() self._test_output_opts_file(expected, @@ -1080,8 +1092,10 @@ class TestDnsmasq(TestBase): expected = ( 'tag:tag0,option:dns-server,8.8.8.8\n' 'tag:tag0,option:classless-static-route,20.0.0.1/24,20.0.0.1,' + '169.254.169.254/32,192.168.0.1,' '192.168.1.0/24,0.0.0.0,0.0.0.0/0,192.168.0.1\n' - 'tag:tag0,249,20.0.0.1/24,20.0.0.1,192.168.1.0/24,0.0.0.0,' + 'tag:tag0,249,20.0.0.1/24,20.0.0.1,' + '169.254.169.254/32,192.168.0.1,192.168.1.0/24,0.0.0.0,' '0.0.0.0/0,192.168.0.1\n' 'tag:tag0,option:router,192.168.0.1').lstrip() @@ -1091,14 +1105,18 @@ class TestDnsmasq(TestBase): expected = ( 'tag:tag0,option:dns-server,8.8.8.8\n' 'tag:tag0,option:classless-static-route,20.0.0.1/24,20.0.0.1,' + '169.254.169.254/32,192.168.0.1,' 
'192.168.1.0/24,0.0.0.0,0.0.0.0/0,192.168.0.1\n' - 'tag:tag0,249,20.0.0.1/24,20.0.0.1,192.168.1.0/24,0.0.0.0,' + 'tag:tag0,249,20.0.0.1/24,20.0.0.1,' + '169.254.169.254/32,192.168.0.1,192.168.1.0/24,0.0.0.0,' '0.0.0.0/0,192.168.0.1\n' 'tag:tag0,option:router,192.168.0.1\n' 'tag:tag1,option:dns-server,8.8.8.8\n' - 'tag:tag1,option:classless-static-route,192.168.0.0/24,0.0.0.0,' - '0.0.0.0/0,192.168.1.1\n' - 'tag:tag1,249,192.168.0.0/24,0.0.0.0,0.0.0.0/0,192.168.1.1\n' + 'tag:tag1,option:classless-static-route,' + '169.254.169.254/32,192.168.1.1,' + '192.168.0.0/24,0.0.0.0,0.0.0.0/0,192.168.1.1\n' + 'tag:tag1,249,169.254.169.254/32,192.168.1.1,' + '192.168.0.0/24,0.0.0.0,0.0.0.0/0,192.168.1.1\n' 'tag:tag1,option:router,192.168.1.1').lstrip() self._test_output_opts_file(expected, FakeDualNetworkDualDHCP()) @@ -1130,8 +1148,9 @@ class TestDnsmasq(TestBase): expected = ( 'tag:tag0,option:dns-server,8.8.8.8\n' 'tag:tag0,option:classless-static-route,20.0.0.1/24,20.0.0.1,' - '0.0.0.0/0,192.168.0.1\n' - 'tag:tag0,249,20.0.0.1/24,20.0.0.1,0.0.0.0/0,192.168.0.1\n' + '169.254.169.254/32,192.168.0.1,0.0.0.0/0,192.168.0.1\n' + 'tag:tag0,249,20.0.0.1/24,20.0.0.1,' + '169.254.169.254/32,192.168.0.1,0.0.0.0/0,192.168.0.1\n' 'tag:tag0,option:router,192.168.0.1').lstrip() ipm_retval = {FakeV4Subnet.id: '192.168.0.1'} @@ -1142,8 +1161,9 @@ class TestDnsmasq(TestBase): expected = ( 'tag:tag0,option:dns-server,8.8.8.8\n' 'tag:tag0,option:classless-static-route,20.0.0.1/24,20.0.0.1,' - '0.0.0.0/0,192.168.0.1\n' - 'tag:tag0,249,20.0.0.1/24,20.0.0.1,0.0.0.0/0,192.168.0.1\n' + '169.254.169.254/32,192.168.0.1,0.0.0.0/0,192.168.0.1\n' + 'tag:tag0,249,20.0.0.1/24,20.0.0.1,' + '169.254.169.254/32,192.168.0.1,0.0.0.0/0,192.168.0.1\n' 'tag:tag0,option:router,192.168.0.1\n' 'tag:eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee,' 'option:tftp-server,192.168.0.3\n' @@ -1164,8 +1184,9 @@ class TestDnsmasq(TestBase): expected = ( 'tag:tag0,option:dns-server,8.8.8.8\n' 
'tag:tag0,option:classless-static-route,20.0.0.1/24,20.0.0.1,' - '0.0.0.0/0,192.168.0.1\n' - 'tag:tag0,249,20.0.0.1/24,20.0.0.1,0.0.0.0/0,192.168.0.1\n' + '169.254.169.254/32,192.168.0.1,0.0.0.0/0,192.168.0.1\n' + 'tag:tag0,249,20.0.0.1/24,20.0.0.1,' + '169.254.169.254/32,192.168.0.1,0.0.0.0/0,192.168.0.1\n' 'tag:tag0,option:router,192.168.0.1\n' 'tag:eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee,' 'option:tftp-server,192.168.0.3\n' @@ -1187,8 +1208,10 @@ class TestDnsmasq(TestBase): expected = ( 'tag:tag0,option:dns-server,8.8.8.8\n' 'tag:tag0,option:classless-static-route,20.0.0.1/24,20.0.0.1,' + '169.254.169.254/32,192.168.0.1,' '192.168.1.0/24,0.0.0.0,0.0.0.0/0,192.168.0.1\n' - 'tag:tag0,249,20.0.0.1/24,20.0.0.1,192.168.1.0/24,0.0.0.0,' + 'tag:tag0,249,20.0.0.1/24,20.0.0.1,' + '169.254.169.254/32,192.168.0.1,192.168.1.0/24,0.0.0.0,' '0.0.0.0/0,192.168.0.1\n' 'tag:tag0,option:router,192.168.0.1\n' 'tag:eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee,' @@ -1216,8 +1239,9 @@ class TestDnsmasq(TestBase): expected = ( 'tag:tag0,option:dns-server,8.8.8.8\n' 'tag:tag0,option:classless-static-route,20.0.0.1/24,20.0.0.1,' - '0.0.0.0/0,192.168.0.1\n' - 'tag:tag0,249,20.0.0.1/24,20.0.0.1,0.0.0.0/0,192.168.0.1\n' + '169.254.169.254/32,192.168.0.1,0.0.0.0/0,192.168.0.1\n' + 'tag:tag0,249,20.0.0.1/24,20.0.0.1,' + '169.254.169.254/32,192.168.0.1,0.0.0.0/0,192.168.0.1\n' 'tag:tag0,option:router,192.168.0.1\n' 'tag:eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee,' 'tag:ipxe,option:bootfile-name,pxelinux.0') @@ -1330,8 +1354,9 @@ class TestDnsmasq(TestBase): exp_opt_data = ( 'tag:tag0,option:dns-server,8.8.8.8\n' 'tag:tag0,option:classless-static-route,20.0.0.1/24,20.0.0.1,' - '0.0.0.0/0,192.168.0.1\n' - 'tag:tag0,249,20.0.0.1/24,20.0.0.1,0.0.0.0/0,192.168.0.1\n' + '169.254.169.254/32,192.168.0.1,0.0.0.0/0,192.168.0.1\n' + 'tag:tag0,249,20.0.0.1/24,20.0.0.1,' + '169.254.169.254/32,192.168.0.1,0.0.0.0/0,192.168.0.1\n' 'tag:tag0,option:router,192.168.0.1\n' 'tag:tag1,option6:dns-server,%s\n' 
'tag:tag1,option6:domain-search,openstacklocal').lstrip() % ( From 490bdabd4c5835bac2314722c498e6dc6e85979d Mon Sep 17 00:00:00 2001 From: Gal Sagie Date: Tue, 2 Jun 2015 08:49:10 +0300 Subject: [PATCH 100/292] Update rootwrap.conf to add /usr/local/bin When working with OVN i found on Fedora 21 that my ovs-vsctl is installed in /usr/local/bin, since this wasnt in rootwrap DHCP didnt work properly. This change adds it to rootwrap Change-Id: Ib3646933744ca6b20ecd5ad0cedcedb4f1fa5f12 --- etc/rootwrap.conf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/etc/rootwrap.conf b/etc/rootwrap.conf index dee1dd94b1f..f2d9ce4227e 100644 --- a/etc/rootwrap.conf +++ b/etc/rootwrap.conf @@ -10,7 +10,7 @@ filters_path=/etc/neutron/rootwrap.d,/usr/share/neutron/rootwrap # explicitely specify a full path (separated by ',') # If not specified, defaults to system PATH environment variable. # These directories MUST all be only writeable by root ! -exec_dirs=/sbin,/usr/sbin,/bin,/usr/bin +exec_dirs=/sbin,/usr/sbin,/bin,/usr/bin,/usr/local/bin # Enable logging to syslog # Default value is False From 637b2f42560508c8b131718d9a6920d86d9a6c55 Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Tue, 2 Jun 2015 06:15:35 +0000 Subject: [PATCH 101/292] Imported Translations from Transifex For more information about this automatic import see: https://wiki.openstack.org/wiki/Translations/Infrastructure Change-Id: Iadf5c59306dd4f8ce574a2d7259bd6a03d60a72a --- .../locale/fr/LC_MESSAGES/neutron-log-info.po | 20 +++- neutron/locale/neutron-log-error.pot | 94 ++++++++++--------- neutron/locale/neutron.pot | 44 +++++---- 3 files changed, 89 insertions(+), 69 deletions(-) diff --git a/neutron/locale/fr/LC_MESSAGES/neutron-log-info.po b/neutron/locale/fr/LC_MESSAGES/neutron-log-info.po index 91f486b41aa..2c22f27a120 100644 --- a/neutron/locale/fr/LC_MESSAGES/neutron-log-info.po +++ b/neutron/locale/fr/LC_MESSAGES/neutron-log-info.po @@ -3,15 +3,15 @@ # This file is distributed 
under the same license as the neutron project. # # Translators: -# Maxime COQUEREL , 2014 +# Maxime COQUEREL , 2014-2015 # Patte D , 2015 msgid "" msgstr "" "Project-Id-Version: Neutron\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2015-05-29 06:16+0000\n" -"PO-Revision-Date: 2015-05-28 20:54+0000\n" -"Last-Translator: openstackjenkins \n" +"POT-Creation-Date: 2015-06-02 06:15+0000\n" +"PO-Revision-Date: 2015-06-01 16:41+0000\n" +"Last-Translator: Maxime COQUEREL \n" "Language-Team: French (http://www.transifex.com/projects/p/neutron/language/" "fr/)\n" "Language: fr\n" @@ -33,6 +33,10 @@ msgstr "%(method)s %(url)s" msgid "%(plugin_key)s: %(function_name)s with args %(args)s ignored" msgstr "%(plugin_key)s : %(function_name)s avec les arguments %(args)s ignoré" +#, python-format +msgid "%(prog)s version %(version)s" +msgstr "%(prog)s version %(version)s" + #, python-format msgid "%(url)s returned a fault: %(exception)s" msgstr "%(url)s a retourné une erreur : %(exception)s." 
@@ -131,6 +135,10 @@ msgstr "Processus fils %(pid)s terminé avec le status %(code)d" msgid "Child caught %s, exiting" msgstr "L'enfant a reçu %s, sortie" +#, python-format +msgid "Cleaning bridge: %s" +msgstr "Supprimer le pont: %s" + #, python-format msgid "Config paste file: %s" msgstr "Config du fichier de collage : %s" @@ -147,6 +155,10 @@ msgid "Default provider is not specified for service type %s" msgstr "" "Le fournisseur par défaut n'est pas spécifié pour le type de service %s" +#, python-format +msgid "Deleting port: %s" +msgstr "Supprimer le port: %s" + #, python-format msgid "Device %s already exists" msgstr "L'unité %s existe déjà" diff --git a/neutron/locale/neutron-log-error.pot b/neutron/locale/neutron-log-error.pot index 1bf18a27974..a5b1b8ea5a9 100644 --- a/neutron/locale/neutron-log-error.pot +++ b/neutron/locale/neutron-log-error.pot @@ -6,9 +6,9 @@ #, fuzzy msgid "" msgstr "" -"Project-Id-Version: neutron 2015.2.0.dev485\n" +"Project-Id-Version: neutron 2015.2.0.dev533\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2015-05-29 06:16+0000\n" +"POT-Creation-Date: 2015-06-02 06:15+0000\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME \n" "Language-Team: LANGUAGE \n" @@ -17,12 +17,16 @@ msgstr "" "Content-Transfer-Encoding: 8bit\n" "Generated-By: Babel 1.3\n" -#: neutron/manager.py:140 +#: neutron/manager.py:133 +msgid "Error, plugin is not set" +msgstr "" + +#: neutron/manager.py:144 #, python-format msgid "Error loading plugin by name, %s" msgstr "" -#: neutron/manager.py:141 +#: neutron/manager.py:145 #, python-format msgid "Error loading plugin by class, %s" msgstr "" @@ -79,17 +83,17 @@ msgstr "" msgid "Internal error" msgstr "" -#: neutron/agent/common/ovs_lib.py:217 neutron/agent/common/ovs_lib.py:312 +#: neutron/agent/common/ovs_lib.py:218 neutron/agent/common/ovs_lib.py:313 #, python-format msgid "Unable to execute %(cmd)s. 
Exception: %(exception)s" msgstr "" -#: neutron/agent/common/ovs_lib.py:238 +#: neutron/agent/common/ovs_lib.py:239 #, python-format msgid "Timed out retrieving ofport on port %(pname)s. Exception: %(exception)s" msgstr "" -#: neutron/agent/common/ovs_lib.py:531 +#: neutron/agent/common/ovs_lib.py:532 #, python-format msgid "OVS flows could not be applied on bridge %s" msgstr "" @@ -113,7 +117,7 @@ msgstr "" msgid "Network %s info call failed." msgstr "" -#: neutron/agent/dhcp/agent.py:582 neutron/agent/l3/agent.py:614 +#: neutron/agent/dhcp/agent.py:582 neutron/agent/l3/agent.py:616 #: neutron/agent/metadata/agent.py:311 #: neutron/plugins/hyperv/agent/l2_agent.py:94 #: neutron/plugins/ibm/agent/sdnve_neutron_agent.py:108 @@ -124,50 +128,50 @@ msgstr "" msgid "Failed reporting state!" msgstr "" -#: neutron/agent/l3/agent.py:172 neutron/tests/unit/agent/l3/test_agent.py:2130 +#: neutron/agent/l3/agent.py:172 neutron/tests/unit/agent/l3/test_agent.py:2150 #, python-format msgid "Error importing interface driver '%s'" msgstr "" -#: neutron/agent/l3/agent.py:232 neutron/agent/linux/dhcp.py:875 +#: neutron/agent/l3/agent.py:234 neutron/agent/linux/dhcp.py:875 msgid "An interface driver must be specified" msgstr "" -#: neutron/agent/l3/agent.py:237 +#: neutron/agent/l3/agent.py:239 msgid "Router id is required if not using namespaces." msgstr "" -#: neutron/agent/l3/agent.py:244 +#: neutron/agent/l3/agent.py:246 #, python-format msgid "%s used in config as ipv6_gateway is not a valid IPv6 link-local address." 
msgstr "" -#: neutron/agent/l3/agent.py:326 +#: neutron/agent/l3/agent.py:328 #, python-format msgid "Error while deleting router %s" msgstr "" -#: neutron/agent/l3/agent.py:390 +#: neutron/agent/l3/agent.py:392 #, python-format msgid "The external network bridge '%s' does not exist" msgstr "" -#: neutron/agent/l3/agent.py:444 +#: neutron/agent/l3/agent.py:446 #, python-format msgid "Failed to fetch router information for '%s'" msgstr "" -#: neutron/agent/l3/agent.py:467 +#: neutron/agent/l3/agent.py:469 #, python-format msgid "Removing incompatible router '%s'" msgstr "" -#: neutron/agent/l3/agent.py:471 +#: neutron/agent/l3/agent.py:473 #, python-format msgid "Failed to process compatible router '%s'" msgstr "" -#: neutron/agent/l3/agent.py:523 +#: neutron/agent/l3/agent.py:525 msgid "Failed synchronizing routers due to RPC error" msgstr "" @@ -206,12 +210,12 @@ msgstr "" msgid "Failed to process or handle event for line %s" msgstr "" -#: neutron/agent/l3/namespace_manager.py:90 +#: neutron/agent/l3/namespace_manager.py:101 #, python-format msgid "Failed to destroy stale namespace %s" msgstr "" -#: neutron/agent/l3/namespace_manager.py:120 +#: neutron/agent/l3/namespace_manager.py:131 msgid "RuntimeError in obtaining namespace list for namespace cleanup." msgstr "" @@ -722,7 +726,7 @@ msgid "" msgstr "" #: neutron/plugins/ibm/agent/sdnve_neutron_agent.py:255 -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1656 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1658 #, python-format msgid "%s Agent terminated!" msgstr "" @@ -778,7 +782,7 @@ msgid "Parsing physical_interface_mappings failed: %s. Agent terminated!" 
msgstr "" #: neutron/plugins/ml2/db.py:241 neutron/plugins/ml2/db.py:325 -#: neutron/plugins/ml2/plugin.py:1340 +#: neutron/plugins/ml2/plugin.py:1341 #, python-format msgid "Multiple ports have port_id starting with %s" msgstr "" @@ -912,22 +916,22 @@ msgstr "" msgid "_bind_port_if_needed failed, deleting port '%s'" msgstr "" -#: neutron/plugins/ml2/plugin.py:1054 +#: neutron/plugins/ml2/plugin.py:1055 #, python-format msgid "_bind_port_if_needed failed. Deleting all ports from create bulk '%s'" msgstr "" -#: neutron/plugins/ml2/plugin.py:1199 +#: neutron/plugins/ml2/plugin.py:1200 #, python-format msgid "No Host supplied to bind DVR Port %s" msgstr "" -#: neutron/plugins/ml2/plugin.py:1321 +#: neutron/plugins/ml2/plugin.py:1322 #, python-format msgid "mechanism_manager.delete_port_postcommit failed for port %s" msgstr "" -#: neutron/plugins/ml2/plugin.py:1353 +#: neutron/plugins/ml2/plugin.py:1354 #, python-format msgid "Binding info for DVR port %s not found" msgstr "" @@ -1094,106 +1098,106 @@ msgid "" "a different subnet %(orig_subnet)s" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:382 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:384 msgid "No tunnel_type specified, cannot create tunnels" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:385 -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:408 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:387 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:410 #, python-format msgid "tunnel_type %s not supported by agent" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:401 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:403 msgid "No tunnel_ip specified, cannot delete tunnels" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:405 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:407 msgid "No tunnel_type specified, cannot delete tunnels" msgstr "" -#: 
neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:551 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:553 #, python-format msgid "No local VLAN available for net-id=%s" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:582 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:584 #, python-format msgid "" "Cannot provision %(network_type)s network for net-id=%(net_uuid)s - " "tunneling disabled" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:590 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:592 #, python-format msgid "" "Cannot provision flat network for net-id=%(net_uuid)s - no bridge for " "physical_network %(physical_network)s" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:600 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:602 #, python-format msgid "" "Cannot provision VLAN network for net-id=%(net_uuid)s - no bridge for " "physical_network %(physical_network)s" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:609 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:611 #, python-format msgid "" "Cannot provision unknown network type %(network_type)s for net-" "id=%(net_uuid)s" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:669 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:671 #, python-format msgid "" "Cannot reclaim unknown network type %(network_type)s for net-" "id=%(net_uuid)s" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:866 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:868 msgid "" "Failed to create OVS patch port. Cannot have tunneling enabled on this " "agent, since this version of OVS does not support tunnels or patch ports." " Agent terminated!" 
msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:925 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:927 #, python-format msgid "" "Bridge %(bridge)s for physical network %(physical_network)s does not " "exist. Agent terminated!" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1119 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1121 #, python-format msgid "Failed to set-up %(type)s tunnel port to %(ip)s" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1308 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1310 #, python-format msgid "" "process_network_ports - iteration:%d - failure while retrieving port " "details from server" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1344 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1346 #, python-format msgid "" "process_ancillary_network_ports - iteration:%d - failure while retrieving" " port details from server" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1486 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1488 msgid "Error while synchronizing tunnels" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1561 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1563 msgid "Error while processing VIF ports" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1650 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1652 msgid "Agent failed to create agent config map" msgstr "" diff --git a/neutron/locale/neutron.pot b/neutron/locale/neutron.pot index deaa73bb339..abd550bbcf3 100644 --- a/neutron/locale/neutron.pot +++ b/neutron/locale/neutron.pot @@ -6,9 +6,9 @@ #, fuzzy msgid "" msgstr "" -"Project-Id-Version: neutron 2015.2.0.dev485\n" +"Project-Id-Version: neutron 2015.2.0.dev533\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2015-05-29 06:16+0000\n" +"POT-Creation-Date: 2015-06-02 06:15+0000\n" 
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME \n" "Language-Team: LANGUAGE \n" @@ -31,11 +31,11 @@ msgstr "" msgid "Neutron core_plugin not configured!" msgstr "" -#: neutron/manager.py:142 +#: neutron/manager.py:134 neutron/manager.py:146 msgid "Plugin not found." msgstr "" -#: neutron/manager.py:181 +#: neutron/manager.py:189 #, python-format msgid "Multiple plugins for service %s were configured" msgstr "" @@ -275,16 +275,16 @@ msgstr "" msgid "Timeout in seconds for ovs-vsctl commands" msgstr "" -#: neutron/agent/common/ovs_lib.py:439 +#: neutron/agent/common/ovs_lib.py:440 #, python-format msgid "Unable to determine mac address for %s" msgstr "" -#: neutron/agent/common/ovs_lib.py:547 +#: neutron/agent/common/ovs_lib.py:548 msgid "Cannot match priority on flow deletion or modification" msgstr "" -#: neutron/agent/common/ovs_lib.py:552 +#: neutron/agent/common/ovs_lib.py:553 msgid "Must specify one or more actions on flow addition or modification" msgstr "" @@ -343,7 +343,7 @@ msgstr "" msgid "Use broadcast in DHCP replies" msgstr "" -#: neutron/agent/l3/agent.py:276 +#: neutron/agent/l3/agent.py:278 msgid "" "The 'gateway_external_network_id' option must be configured for this " "agent as Neutron has more than one external network." @@ -1343,29 +1343,33 @@ msgid "" msgstr "" #: neutron/common/config.py:135 +msgid "IPAM driver to use." +msgstr "" + +#: neutron/common/config.py:137 msgid "" "If True, then allow plugins that support it to create VLAN transparent " "networks." msgstr "" -#: neutron/common/config.py:142 +#: neutron/common/config.py:144 msgid "" "Where to store Neutron state files. This directory must be writable by " "the agent." msgstr "" -#: neutron/common/config.py:174 +#: neutron/common/config.py:176 msgid "" "Name of nova region to use. Useful if keystone manages more than one " "region." 
msgstr "" -#: neutron/common/config.py:196 +#: neutron/common/config.py:198 #, python-format msgid "Base MAC: %s" msgstr "" -#: neutron/common/config.py:229 +#: neutron/common/config.py:231 #, python-format msgid "Unable to load %(app_name)s from configuration file %(config_path)s." msgstr "" @@ -4691,23 +4695,23 @@ msgid "" "error: %(error)s" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1592 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1594 msgid "" "DVR deployments for VXLAN/GRE underlays require L2-pop to be enabled, in " "both the Agent and Server side." msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1606 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1608 #, python-format msgid "Parsing bridge_mappings failed: %s." msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1628 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1630 #, python-format msgid "Invalid tunnel type specified: %s" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1631 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1633 msgid "Tunneling cannot be enabled without a valid local_ip." msgstr "" @@ -5140,12 +5144,12 @@ msgid "" "operation." msgstr "" -#: neutron/tests/unit/plugins/ml2/test_plugin.py:424 +#: neutron/tests/unit/plugins/ml2/test_plugin.py:423 #, python-format msgid "Deleting port %s" msgstr "" -#: neutron/tests/unit/plugins/ml2/test_plugin.py:425 +#: neutron/tests/unit/plugins/ml2/test_plugin.py:424 #, python-format msgid "The port '%s' was deleted" msgstr "" @@ -5179,8 +5183,8 @@ msgstr "" msgid "Adds test attributes to core resources." 
msgstr "" -#: neutron/tests/unit/plugins/openvswitch/agent/test_ovs_neutron_agent.py:870 -#: neutron/tests/unit/plugins/openvswitch/agent/test_ovs_neutron_agent.py:887 +#: neutron/tests/unit/plugins/openvswitch/agent/test_ovs_neutron_agent.py:881 +#: neutron/tests/unit/plugins/openvswitch/agent/test_ovs_neutron_agent.py:898 #, python-format msgid "Failed to set-up %(type)s tunnel port to %(ip)s" msgstr "" From 46223363bd4d41639102ae1923dd1dfb306ec808 Mon Sep 17 00:00:00 2001 From: Cedric Brandily Date: Sat, 30 May 2015 20:41:29 +0200 Subject: [PATCH 102/292] Refactor type_tunnel/gre/vxlan to reduce duplicate code gre and vxlan type drivers have similar implementations for multiple methods: * get_endpoint_by_host * get_endpoint_by_ip * delete_endpoint * get_endpoints * add_endpoint This change abstracts these methods and moves the abstractions to the new class EndpointTunnelTypeDriver. Change-Id: Iab97f8283b6bf5586334958de950664f6e74202a --- neutron/plugins/ml2/drivers/type_gre.py | 41 ++--------------- neutron/plugins/ml2/drivers/type_tunnel.py | 46 +++++++++++++++++++ neutron/plugins/ml2/drivers/type_vxlan.py | 46 +++---------------- .../plugins/ml2/drivers/base_type_tunnel.py | 5 +- 4 files changed, 59 insertions(+), 79 deletions(-) diff --git a/neutron/plugins/ml2/drivers/type_gre.py b/neutron/plugins/ml2/drivers/type_gre.py index 134348b0697..18d7040f79a 100644 --- a/neutron/plugins/ml2/drivers/type_gre.py +++ b/neutron/plugins/ml2/drivers/type_gre.py @@ -66,10 +66,11 @@ class GreEndpoints(model_base.BASEV2): return "" % self.ip_address -class GreTypeDriver(type_tunnel.TunnelTypeDriver): +class GreTypeDriver(type_tunnel.EndpointTunnelTypeDriver): def __init__(self): - super(GreTypeDriver, self).__init__(GreAllocation) + super(GreTypeDriver, self).__init__( + GreAllocation, GreEndpoints) def get_type(self): return p_const.TYPE_GRE @@ -127,45 +128,13 @@ class GreTypeDriver(type_tunnel.TunnelTypeDriver): def get_endpoints(self): """Get every gre endpoints from 
database.""" - - LOG.debug("get_gre_endpoints() called") - session = db_api.get_session() - - gre_endpoints = session.query(GreEndpoints) + gre_endpoints = self._get_endpoints() return [{'ip_address': gre_endpoint.ip_address, 'host': gre_endpoint.host} for gre_endpoint in gre_endpoints] - def get_endpoint_by_host(self, host): - LOG.debug("get_endpoint_by_host() called for host %s", host) - session = db_api.get_session() - return (session.query(GreEndpoints). - filter_by(host=host).first()) - - def get_endpoint_by_ip(self, ip): - LOG.debug("get_endpoint_by_ip() called for ip %s", ip) - session = db_api.get_session() - return (session.query(GreEndpoints). - filter_by(ip_address=ip).first()) - def add_endpoint(self, ip, host): - LOG.debug("add_gre_endpoint() called for ip %s", ip) - session = db_api.get_session() - try: - gre_endpoint = GreEndpoints(ip_address=ip, host=host) - gre_endpoint.save(session) - except db_exc.DBDuplicateEntry: - gre_endpoint = (session.query(GreEndpoints). - filter_by(ip_address=ip).one()) - LOG.warning(_LW("Gre endpoint with ip %s already exists"), ip) - return gre_endpoint - - def delete_endpoint(self, ip): - LOG.debug("delete_gre_endpoint() called for ip %s", ip) - session = db_api.get_session() - - with session.begin(subtransactions=True): - session.query(GreEndpoints).filter_by(ip_address=ip).delete() + return self._add_endpoint(ip, host) def get_mtu(self, physical_network=None): mtu = super(GreTypeDriver, self).get_mtu(physical_network) diff --git a/neutron/plugins/ml2/drivers/type_tunnel.py b/neutron/plugins/ml2/drivers/type_tunnel.py index 68ffc3d3b06..12dce86f48f 100644 --- a/neutron/plugins/ml2/drivers/type_tunnel.py +++ b/neutron/plugins/ml2/drivers/type_tunnel.py @@ -15,10 +15,12 @@ import abc from oslo_config import cfg +from oslo_db import exception as db_exc from oslo_log import log from neutron.common import exceptions as exc from neutron.common import topics +from neutron.db import api as db_api from neutron.i18n import _LI, 
_LW from neutron.plugins.common import utils as plugin_utils from neutron.plugins.ml2 import driver_api as api @@ -196,6 +198,50 @@ class TunnelTypeDriver(helpers.SegmentTypeDriver): return min(mtu) if mtu else 0 +class EndpointTunnelTypeDriver(TunnelTypeDriver): + + def __init__(self, segment_model, endpoint_model): + super(EndpointTunnelTypeDriver, self).__init__(segment_model) + self.endpoint_model = endpoint_model + self.segmentation_key = iter(self.primary_keys).next() + + def get_endpoint_by_host(self, host): + LOG.debug("get_endpoint_by_host() called for host %s", host) + session = db_api.get_session() + return (session.query(self.endpoint_model). + filter_by(host=host).first()) + + def get_endpoint_by_ip(self, ip): + LOG.debug("get_endpoint_by_ip() called for ip %s", ip) + session = db_api.get_session() + return (session.query(self.endpoint_model). + filter_by(ip_address=ip).first()) + + def delete_endpoint(self, ip): + LOG.debug("delete_endpoint() called for ip %s", ip) + session = db_api.get_session() + with session.begin(subtransactions=True): + (session.query(self.endpoint_model). + filter_by(ip_address=ip).delete()) + + def _get_endpoints(self): + LOG.debug("_get_endpoints() called") + session = db_api.get_session() + return session.query(self.endpoint_model) + + def _add_endpoint(self, ip, host, **kwargs): + LOG.debug("_add_endpoint() called for ip %s", ip) + session = db_api.get_session() + try: + endpoint = self.endpoint_model(ip_address=ip, host=host, **kwargs) + endpoint.save(session) + except db_exc.DBDuplicateEntry: + endpoint = (session.query(self.endpoint_model). 
+ filter_by(ip_address=ip).one()) + LOG.warning(_LW("Endpoint with ip %s already exists"), ip) + return endpoint + + class TunnelRpcCallbackMixin(object): def setup_tunnel_callback_mixin(self, notifier, type_manager): diff --git a/neutron/plugins/ml2/drivers/type_vxlan.py b/neutron/plugins/ml2/drivers/type_vxlan.py index 51125701c22..b8cdb003c33 100644 --- a/neutron/plugins/ml2/drivers/type_vxlan.py +++ b/neutron/plugins/ml2/drivers/type_vxlan.py @@ -14,7 +14,6 @@ # under the License. from oslo_config import cfg -from oslo_db import exception as db_exc from oslo_log import log from six import moves import sqlalchemy as sa @@ -23,7 +22,7 @@ from sqlalchemy import sql from neutron.common import exceptions as n_exc from neutron.db import api as db_api from neutron.db import model_base -from neutron.i18n import _LE, _LW +from neutron.i18n import _LE from neutron.plugins.common import constants as p_const from neutron.plugins.ml2.drivers import type_tunnel @@ -70,10 +69,11 @@ class VxlanEndpoints(model_base.BASEV2): return "" % self.ip_address -class VxlanTypeDriver(type_tunnel.TunnelTypeDriver): +class VxlanTypeDriver(type_tunnel.EndpointTunnelTypeDriver): def __init__(self): - super(VxlanTypeDriver, self).__init__(VxlanAllocation) + super(VxlanTypeDriver, self).__init__( + VxlanAllocation, VxlanEndpoints) def get_type(self): return p_const.TYPE_VXLAN @@ -132,48 +132,14 @@ class VxlanTypeDriver(type_tunnel.TunnelTypeDriver): def get_endpoints(self): """Get every vxlan endpoints from database.""" - - LOG.debug("get_vxlan_endpoints() called") - session = db_api.get_session() - - vxlan_endpoints = session.query(VxlanEndpoints) + vxlan_endpoints = self._get_endpoints() return [{'ip_address': vxlan_endpoint.ip_address, 'udp_port': vxlan_endpoint.udp_port, 'host': vxlan_endpoint.host} for vxlan_endpoint in vxlan_endpoints] - def get_endpoint_by_host(self, host): - LOG.debug("get_endpoint_by_host() called for host %s", host) - session = db_api.get_session() - return 
(session.query(VxlanEndpoints). - filter_by(host=host).first()) - - def get_endpoint_by_ip(self, ip): - LOG.debug("get_endpoint_by_ip() called for ip %s", ip) - session = db_api.get_session() - return (session.query(VxlanEndpoints). - filter_by(ip_address=ip).first()) - def add_endpoint(self, ip, host, udp_port=p_const.VXLAN_UDP_PORT): - LOG.debug("add_vxlan_endpoint() called for ip %s", ip) - session = db_api.get_session() - try: - vxlan_endpoint = VxlanEndpoints(ip_address=ip, - udp_port=udp_port, - host=host) - vxlan_endpoint.save(session) - except db_exc.DBDuplicateEntry: - vxlan_endpoint = (session.query(VxlanEndpoints). - filter_by(ip_address=ip).one()) - LOG.warning(_LW("Vxlan endpoint with ip %s already exists"), ip) - return vxlan_endpoint - - def delete_endpoint(self, ip): - LOG.debug("delete_vxlan_endpoint() called for ip %s", ip) - session = db_api.get_session() - - with session.begin(subtransactions=True): - session.query(VxlanEndpoints).filter_by(ip_address=ip).delete() + return self._add_endpoint(ip, host, udp_port=udp_port) def get_mtu(self, physical_network=None): mtu = super(VxlanTypeDriver, self).get_mtu() diff --git a/neutron/tests/unit/plugins/ml2/drivers/base_type_tunnel.py b/neutron/tests/unit/plugins/ml2/drivers/base_type_tunnel.py index 41431e0c898..725fdaab18e 100644 --- a/neutron/tests/unit/plugins/ml2/drivers/base_type_tunnel.py +++ b/neutron/tests/unit/plugins/ml2/drivers/base_type_tunnel.py @@ -21,6 +21,7 @@ from testtools import matchers from neutron.common import exceptions as exc from neutron.db import api as db from neutron.plugins.ml2 import driver_api as api +from neutron.plugins.ml2.drivers import type_tunnel TUNNEL_IP_ONE = "10.10.10.10" TUNNEL_IP_TWO = "10.10.10.20" @@ -33,7 +34,6 @@ UPDATED_TUNNEL_RANGES = [(TUN_MIN + 5, TUN_MAX + 5)] class TunnelTypeTestMixin(object): - DRIVER_MODULE = None DRIVER_CLASS = None TYPE = None @@ -208,8 +208,7 @@ class TunnelTypeTestMixin(object): def 
test_add_endpoint_for_existing_tunnel_ip(self): self.add_endpoint() - log = getattr(self.DRIVER_MODULE, 'LOG') - with mock.patch.object(log, 'warning') as log_warn: + with mock.patch.object(type_tunnel.LOG, 'warning') as log_warn: self.add_endpoint() log_warn.assert_called_once_with(mock.ANY, TUNNEL_IP_ONE) From b48bccc60e4fd552d7b127376f41810b61e0ba9d Mon Sep 17 00:00:00 2001 From: Assaf Muller Date: Mon, 1 Jun 2015 17:05:56 -0400 Subject: [PATCH 103/292] Remove get_dhcp_port RPC method This method was last used in Icehouse. I think we can safely remove all of its code and tests. Icehouse to Liberty rolling upgrades are in no way expected to work so I just bumped the RPC version and removed all traces of the code. Change-Id: Ia545c5be7da80c919ad5bae6074bc1cc3aa89fa0 Closes-Bug: #1314534 --- neutron/agent/dhcp/agent.py | 9 -- neutron/api/rpc/handlers/dhcp_rpc.py | 90 ++----------------- neutron/tests/unit/agent/dhcp/test_agent.py | 11 --- .../unit/api/rpc/handlers/test_dhcp_rpc.py | 70 --------------- 4 files changed, 7 insertions(+), 173 deletions(-) diff --git a/neutron/agent/dhcp/agent.py b/neutron/agent/dhcp/agent.py index 89fd0773ed5..f6513d981c9 100644 --- a/neutron/agent/dhcp/agent.py +++ b/neutron/agent/dhcp/agent.py @@ -422,15 +422,6 @@ class DhcpPluginApi(object): if network: return dhcp.NetModel(self.use_namespaces, network) - def get_dhcp_port(self, network_id, device_id): - """Make a remote process call to get the dhcp port.""" - cctxt = self.client.prepare() - port = cctxt.call(self.context, 'get_dhcp_port', - network_id=network_id, device_id=device_id, - host=self.host) - if port: - return dhcp.DictModel(port) - def create_dhcp_port(self, port): """Make a remote process call to create the dhcp port.""" cctxt = self.client.prepare(version='1.1') diff --git a/neutron/api/rpc/handlers/dhcp_rpc.py b/neutron/api/rpc/handlers/dhcp_rpc.py index 4e17e39dda0..7d97b7c5226 100644 --- a/neutron/api/rpc/handlers/dhcp_rpc.py +++ 
b/neutron/api/rpc/handlers/dhcp_rpc.py @@ -47,9 +47,15 @@ class DhcpRpcCallback(object): # 1.0 - Initial version. # 1.1 - Added get_active_networks_info, create_dhcp_port, # and update_dhcp_port methods. + # 1.2 - Removed get_dhcp_port. When removing a method (Making a + # backwards incompatible change) you would normally bump the + # major version. However, since the method was unused in the + # RPC client for many releases, it should be OK to bump the + # minor release instead and claim RPC compatibility with the + # last few client versions. target = oslo_messaging.Target( namespace=constants.RPC_NAMESPACE_DHCP_PLUGIN, - version='1.1') + version='1.2') def _get_active_networks(self, context, **kwargs): """Retrieve and return a list of the active networks.""" @@ -151,88 +157,6 @@ class DhcpRpcCallback(object): network['ports'] = plugin.get_ports(context, filters=filters) return network - def get_dhcp_port(self, context, **kwargs): - """Allocate a DHCP port for the host and return port information. - - This method will re-use an existing port if one already exists. When a - port is re-used, the fixed_ip allocation will be updated to the current - network state. If an expected failure occurs, a None port is returned. 
- - """ - host = kwargs.get('host') - network_id = kwargs.get('network_id') - device_id = kwargs.get('device_id') - # There could be more than one dhcp server per network, so create - # a device id that combines host and network ids - - LOG.debug('Port %(device_id)s for %(network_id)s requested from ' - '%(host)s', {'device_id': device_id, - 'network_id': network_id, - 'host': host}) - plugin = manager.NeutronManager.get_plugin() - retval = None - - filters = dict(network_id=[network_id]) - subnets = dict([(s['id'], s) for s in - plugin.get_subnets(context, filters=filters)]) - - dhcp_enabled_subnet_ids = [s['id'] for s in - subnets.values() if s['enable_dhcp']] - - try: - filters = dict(network_id=[network_id], device_id=[device_id]) - ports = plugin.get_ports(context, filters=filters) - if ports: - # Ensure that fixed_ips cover all dhcp_enabled subnets. - port = ports[0] - for fixed_ip in port['fixed_ips']: - if fixed_ip['subnet_id'] in dhcp_enabled_subnet_ids: - dhcp_enabled_subnet_ids.remove(fixed_ip['subnet_id']) - port['fixed_ips'].extend( - [dict(subnet_id=s) for s in dhcp_enabled_subnet_ids]) - - retval = plugin.update_port(context, port['id'], - dict(port=port)) - - except n_exc.NotFound as e: - LOG.warning(e) - - if retval is None: - # No previous port exists, so create a new one. 
- LOG.debug('DHCP port %(device_id)s on network %(network_id)s ' - 'does not exist on %(host)s', - {'device_id': device_id, - 'network_id': network_id, - 'host': host}) - try: - network = plugin.get_network(context, network_id) - except n_exc.NetworkNotFound: - LOG.warn(_LW("Network %s could not be found, it might have " - "been deleted concurrently."), network_id) - return - - port_dict = dict( - admin_state_up=True, - device_id=device_id, - network_id=network_id, - tenant_id=network['tenant_id'], - mac_address=attributes.ATTR_NOT_SPECIFIED, - name='', - device_owner=constants.DEVICE_OWNER_DHCP, - fixed_ips=[dict(subnet_id=s) for s in dhcp_enabled_subnet_ids]) - - retval = self._port_action(plugin, context, {'port': port_dict}, - 'create_port') - if not retval: - return - - # Convert subnet_id to subnet dict - for fixed_ip in retval['fixed_ips']: - subnet_id = fixed_ip.pop('subnet_id') - fixed_ip['subnet'] = subnets[subnet_id] - - return retval - def release_dhcp_port(self, context, **kwargs): """Release the port currently being used by a DHCP agent.""" host = kwargs.get('host') diff --git a/neutron/tests/unit/agent/dhcp/test_agent.py b/neutron/tests/unit/agent/dhcp/test_agent.py index f49a3d73afc..e41c3094fa0 100644 --- a/neutron/tests/unit/agent/dhcp/test_agent.py +++ b/neutron/tests/unit/agent/dhcp/test_agent.py @@ -993,10 +993,6 @@ class TestDhcpPluginApiProxy(base.BaseTestCase): self._test_dhcp_api('get_network_info', network_id='fake_id', return_value=None) - def test_get_dhcp_port(self): - self._test_dhcp_api('get_dhcp_port', network_id='fake_id', - device_id='fake_id_2', return_value=None) - def test_create_dhcp_port(self): self._test_dhcp_api('create_dhcp_port', port='fake_port', return_value=None, version='1.1') @@ -1203,7 +1199,6 @@ class TestDeviceManager(base.BaseTestCase): port = port or fake_port1 plugin = mock.Mock() plugin.create_dhcp_port.return_value = port or fake_port1 - plugin.get_dhcp_port.return_value = port or fake_port1 
self.ensure_device_is_ready.return_value = device_is_ready self.mock_driver.get_device_name.return_value = 'tap12345678-12' @@ -1328,17 +1323,12 @@ class TestDeviceManager(base.BaseTestCase): True, dict(id=FAKE_NETWORK_UUID, tenant_id='aaaaaaaa-aaaa-aaaa-aaaaaaaaaaaa')) - fake_port = dhcp.DictModel( - dict(id='12345678-1234-aaaa-1234567890ab', - mac_address='aa:bb:cc:dd:ee:ff')) - with mock.patch('neutron.agent.linux.interface.NullDriver') as dvr_cls: mock_driver = mock.MagicMock() mock_driver.get_device_name.return_value = 'tap12345678-12' dvr_cls.return_value = mock_driver plugin = mock.Mock() - plugin.get_dhcp_port.return_value = fake_port dh = dhcp.DeviceManager(cfg.CONF, plugin) dh.destroy(fake_net, 'tap12345678-12') @@ -1365,7 +1355,6 @@ class TestDeviceManager(base.BaseTestCase): dvr_cls.return_value = mock_driver plugin = mock.Mock() - plugin.get_dhcp_port.return_value = fake_port dh = dhcp.DeviceManager(cfg.CONF, plugin) dh.get_interface_name(fake_net, fake_port) diff --git a/neutron/tests/unit/api/rpc/handlers/test_dhcp_rpc.py b/neutron/tests/unit/api/rpc/handlers/test_dhcp_rpc.py index 758cfd11fde..c17c57e27aa 100644 --- a/neutron/tests/unit/api/rpc/handlers/test_dhcp_rpc.py +++ b/neutron/tests/unit/api/rpc/handlers/test_dhcp_rpc.py @@ -152,37 +152,6 @@ class TestDhcpRpcCallback(base.BaseTestCase): self.assertEqual(retval['subnets'], subnet_retval) self.assertEqual(retval['ports'], port_retval) - def _test_get_dhcp_port_helper(self, port_retval, other_expectations=[], - update_port=None, create_port=None): - subnets_retval = [dict(id='a', enable_dhcp=True), - dict(id='b', enable_dhcp=False)] - - self.plugin.get_subnets.return_value = subnets_retval - if port_retval: - self.plugin.get_ports.return_value = [port_retval] - else: - self.plugin.get_ports.return_value = [] - if isinstance(update_port, n_exc.NotFound): - self.plugin.update_port.side_effect = update_port - else: - self.plugin.update_port.return_value = update_port - 
self.plugin.create_port.return_value = create_port - - retval = self.callbacks.get_dhcp_port(mock.Mock(), - network_id='netid', - device_id='devid', - host='host') - - expected = [mock.call.get_subnets(mock.ANY, - filters=dict(network_id=['netid'])), - mock.call.get_ports(mock.ANY, - filters=dict(network_id=['netid'], - device_id=['devid']))] - - expected.extend(other_expectations) - self.plugin.assert_has_calls(expected) - return retval - def test_update_dhcp_port_verify_port_action_port_dict(self): port = {'port': {'network_id': 'foo_network_id', 'device_owner': constants.DEVICE_OWNER_DHCP, @@ -222,45 +191,6 @@ class TestDhcpRpcCallback(base.BaseTestCase): self.plugin.assert_has_calls( mock.call.update_port(mock.ANY, 'foo_port_id', expected_port)) - def test_get_dhcp_port_existing(self): - port_retval = dict(id='port_id', fixed_ips=[dict(subnet_id='a')]) - expectations = [ - mock.call.update_port(mock.ANY, 'port_id', dict(port=port_retval))] - - self._test_get_dhcp_port_helper(port_retval, expectations, - update_port=port_retval) - self.assertEqual(len(self.log.mock_calls), 1) - - def _test_get_dhcp_port_create_new(self, update_port=None): - self.plugin.get_network.return_value = dict(tenant_id='tenantid') - create_spec = dict(tenant_id='tenantid', device_id='devid', - network_id='netid', name='', - admin_state_up=True, - device_owner=constants.DEVICE_OWNER_DHCP, - mac_address=mock.ANY) - create_retval = create_spec.copy() - create_retval['id'] = 'port_id' - create_retval['fixed_ips'] = [dict(subnet_id='a', enable_dhcp=True)] - - create_spec['fixed_ips'] = [dict(subnet_id='a')] - - expectations = [ - mock.call.get_network(mock.ANY, 'netid'), - mock.call.create_port(mock.ANY, dict(port=create_spec))] - - retval = self._test_get_dhcp_port_helper(None, expectations, - update_port=update_port, - create_port=create_retval) - self.assertEqual(create_retval, retval) - self.assertEqual(len(self.log.mock_calls), 2) - - def test_get_dhcp_port_create_new(self): - 
self._test_get_dhcp_port_create_new() - - def test_get_dhcp_port_create_new_with_failure_on_port_update(self): - self._test_get_dhcp_port_create_new( - update_port=n_exc.PortNotFound(port_id='foo')) - def test_release_dhcp_port(self): port_retval = dict(id='port_id', fixed_ips=[dict(subnet_id='a')]) self.plugin.get_ports.return_value = [port_retval] From 805d4b148ae4b32657ed6a83e9a530b8b2c806ac Mon Sep 17 00:00:00 2001 From: Assaf Muller Date: Tue, 2 Jun 2015 12:21:11 -0400 Subject: [PATCH 104/292] Add devref that explains fullstack testing and its direction The goal of this doc is to communicate what are full stack tests, how they benefit you and when would you write such a test. Additionally I'd like to communicate the way forward, and gather feedback about any areas in the code that can benefit from full stack tests, and any additional thoughts! Change-Id: Ifd4ff9be0ed0184a49df6566d238c31a328cd23f --- doc/source/devref/fullstack_testing.rst | 93 ++++++++++++++++++ .../images/fullstack-multinode-simulation.png | Bin 0 -> 29718 bytes doc/source/devref/index.rst | 9 +- neutron/tests/fullstack/README | 1 + 4 files changed, 102 insertions(+), 1 deletion(-) create mode 100644 doc/source/devref/fullstack_testing.rst create mode 100644 doc/source/devref/images/fullstack-multinode-simulation.png create mode 100644 neutron/tests/fullstack/README diff --git a/doc/source/devref/fullstack_testing.rst b/doc/source/devref/fullstack_testing.rst new file mode 100644 index 00000000000..b761e9bf401 --- /dev/null +++ b/doc/source/devref/fullstack_testing.rst @@ -0,0 +1,93 @@ +========================== +Neutron Full Stack Testing +========================== + +Why? +==== + +The idea behind "fullstack" testing is to fill a gap between unit + functional +tests and Tempest. Tempest tests are expensive to run, difficult to run in +a multi node environment, and are often very high level and provide little +indication to what is wrong, only that something is wrong. 
Developers further +benefit from full stack testing as it can sufficiently simulate a real +environment and provide a rapidly reproducible way to verify code as you're +still writing it. + +How? +==== + +Full stack tests set up their own Neutron processes (Server & agents). They +assume a working Rabbit and MySQL server before the run starts. Instructions +on how to run fullstack tests on a VM are available at TESTING.rst: +http://git.openstack.org/cgit/openstack/neutron/tree/TESTING.rst + +Each test defines its own topology (What and how many servers and agents should +be running). + +Since the test runs on the machine itself, full stack testing enables +"white box" testing. This means that you can, for example, create a router +through the API and then assert that a namespace was created for it. + +Full stack tests run in the Neutron tree with Neutron resources alone. You +may use the Neutron API (Keystone is set to NOAUTH so that it's out of the +picture). VMs may be simulated with a helper class that contains a container- +like object in its own namespace and IP address. It has helper methods to send +different kinds of traffic. The "VM" may be connected to br-int or br-ex, +to simulate internal or external traffic. + +Full stack testing can simulate multi node testing by starting an agent +multiple times. Specifically, each node would have its own copy of the +OVS/DHCP/L3 agents, all configured with the same "host" value. Each OVS agent +is connected to its own pair of br-int/br-ex, and those bridges are then +interconnected. + +.. image:: images/fullstack-multinode-simulation.png + +When? +===== + +1) You'd like to test the interaction between Neutron components (Server + and agents) and have already tested each component in isolation via unit or + functional tests. You should have many unit tests, fewer tests to test + a component and even fewer to test their interaction. Edge cases should + not be tested with full stack testing. 
+2) You'd like to increase coverage by testing features that require multi node + testing such as l2pop, L3 HA and DVR. +3) You'd like to test agent restarts. We've found bugs in the OVS, DHCP and + L3 agents and haven't found an effective way to test these scenarios. Full + stack testing can help here as the full stack infrastructure can restart an + agent during the test. + +Short Term Goals +================ + +* Multinode & Stability: + - Interconnect the internal and external bridges + - Convert the L3 HA failover functional test to a full stack test + - Write a test for DHCP HA / Multiple DHCP agents per network +* Write DVR tests +* Write L3 HA tests +* Write a test that validates L3 HA + l2pop integration after + https://bugs.launchpad.net/neutron/+bug/1365476 is fixed. +* Write a test that validates DVR + L3 HA integration after + https://bugs.launchpad.net/neutron/+bug/1365473 is fixed. + +None of these tasks currently have owners. Feel free to send patches! + +After these tests are merged, it should be fair to start asking contributors to +add full stack tests when appropriate in the patches themselves and not after +the fact as there will probably be something to copy/paste from. + +Long Term Goals +=============== + +* Currently we configure the OVS agent with VLANs segmentation (Only because + it's easier). This allows us to validate most functionality, but we might + need to support tunneling somehow. +* How do advanced services use the full stack testing infrastructure? I'd + assume we treat all of the infrastructure classes as a publicly consumed + API and have the XaaS repos import and use them. +* Currently we configure the Neutron server with the ML2 plugin and the OVS + mechanism driver. We may modularize the topology configuration further to + allow to rerun full stack tests against different Neutron plugins or ML2 + mechanism drivers. 
diff --git a/doc/source/devref/images/fullstack-multinode-simulation.png b/doc/source/devref/images/fullstack-multinode-simulation.png new file mode 100644 index 0000000000000000000000000000000000000000..c124e4311e3dc05194905b533b9798b453516013 GIT binary patch literal 29718 zcmcG$2UL^Wwl*wP5D*c>0D=_hAfZd>(xmsQG()c{1f&;fLICLz!+@x!Uv0`K-LvP*WheL4D)ewQD4bPh_>O zUBiX~|C;72>=HF@Bq0rFphV}CEpXpw{3zJ^UDMTw9)rT|olGbID9uw6Qo-(ly z&IunAFa+JQdpmy(4*YyffCa_;{kR=;^*J^Y_=q9MpBM9p0r$TYAppr3m&C-xETNK2 zkziu`RN6-`{E>G71BzW;T|G4O+xWy?9}0}r)k&p701J8QK4@**u1Aj^C122Vff2(K z3kz9wqT%l_cFi_mpj=H^g5h8EX2cBCazVE?Zvu_T0X|}wp#=)a=7!zMnUe-86$mF_ zkZpPcShS%adt4(5G&xM3j3LOz{T4XnRwU!&Ft_UrX2ip6j2I(84=_dmyMPglUBC#? zIgF7RTA*VXyBH%N8H^FYE?@*>7cc_!1Y-oS3m5_H{%Kq`zEJq-;g1yk^vZwVd+65|#=j0$6pg-*fvk7IH*T#uKqq}wX*PumDc4T(%nh^tiz=Ap! zTixWw{$m;_48{liO?>s*=1nN8dZx<3uer31nOaVf*V|^Q{5T23ZC;1JEG#S@i1?hi zYNKV3d6gXWbE;xz1)D18%vCIP4ErWv+~9%}$rs6mhKT<;?|t_I1z2rFgy7QM%V_J$9EZ z&|}59z1(Sn_IfiOAtNGP(18N=EE8?x<$#P9>lHom0bo@6Yjr0*>&&mOuRpSam=;01 zqnP`Wc<%3W^`D%a#5&H_<-_jA4XzAvhiTvb=vQ#!O~A$BwIQQ2{`%hM_g#Tz=7 zX5b2^NrWMPiPO2yC&WY%2N<&qs0pBGt}#WF;CD~S1N-?mfsoZ(%NDg{vYroqK2-FP=Jj0~xLV1lv-5#12hHnof^X6`Q21-dkXOoVc z=R9fdC3~}e3wc)krHcD$-BM?=*+cyP`MrC0c>BVi_ee(cX1fHL(a5WB&hMynCocfg z0&qBvdA|y6Nr={1%ol<5AAaMZ?`{PK$_egK!NRbaVO54?$-g_3<+xlUv$L~nerhS? 
z-SnDo_Q>r;<*HJn$zE_htwX$m=P-MgspRG8fyh;g`qBQA<;H0y%H1Vm;Z7D<2zmp{ z*$?JjMMdwq`PM^8@Pz0NTuo@I#$CSd?!$5+U*7OWJnRwPNa1)4oz!XZyg%FkBPn&B zORKPM*~%CE;Eij)*cqHlIXHg)`n%rwXGSf2uH`ozxrwl^U-Qrgts4x_RtTA3bkCc4 zI2)P6#!gS_Lvy!%RXkf;Z{k(xSvQtn>z65~7pElKeMd77OvMKog4}23WAYhop%q5@xjZ<~pX}~rQi9k>uG={`1zAX+a_S+z z;4L6lL!y^0NJ!U@2@v98RvpO>FJ6FmQ*OLIs}2`;_L-P)i923Ryv}_tO7u-I$hLcp zuG*~s`?GiYI#JuF3YGbloiLWMfK>L)JA<~1mG6c_u+ZF!NB zfXe2Tcz15O+#NG6p6PEm&JF}453cREJ83{Q#X3|ow+J9&HfWP-GZoDdF54sC(wj1% zzBJq)q4Zr_QP=y$F5`uqz9kb53AvU+IG)xA>Rs(1FnV~g40kLEv+e8d%fLf_4cu#y z{*}!sZq!=6Z<6%d4Tdd3n!nXDf_7z~mUV5vU;LE*(q+JPLl=MbIXHj!BwN}~lEW(o z*N}IWz?_TnPQ=_Sdc2h19Kq)Pwe`*7p~j)@@WXR%>KuL7eg%3}?2ansq1DqOn`iSM zIIqTM<{om7v+W5E4oEC6_E+y%7k=vzK?9qccY4L* z6N4+836@@)_mg%Io^UC!^_^+ttIHnlC$7+hpICHgQuUaY1Sp{VEKxk3AkKAihWD>u zvtz&K0`1<)SLBf!YFFTgc_W;5TlGDF0RV?+T<QUIQR^I>1-^qaB}US_$K1pVbu`&kmCUB+i468JbjA0^Au-440r+=(c#p4E}0 zvV>&$1gry77sHq|I8nH3XsK|>n%R8PrR|em*VOCw99{5oZgj1amktTsmMR!wag^*J z%AA(J8X#YNFJfu8^l_HsUC{2z7o#@_cnyW}qf-HOYO4bn5#|KRzWJsyNA`6`61ViN z!Dd|K%yr}$#L_6Y`()7dDTRj={;Esrv zUl}jEylGX&Px(|9bY;1JA!QSZhDEO#ALbGc+q!kxJ#zI-S_w=k}ZoGnK;?;k(hBZ>X^C z9Ly(w1AEQDmQ^MnNO)l)PqZ7NFhPfz=C&w;F}IeI}^` z{=98pN?;$1;>RVEUQ{Sx)l3wRxWux*`oW~qxbh*c93R(2O`#U3YRZ zGp1LvdZ={6cS>E^=+m`opcx~F-IAQ+)rqBFE0qQh*AIHCgoq9~&+m>q5}%w^Cz{%D zH}c6$wPW@bJ3hGWs^)M=^L}(y2(Ks=a_snIu%C;P!Qnb6?yO+eu-Y8vBl#)8GCb#V zdw~&%T;sTf3Cc1x07;^py5oN*LH}A@t)O;ON3xHw81ZKn{{&$V^Jdt3pP{NR-PN*9 z9b+ruk2SQA>tdn4$tR`B_koez!;9Pwb+9eA|1N|7z#oeUjuIR*;$t|S?T5%q6 z)`;!3)BLakdWiE-kLP&V3?Wu>l)eR`e?t4pIsYqW%$dvnPayd>K>dR*{~w~(F~P_m zz!o|hLe|wgGpa6`A+|rJ-NQd5CgWqKJN^AT{gwY!WMpJ)bHmI*6Am;~RM}+m08qrY z2!Cg8U~~WSs5@@fsfjy9+{^9NpvVdw{eZo-#|hmq!Dc} z77`k2>rDpiNRN3l9=&9o24GN?MNe#AVWBmJ4S=y&r$4@gzFPg7t(hePK*Ey~gkiNj zZN<>x;UVkOdMDK=T2Xlv;}wqxUju-%ZO!8kcRJ(WJIvNO_Wb_Yt(>9Uy>w?yFt2?56fb47(CX7OQ)?gB+S;nwE@0UkZ}B<&PEWaUQ>=EOTI4#U zP(4d6G#3M)R5Bp2F_@>9*XSgSmNxW?%M_I|eEcd6RkUa|FC&TcbB6}U>3;pT`tG(2N6bQl@TxVkItEOux6lWfr4#bAYaN89w 
zDw{fyIHO@C?tJNQF$CeUW1$twPjJ^a{(FV2AM;tazt(W88;(*ChLxmB*}*RK`STZ5 zxFfTf`8t80`PtceB!FSeW4odmk;_uUovsvyAiGB{07vNgHP=KnD2t(E*(XKhyKSk= zG?~WrPIJkN{XKE4`XT&ym466X2{zoR=hChKAR(y^(^brdA77YTEp31Ac%wt{7{ewa zCuDms?Gn{8#Zo4UzY8KpZAS~3OZ6+%4mPH{Ua(-w*yMztRV?=?&;VYPuzKV&U1izx z_3N#!l&kuk?jD7JxCLFRtx1ncLCp;;KvRk!}Qviey455lzPzn7?l)<>Pu1x z4HHanaNjZs3A=ip!MM`~(f4FA+(s<+2LuFItPExV{sh)W(2VMmAFNOS0X)DXqJ&co zrVBfpK+=n$e}+tsR`A%AT%n@%#E$w7&Aog5@Yhjr_^scvm`({QE%`CT`73{Cz7}yD z=&$;n#b4&Cu`k2Ucy#0`na96cK1wfUP$Pk1#s?e%F*w01pEfaY*#Ks@vcz6CDYL$* z4~)-tBu|re9iXt8-@JDRdRQfod&--(^c-vE951*p8xH5aMi1v8#jqAk&V-Bz-nofr9RP+$+<^y zPE#5Uum<*q(_c+-?ro=YJ$QGYflG!ZR?=N!4Xdq~>*w5_R+)E&5Nh5(KR;h<;SBWm z6CRYOgvh%DK0E=v^s89)KUHYScN6Bl|UG{Jol>CZcc0$Ww^m_Xhkj#s-Y+kbsLy;#EWJMUP! zX!8g>c_FmbDA#Z?b`vPnVn6H^_jIe}zRN)XX_-=ai@9Re#--1@ldvh#@|0OUj5Psi zvyHWtkl3zE7wRsh6n})a*#?uklTz!!R)v{?E&01EI@1oq2Ot#)v-9D?&4Hg-(5tQd z&o5V;7kG>hHwPH-pE8(V{_Cs2MDgb(2H_yVSHKt4<W4`ESkKcPcJlk((GOsvaZ`gd?N)XI-kJD z6Cle@Q5Rn%4)Xmr*a);(vtkH5J@ht!N6dyi5*SeE&pLr$IoeppY zOaM;!p*;EgF7&%S+A@hbZeg!Lv%>VCaMV8P=r13GCp2#1ApI-Vf-BdPTHCail}}G% z_lKO+nJf=lL)h`m-THwczk-&)&>7fo_E^`kuAKc|;*}8rTs&b5J~%WzEmZR0XD;9$G-RJuM1P=$%oILv0vt8X67B{L< zg059;Z)@dYp(EaNQ*RIV8>R`1>3#F5&D#T8g@@PRk)%kWmTfId{G5teFW>LOseNv{o9_9Rr=_!c zveqVq8olwV-GrU4tyAI`J$v`2LFD9ksmUJs(>yCrpypZ9ApX&1LZFXYvryc1f@Zsb*lO z!F2rBavLqZ>Bk`Q_RoX-z}~qh5h8zJd6R}iy|n0CrO_yItnt4#(!wa-YvmDf8cA zMF&hJ=ntv=@>_blRDMdQwy4;ZSerPIV*rW1{xCsirQV)1ODU!TQI$ftg!`=Go@mP1 zPJQ8si;19@6UqlGO)@l!e|yyQT(Y#kam6mjW}F(+gA`kqg*v|rUrPfGnb&t4#hMKY zoxueJ$!3b*^H!4~URdu24g0{>26Y#t-}pF;9!&kNz5$J4BZX38jh;Lc!Y$gJYdDC} zq^DlCTf02Slt|OHl1?upnmDYh7pC~6<8=9~BvR}u3P?)?4Ue*vj?2+@d-n0fi$HA4 zW1))T6_SXk)d9N<@vZhwDP7S$)<>oJ4%nh|ZM1jg^8~!@x7f83b*&;;(9TS+di*PA z%vCfq5Z@j|#1a}2PdoJ zfXl)>4u;#R5`)NXP$X?V^7%((IIi=_d_ZXVHwd;x;u0gnF)kYIc2%2 zMq^%(cFhneDrD<))v(3ChTs!L*;pP?oeY3MQ^DW@v6399?nN(-teZd@aVM4q-9d?XEMIRFJUM;-bEs8BEih-rs7+3u!yj$y#$!P z9=GniNwDfPsI|1GfCtOe`m&9fOun;wG)n=T-AE!Rz6{@e^HNs$akh~pMX@4|hfK-O 
z0AAzKA&@SvhJDPqi#s;vfG>iYA^P>C4MlV<%~i;lTC%Kb6CKYBC1_|LBZT3$=M}e3 z1R6RRt$?jlayK}mEBKJMPZ|e_z7Ca>dF{)l5tCwMrqYc{xgWI$)+=x@q}Npk#o3DU zD~|}T?#9tPS62Dvx?Hg_KwFUmHDwK~ zvva=%O#;P6zNiZ1mU?Oxd{M=6Yd7MTU4d0gUBVS=flwkXJcg;5lV_=o7W@0;yOI$$ zekuW)4$GD;=3KaK&X-3Cy}z!NWO*VgfK&ZGGMv&%H%e#Xs;Jb1ErYgJ{f~5pE#@jx z7S${X-T8Np*IVk62iK%k_Ko|D95E-UhJL5P{6&nq#BONu)}qEUelwM8@%6BGw0G5H zZE)eX+AmG5g=CAT3nJ&G?tw~4khmbjbzMSSikH3A;r0S>c636mZC$UGOlTFjvSl93 zLjKXH!6B^5#6E)X;7m`wcEgzvvnpYNa6*pfrG56R1Bs4bc^7la2mC9iUfr(yTEdgY zs2dr=n-T)!zOBFY*(snBiD<7duLH9x%J}=%9!_-@1CKeM5=3D0c7)UBKG7YPU)=ZS zHLdfk;9l-4^_p1n&taDCeHe{(Rg)N;owS!x%t-06heqK2^I3Q5@OLOje@`vn8*cU#z1c8mi#eJHQC}T6m(GPh!5*~z0 zO79O>iiP28&5fkt;@@Svxv|{ki$G69?GZkv}`gX5lkTe7z zIRk(Qd)867YTw-mpdP}BP4L7ZWv4+?!jQ8X-P5NC^1mQ*<2|H+#Vn%sGk}~C(^l2W z>kV~(0eCDZEHZq$(aMrC4-bPY19{t)jfMkj{)VW619ZnzqwLK#+`Mf|@~o3008s6T zx3Alax??Ls$<->?AbAt`xiS3#il%XV&dARuJuHJNjGbqEi< zsRYtJtMY9?8F_j68A{}~BjV{_Os^lp-4Y#HcX4r1Gl+lvdU13Z2`Q;Q%DU{NvZK1Q z4FC%O?PUQ{$!NsrI^0?=L(>`nQ38x(RM^PZdSg_WlrCBJAdq(b*Ao~%{5PVA9}_KO zdYl2URY>k2&`TTHB7QX7`g3&-=C50k1VA|kG#mmzGK`5L{>(cUsu{qz4TpUV)363U z1wI3eP?G_Lei|+=VCn>3%K>W3yaSW}Q&huxjB4n{Wx<~sZe7)V3kJ9yQ@5tI80X(L zNRLs0k=2j?G6KAgX(RfIOa}oC#Mr!Q*k8^3kDmTdHnV|Rk(mBmHSE9H1o|};4w3oG zU03D))gPQI?_ndAY=NXKj1BSI?R$Gbos;Z#@JEuHHw$2Op8Iy_+&t5I8u;Suk-PAb zFj@;lLr14Zd&53ze{Zkm-j^zBGHK?hZ#oNoqVMx$Po& zob{LjdCd0r*4+Ymy*7;_=F`4;Zk>blX69cGe5>1W2AKW9o{LP+;-yHW;McL2Fl_iD zk%9PGBdz&=a&pYX{D(~gix!xP7>-T=%;W!$4;CjeuK)@G44MCj-ve`d+x{%{-vs?3|5v1Z*cKB{_>1Y=q5bP`ZYo75ZJv%hk4$D> zp15;XkX-f+jZU^6?fmMsU%2?qDJZxnc{+4imJaRP<>R!UAYrhN<2)>aqoVlb zguT1uwhgNGu-VyAmE`}z0QhlMVffCueR=rFKVi1>5S;%Jy_HSy61%caV>hM>rmUvR7}i*TC^wl;evPJY%ECks{3}`7lVMYvARldy zRNXpYk54B?G#5TxIl)9CCe|CY2k(zgqz$wlG=|7f0Xse&vbE__KVgz(fA7Jm-+5CB z%?W2kruedvA-DiqDsp(&?Pj#db->wzXzK9J8TUfXcX?asmjPJ7$PWZo=DQ=Fu&AcS zH~&m+fV~Yd<~`++uN_!1ol5Kf=&8rIU)3ky^3Z@B7qf-+`BP2{?pBmGw*sJ8Z~TkB z&8URv`KCUcN-z9e>qaQm?nZ^vQ;gw+6|ScMmJczOX&rg;j8$PYF-cn52=5U 
z)A#Ic$s2e7YNG?Y{WM)L5n0Ih@#$G`T;R1@)z#z`UCvQ{sMvb;(+}q=m>`*QQP-Q) z{;EKL*tWvfh8JHIyV{Q0L$d$)OLyjA5HG2>WhjT>GuK}pEBRS#Evr9NKu=$deoy9G z5u z&Vaa;DjSG@Tgyp_*s1f|2NdU4Ddbf?mHPpeT;cQTg|qGK>m_{c)>7=??XDzZJEX)yq0%^Wo*N5h#zi>ubK8`j2yd038rN#0Wk_)Vu(Vx>vtE z_bT@qPny<(llXGA4?LRuP_SwH+W5nej@b)vuND|lvOZhc%M1)`P#`Z%?4%zEquh^M z$S2S>Da>XVaD6Z5yIc37*;vzTu_Sb}P~%Ba!UlCq-JIy%XA&O-H}l2p&IgpIXz5 zT9fRYpU!IY*^m=E$`l4xP`|@3Wc!jB^c#MTK);pE!i1PpI>g)4*C9kN)lXY6e>_* zdg2SrJ1$V^XMrFK)6}M4ULV0!4Q#z+oypSLDcQa2r_T3k4-CoRmDqkhfSe_U_cC1| zeT6lUAkcx045B?g5v4j_AXdwe*GMECS_VAsm3xr$7X z`4VCn&)LV=(Zt^%FY-RyQ~!t((fcL;$e2Gkk5%R*En=qDmbI;gx%@vsG3Fphp{#jeYqp zgA~PkzAJc=5yM@{nU?%|-w(lw^^>_gG+?2Z=3JZ{Rt2)h2nI<~sFIm83)~hYuqD>& zP^pMJ5Xeh1dtyE=|ImU6Aksm+B+`aU9GoTQTbxvqJm?rasE15K@6SpGrTdHY)`$iX zsHI-Rp@@O+91u!Ll}@g|NQ@fbV{X$Yg8H&2d3Gi5pmusWIh)pmF*CK#2)|QBCeAnH zIpwwmi6)1>l2np4U1D#ifo4^US(zSablbtiBLdyOGkI2Z7n>KgSgH948)*l=e_WN$ z-R5pO0k5e79H?X4M;}B_02jQA49|4@81`Y}11FJ~r8PL@t1dn={GjX}DcsU=3pF6a z4xVTQ1RHB_5!oF%tJ*14a0Y`~9Qg5RxZ&`v;t84E+55IOeBT%eF*7V7!hYs%uN6NM zLiW@Q2nO^~m(;w)TD%1qc?RN)bKS~%dmj)B0I;9F?odzlU(_$JHB-+az$#umeB2`@Q!CGqM zQnd#v;m)}f7IFp$DX{y&Mlpo1D;^kJIZZrykLBF~K>zaV_4eYoop~VKF>G&lG{SQ| z=cIS06cfodNLa2{o7rBo?rzsjsagV58U5cvI2^;Y8f+G;sM`%(%n>R-T1@9ZZ7Ca3 zBB^8A7oLEa0@}RH5H>BGDD~sdrlrA^^32=9w~=#X z#Mo?@F|1L-NYAYy5KtjC*aAKo6i?hE`Q!Y`?|m#rLHs<;!bb6i-^SGm$``_N zUXEv^&1Fx-j$vXyKVTxC-orH&`LI?G{-u%t-hqPM>R)I01MLRu`rjwP&nI4b(Dba!;fR#uw~25cAa>*?%;-3qCkCjPq5k&S4hDaI(lQF|K2a^&B70zRu+P zB?4bZx%zS}UYa39zy;PntQAk5@rcTc3I_Fwxbne|-!kuWiGn9gz*apZqo%H2ySwzXZiaxOC<+~#u0H+wDx zD?0;oTjmr^{m92zo6q*>IS-8B8)})T3Z{vhRr!WEEa6T|d@&q-^J)3xhPeH~``YId zI~l;j6efu)9!<0j_K|CuaPZ{Fz$yxhVKg@qH{r*PXz$4?q_Z5|P!%n_$k@;P1;i$e z9_AJ~K(biCQ?Yl9MsiZCZtQGOkEezRW5RX-g9$NJ*8p`jPGZLQzq68m$oT)i2LK`b z?=0Yox05)Q9nzM^~QjTvUd3&_UL zZJJ4)$$~nsxDdaE#KgY6TKQs+A*K6=hlMbqM~|NP#9uK`lg@%Nbjg)c1B=;#3asS z)Zt`r%~k~0rT`GFF!NjWKcRg1%;5DPMi&7TNElkL7@VA(6h=cJlYjuEsbo>t8WGht zqvkzV zw2ULc->rlxHAY<^L!8Du)_l%$Y7Y+q8G9=rT4?Ow<#_%cdaya0bm2FYDZX|&{x*2z 
zMO2O@`U{d@Y~~?JMR~=m_+Vbx2sU0s0pQjcv~D{yJy?=>VUiRlL57SsPMqix=acdR z4pvAjIZKAZMd=d_whq(8E%%(g+!A8AVMfqCTe7xK5iJ%bB*!8KrUuLGXw`sZ@^#o96`)6g+}+#TFHTa#)<7@=YpHc}d-bJ4N0>vIlA_mo-)H0{W%lU# zC&m|^#=d3HJC)(aqu#og6O||PY@M{e*Ilztd{{oMpk+$s>lF#cOT=~Zq&D46$6I9~&o-S-6YpG;~eh*%eyH9^|-q0$h;25TW+ zDcbkcmJtF8F%U&qd3Y~7 z8I?zU5xej0-Y%fw<7?+rp(Q`=ma4!{tE16g5x32%u zpE;h|2_{f!x`thn6Qvwk4+#W6w~Qe?mAm{A!DDrkrubQ_3CY*abVisKwm`;YS2Cm& zT(5?j6c5%6vAcZ~P=?MuX&Ql&yHQoe*y+ig9)s8l^-W);wnWQjk)A8B z?8iKa;3JxE-osG@+kgvpSH0f5ge-9HYL@z)wq?fA>!M@&{#m|gKT6rBSeFoUg9m&j2-RMDxt=Evhz!1W> zijg~!6(3$Z1mmvKwIampGv85u_C$zi_cT}Vlb z7$w-l8m>1h{F*ekVMh@w9wo<7MZWx)y1(agXJW2;%V^$$s+3%A$+bBuVQ7|=V3(+G zzyBn_^8J#_VwLx7h&l}(G*CUlmy`KD&8#NIR}Rm#WQqsnN$lsh-=CL6a*LXG(e~z7 z-eje;t-kZI`t|_{@_B<5i`>Ji5UH}Ck7%t!@%k9N-`JSYFZX}mAWZnq9#jo%zqa5LYxUEMJX7DQcBtD(?2#z z&9LmEGn^cLS-EaNr!s#&qjkrKw7>7wQBw_Xtev3#|5_lM}$#8VrPgXs@UwQ3NR z@4&?mAu)~vtwvw`3NyB#dmZqc97Dq6Sh@WE!{?GuTunOC&r~Iqo=;X%HF;=4 zeCAKnEHq8G?j{yhY#}bmEJY0I;;RzB5hT4XrbN2($W`5c>9O8?m4<0gZc3zfPoXyI z2sV=^!-~*D$qfqJD;ioBY(~H58lDSzzUSAGV$AyNA$>f3KZK?u!%ShA$7g=!UGbnm ze4b6pT-+v=O-6NvK8|#P94tGkSI_Bi7|5Mk4L^ppH@ZzL61E$hqe6R@NOyR}c>>@A zZd@F1PO$+IjS~Z!1p#)p^f#&> zQC-YM#hP+r(?)#cpiWED^7muHD2U6G@CG8qJt}L;ULBbO!mxp)7@< zX@3Dx+&dE~A&K!7&pn<}L#p<_guim%=09u7$X}G3)%wCRVBnN`VI0r#?=~3|lyh;d zQQz3q3sjb>;6Hx|3ao1?2O4#-NNW4Wtjqt_|1s_ z0ob{Nuk{En0rmHv#Mxg@ihLu(tdxrxI>J?}S&FM?&(tpe{71S9XvY&;D0vaX ziM5(S7mTt}z9VzL{^dm&hw*9b`v+^85)laC;O;*8%^o z#K(YC_v-p6z*w$Sy#SEL$d3@VcH$^*cKwRj$=OX0l4W{ z;BIgvJTY*kCS|T-5T59No>6HvW>~8CeZ|mxZuK7t?3khO1d!AFhaCXC;3D!b*#BRg z094?|1bhNy2!WK)mGbrPOd`NPfLlH;)G>2S?yRGQ>gt!2Ya7$mJ#~(=7=3O&3cbFr zJW=*EDIEeUu}5zJ+VP%Daj%k#Z;9MbM?60b&8#;P-F?V){tQsMCvpLJcWrZ1buxd% zy3d;jqNoscPjRdI35GfdIZVClPZ#0$kuM=1kCceUvyJqxuI^pxi>_MykeG9+C zDc{lxT#4L+2~N#vSf4l;g0u_s)s$${C9cC^58-8o)hyz_07{XYxs@*BqD5evbf0Gb zsQhk6GHpFsB>w(FOj?rsdoA^^7i9U6iN0W)7b@y`)=K2F4 zoqw#{^7mG}c$h^LdbaN|+orv6otEa*sl>anwKWT(C$o7mb8>T)4#uM-_yvbguFLfq z*ZF($^Lv1AKxk?Z=ZW{QM${_}3f+iJs>0RSB*<+!rn8r5`uN=CTs=r3>|XZqPLYi6 
z^&Ha^%Fle-v|dGfiTbP%6WX|r;$A$A=PWxCGGC^ANv2o1pgMVHvMWjNI{4KZ=8G~3 z(JY-30q)(3EjJZR?lZ|sGIrV%b&BbD_GuHJ=(v%fIHHL$%M}}GL<~hI<1z%Yq295z zj*H22J7AJI6^i=Sk%e}eSXd@kc4@XE+ly*&jS8{LheNm1kMx$`wH5iQ+#j~~U zFq6KBCo4U_6fC>{%mm7=WRjI>9vCW;&}t{o^jE7kpW$E%?6FWkXmIBCI}Z@Kl;I~X zJ!^@}?i_4%P;>lB!K9$YnrNLJIRHj~1@hw1;q2y61)U7jzla!U9@`(_!8JK){dA_x ze(l1`EQarm=%tJeZ~=IYlzyu^JTQ1cS`{%|x#-@)X3fPSw}uAZ2iDO+<*!ULnfX@eM_hvR?BTYUqwzz6O!4qFeLy?`^*TcgfFIwxM& zfvAk!dt5cjm@+r~Y>CpJsNBE@yF_x;I+W+vdQRwO4yEXs{SI3lt@vrVsmuJjcjlJj zF6QlndB@+NrC{b=nO9K?@WgWv5pbn0L~-Z#{J69uKm|7~yyp{9pA4?Y+!6T@H9o&f z_JOO~8~mmc{OTGIRFczj6{HLxVLrNji~TmrrNiN-%KnkY?nga9=9 zHLtbeBw*|<{gt*IMhpKO>&e{b(PO`q(Dc0R`yHweDNyt443D%KU#Un0n3}EZn)W-q zR?y_7QR4;mp|1hqP_Cgi#}LOv0eJD37%7z)qvO2q{fS6$+gGsF1RwYyGn7~EZHsfL zmE=QeJZR!V!TqNmk~LSmy%|CgoS4w{!<=SH({y~mhE7Dqs{`X#zccbi*e-gKcgsnPdHd~D^MG{g=F5;yTgj1(i}$X)*GE^wuiMffv>mgH%X z!%3SILgjfySwNmzSDn`FC$_YO;Ns~VTp_OFpAM2L3xu}V{ty-viVc#zsPz>4=lPbO zAL^r>S$}V?*WU1TGeosHS0C~7rJp;r$JJ*>mesPy2Ctfv2KL5l1-u^;kbfcgATUHN&#*^g!(;H^@dCA}PTtD}Kc|YQO)J(b+m%Xsw z*vMn37xhfj>(=SRD@f69a91${5Ev%I;!B8NuYi~zaPN^c!eBlhWiws{;`}RuNBwFD zz#a@AvgO-$9s#NySgOO9Io4&)#?nbq$G@>wo^pORIS>?bMmgI)`n=QLON=uD1m-rm zNLV=(R6I;~9Q&xq;lbqB08oWNNh5sh(JC2kq0Gg7TLTGsB}wWG*|jkf5xe5 zvR1?WC;s?<6*K$?nK@$r3319>0$RU+&9-m{>aE zSpy&NMF?tqF!WeWRJcQLJbO>G$8Kz`{Ui$$6VsXp0GGC%fta!^lh*Bax6PRZAjZc6 zeB}Zij!~SaXKekRab`boS)^)j8P$Uc8-MdYHSUQ`!ylb|=)0Q_mB`&CCMM1s=9in; z27u?)z=p{`_zDCFwXdyvkkpAiG3LK)v;d^FX~ckJCholL!uZvZpW59S%teH_3hxz# zp44bxF^`pR$L#IX#}vle##s>94M_l1Tp>dnS^F$iB;+f#@s*5{zC8Yx#T(;-^}nJvPDuA5Wf{k zO@G!dEa=Js@O|W;&~OalXqXMl5}b;kLuxPckXsoeSs@Aj@Ks==_#QuJ*J&bG$}N6g z;QBUZtpRYjnQVCDiE{tshee`=Ghb~6uA0Iu8YNGj#*ep|M>mwufK&YH1c-d1Z@0TV zknm(|4oE;^E&~|dcrH=cPipM+?IYRWodhPNxR&n}fNwY&%#{k9uM-e$#zc!7r+73x z{~TWa3?Bz1Q9x1g^2Gz3fwOlOC~T;pqO>$#U^e#yjnknzz}Z6xLasrlh3#w$;}Clh)3Ll*PZ z7<;}o%$lr$$wLg)c^6Iw2iSi%S|drAv9}@>xZ~%^GkLf$HB7&3Z8!t5z*aqjtKGhu zGXji`oC&6fzgb5Y5}l^zNWd(k53w36n3WU1Tf4^oMPY2!Gelep_{5F@LAEi@^Exo1 
zY;2*&%McqH=0<_Zm!N;hTLAY;*TkQB>~EA~^R~fae>np7p_RkrV;Vn@ zX_ij~S9B=WD}3&E(EhCkj{;WQU)6WYTWrm@*iKiZ_!%2#udFCs@*H>YT$IK~Ht#H` z8WZasmiFApsMEa-h#VEj0GUtrLkC&C3X_aPqi}^X=L0|u7H4{Kpws2= zfK1TOpPX&+684p6eT#VskVhJo)*tNWTUH!iU_x5L1L$wS%T?^d2?vT?t22 z84s=+oI_@tag!FJW4KhSsjLVxu(i(@ds>MOsJTb?1ugE1o}?WWQUq4Jnp&1bWY(}E z;|wO+Lme}bcnVx^abknd0n1(2+0?s1`=^*jq_1N238(VUiwxjjpO~Br&Bo+Y7>ienanm4psb@lO!rONY$?Ln?FtV8 zR3tq2%1eR!;s%@n<2B$;K?HRS@D(h1mKT5|OYk0$vqpuFdoj#>b!zajR1g>bXU!Oo zg&CyWiL_${jRs+Ji)Q1V%5Q0V%x0AO^ZuK^3@uc8b^zsi8KNRv$yupi^00ZAqx+fX z^wUwp_<ftn63z0^JDFnk+W zO;@uOI3yZo>YYuEmVqtR$piaxC*e>xO1B1KC^g}JoK;nW_pd1w8Ff!&sNxWmoP#8XAtw=#oCPIF&H|Ei4oXIloO2QoWdsHg z7;^rq0pH{K);V{ryVkuw=FjwWbx(D5)${CU?_C^ASeAo}<>5csz~tXrqkWJz%|6uM zAF8zsa^}BBT^^XUN0nJtOn%Kga1|(L@~dv!1iZ<4n>49d@4p`PPz$ZdQqesWkbiL* z>!TuOwasDvMw(ojh^`M0{%$Y;kCu7MZ)#Upqb};>O5Cg2K=ofqa?hUU91R%sn}B8R z;$LGTx3?+aYQNRgL0z(0F?9&7a;R3|2#;0SY?Vp=at87a7?AwFVC4gEb8iSq8Hp;t z8F`*2_P!+wn$0KH8o8hF+hb;C*5~{GzK6K5y1b%c$A!ezx~R=$@*BIk!RT@^6oc@V zlf&efFOU8jWhKf$XKmh6Cax~!3TT<~jxz`ul>>WJ;pi-KL-JScp`c&C<7BDH<-Z=C zdUmzrRvJ>}hZEN)TI29hvecYVZ$|e&_gZq{!K+`AedFWf1#00-`mbHK-QQnJ!h?eH#gHX9!>u>39}d#FG9%3)+(5$D$jkzi*yxQ z%bF59EHiKX>tgX^#Rf7CG|(b7F-&ceIcq2n z0?f4JM(O9l-1S?Wtl#_RZ2UdCYM;*7dp`}f?;S%uhEMpOoh7|4>9c9AO@&@4+4!2K zWo4wksg$ie{nu|4%D%mUxA5t<$cuzhUVvdj~EHv#LzR%7$A8>6h^PFTTItI50L)dJF9CtY~=RN{cQ zUB1Ch9acu|eW$kI)%|j(-SIvvLKrBa1}1=&@dUe@mo^%aX>jEwXCyEl)QT9JoNLQA zP-&x;C1?NfubZyWQqPbJLzK+SM_HPs(;9+Ulm&z0n-f%q<(&Ul2qzG>1oCdRY6lzSFBIe3in$+p zU}1ywx*v`M%7Z|GV9{P7AbOiW;#t2BD7&|3wJoQky3tO2`QBocX65Jp0rXOC_N9Hh zde2-E)Xfz}X^L{@?dWE*wMeP?o*)5=dn9_iEc_ej>gkGcEYR7$ck)X>dIvpzy0}KeAP7a> zV4^DW_P^RHLoY8w0Ky+efesNA=Ru(s$OxcHo|o&-6GB5B3}C#sPS;#QTC>+FeEd$^ z?BPI&bjpR`)_^d{g++jYPna72)@t7Ylw7E5gJJO3E%liiJvcuY8S&}iDP!XJ zzkIq@vKXl0nvW`Dr=Z7jWZ%0`}8@;<&JJ7;y3yVb?SgUJ1-|^NR7Ad;Fvq^*|8$( z#&nGZTrdw58z&i|oHg7Nb`6iZm~AJfr>EC>h_V|U92}aq?Xq==63nYfu@G;&&LtOT z)z>bSzP{sNVYN`bg-|teZ032D?DEYR){w5T3>q_5H7K>4e^~S@nWnj*4{}>D8&pH> 
zY>CBx3v*hU(*9D=m)7+;4r((Fw3(qBH`w1zOy_en%k3S`iw$@;$GoFsTnuI9fsR9_xzY5b^N0fK?l^)kb0xx|IGp5tk5v` z)}6x%3mR&mI?L~$3uxi3fWs?4zD=r~hMS#i<}jVAr-Kq~1vEnX<Bv=Tg?UJ30#T}Y`lie|?*ON;2OT|;$%DZC$uFj6AQb^CHP(AOcGy56Hij$_o z3s?uE*pk^Qqifmrq$dGgyUm38EZ~G(JC2*@caUr@3qB7eahDFo;>8!#Y)Jfh&Nk%~ z(#y*Uk8gHQ$>hav{eD{Av{>(VSNi(iw^wzlt&XcUTi-ryrQ}OhFBY|#(CqaIr{zP$ ztTq#zNOd%%%ei_Wy}j%keIqmG)p@dpSE{3(`_7{qHf)|~hjZx1ivz93va7xGNh=BsZAHe$n*1AfbSLglKvV#-3m><5LN{Yck+ zi*k1Qxx8Qd&z4n>+k=_NK%$kf4~~=e;SR?dfzr`oEtO0eVj1rSRqca$wh+`-{*^OC zkYK^Rknc;>Vv%s1mi@tWI)>GcFjGD$9b#a7$SV-Pp zQ?At*z1Xc^lZ+WfjYb>0)rcHyz%*(m1wtFzwReMwDiUlu6 zk8uZ$MpiF_8afrel4(Vi5y|pZ)=q=*Mk=? z>=!1`97+$O6a=r}c~P5A_QZOpx)nMp)WiwhaxEENUo1w;)aY*P0ZHalAYno2X3tf= zcz1|&lWFJ&(in|*UVz6-9Bxc7zE?x7-C4j3VqMB6C<1TQ+!FjymAZ_Xa=qZ_5CTa; zua9p>&p^5Ant%s!+yVd|;tsW&uLenilxlvVc7G`|rhTV6;Xq71?~00+64Ua|<>yh) zr(6+5sGZ2Z)^gu) z5PwolzszOhrAF=kUA%cNpOy$MUf(_YNFkokf4@h{5KVL@5WQ00t?m=&LZh`nRLmzu zGbrB`Eb}dI$T}eD>&q`$nld6_T$g7gU;Mtr( zE@OpVs*0q}+OJ&VXKrpwSkvk7<^qs!dLK=mLYF}tAaF7zy;_ZB>s%zqyD~L#l>Hnl z`t?K-gj4bVD#j^R*Ok0sz;&T`F=l6&ZqH>S)-bHCL4&AB@oso!Nbw5QdO*6g(q&i9 z1GD7sq>xx+N6pQH*#_pzK4R(b{xm0<6Q^bq?^VK0t2m042l%lk@|*Uo`Qd$Ebg4)3C`DU>VI15j)+!!S(e4n8+N zV%M>YMkbG_`^vY$bNBJdm1}A%%;4&mnPc1L+S3T0vpQ@6v@82svTL(W_hc;wJ^Iu4 z`{-ipNFODzVbSeyV_p0f_UXqh1+;1r=)!qCJ@kEYx_U) zWIxg;t6+EC=BtqWEI)T#H-ksBcRE$!{+qKMqF<(-XuD65@*CruOcn|qv6{Snz?-72 zpCK=#FA}EwUI2zT10-=2by23RQl70KmVE#2+Wdw<)QZR}PGQze!x(RL)}8U;&3!#Z z#b-x>@Y0CMs(sS_q~$7u4h|}ZjR(&YMr(>OllL|cOK=iB#E5-(jpG30djr%d##7%n z@sZmIM5tb?kD_A?G>Gc%V8c$F?&s`DD{AzXS8}zz3zuZAFpwB5?WoFb9N=&)==7~0 zSR2jLs^;5ZD*p22%iKaIXQc~}WRv~ARbhXKxNvvJoM$Ioow(L8qmKqLbrmY){&eHO zR#MiA()z?0`JK-Hy**4}J?lDBK`1It;n@>CYK9vM+}&-ISM&Bk<|0NMeXhq^X0x2E z&#mw`&@xFxyt##X0jwJ2$dht15goX!7A|FKLj$e}19u44?BUmYsE|~AYseNVj2XG% zr&u1uX5LnN4nfKd=JtF)9Z5?wx#66|r|k{v(4~C zb^DWkDs2L95~k=W+~gzIWNboclP;i+A?>mG=@ zK)B+RlHa>$#k~9Dve%%ca-F8ZMmCH48Vb;DO!qdagkY8_l5u9nMJJE!M`@2&?RqB^@s!=SIF_@Y~)lQ)f zk3!(CvAJF@aB1aD@r~@#0@F#S_ff>1zwb&A;54#Nk-$uHcB&qJv5Ppm+^GmI0c0@G 
zR!FrW{W~uSjD^p(iv5@S?e;~X1J*W&`Y1^*dY0Ca$ZDQ@%#_lS8Jp@L%2LKdP^Gc@ z`rG&a$hKc_+(EJH**6ECZ5k-wQ~N+glC4i8qLU+(*6319?*U^p+2$*g)*PK+ejyU> z^5P+k0}_i5(E$^XU8!VmrVR(R;yLw>7bYK(#3ne1i8BZ|%YaD~he=`|C%gUv11g$0 zeAgxcqc5)<`*3*+uVk9V=A#;Q(h_SBQtg>-#@fanQ`XuY^dfLph!%YPr4au3AZru3 zc@_x9M`Tk`*^QLJ5Ze#>bLuveY&;jbqF8YeRQMDp5K61|;HL_+ z$w#A1qFPb4AycVA2`&5fU(MK1ANwSXJ^xHCH>8e0>g0?mZLMt*`nxOlT2Z-7FcB;5 z?q$xJKF!Tm=4Bv;nMiAnP>M`;3zCJejtT%bP3qC06k@q1tKMVf1sFW~k9M#4`LZjW zM+`<`KJP!yfj6IzDfv+?lpY;bh+^AcNkR9|1{#(7t(v;y`Iid&Q@}s&<2M1944?_3 zdCbgJ{*XE+w}&Q{Cw(-8G7~dSyW^DPkh7C29y66vVHkT2=s0W=XU#;sJ#Hn;qP8k? zD4O<}_Z)ujl8kJ;C=P=J0|B^An4z6ZNh;Inu7aMZTd+-TK*puE=Mrvtp*N|!y|*nWLFe(}n2T!-|>gc4l< z(f`}GEQjz;3xyOELyvYcDEv3&YA$PQBlTr2t177ALB4Uh3lD02^m{q453&OLjDRTt z)=`go2Wfb0MdTX}VMR4^Gkj=dbX7AUpT^%?k&6$XG8bpRG(MJbft_CVoPm%FaQR8k zo*nnX30E42DJ9|OV~c;b%Ly!KM9azD^L&5`Lz;P$_}lRM@4zKW@jS8G`ErxqZ~^R? zcdys;BX(roZ0IWS?K1|2Hu3e-tyNuevw27>w<(LF_-8KxznhD63(jSf4c&}U+E-Aw z0F)c=!VyUki?(cEGmi;ISSaB+!5~!hZ@Oq&jov?VIegeEh~-vt=U7xWpe7L7P6Zd zifUNY#_5?J8#m8L@%6s$cyH2lm;zu#FyDI6baE?liv<^qEPhpv#(Yt{%XPKrs~d;u zi{tb#17xIsRQxAO7?AND>-&%Myc2o<|M381(f6NDfWGg&fQ#71f>4rE`+x*!06{+& z6(jPW1}W}8-g!<`P@)HNPAGeT($;^Doa+An`;k(nW3fxgTD|BvwaN9T$_GoA2H;;I zLnt$1#J2SUltdCQ@xVW>Bl|JoQE9#pU&F+E}x}ecG;@ze;=@Gl;4#_&DUz%4*^*~06|x*-$`|*2V*kW zKi{98d3&FT2VgBp7cz^bApq64CoTx%=e50GACwDPru)#jqwJ`v319ke^a&?9Z_ zDaNU5FqpXQ@??O;j}!1}g3iX&adT0EpHJ97bFRUg_$s~iIyTHC9-xMyBou4_A5k2z z1+te`z?74JC<4i7$O6QN7l1xS3TQQA02%I;q*fQeCK;Ys4g~~+!@6vGgA>zYouyMi6=D3B--O9xx!f7rDFz@lYaK7rr zQjIasAD$#mi)v2EM%tCgn0ND$H-j{-TIuF}=hR7!Y4g+tFC1tz1OT?+wQz%NG1qmm zRwWh9@LyEsV9c$r(XHd0@jKM&98U+uFLL9Xy2K{M zLF@qTf1GiqsI(nVcX|xz!Sm=U+LW5l+OWe^<3PjSJm#Greo<0%>i(76*2P$Db)w> zVE94Y204>RBSR?>(;Ur+JMfF)YpE?sfgVOUZ>+wyw4WHhX6Y?x0taHBn`Jn5N_f2u z+4^Fjs2+SJn*dBiHx+4&mNKEiv%Ek-mO5 z|BTKV#6Uz4>3dlQAnQfM@4 zJuoQE(`-7u6R)f=tI`*H{KmGwzzcOgGEC>R?Z-ADLk#_VNYK;1wmbWGPrEump^da} zOc6%Og>ocpt2Z1@jns8mZ;*ZnuFm_tNECfSLH1 zIN1BW(^)@YEls3APH_Xwz>o7K{LHd+uU)$)c@&_Flxmmz=D1vKG~mBmm&Gx@sCj`C 
z3=%y5Y7)f%cX#Y3|F?U({HG0P2I@o$Yior|A&zV2&jB!dF>makv-5fP9nz)alKSL~ z01W{jT6-QZ5g_<{B(dC^(n0-53B3kjdg971pau+qs#-#BvcaIR9%njIa1+!%fUXm~ zEC3KSts1L!b>#%=yLGBM7;_&q!2@^e0qvOtXb}T1Z-qKEvnKfE$cemlnB44e<50YP zz{u`;jP--%5b958zybya6zd;@vWdLjU@*U@3lu)IE3%xf*8}yD*1EcfMc{vMZ{2?8 zLrMdHfXr2u`hqSFgRT$jRSts7Ye?&vmSAo|`_>RTqjE}C)gbypQc_Zbu8Y7oJTvSW z7xh}n>*dL|!qOq5N%qw0PhcUE8&*7<=@<=mUQr)*5a#`AWFr8!_CZJQI0Vkm^(ME1 z+G{AXJtB3ur!<01`&dKx+BObsvyzM0un<&;6Pfkzgei^Mm}??{mTR8JW~qwPVg5rF z?u5a%{f>V8`UC3piU`GDz>I4BIrFD31EE*IuzK!+qD!U4V0pL^ct4FE%sB3v15yw@ zM1^9i?I~T)34fSxz>8?|ZzK9uG+R(Kdazv~H(GC?yOn4XRhvPwI2q?r+AoWjS6{)Sn1!V-4!a`Wf{mF8iV0(pYJ9w?f+2-z$9daoq_T zaW32T*zY25bPT^X%tE+3N36#S`b5oZ;ayg{ky=qo5EP$zAgqrQ3X|Lw4?#NtC1W_@ zC5}tqkm1cPPv+F^yQRsr))DZj&ZLTs!=>e0`lT*3P|ZT96$1U<(chsV&;lRN^drlAL;FxiN_dEqNg59hCBZ;B^H2LJ8R01xx(9iDlw$ zpQ8sgDJ^rh&zVP|Lg_93KC&9a9ZC0XWo(JFufi1+EvC7YuCRuLok3)+jc{%BWX<`z z%7Eu;-AOj%WZg4!>;B=IZ(Zhl0Y$VE2NvgcU$smu@127!uy0X6V;p*9&rkCfX_NJo zttPw}a%hh^T;-^@`WwtW;F0`m$K|18Ag$gZJYNeTr?tY&sgj5@KPNR4XE+<7%0pXY4a7Qkf4r;Q;@MV||2(kaPPzPacxB}x?nm~EX$DtG8qXOEu6twA zh=wV~ukh28t*>Mv>KEpYY6O@q2!eJCZ_2K(Tz*=&kf`2$C+v4CS~y3;3JPImeZEBT z<+Iuw67tuRH30x*RsMD-9}FIfiSU)>ZqHk~`Q^1k4Wfka(hWPeYs*({-zFVl9OIDT>Iz%Qf>FcJtbB4)7P=iPzBAkB^7%jCA_g2pgA}{#w zFzU-`1wE|81d%IscmVg9wBI)GtM-}TICpj?zISPhEmKI?TdIAZ)n#Sl&}t}B6MiUA zHCKb7e_)q!-pApf=QV56RZ%7cN^WA8=C-{}ww1=jZqdnO_tcRrXMkfK+!JD+#=gmR zPBy@)E0mTNj_C#al^6;;&Azh7(0YIkX;j}UFGhRyXy7;Po3?XacuHfxy0LguqFJw% zG`Z1x)ORL12Hrz0N6tnjdY8Fhn%}YFAs> z`lnlvyysMW$QI_>^PE%Pt583VuxG@`vDnB)*nL3ywvof(|NPVLl+3KoF!EOvIyEj0#1F3~Qicqwk(@&f&LM_9Xp8bC@M+0Hi!d}Pj2 zT_@TnCEanZMxX9~f{#E_?XiAj95>pQW-xi@TkyP8_4?v=%Tu`niaQJ!w6dvnSS=p- z(5~4`yVLOo*C}=u81e`yJv$hwcKm2`#{bQZ?O;r+%*B@W%h2NB4X_|tn8M`yJBn&U zMU-yri(wjGfuR{!&6mZx#?{yM>u5mUfKsr~b)n{kyw-Cqd!$RBNF}kt zdjnfTHP~9ayk737aE64SgnNkol&UgTRpZOBeyh7k%#!a+f{m8-vFG(;O&kPrGp%Hw z7M|@CuXJnk*}9)v#3-j|ob;rrtb4q6pybCNYt5Jl_6)kVerC|DWSINa&UHUHKJeE# z8^{^@ko%`huLR2XO>ikE`|Pszjp6seVfgiK7rDd~UFUD&SC^e9^bd4aLWHx;ovZ&0 
zFxE2j&`9l2mi^x>pri)Um>A83kdydev#**@I2KpT$mSFKc+|{MOLV@lFq@| zdyHXgm&jg-s9cvs)crsWTb=)s%<3=UQhmqIu5rfT=dRMOpbm<-dFQ6wBfa}h6VmLz zYA-R%43Wm=oOy?^iH&^v{ZkIcW??y9a^|F;?|5@8eAq7tx3YHXb7Vc624I*Sr4F-D zqgt!#w)scTn!QXyb}~F^?@tFk|9lT+*ER))MLYdSnSy%hq;@yy0ihlbOZRZBTjz2H z+4!|;z|W7{rQ@{Dzb<4gSX&y7853A%emwt)v5#vbL1om2)Wx-Jgv^v5m=LQySqOL| z%xFF3lJVRHb_kOd+(v3B-kbDkSbk_naxm^HJad&<>~kO3{|LlMhew;g8%uOyzBqHf zS7=3$Uj9L=ZNvh01g*yEAH1zd2%fGLqkioS6`@E85D#7`iqMYEBR98PLL57Tgen8N z)pSh-IA!_6ST$_Too7EJG*hpCzNHZ-!ZWGyl88Qmys7WVxqfVL&{oiN&>^+E?=_;f z(_DA`Da`M~@Ni&Dt`J*duJ*p%ZZ+rL?(D|FmVN$U_#1EMAG;$1ls28TNn`)vj)jSdk-3S@XRM&v733ju#@YUYnp| zOx!ly4ZUW>R}uBIzh3ptEZ$Ewx4h*cJ8lR1nEML0P zwckbQ1`r`;@OGUEQL< zyq4MDyJpWnjTYDlmc>%tK9KL8c(WxCzZ90ZnWIpnP1s(!JhW(mSd5+69dba?L%=Y~Hv76I7Q6Z+J{jz7@X&}W6l8Qc9a WikFP;9JB){mIPe-N#0`}kN*K`(U#c& literal 0 HcmV?d00001 diff --git a/doc/source/devref/index.rst b/doc/source/devref/index.rst index 352d606a1f1..d2b263baa5f 100644 --- a/doc/source/devref/index.rst +++ b/doc/source/devref/index.rst @@ -21,7 +21,8 @@ Developer Guide In the Developer Guide, you will find information on Neutron's lower level programming APIs. There are sections that cover the core pieces of Neutron, including its database, message queue, and scheduler components. There are -also subsections that describe specific plugins inside Neutron. +also subsections that describe specific plugins inside Neutron. Finally, +the developer guide includes information about Neutron testing infrastructure. Programming HowTos and Tutorials @@ -51,6 +52,12 @@ Neutron Internals oslo-incubator callbacks +Testing +------- +.. toctree:: + :maxdepth: 3 + + fullstack_testing Module Reference ---------------- diff --git a/neutron/tests/fullstack/README b/neutron/tests/fullstack/README new file mode 100644 index 00000000000..adbbd70f275 --- /dev/null +++ b/neutron/tests/fullstack/README @@ -0,0 +1 @@ +Please see neutron/doc/source/devref/fullstack_testing.rst. 
\ No newline at end of file From ae8c1c5f80fd4fb7b4ab116677f4cff988c67cf1 Mon Sep 17 00:00:00 2001 From: Eugene Nikanorov Date: Tue, 26 May 2015 20:17:20 +0400 Subject: [PATCH 105/292] Catch broad exception in methods used in FixedIntervalLoopingCall Unlike other places where it might make sense to catch specific exceptions, methods that are used to check L3 and DHCP agents liveness via FixedIntervalLoopingCall should never allow exceptions to leak to calling method and interrupt the loop. Further improvement of FixedIntervalLoopingCall might be needed, but for the sake of easy backporting it makes sense to fix the issue in neutron before pushing refactoring to 3rd-party library. Change-Id: I6a61e99a6f4e445e26ea4a9923b47e35559e5703 Closes-Bug: #1458119 --- neutron/db/agentschedulers_db.py | 71 ++++++++++--------- neutron/db/l3_agentschedulers_db.py | 6 +- .../openvswitch/test_agent_scheduler.py | 11 ++- .../scheduler/test_dhcp_agent_scheduler.py | 9 +++ 4 files changed, 55 insertions(+), 42 deletions(-) diff --git a/neutron/db/agentschedulers_db.py b/neutron/db/agentschedulers_db.py index bac33a78f6b..61eff9b07cb 100644 --- a/neutron/db/agentschedulers_db.py +++ b/neutron/db/agentschedulers_db.py @@ -270,40 +270,47 @@ class DhcpAgentSchedulerDbMixin(dhcpagentscheduler agents_db.Agent.admin_state_up)) dhcp_notifier = self.agent_notifiers.get(constants.AGENT_TYPE_DHCP) - for binding in self._filter_bindings(context, down_bindings): - LOG.warn(_LW("Removing network %(network)s from agent %(agent)s " - "because the agent did not report to the server in " - "the last %(dead_time)s seconds."), - {'network': binding.network_id, - 'agent': binding.dhcp_agent_id, - 'dead_time': agent_dead_limit}) - # save binding object to avoid ObjectDeletedError - # in case binding is concurrently deleted from the DB - saved_binding = {'net': binding.network_id, - 'agent': binding.dhcp_agent_id} - try: - # do not notify agent if it considered dead - # so when it is restarted it won't see 
network delete - # notifications on its queue - self.remove_network_from_dhcp_agent(context, - binding.dhcp_agent_id, - binding.network_id, - notify=False) - except dhcpagentscheduler.NetworkNotHostedByDhcpAgent: - # measures against concurrent operation - LOG.debug("Network %(net)s already removed from DHCP agent " - "%(agent)s", - saved_binding) - # still continue and allow concurrent scheduling attempt - except Exception: - LOG.exception(_LE("Unexpected exception occurred while " - "removing network %(net)s from agent " - "%(agent)s"), + try: + for binding in self._filter_bindings(context, down_bindings): + LOG.warn(_LW("Removing network %(network)s from agent " + "%(agent)s because the agent did not report " + "to the server in the last %(dead_time)s " + "seconds."), + {'network': binding.network_id, + 'agent': binding.dhcp_agent_id, + 'dead_time': agent_dead_limit}) + # save binding object to avoid ObjectDeletedError + # in case binding is concurrently deleted from the DB + saved_binding = {'net': binding.network_id, + 'agent': binding.dhcp_agent_id} + try: + # do not notify agent if it considered dead + # so when it is restarted it won't see network delete + # notifications on its queue + self.remove_network_from_dhcp_agent(context, + binding.dhcp_agent_id, + binding.network_id, + notify=False) + except dhcpagentscheduler.NetworkNotHostedByDhcpAgent: + # measures against concurrent operation + LOG.debug("Network %(net)s already removed from DHCP " + "agent %(agent)s", saved_binding) + # still continue and allow concurrent scheduling attempt + except Exception: + LOG.exception(_LE("Unexpected exception occurred while " + "removing network %(net)s from agent " + "%(agent)s"), + saved_binding) - if cfg.CONF.network_auto_schedule: - self._schedule_network( - context, saved_binding['net'], dhcp_notifier) + if cfg.CONF.network_auto_schedule: + self._schedule_network( + context, saved_binding['net'], dhcp_notifier) + except Exception: + # we want to be thorough and 
catch whatever is raised + # to avoid loop abortion + LOG.exception(_LE("Exception encountered during network " + "rescheduling")) def get_dhcp_agents_hosting_networks( self, context, network_ids, active=None, admin_state_up=None): diff --git a/neutron/db/l3_agentschedulers_db.py b/neutron/db/l3_agentschedulers_db.py index f661dcc6221..59c39771852 100644 --- a/neutron/db/l3_agentschedulers_db.py +++ b/neutron/db/l3_agentschedulers_db.py @@ -116,9 +116,9 @@ class L3AgentSchedulerDbMixin(l3agentscheduler.L3AgentSchedulerPluginBase, # so one broken one doesn't stop the iteration. LOG.exception(_LE("Failed to reschedule router %s"), binding.router_id) - except db_exc.DBError: - # Catch DB errors here so a transient DB connectivity issue - # doesn't stop the loopingcall. + except Exception: + # we want to be thorough and catch whatever is raised + # to avoid loop abortion LOG.exception(_LE("Exception encountered during router " "rescheduling.")) diff --git a/neutron/tests/unit/plugins/openvswitch/test_agent_scheduler.py b/neutron/tests/unit/plugins/openvswitch/test_agent_scheduler.py index 2682014d0af..5f14037fd0e 100644 --- a/neutron/tests/unit/plugins/openvswitch/test_agent_scheduler.py +++ b/neutron/tests/unit/plugins/openvswitch/test_agent_scheduler.py @@ -674,17 +674,14 @@ class OvsAgentSchedulerTestCase(OvsAgentSchedulerTestCaseBase): db_exc.DBError(), oslo_messaging.RemoteError(), l3agentscheduler.RouterReschedulingFailed(router_id='f', agent_id='f'), - ValueError('this raises') + ValueError('this raises'), + Exception() ]).start() - # these first three should not raise any errors self._take_down_agent_and_run_reschedule(L3_HOSTA) # DBError self._take_down_agent_and_run_reschedule(L3_HOSTA) # RemoteError self._take_down_agent_and_run_reschedule(L3_HOSTA) # schedule err - - # ValueError is not caught so it should raise - self.assertRaises(ValueError, - self._take_down_agent_and_run_reschedule, - L3_HOSTA) + self._take_down_agent_and_run_reschedule(L3_HOSTA) # 
Value error + self._take_down_agent_and_run_reschedule(L3_HOSTA) # Exception def test_router_rescheduler_iterates_after_reschedule_failure(self): plugin = manager.NeutronManager.get_service_plugins().get( diff --git a/neutron/tests/unit/scheduler/test_dhcp_agent_scheduler.py b/neutron/tests/unit/scheduler/test_dhcp_agent_scheduler.py index 9faa8ab5bfb..5ee1adb16cd 100644 --- a/neutron/tests/unit/scheduler/test_dhcp_agent_scheduler.py +++ b/neutron/tests/unit/scheduler/test_dhcp_agent_scheduler.py @@ -248,6 +248,15 @@ class TestNetworksFailover(TestDhcpSchedulerBaseTestCase, self.assertIn('foo3', res_ids) self.assertIn('foo4', res_ids) + def test_reschedule_network_from_down_agent_failed_on_unexpected(self): + agents = self._create_and_set_agents_down(['host-a'], 1) + self._test_schedule_bind_network([agents[0]], self.network_id) + with mock.patch.object( + self, '_filter_bindings', + side_effect=Exception()): + # just make sure that no exception is raised + self.remove_networks_from_down_agents() + class DHCPAgentWeightSchedulerTestCase(TestDhcpSchedulerBaseTestCase): """Unit test scenarios for WeightScheduler.schedule.""" From 75f3aaa4cc42c2c1280f6c578e27e64cff8f860c Mon Sep 17 00:00:00 2001 From: Kevin Benton Date: Tue, 2 Jun 2015 16:52:14 -0700 Subject: [PATCH 106/292] Check for 'removed' in port_info before reference scan_ports can return early on no changes, in which case 'removed' won't be present in the dict. The deleted_ports logic wasn't setup to handle that. This patch checks for the key before trying to reference it. 
Change-Id: I0e2c6d76515ad8e2a2addc8d40451ac003a150f7 Closes-Bug: #1461325 --- .../plugins/openvswitch/agent/ovs_neutron_agent.py | 11 ++++++----- .../openvswitch/agent/test_ovs_neutron_agent.py | 11 ++++++++++- 2 files changed, 16 insertions(+), 6 deletions(-) diff --git a/neutron/plugins/openvswitch/agent/ovs_neutron_agent.py b/neutron/plugins/openvswitch/agent/ovs_neutron_agent.py index dfe156bfcfd..40c1de3c0bf 100644 --- a/neutron/plugins/openvswitch/agent/ovs_neutron_agent.py +++ b/neutron/plugins/openvswitch/agent/ovs_neutron_agent.py @@ -371,7 +371,11 @@ class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin, self.deleted_ports.add(port_id) LOG.debug("port_delete message processed for port %s", port_id) - def process_deleted_ports(self): + def process_deleted_ports(self, port_info): + # don't try to process removed ports as deleted ports since + # they are already gone + if 'removed' in port_info: + self.deleted_ports -= port_info['removed'] while self.deleted_ports: port_id = self.deleted_ports.pop() # Flush firewall rules and move to dead VLAN so deleted ports no @@ -1516,10 +1520,7 @@ class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin, self.updated_ports = set() reg_ports = (set() if ovs_restarted else ports) port_info = self.scan_ports(reg_ports, updated_ports_copy) - # don't try to process removed ports as deleted ports since - # they are already gone - self.deleted_ports -= port_info['removed'] - self.process_deleted_ports() + self.process_deleted_ports(port_info) self.update_stale_ofport_rules() LOG.debug("Agent rpc_loop - iteration:%(iter_num)d - " "port information retrieved. 
" diff --git a/neutron/tests/unit/plugins/openvswitch/agent/test_ovs_neutron_agent.py b/neutron/tests/unit/plugins/openvswitch/agent/test_ovs_neutron_agent.py index 8344764020b..d8cd5a8c644 100644 --- a/neutron/tests/unit/plugins/openvswitch/agent/test_ovs_neutron_agent.py +++ b/neutron/tests/unit/plugins/openvswitch/agent/test_ovs_neutron_agent.py @@ -536,7 +536,7 @@ class TestOvsNeutronAgent(object): int_br.get_vif_port_by_id.return_value = vif self.agent.port_delete("unused_context", port_id='id') - self.agent.process_deleted_ports() + self.agent.process_deleted_ports(port_info={}) # the main things we care about are that it gets put in the # dead vlan and gets blocked int_br.set_db_attribute.assert_any_call( @@ -544,6 +544,15 @@ class TestOvsNeutronAgent(object): log_errors=False) int_br.drop_port.assert_called_once_with(in_port=vif.ofport) + def test_port_delete_removed_port(self): + with mock.patch.object(self.agent, 'int_br') as int_br: + self.agent.port_delete("unused_context", + port_id='id') + # if it was removed from the bridge, we shouldn't be processing it + self.agent.process_deleted_ports(port_info={'removed': {'id', }}) + self.assertFalse(int_br.set_db_attribute.called) + self.assertFalse(int_br.drop_port.called) + def test_setup_physical_bridges(self): with mock.patch.object(ip_lib, "device_exists") as devex_fn,\ mock.patch.object(sys, "exit"),\ From f4d72a8fdfb2c897aa0a0581ca9cb9d9da4a4167 Mon Sep 17 00:00:00 2001 From: Aaron Rosen Date: Tue, 2 Jun 2015 17:34:48 -0700 Subject: [PATCH 107/292] Remove extra indent in testcases Change-Id: I42cb044d75d53eab7ba180954589c718374857ee --- neutron/tests/unit/plugins/ml2/drivers/test_type_gre.py | 4 ++-- neutron/tests/unit/plugins/ml2/drivers/test_type_vxlan.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/neutron/tests/unit/plugins/ml2/drivers/test_type_gre.py b/neutron/tests/unit/plugins/ml2/drivers/test_type_gre.py index c61f07b0e63..ec4d342012b 100644 --- 
a/neutron/tests/unit/plugins/ml2/drivers/test_type_gre.py +++ b/neutron/tests/unit/plugins/ml2/drivers/test_type_gre.py @@ -121,5 +121,5 @@ class GreTypeMultiRangeTest(base_type_tunnel.TunnelTypeMultiRangeTestMixin, class GreTypeRpcCallbackTest(base_type_tunnel.TunnelRpcCallbackTestMixin, test_rpc.RpcCallbacksTestCase, testlib_api.SqlTestCase): - DRIVER_CLASS = type_gre.GreTypeDriver - TYPE = p_const.TYPE_GRE + DRIVER_CLASS = type_gre.GreTypeDriver + TYPE = p_const.TYPE_GRE diff --git a/neutron/tests/unit/plugins/ml2/drivers/test_type_vxlan.py b/neutron/tests/unit/plugins/ml2/drivers/test_type_vxlan.py index 8827fedb4d8..ac271095e30 100644 --- a/neutron/tests/unit/plugins/ml2/drivers/test_type_vxlan.py +++ b/neutron/tests/unit/plugins/ml2/drivers/test_type_vxlan.py @@ -98,5 +98,5 @@ class VxlanTypeMultiRangeTest(base_type_tunnel.TunnelTypeMultiRangeTestMixin, class VxlanTypeRpcCallbackTest(base_type_tunnel.TunnelRpcCallbackTestMixin, test_rpc.RpcCallbacksTestCase, testlib_api.SqlTestCase): - DRIVER_CLASS = type_vxlan.VxlanTypeDriver - TYPE = p_const.TYPE_VXLAN + DRIVER_CLASS = type_vxlan.VxlanTypeDriver + TYPE = p_const.TYPE_VXLAN From 45ea2cf10033e12c63b8ce2cd78b04755d0aba64 Mon Sep 17 00:00:00 2001 From: Oleg Bondarev Date: Wed, 6 May 2015 12:50:11 +0300 Subject: [PATCH 108/292] Wrap ML2 delete_port with db retry decorator ML2 delete_port operation currently involves locking ports and bindings tables which may lead to DBDeadlock errors in certain cases when several ports are deleted concurrently. That may happen due to specifics of Galera working in active-active mode: it may throw deadlock errors when it fails to validate a change with other members of the cluster. 
The fix adds retries to delete port operation to overcome such deadlocks Closes-Bug: #1422504 Change-Id: I684691d59c5ac370d74314c3c91857dc709b2d9b --- neutron/plugins/ml2/plugin.py | 2 ++ neutron/tests/unit/plugins/ml2/test_plugin.py | 15 +++++++++++++++ 2 files changed, 17 insertions(+) diff --git a/neutron/plugins/ml2/plugin.py b/neutron/plugins/ml2/plugin.py index 2f209db7723..535bd86c97e 100644 --- a/neutron/plugins/ml2/plugin.py +++ b/neutron/plugins/ml2/plugin.py @@ -1235,6 +1235,8 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2, raise e.errors[0].error raise exc.ServicePortInUse(port_id=port_id, reason=e) + @oslo_db_api.wrap_db_retry(max_retries=db_api.MAX_RETRIES, + retry_on_deadlock=True) def delete_port(self, context, id, l3_port_check=True): self._pre_delete_port(context, id, l3_port_check) # TODO(armax): get rid of the l3 dependency in the with block diff --git a/neutron/tests/unit/plugins/ml2/test_plugin.py b/neutron/tests/unit/plugins/ml2/test_plugin.py index 21b90976a32..e972b022896 100644 --- a/neutron/tests/unit/plugins/ml2/test_plugin.py +++ b/neutron/tests/unit/plugins/ml2/test_plugin.py @@ -583,6 +583,21 @@ class TestMl2PortsV2(test_plugin.TestPortsV2, Ml2PluginV2TestCase): # by the called method self.assertIsNone(l3plugin.disassociate_floatingips(ctx, port_id)) + def test_delete_port_tolerates_db_deadlock(self): + ctx = context.get_admin_context() + plugin = manager.NeutronManager.get_plugin() + with self.port() as port: + port_db, binding = ml2_db.get_locked_port_and_binding( + ctx.session, port['port']['id']) + with mock.patch('neutron.plugins.ml2.plugin.' 
+ 'db.get_locked_port_and_binding') as lock: + lock.side_effect = [db_exc.DBDeadlock, + (port_db, binding)] + plugin.delete_port(ctx, port['port']['id']) + self.assertEqual(2, lock.call_count) + self.assertRaises( + exc.PortNotFound, plugin.get_port, ctx, port['port']['id']) + class TestMl2PluginOnly(Ml2PluginV2TestCase): """For testing methods that don't call drivers""" From 6575db592c92791a51540134192bc86465940283 Mon Sep 17 00:00:00 2001 From: Kevin Benton Date: Wed, 3 Jun 2015 05:52:51 +0000 Subject: [PATCH 109/292] Revert "Add VIF_DELETED notification event to Nova" We need to wait until the nova support is added in I998b6bb80cc0a81d665b61b8c4a424d7219c666f. Otherwise this generates a ton of error messages in the nova api log as well as on the neutron side. This reverts commit 0ace88fd4a75ff213dc36fd16c1f8e7080ab7d6d. Change-Id: I129c4e4c05cf07d45032fec6f57e0cc17a5a82af Closes-Bug: #1461391 --- neutron/notifiers/nova.py | 13 ++----------- neutron/tests/unit/notifiers/test_nova.py | 15 --------------- 2 files changed, 2 insertions(+), 26 deletions(-) diff --git a/neutron/notifiers/nova.py b/neutron/notifiers/nova.py index 86e4a74088c..4bad6dcbadd 100644 --- a/neutron/notifiers/nova.py +++ b/neutron/notifiers/nova.py @@ -35,7 +35,6 @@ LOG = logging.getLogger(__name__) VIF_UNPLUGGED = 'network-vif-unplugged' VIF_PLUGGED = 'network-vif-plugged' -VIF_DELETED = 'network-vif-deleted' NEUTRON_NOVA_EVENT_STATUS_MAP = {constants.PORT_STATUS_ACTIVE: 'completed', constants.PORT_STATUS_ERROR: 'failed', constants.PORT_STATUS_DOWN: 'completed'} @@ -122,11 +121,6 @@ class Notifier(object): return {'name': 'network-changed', 'server_uuid': device_id} - def _get_port_delete_event(self, port): - return {'server_uuid': port['device_id'], - 'name': VIF_DELETED, - 'tag': port['id']} - @property def _plugin(self): # NOTE(arosen): this cannot be set in __init__ currently since @@ -166,7 +160,7 @@ class Notifier(object): def create_port_changed_event(self, action, original_obj, 
returned_obj): port = None - if action in ['update_port', 'delete_port']: + if action == 'update_port': port = returned_obj['port'] elif action in ['update_floatingip', 'create_floatingip', @@ -184,10 +178,7 @@ class Notifier(object): port = self._plugin.get_port(ctx, port_id) if port and self._is_compute_port(port): - if action == 'delete_port': - return self._get_port_delete_event(port) - else: - return self._get_network_changed_event(port['device_id']) + return self._get_network_changed_event(port['device_id']) def record_port_status_changed(self, port, current_port_status, previous_port_status, initiator): diff --git a/neutron/tests/unit/notifiers/test_nova.py b/neutron/tests/unit/notifiers/test_nova.py index b04e2625781..49ccb975ae7 100644 --- a/neutron/tests/unit/notifiers/test_nova.py +++ b/neutron/tests/unit/notifiers/test_nova.py @@ -290,18 +290,3 @@ class TestNovaNotify(base.BaseTestCase): self.nova_notifier.batch_notifier.pending_events[0], event_dis) self.assertEqual( self.nova_notifier.batch_notifier.pending_events[1], event_assoc) - - def test_delete_port_notify(self): - device_id = '32102d7b-1cf4-404d-b50a-97aae1f55f87' - port_id = 'bee50827-bcee-4cc8-91c1-a27b0ce54222' - returned_obj = {'port': - {'device_owner': 'compute:dfd', - 'id': port_id, - 'device_id': device_id}} - - expected_event = {'server_uuid': device_id, - 'name': nova.VIF_DELETED, - 'tag': port_id} - event = self.nova_notifier.create_port_changed_event('delete_port', - {}, returned_obj) - self.assertEqual(expected_event, event) From 608c282285380704e70228ef549646a61492d2bd Mon Sep 17 00:00:00 2001 From: Aman Kumar Date: Tue, 2 Jun 2015 23:49:40 -0700 Subject: [PATCH 110/292] Make MockFixedIntervalLoopingCall class as a helper class MockFixedIntervalLoopingCall class is used in both the classes TestOvsNeutronAgent and TestOvsDvrNeutronAgent. This patch removes the MockFixedIntervalLoopingCall from both the places and make it as a helper class in test_ovs_neutron_agent.py. 
This way it can be reused everywhere in test_ovs_neutron_agent.py.
I1270cca3699b7874047f7812710f2611cc465493 --- .../agent/test_ovs_neutron_agent.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/neutron/tests/unit/plugins/openvswitch/agent/test_ovs_neutron_agent.py b/neutron/tests/unit/plugins/openvswitch/agent/test_ovs_neutron_agent.py index d8cd5a8c644..d99fb70fe8d 100644 --- a/neutron/tests/unit/plugins/openvswitch/agent/test_ovs_neutron_agent.py +++ b/neutron/tests/unit/plugins/openvswitch/agent/test_ovs_neutron_agent.py @@ -631,15 +631,15 @@ class TestOvsNeutronAgent(object): "phys_veth_ofport") def test_get_peer_name(self): - bridge1 = "A_REALLY_LONG_BRIDGE_NAME1" - bridge2 = "A_REALLY_LONG_BRIDGE_NAME2" - self.agent.use_veth_interconnection = True - self.assertEqual(len(self.agent.get_peer_name('int-', bridge1)), - n_const.DEVICE_NAME_MAX_LEN) - self.assertEqual(len(self.agent.get_peer_name('int-', bridge2)), - n_const.DEVICE_NAME_MAX_LEN) - self.assertNotEqual(self.agent.get_peer_name('int-', bridge1), - self.agent.get_peer_name('int-', bridge2)) + bridge1 = "A_REALLY_LONG_BRIDGE_NAME1" + bridge2 = "A_REALLY_LONG_BRIDGE_NAME2" + self.agent.use_veth_interconnection = True + self.assertEqual(len(self.agent.get_peer_name('int-', bridge1)), + n_const.DEVICE_NAME_MAX_LEN) + self.assertEqual(len(self.agent.get_peer_name('int-', bridge2)), + n_const.DEVICE_NAME_MAX_LEN) + self.assertNotEqual(self.agent.get_peer_name('int-', bridge1), + self.agent.get_peer_name('int-', bridge2)) def test_setup_tunnel_br(self): self.tun_br = mock.Mock() From 913298da04a52f31182c8849efe77b2310b4c2e1 Mon Sep 17 00:00:00 2001 From: Oleg Bondarev Date: Wed, 3 Jun 2015 15:38:50 +0300 Subject: [PATCH 112/292] Fix confusing parameters names Change-Id: Ib00135e1d0b8f334d660de9a80629e084efb13ba --- neutron/db/securitygroups_db.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/neutron/db/securitygroups_db.py b/neutron/db/securitygroups_db.py index f3572fd10ad..1fec3f2239b 100644 --- 
a/neutron/db/securitygroups_db.py +++ b/neutron/db/securitygroups_db.py @@ -342,9 +342,9 @@ class SecurityGroupDbMixin(ext_sg.SecurityGroupPluginBase): for binding in bindings: context.session.delete(binding) - def create_security_group_rule_bulk(self, context, security_group_rule): + def create_security_group_rule_bulk(self, context, security_group_rules): return self._create_bulk('security_group_rule', context, - security_group_rule) + security_group_rules) def create_security_group_rule_bulk_native(self, context, security_group_rule): @@ -436,7 +436,7 @@ class SecurityGroupDbMixin(ext_sg.SecurityGroupPluginBase): raise ext_sg.SecurityGroupMissingIcmpType( value=rule['port_range_max']) - def _validate_security_group_rules(self, context, security_group_rule): + def _validate_security_group_rules(self, context, security_group_rules): """Check that rules being installed. Check that all rules belong to the same security @@ -445,8 +445,8 @@ class SecurityGroupDbMixin(ext_sg.SecurityGroupPluginBase): """ new_rules = set() tenant_ids = set() - for rules in security_group_rule['security_group_rules']: - rule = rules.get('security_group_rule') + for rule_dict in security_group_rules['security_group_rules']: + rule = rule_dict.get('security_group_rule') new_rules.add(rule['security_group_id']) self._validate_port_range(rule) From ea643d9163427b75aff7053d00c97dc84680e63b Mon Sep 17 00:00:00 2001 From: Russell Bryant Date: Tue, 2 Jun 2015 15:20:44 -0400 Subject: [PATCH 113/292] Add sub-project lieutenants A recent patch added the concept of Lieutenants for Neutron. This same concept makes a lot of sense for identifying clear points of contact for the various sub-projects under the Neutron tent. This patch adds a table of the projects currently approved as part of Neutron in the governance repo. I also made a guess on who the lieutenant would be based on reviews in stackalytics (except for ones that weren't in stackalytics). 
Change-Id: Ibce5d6ba5e19dec59a10c88749557509136c4e41 Signed-off-by: Russell Bryant --- doc/source/policies/core-reviewers.rst | 27 ++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/doc/source/policies/core-reviewers.rst b/doc/source/policies/core-reviewers.rst index 808f97de9f0..09a1c451a6d 100644 --- a/doc/source/policies/core-reviewers.rst +++ b/doc/source/policies/core-reviewers.rst @@ -90,6 +90,33 @@ Some notes on the above: * Note these areas may change as the project evolves due to code refactoring, new feature areas, and libification of certain pieces of code. +Neutron also consists of several plugins, drivers, and agents that are developed +effectively as sub-projects within Neutron in their own git repositories. +Lieutenants are also named for these sub-projects to identify a clear point of +contact and leader for that area. The Lieutenant is also responsible for +updating the core review team for the sub-project's repositories. + ++------------------------+---------------------------+----------------------+ +| Area | Lieutenant | IRC nick | ++========================+===========================+======================+ +| dragonflow | Eran Gampel | gampel | ++------------------------+---------------------------+----------------------+ +| networking-l2gw | Sukhdev Kapur | sukhdev | ++------------------------+---------------------------+----------------------+ +| networking-odl | Flavio Fernandes | flaviof | +| | Kyle Mestery | mestery | ++------------------------+---------------------------+----------------------+ +| networking-ofagent | YAMAMOTO Takashi | yamamoto | ++------------------------+---------------------------+----------------------+ +| networking-ovn | Russell Bryant | russellb | ++------------------------+---------------------------+----------------------+ +| networking-vshpere | Vivekanandan Narasimhan | viveknarasimhan | ++------------------------+---------------------------+----------------------+ +| octavia | German 
Eichberger | xgerman | ++------------------------+---------------------------+----------------------+ +| vmware-nsx | Gary Kotton | garyk | ++------------------------+---------------------------+----------------------+ + Existing Core Reviewers ----------------------- From ece8cc2e9aae1610a325d0c206e38da3da9a0a1a Mon Sep 17 00:00:00 2001 From: Pavel Bondar Date: Wed, 3 Jun 2015 14:22:29 +0300 Subject: [PATCH 114/292] Decompose db_base_plugin_v2.py Moved private getters and simple helpers into db_base_plugin_common. This change is part of bigger refactoring for supporting Pluggable IPAM. Main purpose is to make getters accessible by IPAM code. Partially-Implements: blueprint neutron-ipam Change-Id: I1eac61c258541bca80e14be4b7c75519a014ffae --- neutron/db/db_base_plugin_common.py | 251 ++++++++++++++++++++++++++++ neutron/db/db_base_plugin_v2.py | 216 +----------------------- 2 files changed, 254 insertions(+), 213 deletions(-) create mode 100644 neutron/db/db_base_plugin_common.py diff --git a/neutron/db/db_base_plugin_common.py b/neutron/db/db_base_plugin_common.py new file mode 100644 index 00000000000..f0a75ed1a86 --- /dev/null +++ b/neutron/db/db_base_plugin_common.py @@ -0,0 +1,251 @@ +# Copyright (c) 2015 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import netaddr + +from oslo_config import cfg +from oslo_log import log as logging +from sqlalchemy.orm import exc + +from neutron.api.v2 import attributes +from neutron.common import constants +from neutron.common import exceptions as n_exc +from neutron.common import utils +from neutron.db import common_db_mixin +from neutron.db import models_v2 +from neutron.ipam import utils as ipam_utils + +LOG = logging.getLogger(__name__) + + +class DbBasePluginCommon(common_db_mixin.CommonDbMixin): + """Stores getters and helper methods for db_base_plugin_v2 + + All private getters and simple helpers like _make_*_dict were moved from + db_base_plugin_v2. + More complicated logic and public methods left in db_base_plugin_v2. + Main purpose of this class is to make getters accessible for Ipam + backends. + """ + + @staticmethod + def _generate_mac(): + return utils.get_random_mac(cfg.CONF.base_mac.split(':')) + + @staticmethod + def _delete_ip_allocation(context, network_id, subnet_id, ip_address): + + # Delete the IP address from the IPAllocate table + LOG.debug("Delete allocated IP %(ip_address)s " + "(%(network_id)s/%(subnet_id)s)", + {'ip_address': ip_address, + 'network_id': network_id, + 'subnet_id': subnet_id}) + context.session.query(models_v2.IPAllocation).filter_by( + network_id=network_id, + ip_address=ip_address, + subnet_id=subnet_id).delete() + + @staticmethod + def _store_ip_allocation(context, ip_address, network_id, subnet_id, + port_id): + LOG.debug("Allocated IP %(ip_address)s " + "(%(network_id)s/%(subnet_id)s/%(port_id)s)", + {'ip_address': ip_address, + 'network_id': network_id, + 'subnet_id': subnet_id, + 'port_id': port_id}) + allocated = models_v2.IPAllocation( + network_id=network_id, + port_id=port_id, + ip_address=ip_address, + subnet_id=subnet_id + ) + context.session.add(allocated) + + @classmethod + def _check_gateway_in_subnet(cls, cidr, gateway): + """Validate that the gateway is on the subnet.""" + ip = netaddr.IPAddress(gateway) + if 
ip.version == 4 or (ip.version == 6 and not ip.is_link_local()): + return ipam_utils.check_subnet_ip(cidr, gateway) + return True + + def _make_subnet_dict(self, subnet, fields=None): + res = {'id': subnet['id'], + 'name': subnet['name'], + 'tenant_id': subnet['tenant_id'], + 'network_id': subnet['network_id'], + 'ip_version': subnet['ip_version'], + 'cidr': subnet['cidr'], + 'subnetpool_id': subnet.get('subnetpool_id'), + 'allocation_pools': [{'start': pool['first_ip'], + 'end': pool['last_ip']} + for pool in subnet['allocation_pools']], + 'gateway_ip': subnet['gateway_ip'], + 'enable_dhcp': subnet['enable_dhcp'], + 'ipv6_ra_mode': subnet['ipv6_ra_mode'], + 'ipv6_address_mode': subnet['ipv6_address_mode'], + 'dns_nameservers': [dns['address'] + for dns in subnet['dns_nameservers']], + 'host_routes': [{'destination': route['destination'], + 'nexthop': route['nexthop']} + for route in subnet['routes']], + 'shared': subnet['shared'] + } + # Call auxiliary extend functions, if any + self._apply_dict_extend_functions(attributes.SUBNETS, res, subnet) + return self._fields(res, fields) + + def _make_subnetpool_dict(self, subnetpool, fields=None): + default_prefixlen = str(subnetpool['default_prefixlen']) + min_prefixlen = str(subnetpool['min_prefixlen']) + max_prefixlen = str(subnetpool['max_prefixlen']) + res = {'id': subnetpool['id'], + 'name': subnetpool['name'], + 'tenant_id': subnetpool['tenant_id'], + 'default_prefixlen': default_prefixlen, + 'min_prefixlen': min_prefixlen, + 'max_prefixlen': max_prefixlen, + 'shared': subnetpool['shared'], + 'prefixes': [prefix['cidr'] + for prefix in subnetpool['prefixes']], + 'ip_version': subnetpool['ip_version'], + 'default_quota': subnetpool['default_quota']} + return self._fields(res, fields) + + def _make_port_dict(self, port, fields=None, + process_extensions=True): + res = {"id": port["id"], + 'name': port['name'], + "network_id": port["network_id"], + 'tenant_id': port['tenant_id'], + "mac_address": port["mac_address"], 
+ "admin_state_up": port["admin_state_up"], + "status": port["status"], + "fixed_ips": [{'subnet_id': ip["subnet_id"], + 'ip_address': ip["ip_address"]} + for ip in port["fixed_ips"]], + "device_id": port["device_id"], + "device_owner": port["device_owner"]} + # Call auxiliary extend functions, if any + if process_extensions: + self._apply_dict_extend_functions( + attributes.PORTS, res, port) + return self._fields(res, fields) + + def _get_network(self, context, id): + try: + network = self._get_by_id(context, models_v2.Network, id) + except exc.NoResultFound: + raise n_exc.NetworkNotFound(net_id=id) + return network + + def _get_subnet(self, context, id): + try: + subnet = self._get_by_id(context, models_v2.Subnet, id) + except exc.NoResultFound: + raise n_exc.SubnetNotFound(subnet_id=id) + return subnet + + def _get_subnetpool(self, context, id): + try: + return self._get_by_id(context, models_v2.SubnetPool, id) + except exc.NoResultFound: + raise n_exc.SubnetPoolNotFound(subnetpool_id=id) + + def _get_all_subnetpools(self, context): + # NOTE(tidwellr): see note in _get_all_subnets() + return context.session.query(models_v2.SubnetPool).all() + + def _get_port(self, context, id): + try: + port = self._get_by_id(context, models_v2.Port, id) + except exc.NoResultFound: + raise n_exc.PortNotFound(port_id=id) + return port + + def _get_dns_by_subnet(self, context, subnet_id): + dns_qry = context.session.query(models_v2.DNSNameServer) + return dns_qry.filter_by(subnet_id=subnet_id).all() + + def _get_route_by_subnet(self, context, subnet_id): + route_qry = context.session.query(models_v2.SubnetRoute) + return route_qry.filter_by(subnet_id=subnet_id).all() + + def _get_router_gw_ports_by_network(self, context, network_id): + port_qry = context.session.query(models_v2.Port) + return port_qry.filter_by(network_id=network_id, + device_owner=constants.DEVICE_OWNER_ROUTER_GW).all() + + def _get_subnets_by_network(self, context, network_id): + subnet_qry = 
context.session.query(models_v2.Subnet) + return subnet_qry.filter_by(network_id=network_id).all() + + def _get_subnets_by_subnetpool(self, context, subnetpool_id): + subnet_qry = context.session.query(models_v2.Subnet) + return subnet_qry.filter_by(subnetpool_id=subnetpool_id).all() + + def _get_all_subnets(self, context): + # NOTE(salvatore-orlando): This query might end up putting + # a lot of stress on the db. Consider adding a cache layer + return context.session.query(models_v2.Subnet).all() + + def _make_network_dict(self, network, fields=None, + process_extensions=True): + res = {'id': network['id'], + 'name': network['name'], + 'tenant_id': network['tenant_id'], + 'admin_state_up': network['admin_state_up'], + 'mtu': network.get('mtu', constants.DEFAULT_NETWORK_MTU), + 'status': network['status'], + 'shared': network['shared'], + 'subnets': [subnet['id'] + for subnet in network['subnets']]} + # TODO(pritesh): Move vlan_transparent to the extension module. + # vlan_transparent here is only added if the vlantransparent + # extension is enabled. 
+ if ('vlan_transparent' in network and network['vlan_transparent'] != + attributes.ATTR_NOT_SPECIFIED): + res['vlan_transparent'] = network['vlan_transparent'] + # Call auxiliary extend functions, if any + if process_extensions: + self._apply_dict_extend_functions( + attributes.NETWORKS, res, network) + return self._fields(res, fields) + + def _make_subnet_args(self, context, shared, detail, + subnet, subnetpool_id=None): + args = {'tenant_id': detail.tenant_id, + 'id': detail.subnet_id, + 'name': subnet['name'], + 'network_id': subnet['network_id'], + 'ip_version': subnet['ip_version'], + 'cidr': str(detail.subnet_cidr), + 'subnetpool_id': subnetpool_id, + 'enable_dhcp': subnet['enable_dhcp'], + 'gateway_ip': self._gateway_ip_str(subnet, detail.subnet_cidr), + 'shared': shared} + if subnet['ip_version'] == 6 and subnet['enable_dhcp']: + if attributes.is_attr_set(subnet['ipv6_ra_mode']): + args['ipv6_ra_mode'] = subnet['ipv6_ra_mode'] + if attributes.is_attr_set(subnet['ipv6_address_mode']): + args['ipv6_address_mode'] = subnet['ipv6_address_mode'] + return args + + def _gateway_ip_str(self, subnet, cidr_net): + if subnet.get('gateway_ip') is attributes.ATTR_NOT_SPECIFIED: + return str(cidr_net.network + 1) + return subnet.get('gateway_ip') diff --git a/neutron/db/db_base_plugin_v2.py b/neutron/db/db_base_plugin_v2.py index 29ffa85b4f2..87fd1d3dac9 100644 --- a/neutron/db/db_base_plugin_v2.py +++ b/neutron/db/db_base_plugin_v2.py @@ -35,7 +35,7 @@ from neutron.common import ipv6_utils from neutron.common import utils from neutron import context as ctx from neutron.db import api as db_api -from neutron.db import common_db_mixin +from neutron.db import db_base_plugin_common from neutron.db import models_v2 from neutron.db import sqlalchemyutils from neutron.extensions import l3 @@ -70,8 +70,8 @@ def _check_subnet_not_used(context, subnet_id): raise n_exc.SubnetInUse(subnet_id=subnet_id, reason=e) -class NeutronDbPluginV2(neutron_plugin_base_v2.NeutronPluginBaseV2, - 
common_db_mixin.CommonDbMixin): +class NeutronDbPluginV2(db_base_plugin_common.DbBasePluginCommon, + neutron_plugin_base_v2.NeutronPluginBaseV2): """V2 Neutron plugin interface implementation using SQLAlchemy models. Whenever a non-read call happens the plugin will call an event handler @@ -100,98 +100,6 @@ class NeutronDbPluginV2(neutron_plugin_base_v2.NeutronPluginBaseV2, event.listen(models_v2.Port.status, 'set', self.nova_notifier.record_port_status_changed) - def _get_network(self, context, id): - try: - network = self._get_by_id(context, models_v2.Network, id) - except exc.NoResultFound: - raise n_exc.NetworkNotFound(net_id=id) - return network - - def _get_subnet(self, context, id): - try: - subnet = self._get_by_id(context, models_v2.Subnet, id) - except exc.NoResultFound: - raise n_exc.SubnetNotFound(subnet_id=id) - return subnet - - def _get_subnetpool(self, context, id): - try: - return self._get_by_id(context, models_v2.SubnetPool, id) - except exc.NoResultFound: - raise n_exc.SubnetPoolNotFound(subnetpool_id=id) - - def _get_all_subnetpools(self, context): - # NOTE(tidwellr): see note in _get_all_subnets() - return context.session.query(models_v2.SubnetPool).all() - - def _get_port(self, context, id): - try: - port = self._get_by_id(context, models_v2.Port, id) - except exc.NoResultFound: - raise n_exc.PortNotFound(port_id=id) - return port - - def _get_dns_by_subnet(self, context, subnet_id): - dns_qry = context.session.query(models_v2.DNSNameServer) - return dns_qry.filter_by(subnet_id=subnet_id).all() - - def _get_route_by_subnet(self, context, subnet_id): - route_qry = context.session.query(models_v2.SubnetRoute) - return route_qry.filter_by(subnet_id=subnet_id).all() - - def _get_router_gw_ports_by_network(self, context, network_id): - port_qry = context.session.query(models_v2.Port) - return port_qry.filter_by(network_id=network_id, - device_owner=constants.DEVICE_OWNER_ROUTER_GW).all() - - def _get_subnets_by_network(self, context, network_id): 
- subnet_qry = context.session.query(models_v2.Subnet) - return subnet_qry.filter_by(network_id=network_id).all() - - def _get_subnets_by_subnetpool(self, context, subnetpool_id): - subnet_qry = context.session.query(models_v2.Subnet) - return subnet_qry.filter_by(subnetpool_id=subnetpool_id).all() - - def _get_all_subnets(self, context): - # NOTE(salvatore-orlando): This query might end up putting - # a lot of stress on the db. Consider adding a cache layer - return context.session.query(models_v2.Subnet).all() - - @staticmethod - def _generate_mac(): - return utils.get_random_mac(cfg.CONF.base_mac.split(':')) - - @staticmethod - def _delete_ip_allocation(context, network_id, subnet_id, ip_address): - - # Delete the IP address from the IPAllocate table - LOG.debug("Delete allocated IP %(ip_address)s " - "(%(network_id)s/%(subnet_id)s)", - {'ip_address': ip_address, - 'network_id': network_id, - 'subnet_id': subnet_id}) - context.session.query(models_v2.IPAllocation).filter_by( - network_id=network_id, - ip_address=ip_address, - subnet_id=subnet_id).delete() - - @staticmethod - def _store_ip_allocation(context, ip_address, network_id, subnet_id, - port_id): - LOG.debug("Allocated IP %(ip_address)s " - "(%(network_id)s/%(subnet_id)s/%(port_id)s)", - {'ip_address': ip_address, - 'network_id': network_id, - 'subnet_id': subnet_id, - 'port_id': port_id}) - allocated = models_v2.IPAllocation( - network_id=network_id, - port_id=port_id, - ip_address=ip_address, - subnet_id=subnet_id - ) - context.session.add(allocated) - @staticmethod def _generate_ip(context, subnets): try: @@ -341,14 +249,6 @@ class NeutronDbPluginV2(neutron_plugin_base_v2.NeutronPluginBaseV2, return True return False - @classmethod - def _check_gateway_in_subnet(cls, cidr, gateway): - """Validate that the gateway is on the subnet.""" - ip = netaddr.IPAddress(gateway) - if ip.version == 4 or (ip.version == 6 and not ip.is_link_local()): - return ipam_utils.check_subnet_ip(cidr, gateway) - return True - 
@staticmethod def _check_ip_in_allocation_pool(context, subnet_id, gateway_ip, ip_address): @@ -851,92 +751,6 @@ class NeutronDbPluginV2(neutron_plugin_base_v2.NeutronPluginBaseV2, if old_ra_mode_set or old_address_mode_set: raise n_exc.InvalidInput(error_message=msg) - def _make_network_dict(self, network, fields=None, - process_extensions=True): - res = {'id': network['id'], - 'name': network['name'], - 'tenant_id': network['tenant_id'], - 'admin_state_up': network['admin_state_up'], - 'mtu': network.get('mtu', constants.DEFAULT_NETWORK_MTU), - 'status': network['status'], - 'shared': network['shared'], - 'subnets': [subnet['id'] - for subnet in network['subnets']]} - # TODO(pritesh): Move vlan_transparent to the extension module. - # vlan_transparent here is only added if the vlantransparent - # extension is enabled. - if ('vlan_transparent' in network and network['vlan_transparent'] != - attributes.ATTR_NOT_SPECIFIED): - res['vlan_transparent'] = network['vlan_transparent'] - # Call auxiliary extend functions, if any - if process_extensions: - self._apply_dict_extend_functions( - attributes.NETWORKS, res, network) - return self._fields(res, fields) - - def _make_subnet_dict(self, subnet, fields=None): - res = {'id': subnet['id'], - 'name': subnet['name'], - 'tenant_id': subnet['tenant_id'], - 'network_id': subnet['network_id'], - 'ip_version': subnet['ip_version'], - 'cidr': subnet['cidr'], - 'subnetpool_id': subnet.get('subnetpool_id'), - 'allocation_pools': [{'start': pool['first_ip'], - 'end': pool['last_ip']} - for pool in subnet['allocation_pools']], - 'gateway_ip': subnet['gateway_ip'], - 'enable_dhcp': subnet['enable_dhcp'], - 'ipv6_ra_mode': subnet['ipv6_ra_mode'], - 'ipv6_address_mode': subnet['ipv6_address_mode'], - 'dns_nameservers': [dns['address'] - for dns in subnet['dns_nameservers']], - 'host_routes': [{'destination': route['destination'], - 'nexthop': route['nexthop']} - for route in subnet['routes']], - 'shared': subnet['shared'] - } - # Call 
auxiliary extend functions, if any - self._apply_dict_extend_functions(attributes.SUBNETS, res, subnet) - return self._fields(res, fields) - - def _make_subnetpool_dict(self, subnetpool, fields=None): - default_prefixlen = str(subnetpool['default_prefixlen']) - min_prefixlen = str(subnetpool['min_prefixlen']) - max_prefixlen = str(subnetpool['max_prefixlen']) - res = {'id': subnetpool['id'], - 'name': subnetpool['name'], - 'tenant_id': subnetpool['tenant_id'], - 'default_prefixlen': default_prefixlen, - 'min_prefixlen': min_prefixlen, - 'max_prefixlen': max_prefixlen, - 'shared': subnetpool['shared'], - 'prefixes': [prefix['cidr'] - for prefix in subnetpool['prefixes']], - 'ip_version': subnetpool['ip_version'], - 'default_quota': subnetpool['default_quota']} - return self._fields(res, fields) - - def _make_port_dict(self, port, fields=None, - process_extensions=True): - res = {"id": port["id"], - 'name': port['name'], - "network_id": port["network_id"], - 'tenant_id': port['tenant_id'], - "mac_address": port["mac_address"], - "admin_state_up": port["admin_state_up"], - "status": port["status"], - "fixed_ips": [{'subnet_id': ip["subnet_id"], - 'ip_address': ip["ip_address"]} - for ip in port["fixed_ips"]], - "device_id": port["device_id"], - "device_owner": port["device_owner"]} - # Call auxiliary extend functions, if any - if process_extensions: - self._apply_dict_extend_functions( - attributes.PORTS, res, port) - return self._fields(res, fields) - def _create_bulk(self, resource, context, request_items): objects = [] collection = "%ss" % resource @@ -1240,25 +1054,6 @@ class NeutronDbPluginV2(neutron_plugin_base_v2.NeutronPluginBaseV2, return subnet - def _make_subnet_args(self, context, shared, detail, - subnet, subnetpool_id=None): - args = {'tenant_id': detail.tenant_id, - 'id': detail.subnet_id, - 'name': subnet['name'], - 'network_id': subnet['network_id'], - 'ip_version': subnet['ip_version'], - 'cidr': str(detail.subnet_cidr), - 'subnetpool_id': 
subnetpool_id, - 'enable_dhcp': subnet['enable_dhcp'], - 'gateway_ip': self._gateway_ip_str(subnet, detail.subnet_cidr), - 'shared': shared} - if subnet['ip_version'] == 6 and subnet['enable_dhcp']: - if attributes.is_attr_set(subnet['ipv6_ra_mode']): - args['ipv6_ra_mode'] = subnet['ipv6_ra_mode'] - if attributes.is_attr_set(subnet['ipv6_address_mode']): - args['ipv6_address_mode'] = subnet['ipv6_address_mode'] - return args - def _make_subnet_request(self, tenant_id, subnet, subnetpool): cidr = subnet.get('cidr') subnet_id = subnet.get('id', uuidutils.generate_uuid()) @@ -1279,11 +1074,6 @@ class NeutronDbPluginV2(neutron_plugin_base_v2.NeutronPluginBaseV2, subnet_id, cidr) - def _gateway_ip_str(self, subnet, cidr_net): - if subnet.get('gateway_ip') is attributes.ATTR_NOT_SPECIFIED: - return str(cidr_net.network + 1) - return subnet.get('gateway_ip') - @oslo_db_api.wrap_db_retry(max_retries=db_api.MAX_RETRIES, retry_on_request=True, retry_on_deadlock=True) From 92b946e90b6cdd90deba2097196970afaaaf8dab Mon Sep 17 00:00:00 2001 From: Carl Baldwin Date: Wed, 27 May 2015 20:12:27 +0000 Subject: [PATCH 115/292] Use a single method to remove an address with its conntrack state I just noticed a pattern and I thought I'd throw this up for discussion. It has occurred to me that this addition sort of breaks the ip_lib paradigm of wrapping ip commands without any additional useful abstraction. Any better ideas? 
Change-Id: Ibd34bf4a721c153aca916e294e58adb4a28379e4 --- neutron/agent/l3/router_info.py | 3 +- neutron/agent/linux/interface.py | 39 +------------------ neutron/agent/linux/ip_lib.py | 35 +++++++++++++++++ .../tests/unit/agent/l3/test_legacy_router.py | 5 +-- .../tests/unit/agent/linux/test_interface.py | 10 +++-- 5 files changed, 44 insertions(+), 48 deletions(-) diff --git a/neutron/agent/l3/router_info.py b/neutron/agent/l3/router_info.py index c1336355e6e..8a5695cc08c 100644 --- a/neutron/agent/l3/router_info.py +++ b/neutron/agent/l3/router_info.py @@ -210,8 +210,7 @@ class RouterInfo(object): raise NotImplementedError() def remove_floating_ip(self, device, ip_cidr): - device.addr.delete(ip_cidr) - self.driver.delete_conntrack_state(namespace=self.ns_name, ip=ip_cidr) + device.delete_addr_and_conntrack_state(ip_cidr) def get_router_cidrs(self, device): return set([addr['cidr'] for addr in device.addr.list()]) diff --git a/neutron/agent/linux/interface.py b/neutron/agent/linux/interface.py index 435791b9313..ed1e91e98f7 100644 --- a/neutron/agent/linux/interface.py +++ b/neutron/agent/linux/interface.py @@ -113,8 +113,7 @@ class LinuxInterfaceDriver(object): # clean up any old addresses for ip_cidr in previous: if ip_cidr not in preserve_ips: - device.addr.delete(ip_cidr) - self.delete_conntrack_state(namespace=namespace, ip=ip_cidr) + device.delete_addr_and_conntrack_state(ip_cidr) for gateway_ip in gateway_ips or []: device.route.add_gateway(gateway_ip) @@ -131,42 +130,6 @@ class LinuxInterfaceDriver(object): for route in existing_onlink_routes - new_onlink_routes: device.route.delete_onlink_route(route) - def delete_conntrack_state(self, namespace, ip): - """Delete conntrack state associated with an IP address. - - This terminates any active connections through an IP. Call this soon - after removing the IP address from an interface so that new connections - cannot be created before the IP address is gone. 
- - namespace: the name of the namespace where the IP has been configured - ip: the IP address for which state should be removed. This can be - passed as a string with or without /NN. A netaddr.IPAddress or - netaddr.Network representing the IP address can also be passed. - """ - ip_str = str(netaddr.IPNetwork(ip).ip) - ip_wrapper = ip_lib.IPWrapper(namespace=namespace) - - # Delete conntrack state for ingress traffic - # If 0 flow entries have been deleted - # conntrack -D will return 1 - try: - ip_wrapper.netns.execute(["conntrack", "-D", "-d", ip_str], - check_exit_code=True, - extra_ok_codes=[1]) - - except RuntimeError: - LOG.exception(_LE("Failed deleting ingress connection state of" - " floatingip %s"), ip_str) - - # Delete conntrack state for egress traffic - try: - ip_wrapper.netns.execute(["conntrack", "-D", "-q", ip_str], - check_exit_code=True, - extra_ok_codes=[1]) - except RuntimeError: - LOG.exception(_LE("Failed deleting egress connection state of" - " floatingip %s"), ip_str) - def check_bridge_exists(self, bridge): if not ip_lib.device_exists(bridge): raise exceptions.BridgeDoesNotExist(bridge=bridge) diff --git a/neutron/agent/linux/ip_lib.py b/neutron/agent/linux/ip_lib.py index a22b46b4f5a..586f70bb9b3 100644 --- a/neutron/agent/linux/ip_lib.py +++ b/neutron/agent/linux/ip_lib.py @@ -211,6 +211,41 @@ class IPDevice(SubProcessBase): def __str__(self): return self.name + def delete_addr_and_conntrack_state(self, cidr): + """Delete an address along with its conntrack state + + This terminates any active connections through an IP. + + cidr: the IP address for which state should be removed. This can be + passed as a string with or without /NN. A netaddr.IPAddress or + netaddr.Network representing the IP address can also be passed. 
+ """ + self.addr.delete(cidr) + + ip_str = str(netaddr.IPNetwork(cidr).ip) + ip_wrapper = IPWrapper(namespace=self.namespace) + + # Delete conntrack state for ingress traffic + # If 0 flow entries have been deleted + # conntrack -D will return 1 + try: + ip_wrapper.netns.execute(["conntrack", "-D", "-d", ip_str], + check_exit_code=True, + extra_ok_codes=[1]) + + except RuntimeError: + LOG.exception(_LE("Failed deleting ingress connection state of" + " floatingip %s"), ip_str) + + # Delete conntrack state for egress traffic + try: + ip_wrapper.netns.execute(["conntrack", "-D", "-q", ip_str], + check_exit_code=True, + extra_ok_codes=[1]) + except RuntimeError: + LOG.exception(_LE("Failed deleting egress connection state of" + " floatingip %s"), ip_str) + class IpCommandBase(object): COMMAND = '' diff --git a/neutron/tests/unit/agent/l3/test_legacy_router.py b/neutron/tests/unit/agent/l3/test_legacy_router.py index 296d93f80d9..2bf4f303515 100644 --- a/neutron/tests/unit/agent/l3/test_legacy_router.py +++ b/neutron/tests/unit/agent/l3/test_legacy_router.py @@ -46,10 +46,7 @@ class TestBasicRouterOperations(BasicRouterTestCaseFramework): ri.remove_floating_ip(device, cidr) - device.addr.delete.assert_called_once_with(cidr) - self.driver.delete_conntrack_state.assert_called_once_with( - ip=cidr, - namespace=ri.ns_name) + device.delete_addr_and_conntrack_state.assert_called_once_with(cidr) @mock.patch.object(ip_lib, 'send_gratuitous_arp') diff --git a/neutron/tests/unit/agent/linux/test_interface.py b/neutron/tests/unit/agent/linux/test_interface.py index ad08444c157..8bbc210dd9b 100644 --- a/neutron/tests/unit/agent/linux/test_interface.py +++ b/neutron/tests/unit/agent/linux/test_interface.py @@ -94,7 +94,7 @@ class TestABCDriver(TestBase): [mock.call('tap0', namespace=ns), mock.call().addr.list(filters=['permanent']), mock.call().addr.add('192.168.1.2/24'), - mock.call().addr.delete('172.16.77.240/24'), + 
mock.call().delete_addr_and_conntrack_state('172.16.77.240/24'), mock.call().route.list_onlink_routes(constants.IP_VERSION_4), mock.call().route.list_onlink_routes(constants.IP_VERSION_6), mock.call().route.add_onlink_route('172.20.0.0/24')]) @@ -127,6 +127,7 @@ class TestABCDriver(TestBase): mock.call().addr.list(filters=['permanent']), mock.call().addr.add('192.168.1.2/24')]) self.assertFalse(self.ip_dev().addr.delete.called) + self.assertFalse(self.ip_dev().delete_addr_and_conntrack_state.called) def _test_l3_init_with_ipv6(self, include_gw_ip): addresses = [dict(scope='global', @@ -147,7 +148,8 @@ class TestABCDriver(TestBase): [mock.call('tap0', namespace=ns), mock.call().addr.list(filters=['permanent']), mock.call().addr.add('2001:db8:a::124/64'), - mock.call().addr.delete('2001:db8:a::123/64')]) + mock.call().delete_addr_and_conntrack_state( + '2001:db8:a::123/64')]) if include_gw_ip: expected_calls += ( [mock.call().route.add_gateway('2001:db8:a::1')]) @@ -180,8 +182,8 @@ class TestABCDriver(TestBase): mock.call().addr.list(filters=['permanent']), mock.call().addr.add('192.168.1.2/24'), mock.call().addr.add('2001:db8:a::124/64'), - mock.call().addr.delete('172.16.77.240/24'), - mock.call().addr.delete('2001:db8:a::123/64'), + mock.call().delete_addr_and_conntrack_state('172.16.77.240/24'), + mock.call().delete_addr_and_conntrack_state('2001:db8:a::123/64'), mock.call().route.list_onlink_routes(constants.IP_VERSION_4), mock.call().route.list_onlink_routes(constants.IP_VERSION_6), mock.call().route.add_onlink_route('172.20.0.0/24')], From 75c3a8dee4a7f643d7654e2ea4641fd1bd18af62 Mon Sep 17 00:00:00 2001 From: Carl Baldwin Date: Wed, 3 Jun 2015 16:56:23 +0000 Subject: [PATCH 116/292] Enhance utils.ensure_dir to be resilient to concurrent workers In rare cases, concurrent workers may attempt to ensure a directory exists. One may successfully create the directory while the other gets an oserror that it already exists. 
This patch detects the problem and returns successfully in both cases. Change-Id: I224be69168ede8a496a5f7d59b04b722f4de7192 --- neutron/agent/linux/utils.py | 9 ++++++++- neutron/tests/fullstack/fullstack_fixtures.py | 10 +--------- neutron/tests/unit/agent/linux/test_utils.py | 9 +++++++++ 3 files changed, 18 insertions(+), 10 deletions(-) diff --git a/neutron/agent/linux/utils.py b/neutron/agent/linux/utils.py index c38ed138489..dc22a0e069b 100644 --- a/neutron/agent/linux/utils.py +++ b/neutron/agent/linux/utils.py @@ -13,6 +13,7 @@ # License for the specific language governing permissions and limitations # under the License. +import errno import fcntl import glob import grp @@ -191,7 +192,13 @@ def find_child_pids(pid): def ensure_dir(dir_path): """Ensure a directory with 755 permissions mode.""" if not os.path.isdir(dir_path): - os.makedirs(dir_path, 0o755) + try: + os.makedirs(dir_path, 0o755) + except OSError as e: + # Make sure that the error was that the directory was created + # by a different (concurrent) worker. If not, raise the error. + if e.errno != errno.EEXIST: + raise def _get_conf_base(cfg_root, uuid, ensure_conf_dir): diff --git a/neutron/tests/fullstack/fullstack_fixtures.py b/neutron/tests/fullstack/fullstack_fixtures.py index f714273aeea..e1959f86771 100644 --- a/neutron/tests/fullstack/fullstack_fixtures.py +++ b/neutron/tests/fullstack/fullstack_fixtures.py @@ -13,7 +13,6 @@ # under the License. from distutils import spawn -import errno import functools import os @@ -51,14 +50,7 @@ class ProcessFixture(fixtures.Fixture): def start(self): fmt = self.process_name + "--%Y-%m-%d--%H%M%S.log" log_dir = os.path.join(DEFAULT_LOG_DIR, self.test_name) - if not os.path.exists(log_dir): - try: - os.makedirs(log_dir) - except OSError as e: - # Make sure that the error was that the directory was created - # by a different (concurrent) worker. If not, raise the error. 
- if e.errno != errno.EEXIST: - raise + utils.ensure_dir(log_dir) cmd = [spawn.find_executable(self.exec_name), '--log-dir', log_dir, diff --git a/neutron/tests/unit/agent/linux/test_utils.py b/neutron/tests/unit/agent/linux/test_utils.py index 512f1bd7788..aa510f96de7 100644 --- a/neutron/tests/unit/agent/linux/test_utils.py +++ b/neutron/tests/unit/agent/linux/test_utils.py @@ -12,6 +12,7 @@ # License for the specific language governing permissions and limitations # under the License. +import errno import mock import socket import testtools @@ -281,6 +282,14 @@ class TestBaseOSUtils(base.BaseTestCase): getegid.assert_called_once_with() getgrgid.assert_called_once_with(self.EGID) + @mock.patch('os.makedirs') + @mock.patch('os.path.exists', return_value=False) + def test_ensure_dir_no_fail_if_exists(self, path_exists, makedirs): + error = OSError() + error.errno = errno.EEXIST + makedirs.side_effect = error + utils.ensure_dir("/etc/create/concurrently") + class TestUnixDomainHttpConnection(base.BaseTestCase): def test_connect(self): From b0b9e8bcaf3b2b9f33f8d3cd0bcdc21e8b7a1a87 Mon Sep 17 00:00:00 2001 From: Assaf Muller Date: Wed, 3 Jun 2015 13:50:57 -0400 Subject: [PATCH 117/292] Remove reference to non-existent fullstack fixture EnvironmentFixture doesn't actually exist, and environment should be a mandatory parameter. 
Change-Id: I5598e176f297bcfa3b2cab188b4b446ce18d54ab --- neutron/tests/fullstack/base.py | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/neutron/tests/fullstack/base.py b/neutron/tests/fullstack/base.py index 61b50d3a185..9fed9f3e621 100644 --- a/neutron/tests/fullstack/base.py +++ b/neutron/tests/fullstack/base.py @@ -17,24 +17,21 @@ from oslo_db.sqlalchemy import test_base from neutron.db.migration.models import head # noqa from neutron.db import model_base -from neutron.tests.fullstack import fullstack_fixtures as f_fixtures class BaseFullStackTestCase(test_base.MySQLOpportunisticTestCase): """Base test class for full-stack tests.""" - def __init__(self, environment=None, *args, **kwargs): + def __init__(self, environment, *args, **kwargs): super(BaseFullStackTestCase, self).__init__(*args, **kwargs) - self.environment = (environment if environment - else f_fixtures.EnvironmentFixture()) + self.environment = environment def setUp(self): super(BaseFullStackTestCase, self).setUp() self.create_db_tables() - if self.environment: - self.environment.test_name = self.get_name() - self.useFixture(self.environment) + self.environment.test_name = self.get_name() + self.useFixture(self.environment) self.client = self.environment.neutron_server.client From d00bd978f849df6d8f6f40dbc75b5b3fbdd610ea Mon Sep 17 00:00:00 2001 From: Angus Lees Date: Thu, 4 Jun 2015 12:55:02 +1000 Subject: [PATCH 118/292] pylint: enable `duplicate-key` check This check attempts to detect dictionary literals with duplicate keys. The rest of the Neutron tree has already had the few instances of this cleaned[1] or moved into external vendor repos. Enabling the pylint check will stop future occurrences. 
[1] I29cd2b843a7905986de13a1ecfba0cb5797ccaf8 (Original patch I1aa221d2019853f905f2b8421dd45b0a3102baf0 by zhiyuan_cai) Change-Id: If4fed9714cd7fa586845f21f8f56dde2645cc5e0 Co-Authored-By: zhiyuan_cai --- .pylintrc | 1 - 1 file changed, 1 deletion(-) diff --git a/.pylintrc b/.pylintrc index f2e4a0db2f2..5037da9166c 100644 --- a/.pylintrc +++ b/.pylintrc @@ -31,7 +31,6 @@ disable= broad-except, dangerous-default-value, deprecated-lambda, - duplicate-key, expression-not-assigned, fixme, global-statement, From 52ac1c30086738ad798bdafeede2172ee23897e6 Mon Sep 17 00:00:00 2001 From: YAMAMOTO Takashi Date: Wed, 3 Jun 2015 22:21:52 +0900 Subject: [PATCH 119/292] test_db_base_plugin_v2: Don't assume the order of fixed_ips Fixes test_ip_allocation_for_ipv6_2_subnet_slaac_mode failures on my environment. Change-Id: I18a9a177a60c364d79943a8d68b46fde3b9f5d74 --- neutron/tests/unit/db/test_db_base_plugin_v2.py | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/neutron/tests/unit/db/test_db_base_plugin_v2.py b/neutron/tests/unit/db/test_db_base_plugin_v2.py index cd381d7874b..ff566b3d383 100644 --- a/neutron/tests/unit/db/test_db_base_plugin_v2.py +++ b/neutron/tests/unit/db/test_db_base_plugin_v2.py @@ -1868,7 +1868,6 @@ fixed_ips=ip_address%%3D%s&fixed_ips=ip_address%%3D%s&fixed_ips=subnet_id%%3D%s ip_version=6, ipv6_ra_mode=constants.IPV6_SLAAC) port = self._make_port(self.fmt, network['network']['id']) - self.assertEqual(len(port['port']['fixed_ips']), 2) port_mac = port['port']['mac_address'] cidr_1 = v6_subnet_1['subnet']['cidr'] cidr_2 = v6_subnet_2['subnet']['cidr'] @@ -1876,10 +1875,9 @@ fixed_ips=ip_address%%3D%s&fixed_ips=ip_address%%3D%s&fixed_ips=subnet_id%%3D%s port_mac)) eui_addr_2 = str(ipv6_utils.get_ipv6_addr_by_EUI64(cidr_2, port_mac)) - self.assertEqual(port['port']['fixed_ips'][0]['ip_address'], - eui_addr_1) - self.assertEqual(port['port']['fixed_ips'][1]['ip_address'], - eui_addr_2) + self.assertEqual({eui_addr_1, eui_addr_2}, + 
{fixed_ip['ip_address'] for fixed_ip in + port['port']['fixed_ips']}) def test_range_allocation(self): with self.subnet(gateway_ip='10.0.0.3', From 9cac5c3a9f5238fde7fd4e1c01644cdfe48785d2 Mon Sep 17 00:00:00 2001 From: Henry Gessau Date: Mon, 1 Jun 2015 14:36:17 -0400 Subject: [PATCH 120/292] Decompose the NCS ML2 Mechanism Driver The last of the Cisco drivers to decompose. Closes-bug: #1416713 Co-Authored-By: Nikolay Fedotov Change-Id: Icd2b358fb0db3d859ee287225ab8eeb10d7da871 --- doc/source/devref/contribute.rst | 11 -- doc/source/devref/sub_projects.rst | 13 ++ etc/neutron/plugins/ml2/ml2_conf_ncs.ini | 28 --- .../plugins/ml2/drivers/cisco/ncs/driver.py | 171 +----------------- .../plugins/ml2/drivers/cisco/ncs/__init__.py | 0 .../ml2/drivers/cisco/ncs/test_driver.py | 45 ----- setup.cfg | 1 - 7 files changed, 18 insertions(+), 251 deletions(-) delete mode 100644 etc/neutron/plugins/ml2/ml2_conf_ncs.ini delete mode 100644 neutron/tests/unit/plugins/ml2/drivers/cisco/ncs/__init__.py delete mode 100644 neutron/tests/unit/plugins/ml2/drivers/cisco/ncs/test_driver.py diff --git a/doc/source/devref/contribute.rst b/doc/source/devref/contribute.rst index c966af3aee0..932474e7500 100644 --- a/doc/source/devref/contribute.rst +++ b/doc/source/devref/contribute.rst @@ -402,14 +402,3 @@ will be removed. The following aspects are captured: +===============================+=======================+===========+==================+=========+==============+ | freescale-nscs | ml2,fw | no | no | [D] | | +-------------------------------+-----------------------+-----------+------------------+---------+--------------+ -| networking-cisco_ | core,ml2,l3,fw,vpn | yes | yes | [B] | | -+-------------------------------+-----------------------+-----------+------------------+---------+--------------+ - -.. 
_networking-cisco: - -Cisco ------ - -* Git: https://git.openstack.org/stackforge/networking-cisco -* Launchpad: https://launchpad.net/networking-cisco -* PyPI: https://pypi.python.org/pypi/networking-cisco diff --git a/doc/source/devref/sub_projects.rst b/doc/source/devref/sub_projects.rst index 24336b0fe6b..3a32d7a2614 100644 --- a/doc/source/devref/sub_projects.rst +++ b/doc/source/devref/sub_projects.rst @@ -92,6 +92,8 @@ capabilities of Neutron, the Neutron API, or a combination of both. +-------------------------------+-----------------------+ | networking-brocade_ | ml2,l3 | +-------------------------------+-----------------------+ +| networking-cisco_ | core,ml2,l3,fw,vpn | ++-------------------------------+-----------------------+ | networking-edge-vpn_ | vpn | +-------------------------------+-----------------------+ | networking-hyperv_ | ml2 | @@ -185,6 +187,17 @@ Brocade * Launchpad: https://launchpad.net/networking-brocade * PyPI: https://pypi.python.org/pypi/networking-brocade +.. _networking-cisco: + +Cisco +----- + +* Git: https://github.com/stackforge/networking-cisco +* Launchpad: https://launchpad.net/networking-cisco +* PyPI: https://pypi.python.org/pypi/networking-cisco + +.. _dragonflow: + DragonFlow ---------- diff --git a/etc/neutron/plugins/ml2/ml2_conf_ncs.ini b/etc/neutron/plugins/ml2/ml2_conf_ncs.ini deleted file mode 100644 index dbbfcbd2860..00000000000 --- a/etc/neutron/plugins/ml2/ml2_conf_ncs.ini +++ /dev/null @@ -1,28 +0,0 @@ -# Defines configuration options specific to the Tail-f NCS Mechanism Driver - -[ml2_ncs] -# (StrOpt) Tail-f NCS HTTP endpoint for REST access to the OpenStack -# subtree. -# If this is not set then no HTTP requests will be made. -# -# url = -# Example: url = http://ncs/api/running/services/openstack - -# (StrOpt) Username for HTTP basic authentication to NCS. -# This is an optional parameter. If unspecified then no authentication is used. 
-# -# username = -# Example: username = admin - -# (StrOpt) Password for HTTP basic authentication to NCS. -# This is an optional parameter. If unspecified then no authentication is used. -# -# password = -# Example: password = admin - -# (IntOpt) Timeout in seconds to wait for NCS HTTP request completion. -# This is an optional parameter, default value is 10 seconds. -# -# timeout = -# Example: timeout = 15 - diff --git a/neutron/plugins/ml2/drivers/cisco/ncs/driver.py b/neutron/plugins/ml2/drivers/cisco/ncs/driver.py index df79db083d3..6f8b8a6c7c0 100644 --- a/neutron/plugins/ml2/drivers/cisco/ncs/driver.py +++ b/neutron/plugins/ml2/drivers/cisco/ncs/driver.py @@ -13,171 +13,10 @@ # License for the specific language governing permissions and limitations # under the License. -import re +""" +ML2 Mechanism Driver for Cisco NCS. +""" -from oslo_config import cfg -from oslo_log import log -from oslo_serialization import jsonutils -import requests -import six +from networking_cisco.plugins.ml2.drivers.cisco.ncs import driver as cisco -from neutron.plugins.ml2 import driver_api as api - -LOG = log.getLogger(__name__) - -ncs_opts = [ - cfg.StrOpt('url', - help=_("HTTP URL of Tail-f NCS REST interface.")), - cfg.StrOpt('username', - help=_("HTTP username for authentication")), - cfg.StrOpt('password', secret=True, - help=_("HTTP password for authentication")), - cfg.IntOpt('timeout', default=10, - help=_("HTTP timeout in seconds.")) -] - -cfg.CONF.register_opts(ncs_opts, "ml2_ncs") - - -class NCSMechanismDriver(api.MechanismDriver): - - """Mechanism Driver for Tail-f Network Control System (NCS). - - This driver makes portions of the Neutron database available for - service provisioning in NCS. For example, NCS can use this - information to provision physical switches and routers in response - to OpenStack configuration changes. - - The database is replicated from Neutron to NCS using HTTP and JSON. - - The driver has two states: out-of-sync (initially) and in-sync. 
- - In the out-of-sync state each driver event triggers an attempt - to synchronize the complete database. On success the driver - transitions to the in-sync state. - - In the in-sync state each driver event triggers synchronization - of one network or port. On success the driver stays in-sync and - on failure it transitions to the out-of-sync state. - """ - out_of_sync = True - - def initialize(self): - self.url = cfg.CONF.ml2_ncs.url - self.timeout = cfg.CONF.ml2_ncs.timeout - self.username = cfg.CONF.ml2_ncs.username - self.password = cfg.CONF.ml2_ncs.password - - # Postcommit hooks are used to trigger synchronization. - - def create_network_postcommit(self, context): - self.synchronize('create', 'network', context) - - def update_network_postcommit(self, context): - self.synchronize('update', 'network', context) - - def delete_network_postcommit(self, context): - self.synchronize('delete', 'network', context) - - def create_subnet_postcommit(self, context): - self.synchronize('create', 'subnet', context) - - def update_subnet_postcommit(self, context): - self.synchronize('update', 'subnet', context) - - def delete_subnet_postcommit(self, context): - self.synchronize('delete', 'subnet', context) - - def create_port_postcommit(self, context): - self.synchronize('create', 'port', context) - - def update_port_postcommit(self, context): - self.synchronize('update', 'port', context) - - def delete_port_postcommit(self, context): - self.synchronize('delete', 'port', context) - - def synchronize(self, operation, object_type, context): - """Synchronize NCS with Neutron following a configuration change.""" - if self.out_of_sync: - self.sync_full(context) - else: - self.sync_object(operation, object_type, context) - - def sync_full(self, context): - """Resync the entire database to NCS. - Transition to the in-sync state on success. 
- """ - dbcontext = context._plugin_context - networks = context._plugin.get_networks(dbcontext) - subnets = context._plugin.get_subnets(dbcontext) - ports = context._plugin.get_ports(dbcontext) - for port in ports: - self.add_security_groups(context, dbcontext, port) - json = {'openstack': {'network': networks, - 'subnet': subnets, - 'port': ports}} - self.sendjson('put', '', json) - self.out_of_sync = False - - def sync_object(self, operation, object_type, context): - """Synchronize the single modified record to NCS. - Transition to the out-of-sync state on failure. - """ - self.out_of_sync = True - dbcontext = context._plugin_context - id = context.current['id'] - urlpath = object_type + '/' + id - if operation == 'delete': - self.sendjson('delete', urlpath, None) - else: - assert operation == 'create' or operation == 'update' - if object_type == 'network': - network = context._plugin.get_network(dbcontext, id) - self.sendjson('put', urlpath, {'network': network}) - elif object_type == 'subnet': - subnet = context._plugin.get_subnet(dbcontext, id) - self.sendjson('put', urlpath, {'subnet': subnet}) - else: - assert object_type == 'port' - port = context._plugin.get_port(dbcontext, id) - self.add_security_groups(context, dbcontext, port) - self.sendjson('put', urlpath, {'port': port}) - self.out_of_sync = False - - def add_security_groups(self, context, dbcontext, port): - """Populate the 'security_groups' field with entire records.""" - groups = [context._plugin.get_security_group(dbcontext, sg) - for sg in port['security_groups']] - port['security_groups'] = groups - - def sendjson(self, method, urlpath, obj): - obj = self.escape_keys(obj) - headers = {'Content-Type': 'application/vnd.yang.data+json'} - if obj is None: - data = None - else: - data = jsonutils.dumps(obj, indent=2) - auth = None - if self.username and self.password: - auth = (self.username, self.password) - if self.url: - url = '/'.join([self.url, urlpath]) - r = requests.request(method, url=url, 
- headers=headers, data=data, - auth=auth, timeout=self.timeout) - r.raise_for_status() - - def escape_keys(self, obj): - """Escape JSON keys to be NCS compatible. - NCS does not allow period (.) or colon (:) characters. - """ - if isinstance(obj, dict): - obj = dict((self.escape(k), self.escape_keys(v)) - for k, v in six.iteritems(obj)) - if isinstance(obj, list): - obj = [self.escape_keys(x) for x in obj] - return obj - - def escape(self, string): - return re.sub('[:._]', '-', string) +NCSMechanismDriver = cisco.NCSMechanismDriver diff --git a/neutron/tests/unit/plugins/ml2/drivers/cisco/ncs/__init__.py b/neutron/tests/unit/plugins/ml2/drivers/cisco/ncs/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/neutron/tests/unit/plugins/ml2/drivers/cisco/ncs/test_driver.py b/neutron/tests/unit/plugins/ml2/drivers/cisco/ncs/test_driver.py deleted file mode 100644 index 5678925c516..00000000000 --- a/neutron/tests/unit/plugins/ml2/drivers/cisco/ncs/test_driver.py +++ /dev/null @@ -1,45 +0,0 @@ -# Copyright (c) 2013 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from neutron.plugins.ml2.drivers.cisco.ncs import driver -from neutron.tests.unit.plugins.ml2 import test_plugin - - -class NCSTestCase(test_plugin.Ml2PluginV2TestCase): - _mechanism_drivers = ['logger', 'ncs'] - - def setUp(self): - # Enable the test mechanism driver to ensure that - # we can successfully call through to all mechanism - # driver apis. - super(NCSTestCase, self).setUp() - self.port_create_status = 'DOWN' - driver.NCSMechanismDriver.sendjson = self.check_sendjson - - def check_sendjson(self, method, urlpath, obj): - # Confirm fix for bug #1224981 - self.assertFalse(urlpath.startswith("http://")) - - -class NCSMechanismTestBasicGet(test_plugin.TestMl2BasicGet, NCSTestCase): - pass - - -class NCSMechanismTestNetworksV2(test_plugin.TestMl2NetworksV2, NCSTestCase): - pass - - -class NCSMechanismTestPortsV2(test_plugin.TestMl2PortsV2, NCSTestCase): - pass diff --git a/setup.cfg b/setup.cfg index 593760c5c67..6455928f993 100755 --- a/setup.cfg +++ b/setup.cfg @@ -68,7 +68,6 @@ data_files = etc/neutron/plugins/ml2/ml2_conf_brocade.ini etc/neutron/plugins/ml2/ml2_conf_brocade_fi_ni.ini etc/neutron/plugins/ml2/ml2_conf_cisco.ini - etc/neutron/plugins/ml2/ml2_conf_ncs.ini etc/neutron/plugins/ml2/ml2_conf_ofa.ini etc/neutron/plugins/ml2/ml2_conf_fslsdn.ini etc/neutron/plugins/ml2/ml2_conf_sriov.ini From 770859d25b9d95b28794832d673ff6127a9f936f Mon Sep 17 00:00:00 2001 From: Dmitry Ratushnyy Date: Mon, 20 Apr 2015 16:22:27 +0300 Subject: [PATCH 121/292] Add more API tests for port-security extension: Test create port with specific value of port_security_enabled Test create secure port with security_group attached Test update port with port_security_enabled True\False and with or without security groups Test deleting port with port_security_enabled Change-Id: Id71f5451dc17f374feff1a3bdb35fb9ec42f0fa1 Depends-On: Ia27881a34ff99cad34c84764d2bf8a6cdf77af9c Depends-On: Ie0ec090e8fdce7dbdbce14ef47f38e8e57f262d4 --- ...st_extension_driver_port_security_admin.py | 
32 +++++ neutron/tests/api/base.py | 4 +- .../test_extension_driver_port_security.py | 128 +++++++++++++----- test-requirements.txt | 1 + 4 files changed, 127 insertions(+), 38 deletions(-) create mode 100644 neutron/tests/api/admin/test_extension_driver_port_security_admin.py diff --git a/neutron/tests/api/admin/test_extension_driver_port_security_admin.py b/neutron/tests/api/admin/test_extension_driver_port_security_admin.py new file mode 100644 index 00000000000..2e28371d2ff --- /dev/null +++ b/neutron/tests/api/admin/test_extension_driver_port_security_admin.py @@ -0,0 +1,32 @@ +# Copyright 2015 Cisco Systems, Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from neutron.tests.api import base +from neutron.tests.api import base_security_groups as base_security +from neutron.tests.tempest import test +from tempest_lib import exceptions as lib_exc + + +class PortSecurityAdminTests(base_security.BaseSecGroupTest, + base.BaseAdminNetworkTest): + + @test.attr(type=['negative', 'smoke']) + @test.idempotent_id('d39a96e2-2dea-4feb-8093-e7ac991ce6f8') + def test_create_port_security_false_on_shared_network(self): + network = self.create_shared_network() + self.assertTrue(network['shared']) + self.create_subnet(network, client=self.admin_client) + self.assertRaises(lib_exc.Forbidden, self.create_port, + network, port_security_enabled=False) diff --git a/neutron/tests/api/base.py b/neutron/tests/api/base.py index e0c7386611b..25ae565e580 100644 --- a/neutron/tests/api/base.py +++ b/neutron/tests/api/base.py @@ -188,11 +188,11 @@ class BaseNetworkTest(neutron.tests.tempest.test.BaseTestCase): pass @classmethod - def create_network(cls, network_name=None): + def create_network(cls, network_name=None, **kwargs): """Wrapper utility that returns a test network.""" network_name = network_name or data_utils.rand_name('test-network-') - body = cls.client.create_network(name=network_name) + body = cls.client.create_network(name=network_name, **kwargs) network = body['network'] cls.networks.append(network) return network diff --git a/neutron/tests/api/test_extension_driver_port_security.py b/neutron/tests/api/test_extension_driver_port_security.py index 10ccb224dbb..6e5d32eb593 100644 --- a/neutron/tests/api/test_extension_driver_port_security.py +++ b/neutron/tests/api/test_extension_driver_port_security.py @@ -13,34 +13,22 @@ # License for the specific language governing permissions and limitations # under the License. 
-from tempest_lib.common.utils import data_utils -from tempest_lib import exceptions as lib_exc +import ddt -from neutron.tests.api import base_security_groups as base +from neutron.tests.api import base +from neutron.tests.api import base_security_groups as base_security from neutron.tests.tempest import config from neutron.tests.tempest import test - +from tempest_lib import exceptions as lib_exc CONF = config.CONF FAKE_IP = '10.0.0.1' FAKE_MAC = '00:25:64:e8:19:dd' -class PortSecTest(base.BaseSecGroupTest): - - @classmethod - def resource_setup(cls): - super(PortSecTest, cls).resource_setup() - - def _create_network(self, network_name=None, port_security_enabled=True): - """Wrapper utility that returns a test network.""" - network_name = network_name or data_utils.rand_name('test-network') - - body = self.client.create_network( - name=network_name, port_security_enabled=port_security_enabled) - network = body['network'] - self.networks.append(network) - return network +@ddt.ddt +class PortSecTest(base_security.BaseSecGroupTest, + base.BaseNetworkTest): @test.attr(type='smoke') @test.idempotent_id('7c338ddf-e64e-4118-bd33-e49a1f2f1495') @@ -49,29 +37,41 @@ class PortSecTest(base.BaseSecGroupTest): # Default port-sec value is True, and the attr of the port will inherit # from the port-sec of the network when it not be specified in API network = self.create_network() - self.create_subnet(network) self.assertTrue(network['port_security_enabled']) + self.create_subnet(network) port = self.create_port(network) self.assertTrue(port['port_security_enabled']) @test.attr(type='smoke') @test.idempotent_id('e60eafd2-31de-4c38-8106-55447d033b57') @test.requires_ext(extension='port-security', service='network') - def test_port_sec_specific_value(self): - network = self.create_network() - - self.assertTrue(network['port_security_enabled']) + @ddt.unpack + @ddt.data({'port_sec_net': False, 'port_sec_port': True, 'expected': True}, + {'port_sec_net': True, 'port_sec_port': 
False, + 'expected': False}) + def test_port_sec_specific_value(self, port_sec_net, port_sec_port, + expected): + network = self.create_network(port_security_enabled=port_sec_net) self.create_subnet(network) - port = self.create_port(network, port_security_enabled=False) - self.assertFalse(port['port_security_enabled']) + port = self.create_port(network, port_security_enabled=port_sec_port) + self.assertEqual(network['port_security_enabled'], port_sec_net) + self.assertEqual(port['port_security_enabled'], expected) - # Create a network with port-sec set to False - network = self._create_network(port_security_enabled=False) - - self.assertFalse(network['port_security_enabled']) + @test.attr(type=['smoke']) + @test.idempotent_id('05642059-1bfc-4581-9bc9-aaa5db08dd60') + @test.requires_ext(extension='port-security', service='network') + def test_create_port_sec_with_security_group(self): + network = self.create_network(port_security_enabled=True) self.create_subnet(network) - port = self.create_port(network, port_security_enabled=True) + + port = self.create_port(network, security_groups=[]) self.assertTrue(port['port_security_enabled']) + self.client.delete_port(port['id']) + + port = self.create_port(network, security_groups=[], + port_security_enabled=False) + self.assertFalse(port['port_security_enabled']) + self.assertEmpty(port['security_groups']) @test.attr(type=['negative', 'smoke']) @test.idempotent_id('05642059-1bfc-4581-9bc9-aaa5db08dd60') @@ -79,16 +79,72 @@ class PortSecTest(base.BaseSecGroupTest): def test_port_sec_update_port_failed(self): network = self.create_network() self.create_subnet(network) + + sec_group_body, sec_group_name = self._create_security_group() port = self.create_port(network) # Exception when set port-sec to False with sec-group defined - self.assertRaises(lib_exc.Conflict, - self.update_port, port, port_security_enabled=False) + self.assertRaises(lib_exc.Conflict, self.update_port, port, + port_security_enabled=False) - 
updated_port = self.update_port( - port, security_groups=[], port_security_enabled=False) - self.assertFalse(updated_port['port_security_enabled']) + port = self.update_port(port, security_groups=[], + port_security_enabled=False) + self.assertEmpty(port['security_groups']) + self.assertFalse(port['port_security_enabled']) + port = self.update_port( + port, security_groups=[sec_group_body['security_group']['id']], + port_security_enabled=True) + self.assertNotEmpty(port['security_groups']) + self.assertTrue(port['port_security_enabled']) + + # Remove security group from port before deletion on resource_cleanup + self.update_port(port, security_groups=[]) + + @test.attr(type=['smoke']) + @test.idempotent_id('05642059-1bfc-4581-9bc9-aaa5db08dd60') + @test.requires_ext(extension='port-security', service='network') + def test_port_sec_update_pass(self): + network = self.create_network() + self.create_subnet(network) + sec_group, _ = self._create_security_group() + sec_group_id = sec_group['security_group']['id'] + port = self.create_port(network, security_groups=[sec_group_id], + port_security_enabled=True) + + self.assertNotEmpty(port['security_groups']) + self.assertTrue(port['port_security_enabled']) + + port = self.update_port(port, security_groups=[]) + self.assertEmpty(port['security_groups']) + self.assertTrue(port['port_security_enabled']) + + port = self.update_port(port, security_groups=[sec_group_id]) + self.assertNotEmpty(port['security_groups']) + port = self.update_port(port, security_groups=[], + port_security_enabled=False) + self.assertEmpty(port['security_groups']) + self.assertFalse(port['port_security_enabled']) + + @test.attr(type=['smoke']) + @test.idempotent_id('2df6114b-b8c3-48a1-96e8-47f08159d35c') + @test.requires_ext(extension='port-security', service='network') + def test_delete_with_port_sec(self): + network = self.create_network(port_security_enabled=True) + port = self.create_port(network=network, + port_security_enabled=True) + 
self.client.delete_port(port['id']) + self.assertTrue(self.client.is_resource_deleted('port', port['id'])) + self.client.delete_network(network['id']) + self.assertTrue( + self.client.is_resource_deleted('network', network['id'])) + + @test.attr(type=['negative', 'smoke']) + @test.idempotent_id('ed93e453-3f8d-495e-8e7e-b0e268c2ebd9') + def test_allow_address_pairs(self): + network = self.create_network() + self.create_subnet(network) + port = self.create_port(network=network, port_security_enabled=False) allowed_address_pairs = [{'ip_address': FAKE_IP, 'mac_address': FAKE_MAC}] diff --git a/test-requirements.txt b/test-requirements.txt index be4bd087cbc..5648e677f75 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -17,3 +17,4 @@ testscenarios>=0.4 WebTest>=2.0 oslotest>=1.5.1 # Apache-2.0 tempest-lib>=0.5.0 +ddt>=0.7.0 From 28c95da3f500ce80258784514bfff1dc5d22bf1c Mon Sep 17 00:00:00 2001 From: Alexander Maretskiy Date: Thu, 4 Jun 2015 16:32:12 +0300 Subject: [PATCH 122/292] Changes in rally-jobs/README.rst Changes: * fix broken links * add link to release notes Change-Id: Ie42b246aac2fd4f6de8cd8bcf28e5ff2906ffd32 --- rally-jobs/README.rst | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/rally-jobs/README.rst b/rally-jobs/README.rst index 87300ffb55b..9213d95603d 100644 --- a/rally-jobs/README.rst +++ b/rally-jobs/README.rst @@ -22,8 +22,10 @@ Useful links * More about Rally: https://rally.readthedocs.org/en/latest/ -* How to add rally-gates: https://rally.readthedocs.org/en/latest/rally_gatejob.html +* Rally release notes: https://rally.readthedocs.org/en/latest/release_notes.html + +* How to add rally-gates: https://rally.readthedocs.org/en/latest/gates.html * About plugins: https://rally.readthedocs.org/en/latest/plugins.html -* Plugin samples: https://github.com/stackforge/rally/tree/master/doc/samples/plugins +* Plugin samples: https://github.com/openstack/rally/tree/master/samples/plugins From 
c0afeba0b55318e4508d59a68bc7c15e23a9a996 Mon Sep 17 00:00:00 2001 From: Jakub Libosvar Date: Wed, 27 May 2015 13:50:22 +0000 Subject: [PATCH 123/292] Run RootHelperProcess always as root The purpose of RootHelperProcess is to spawn processes running with root id. Instantiating RootHelperProcess as non-root doesn't make sense. This also implies NetcatTester always spawns 'netcat' process as root. The reason why parameter existed here was due to refactor from passing root_helper all over the place. Change-Id: If042efcd75e46b66abf4a9e303e633749a1611b1 --- .../tests/functional/agent/linux/helpers.py | 21 +++++++------------ .../functional/agent/linux/test_helpers.py | 4 ++-- .../functional/agent/linux/test_iptables.py | 2 +- .../tests/functional/agent/test_l3_agent.py | 3 +-- 4 files changed, 12 insertions(+), 18 deletions(-) diff --git a/neutron/tests/functional/agent/linux/helpers.py b/neutron/tests/functional/agent/linux/helpers.py index ed4d1b0eade..593234346ad 100644 --- a/neutron/tests/functional/agent/linux/helpers.py +++ b/neutron/tests/functional/agent/linux/helpers.py @@ -96,21 +96,18 @@ class RootHelperProcess(subprocess.Popen): for arg in ('stdin', 'stdout', 'stderr'): kwargs.setdefault(arg, subprocess.PIPE) self.namespace = kwargs.pop('namespace', None) - self.run_as_root = kwargs.pop('run_as_root', False) self.cmd = cmd if self.namespace is not None: cmd = ['ip', 'netns', 'exec', self.namespace] + cmd - if self.run_as_root: - root_helper = config.get_root_helper(utils.cfg.CONF) - cmd = shlex.split(root_helper) + cmd + root_helper = config.get_root_helper(utils.cfg.CONF) + cmd = shlex.split(root_helper) + cmd self.child_pid = None super(RootHelperProcess, self).__init__(cmd, *args, **kwargs) - if self.run_as_root: - self._wait_for_child_process() + self._wait_for_child_process() def kill(self): pid = self.child_pid or str(self.pid) - utils.execute(['kill', '-9', pid], run_as_root=self.run_as_root) + utils.execute(['kill', '-9', pid], run_as_root=True) def 
read_stdout(self, timeout=None): return self._read_stream(self.stdout, timeout) @@ -134,7 +131,7 @@ class RootHelperProcess(subprocess.Popen): sleep=CHILD_PROCESS_SLEEP): def child_is_running(): child_pid = utils.get_root_helper_child_pid( - self.pid, run_as_root=self.run_as_root) + self.pid, run_as_root=True) if utils.pid_invoked_with_cmdline(child_pid, self.cmd): return True @@ -144,14 +141,14 @@ class RootHelperProcess(subprocess.Popen): exception=RuntimeError("Process %s hasn't been spawned " "in %d seconds" % (self.cmd, timeout))) self.child_pid = utils.get_root_helper_child_pid( - self.pid, run_as_root=self.run_as_root) + self.pid, run_as_root=True) class NetcatTester(object): TESTING_STRING = 'foo' def __init__(self, client_namespace, server_namespace, server_address, - port, client_address=None, run_as_root=False, udp=False): + port, client_address=None, udp=False): self.client_namespace = client_namespace self.server_namespace = server_namespace self._client_process = None @@ -162,7 +159,6 @@ class NetcatTester(object): self.client_address = client_address or server_address self.server_address = server_address self.port = str(port) - self.run_as_root = run_as_root self.udp = udp @property @@ -210,8 +206,7 @@ class NetcatTester(object): cmd.append('-k') else: cmd.extend(['-w', '20']) - proc = RootHelperProcess(cmd, namespace=namespace, - run_as_root=self.run_as_root) + proc = RootHelperProcess(cmd, namespace=namespace) return proc def stop_processes(self): diff --git a/neutron/tests/functional/agent/linux/test_helpers.py b/neutron/tests/functional/agent/linux/test_helpers.py index 44ba2e7975a..a027245d4c0 100644 --- a/neutron/tests/functional/agent/linux/test_helpers.py +++ b/neutron/tests/functional/agent/linux/test_helpers.py @@ -19,14 +19,14 @@ from neutron.tests.functional import base class TestRootHelperProcess(base.BaseSudoTestCase): def test_process_read_write(self): - proc = helpers.RootHelperProcess(['tee'], run_as_root=True) + proc = 
helpers.RootHelperProcess(['tee']) proc.writeline('foo') output = proc.read_stdout(helpers.READ_TIMEOUT) self.assertEqual('foo\n', output) def test_process_kill(self): with self.assert_max_execution_time(100): - proc = helpers.RootHelperProcess(['tee'], run_as_root=True) + proc = helpers.RootHelperProcess(['tee']) proc.kill() proc.wait() # sudo returns 137 and diff --git a/neutron/tests/functional/agent/linux/test_iptables.py b/neutron/tests/functional/agent/linux/test_iptables.py index 7b2b0a722d4..ebbf74ce32c 100644 --- a/neutron/tests/functional/agent/linux/test_iptables.py +++ b/neutron/tests/functional/agent/linux/test_iptables.py @@ -80,7 +80,7 @@ class IptablesManagerTestCase(functional_base.BaseSudoTestCase): def _test_with_nc(self, fw_manager, direction, port, udp): netcat = helpers.NetcatTester( self.client.namespace, self.server.namespace, - self.server.ip, self.port, run_as_root=True, udp=udp) + self.server.ip, self.port, udp=udp) self.addCleanup(netcat.stop_processes) protocol = 'tcp' if udp: diff --git a/neutron/tests/functional/agent/test_l3_agent.py b/neutron/tests/functional/agent/test_l3_agent.py index bd461164f7e..7788654bf2c 100644 --- a/neutron/tests/functional/agent/test_l3_agent.py +++ b/neutron/tests/functional/agent/test_l3_agent.py @@ -414,7 +414,6 @@ class L3AgentTestCase(L3AgentTestFramework): netcat = helpers.NetcatTester(router.ns_name, router.ns_name, server_address, port, client_address=client_address, - run_as_root=True, udp=False) self.addCleanup(netcat.stop_processes) @@ -710,7 +709,7 @@ class L3AgentTestCase(L3AgentTestFramework): netcat = helpers.NetcatTester( src_machine.namespace, dst_machine.namespace, dst_machine.ip, protocol_port, client_address=dst_fip, - run_as_root=True, udp=False) + udp=False) self.addCleanup(netcat.stop_processes) self.assertTrue(netcat.test_connectivity()) From 74dcc91aa85ab2e250fa32d6b895b2d5f7f2d5ba Mon Sep 17 00:00:00 2001 From: Ihar Hrachyshka Date: Thu, 4 Jun 2015 17:58:06 +0200 Subject: [PATCH 
124/292] versionutils: switch from incubator version to oslo.log Note that we require oslo.log >= 1.2.0 since it's the first release that included the module. Change-Id: I2b7d587d8c4b0c885873c9c8083abb8fc35780c9 --- neutron/common/log.py | 3 +- neutron/openstack/common/versionutils.py | 262 ----------------------- neutron/quota.py | 2 +- openstack-common.conf | 1 - requirements.txt | 2 +- 5 files changed, 3 insertions(+), 267 deletions(-) delete mode 100644 neutron/openstack/common/versionutils.py diff --git a/neutron/common/log.py b/neutron/common/log.py index fc6c7ba0341..7cee18e9cdb 100644 --- a/neutron/common/log.py +++ b/neutron/common/log.py @@ -16,8 +16,7 @@ import functools from oslo_log import log as logging - -from neutron.openstack.common import versionutils +from oslo_log import versionutils @versionutils.deprecated(as_of=versionutils.deprecated.LIBERTY, diff --git a/neutron/openstack/common/versionutils.py b/neutron/openstack/common/versionutils.py deleted file mode 100644 index c72fe0b3be5..00000000000 --- a/neutron/openstack/common/versionutils.py +++ /dev/null @@ -1,262 +0,0 @@ -# Copyright (c) 2013 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Helpers for comparing version strings. 
-""" - -import copy -import functools -import inspect -import logging - -from oslo_config import cfg -import pkg_resources -import six - -from neutron.openstack.common._i18n import _ - - -LOG = logging.getLogger(__name__) -CONF = cfg.CONF - - -deprecated_opts = [ - cfg.BoolOpt('fatal_deprecations', - default=False, - help='Enables or disables fatal status of deprecations.'), -] - - -def list_opts(): - """Entry point for oslo.config-generator. - """ - return [(None, copy.deepcopy(deprecated_opts))] - - -class deprecated(object): - """A decorator to mark callables as deprecated. - - This decorator logs a deprecation message when the callable it decorates is - used. The message will include the release where the callable was - deprecated, the release where it may be removed and possibly an optional - replacement. - - Examples: - - 1. Specifying the required deprecated release - - >>> @deprecated(as_of=deprecated.ICEHOUSE) - ... def a(): pass - - 2. Specifying a replacement: - - >>> @deprecated(as_of=deprecated.ICEHOUSE, in_favor_of='f()') - ... def b(): pass - - 3. Specifying the release where the functionality may be removed: - - >>> @deprecated(as_of=deprecated.ICEHOUSE, remove_in=+1) - ... def c(): pass - - 4. Specifying the deprecated functionality will not be removed: - >>> @deprecated(as_of=deprecated.ICEHOUSE, remove_in=0) - ... def d(): pass - - 5. Specifying a replacement, deprecated functionality will not be removed: - >>> @deprecated(as_of=deprecated.ICEHOUSE, in_favor_of='f()', remove_in=0) - ... def e(): pass - - """ - - # NOTE(morganfainberg): Bexar is used for unit test purposes, it is - # expected we maintain a gap between Bexar and Folsom in this list. - BEXAR = 'B' - FOLSOM = 'F' - GRIZZLY = 'G' - HAVANA = 'H' - ICEHOUSE = 'I' - JUNO = 'J' - KILO = 'K' - LIBERTY = 'L' - - _RELEASES = { - # NOTE(morganfainberg): Bexar is used for unit test purposes, it is - # expected we maintain a gap between Bexar and Folsom in this list. 
- 'B': 'Bexar', - 'F': 'Folsom', - 'G': 'Grizzly', - 'H': 'Havana', - 'I': 'Icehouse', - 'J': 'Juno', - 'K': 'Kilo', - 'L': 'Liberty', - } - - _deprecated_msg_with_alternative = _( - '%(what)s is deprecated as of %(as_of)s in favor of ' - '%(in_favor_of)s and may be removed in %(remove_in)s.') - - _deprecated_msg_no_alternative = _( - '%(what)s is deprecated as of %(as_of)s and may be ' - 'removed in %(remove_in)s. It will not be superseded.') - - _deprecated_msg_with_alternative_no_removal = _( - '%(what)s is deprecated as of %(as_of)s in favor of %(in_favor_of)s.') - - _deprecated_msg_with_no_alternative_no_removal = _( - '%(what)s is deprecated as of %(as_of)s. It will not be superseded.') - - def __init__(self, as_of, in_favor_of=None, remove_in=2, what=None): - """Initialize decorator - - :param as_of: the release deprecating the callable. Constants - are define in this class for convenience. - :param in_favor_of: the replacement for the callable (optional) - :param remove_in: an integer specifying how many releases to wait - before removing (default: 2) - :param what: name of the thing being deprecated (default: the - callable's name) - - """ - self.as_of = as_of - self.in_favor_of = in_favor_of - self.remove_in = remove_in - self.what = what - - def __call__(self, func_or_cls): - if not self.what: - self.what = func_or_cls.__name__ + '()' - msg, details = self._build_message() - - if inspect.isfunction(func_or_cls): - - @six.wraps(func_or_cls) - def wrapped(*args, **kwargs): - report_deprecated_feature(LOG, msg, details) - return func_or_cls(*args, **kwargs) - return wrapped - elif inspect.isclass(func_or_cls): - orig_init = func_or_cls.__init__ - - # TODO(tsufiev): change `functools` module to `six` as - # soon as six 1.7.4 (with fix for passing `assigned` - # argument to underlying `functools.wraps`) is released - # and added to the oslo-incubator requrements - @functools.wraps(orig_init, assigned=('__name__', '__doc__')) - def new_init(self, *args, 
**kwargs): - report_deprecated_feature(LOG, msg, details) - orig_init(self, *args, **kwargs) - func_or_cls.__init__ = new_init - return func_or_cls - else: - raise TypeError('deprecated can be used only with functions or ' - 'classes') - - def _get_safe_to_remove_release(self, release): - # TODO(dstanek): this method will have to be reimplemented once - # when we get to the X release because once we get to the Y - # release, what is Y+2? - new_release = chr(ord(release) + self.remove_in) - if new_release in self._RELEASES: - return self._RELEASES[new_release] - else: - return new_release - - def _build_message(self): - details = dict(what=self.what, - as_of=self._RELEASES[self.as_of], - remove_in=self._get_safe_to_remove_release(self.as_of)) - - if self.in_favor_of: - details['in_favor_of'] = self.in_favor_of - if self.remove_in > 0: - msg = self._deprecated_msg_with_alternative - else: - # There are no plans to remove this function, but it is - # now deprecated. - msg = self._deprecated_msg_with_alternative_no_removal - else: - if self.remove_in > 0: - msg = self._deprecated_msg_no_alternative - else: - # There are no plans to remove this function, but it is - # now deprecated. - msg = self._deprecated_msg_with_no_alternative_no_removal - return msg, details - - -def is_compatible(requested_version, current_version, same_major=True): - """Determine whether `requested_version` is satisfied by - `current_version`; in other words, `current_version` is >= - `requested_version`. - - :param requested_version: version to check for compatibility - :param current_version: version to check against - :param same_major: if True, the major version must be identical between - `requested_version` and `current_version`. This is used when a - major-version difference indicates incompatibility between the two - versions. Since this is the common-case in practice, the default is - True. 
- :returns: True if compatible, False if not - """ - requested_parts = pkg_resources.parse_version(requested_version) - current_parts = pkg_resources.parse_version(current_version) - - if same_major and (requested_parts[0] != current_parts[0]): - return False - - return current_parts >= requested_parts - - -# Track the messages we have sent already. See -# report_deprecated_feature(). -_deprecated_messages_sent = {} - - -def report_deprecated_feature(logger, msg, *args, **kwargs): - """Call this function when a deprecated feature is used. - - If the system is configured for fatal deprecations then the message - is logged at the 'critical' level and :class:`DeprecatedConfig` will - be raised. - - Otherwise, the message will be logged (once) at the 'warn' level. - - :raises: :class:`DeprecatedConfig` if the system is configured for - fatal deprecations. - """ - stdmsg = _("Deprecated: %s") % msg - CONF.register_opts(deprecated_opts) - if CONF.fatal_deprecations: - logger.critical(stdmsg, *args, **kwargs) - raise DeprecatedConfig(msg=stdmsg) - - # Using a list because a tuple with dict can't be stored in a set. - sent_args = _deprecated_messages_sent.setdefault(msg, list()) - - if args in sent_args: - # Already logged this message, so don't log it again. 
- return - - sent_args.append(args) - logger.warn(stdmsg, *args, **kwargs) - - -class DeprecatedConfig(Exception): - message = _("Fatal call to deprecated config: %(msg)s") - - def __init__(self, msg): - super(Exception, self).__init__(self.message % dict(msg=msg)) diff --git a/neutron/quota.py b/neutron/quota.py index c9f550b054e..e99a01ecdde 100644 --- a/neutron/quota.py +++ b/neutron/quota.py @@ -18,13 +18,13 @@ import sys from oslo_config import cfg from oslo_log import log as logging +from oslo_log import versionutils from oslo_utils import importutils import six import webob from neutron.common import exceptions from neutron.i18n import _LI, _LW -from neutron.openstack.common import versionutils LOG = logging.getLogger(__name__) diff --git a/openstack-common.conf b/openstack-common.conf index 549005c12ec..b61f7dbcfce 100644 --- a/openstack-common.conf +++ b/openstack-common.conf @@ -11,7 +11,6 @@ module=service module=systemd module=threadgroup module=uuidutils -module=versionutils # The base module to hold the copy of openstack.common base=neutron diff --git a/requirements.txt b/requirements.txt index 47fa0316d1f..b8292b8c02f 100644 --- a/requirements.txt +++ b/requirements.txt @@ -26,7 +26,7 @@ oslo.config>=1.11.0 # Apache-2.0 oslo.context>=0.2.0 # Apache-2.0 oslo.db>=1.7.0 # Apache-2.0 oslo.i18n>=1.5.0 # Apache-2.0 -oslo.log>=1.0.0 # Apache-2.0 +oslo.log>=1.2.0 # Apache-2.0 oslo.messaging>=1.8.0 # Apache-2.0 oslo.middleware>=1.2.0 # Apache-2.0 oslo.rootwrap>=1.6.0 # Apache-2.0 From 38eae7acb30de6f1f8dbb36855a5c913d9370fc5 Mon Sep 17 00:00:00 2001 From: Cyril Roelandt Date: Tue, 19 May 2015 16:16:38 +0000 Subject: [PATCH 125/292] Python 3: do not index a dict_values object In Python 3, dict.values() return a dict_values object instead of a list, as in Python 2. This object cannot be indexed. 
Change-Id: Ia4fdb4cafb1811c55dc8f14e303ab2db1b1110b3 Blueprint: neutron-python3 --- neutron/agent/ovsdb/impl_idl.py | 2 +- neutron/agent/ovsdb/impl_vsctl.py | 2 +- .../unit/agent/test_securitygroups_rpc.py | 21 ++++++++++++------- 3 files changed, 15 insertions(+), 10 deletions(-) diff --git a/neutron/agent/ovsdb/impl_idl.py b/neutron/agent/ovsdb/impl_idl.py index 45851f8d116..57399fc01af 100644 --- a/neutron/agent/ovsdb/impl_idl.py +++ b/neutron/agent/ovsdb/impl_idl.py @@ -134,7 +134,7 @@ class OvsdbIdl(api.API): @property def _ovs(self): - return self._tables['Open_vSwitch'].rows.values()[0] + return list(self._tables['Open_vSwitch'].rows.values())[0] def transaction(self, check_error=False, log_errors=True, **kwargs): return Transaction(self, OvsdbIdl.ovsdb_connection, diff --git a/neutron/agent/ovsdb/impl_vsctl.py b/neutron/agent/ovsdb/impl_vsctl.py index 4fd8937d346..3351a100246 100644 --- a/neutron/agent/ovsdb/impl_vsctl.py +++ b/neutron/agent/ovsdb/impl_vsctl.py @@ -144,7 +144,7 @@ class DbGetCommand(DbCommand): DbCommand.result.fset(self, val) # DbCommand will return [{'column': value}] and we just want value. 
if self._result: - self._result = self._result[0].values()[0] + self._result = list(self._result[0].values())[0] class BrExistsCommand(DbCommand): diff --git a/neutron/tests/unit/agent/test_securitygroups_rpc.py b/neutron/tests/unit/agent/test_securitygroups_rpc.py index b732b6b5af5..a5dea8fd6c8 100644 --- a/neutron/tests/unit/agent/test_securitygroups_rpc.py +++ b/neutron/tests/unit/agent/test_securitygroups_rpc.py @@ -1636,12 +1636,16 @@ PORTS = {'tap_port1': 'port1', 'tap_port2': 'port2'} MACS = {'tap_port1': '12:34:56:78:9A:BC', 'tap_port2': '12:34:56:78:9A:BD'} IPS = {'tap_port1': '10.0.0.3/32', 'tap_port2': '10.0.0.4/32'} -IPTABLES_ARG['port1'] = PORTS.values()[0] -IPTABLES_ARG['port2'] = PORTS.values()[1] -IPTABLES_ARG['mac1'] = MACS.values()[0] -IPTABLES_ARG['mac2'] = MACS.values()[1] -IPTABLES_ARG['ip1'] = IPS.values()[0] -IPTABLES_ARG['ip2'] = IPS.values()[1] +ports_values = list(PORTS.values()) +macs_values = list(MACS.values()) +ips_values = list(IPS.values()) + +IPTABLES_ARG['port1'] = ports_values[0] +IPTABLES_ARG['port2'] = ports_values[1] +IPTABLES_ARG['mac1'] = macs_values[0] +IPTABLES_ARG['mac2'] = macs_values[1] +IPTABLES_ARG['ip1'] = ips_values[0] +IPTABLES_ARG['ip2'] = ips_values[1] IPTABLES_ARG['chains'] = CHAINS_NAT IPTABLES_RAW_DEFAULT = """# Generated by iptables_manager @@ -2128,6 +2132,7 @@ COMMIT # TestSecurityGroupAgentWithIptables() to ensure that the ordering # is consistent regardless of hashseed value REVERSE_PORT_ORDER = {'tap_port1': False, 'tap_port2': True} +reverse_port_order_values = list(REVERSE_PORT_ORDER.values()) IPTABLES_FILTER_2_2 = """# Generated by iptables_manager *filter @@ -2161,7 +2166,7 @@ IPTABLES_FILTER_2_2 = """# Generated by iptables_manager --dport 68 -j RETURN [0:0] -A %(bn)s-i_%(port1)s -p tcp -m tcp --dport 22 -j RETURN """ % IPTABLES_ARG -if (REVERSE_PORT_ORDER.values()[0] is True): +if reverse_port_order_values[0]: IPTABLES_FILTER_2_2 += ("[0:0] -A %(bn)s-i_%(port1)s -s %(ip2)s " "-j RETURN\n" % 
IPTABLES_ARG) @@ -2192,7 +2197,7 @@ IPTABLES_FILTER_2_2 += """[0:0] -A %(bn)s-i_%(port1)s -j %(bn)s-sg-fallback --dport 68 -j RETURN [0:0] -A %(bn)s-i_%(port2)s -p tcp -m tcp --dport 22 -j RETURN """ % IPTABLES_ARG -if (REVERSE_PORT_ORDER.values()[0] is False): +if not reverse_port_order_values[0]: IPTABLES_FILTER_2_2 += ("[0:0] -A %(bn)s-i_%(port2)s -s %(ip1)s " "-j RETURN\n" % IPTABLES_ARG) From 839706df7c742ec4b132dc6f18857679cfd870f5 Mon Sep 17 00:00:00 2001 From: YAMAMOTO Takashi Date: Wed, 3 Jun 2015 22:35:58 +0900 Subject: [PATCH 126/292] test_l3: Don't assume the order of subnets Fixes test_router_add_gateway_multiple_subnets_ipv6 failures on my environment. Change-Id: I44b4dd0cee393ea796f35b999cc111b60ef63db1 --- neutron/tests/unit/extensions/test_l3.py | 20 ++++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/neutron/tests/unit/extensions/test_l3.py b/neutron/tests/unit/extensions/test_l3.py index 2392adc03bb..51dee1cad5a 100644 --- a/neutron/tests/unit/extensions/test_l3.py +++ b/neutron/tests/unit/extensions/test_l3.py @@ -1432,12 +1432,20 @@ class L3NatTestCaseBase(L3NatTestCaseMixin): res = self._show('routers', r['router']['id']) fips = (res['router']['external_gateway_info'] ['external_fixed_ips']) - fip_subnet_ids = [fip['subnet_id'] for fip in fips] - self.assertIn(s1['subnet']['id'], fip_subnet_ids) - self.assertNotIn(s2['subnet']['id'], fip_subnet_ids) - self.assertIn(s3['subnet']['id'], fip_subnet_ids) - self.assertIn(s4['subnet']['id'], fip_subnet_ids) - self.assertIn(s5['subnet']['id'], fip_subnet_ids) + fip_subnet_ids = {fip['subnet_id'] for fip in fips} + # one of s1 or s2 should be in the list. 
+ if s1['subnet']['id'] in fip_subnet_ids: + self.assertEqual({s1['subnet']['id'], + s3['subnet']['id'], + s4['subnet']['id'], + s5['subnet']['id']}, + fip_subnet_ids) + else: + self.assertEqual({s2['subnet']['id'], + s3['subnet']['id'], + s4['subnet']['id'], + s5['subnet']['id']}, + fip_subnet_ids) self._remove_external_gateway_from_router( r['router']['id'], n['network']['id']) From 615cac19913830e3dd15d1668d4fb2046202270f Mon Sep 17 00:00:00 2001 From: YAMAMOTO Takashi Date: Mon, 23 Feb 2015 16:25:53 +0900 Subject: [PATCH 127/292] test_ovs_neutron_agent: Remove useless ofport=10 arguments The kwarg ofport has been removed from method signatures where it was never used. Change-Id: Iee671e8d870ba4ef5175a639bbaf365a5eaf5ef7 --- .../agent/test_ovs_neutron_agent.py | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/neutron/tests/unit/plugins/openvswitch/agent/test_ovs_neutron_agent.py b/neutron/tests/unit/plugins/openvswitch/agent/test_ovs_neutron_agent.py index 9a567dfe3f0..10c9f544090 100644 --- a/neutron/tests/unit/plugins/openvswitch/agent/test_ovs_neutron_agent.py +++ b/neutron/tests/unit/plugins/openvswitch/agent/test_ovs_neutron_agent.py @@ -1251,9 +1251,9 @@ class TestOvsDvrNeutronAgent(object): self.agent.tun_br = self.br_tun_cls(br_name='br-tun') self.agent.sg_agent = mock.Mock() - def _setup_for_dvr_test(self, ofport=10): + def _setup_for_dvr_test(self): self._port = mock.Mock() - self._port.ofport = ofport + self._port.ofport = 10 self._port.vif_id = "1234-5678-90" self._physical_network = 'physeth1' self._old_local_vlan = None @@ -1518,7 +1518,7 @@ class TestOvsDvrNeutronAgent(object): self._test_port_bound_for_dvr_on_vxlan_network( device_owner=n_const.DEVICE_OWNER_DHCP, ip_version=6) - def test_port_bound_for_dvr_with_csnat_ports(self, ofport=10): + def test_port_bound_for_dvr_with_csnat_ports(self): self._setup_for_dvr_test() int_br = mock.create_autospec(self.agent.int_br) tun_br = 
mock.create_autospec(self.agent.tun_br) @@ -1566,13 +1566,11 @@ class TestOvsDvrNeutronAgent(object): ] self.assertEqual(expected_on_tun_br, tun_br.mock_calls) - def test_treat_devices_removed_for_dvr_interface(self, ofport=10): - self._test_treat_devices_removed_for_dvr_interface(ofport) - self._test_treat_devices_removed_for_dvr_interface( - ofport, ip_version=6) + def test_treat_devices_removed_for_dvr_interface(self): + self._test_treat_devices_removed_for_dvr_interface() + self._test_treat_devices_removed_for_dvr_interface(ip_version=6) - def _test_treat_devices_removed_for_dvr_interface(self, ofport=10, - ip_version=4): + def _test_treat_devices_removed_for_dvr_interface(self, ip_version=4): self._setup_for_dvr_test() if ip_version == 4: gateway_ip = '1.1.1.1' @@ -1760,7 +1758,7 @@ class TestOvsDvrNeutronAgent(object): self._test_treat_devices_removed_for_dvr( device_owner=n_const.DEVICE_OWNER_DHCP, ip_version=6) - def test_treat_devices_removed_for_dvr_csnat_port(self, ofport=10): + def test_treat_devices_removed_for_dvr_csnat_port(self): self._setup_for_dvr_test() gateway_mac = 'aa:bb:cc:11:22:33' int_br = mock.create_autospec(self.agent.int_br) From ea5ad714cfcd336c0062ddeb13238c452117782a Mon Sep 17 00:00:00 2001 From: Elena Ezhova Date: Mon, 1 Jun 2015 14:55:13 +0300 Subject: [PATCH 128/292] Handle SIGHUP in ovs neutron agent This change allows ovs neutron agent not to die on receiving SIGHUP and reload its logging options. Note that this patch allows changing only logging options. All other config options are not handled explicitly and changing them using SIGHUP can lead to unpredictable circumstances. So, until changing other options is handled it is highly recommended to use SIGHUP for changing ONLY logging options. 
Change-Id: Ic0cf8a9ca7f3a16b556a6825e2979471ae136c33 Partial-Bug: #1276694 --- .../plugins/openvswitch/agent/ovs_neutron_agent.py | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/neutron/plugins/openvswitch/agent/ovs_neutron_agent.py b/neutron/plugins/openvswitch/agent/ovs_neutron_agent.py index 7673a674bd3..36e8851fdf5 100644 --- a/neutron/plugins/openvswitch/agent/ovs_neutron_agent.py +++ b/neutron/plugins/openvswitch/agent/ovs_neutron_agent.py @@ -14,6 +14,7 @@ # under the License. import hashlib +import logging as std_logging import signal import sys import time @@ -33,6 +34,7 @@ from neutron.agent.linux import ip_lib from neutron.agent import rpc as agent_rpc from neutron.agent import securitygroups_rpc as sg_rpc from neutron.api.rpc.handlers import dvr_rpc +from neutron.common import config from neutron.common import constants as q_const from neutron.common import exceptions from neutron.common import topics @@ -1591,6 +1593,7 @@ class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin, # Start everything. LOG.info(_LI("Agent initialized successfully, now running... 
")) signal.signal(signal.SIGTERM, self._handle_sigterm) + signal.signal(signal.SIGHUP, self._handle_sighup) with polling.get_polling_manager( self.minimize_polling, self.ovsdb_monitor_respawn_interval) as pm: @@ -1598,11 +1601,18 @@ class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin, self.rpc_loop(polling_manager=pm) def _handle_sigterm(self, signum, frame): - LOG.debug("Agent caught SIGTERM, quitting daemon loop.") + LOG.info(_LI("Agent caught SIGTERM, quitting daemon loop.")) self.run_daemon_loop = False if self.quitting_rpc_timeout: self.set_rpc_timeout(self.quitting_rpc_timeout) + def _handle_sighup(self, signum, frame): + LOG.info(_LI("Agent caught SIGHUP, resetting.")) + cfg.CONF.reload_config_files() + config.setup_logging() + LOG.debug('Full set of CONF:') + cfg.CONF.log_opt_values(LOG, std_logging.DEBUG) + def set_rpc_timeout(self, timeout): for rpc_api in (self.plugin_rpc, self.sg_plugin_rpc, self.dvr_plugin_rpc, self.state_rpc): From 8c5ef2cd6e1819c4f51b01cb3504da34ef377546 Mon Sep 17 00:00:00 2001 From: Elena Ezhova Date: Thu, 4 Jun 2015 17:00:13 +0300 Subject: [PATCH 129/292] Start linuxbridge neutron agent using a launcher This change ports linuxbridge neutron agent on common/service code that will allow to handle termination signals (SIGHUP, SIGTERM, SIGINT). Note that this patch allows changing only logging options. All other config options are not handled explicitly and changing them using SIGHUP can lead to unpredictable circumstances. So, until changing other options is handled it is highly recommended to use SIGHUP for changing ONLY logging options. 
DocImpact Change-Id: I1d834e0683b04111ef04c148cbd8d4acf2964065 Closes-Bug: #1461539 --- .../plugins/linuxbridge/linuxbridge_conf.ini | 5 ++ .../agent/linuxbridge_neutron_agent.py | 48 +++++++++++++++---- neutron/plugins/linuxbridge/common/config.py | 4 ++ .../agent/test_linuxbridge_neutron_agent.py | 19 +++++++- 4 files changed, 67 insertions(+), 9 deletions(-) diff --git a/etc/neutron/plugins/linuxbridge/linuxbridge_conf.ini b/etc/neutron/plugins/linuxbridge/linuxbridge_conf.ini index dac338b868f..b25d02916d5 100644 --- a/etc/neutron/plugins/linuxbridge/linuxbridge_conf.ini +++ b/etc/neutron/plugins/linuxbridge/linuxbridge_conf.ini @@ -68,6 +68,11 @@ # rpc_support_old_agents = False # Example: rpc_support_old_agents = True +# (IntOpt) Set new timeout in seconds for new rpc calls after agent receives +# SIGTERM. If value is set to 0, rpc timeout won't be changed. +# +# quitting_rpc_timeout = 10 + [securitygroup] # Firewall driver for realizing neutron security group function # firewall_driver = neutron.agent.firewall.NoopFirewallDriver diff --git a/neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py b/neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py index 604cf73c438..2cf116619e2 100644 --- a/neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py +++ b/neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py @@ -44,6 +44,7 @@ from neutron.common import utils as q_utils from neutron import context from neutron.i18n import _LE, _LI, _LW from neutron.openstack.common import loopingcall +from neutron.openstack.common import service from neutron.plugins.common import constants as p_const from neutron.plugins.linuxbridge.common import config # noqa from neutron.plugins.linuxbridge.common import constants as lconst @@ -749,12 +750,26 @@ class LinuxBridgeRpcCallbacks(sg_rpc.SecurityGroupAgentRpcCallbackMixin, getattr(self, method)(context, values) -class LinuxBridgeNeutronAgentRPC(object): +class 
LinuxBridgeNeutronAgentRPC(service.Service): - def __init__(self, interface_mappings, polling_interval): + def __init__(self, interface_mappings, polling_interval, + quitting_rpc_timeout): + """Constructor. + + :param interface_mappings: dict mapping physical_networks to + physical_interfaces. + :param polling_interval: interval (secs) to poll DB. + :param quitting_rpc_timeout: timeout in seconds for rpc calls after + stop is called. + """ + super(LinuxBridgeNeutronAgentRPC, self).__init__() + self.interface_mappings = interface_mappings self.polling_interval = polling_interval - self.setup_linux_bridge(interface_mappings) - configurations = {'interface_mappings': interface_mappings} + self.quitting_rpc_timeout = quitting_rpc_timeout + + def start(self): + self.setup_linux_bridge(self.interface_mappings) + configurations = {'interface_mappings': self.interface_mappings} if self.br_mgr.vxlan_mode != lconst.VXLAN_NONE: configurations['tunneling_ip'] = self.br_mgr.local_ip configurations['tunnel_types'] = [p_const.TYPE_VXLAN] @@ -774,7 +789,17 @@ class LinuxBridgeNeutronAgentRPC(object): self.sg_plugin_rpc = sg_rpc.SecurityGroupServerRpcApi(topics.PLUGIN) self.sg_agent = sg_rpc.SecurityGroupAgentRpc(self.context, self.sg_plugin_rpc) - self.setup_rpc(interface_mappings.values()) + self.setup_rpc(self.interface_mappings.values()) + self.daemon_loop() + + def stop(self, graceful=True): + LOG.info(_LI("Stopping linuxbridge agent.")) + if graceful and self.quitting_rpc_timeout: + self.set_rpc_timeout(self.quitting_rpc_timeout) + super(LinuxBridgeNeutronAgentRPC, self).stop(graceful) + + def reset(self): + common_config.setup_logging() def _report_state(self): try: @@ -1005,6 +1030,11 @@ class LinuxBridgeNeutronAgentRPC(object): {'polling_interval': self.polling_interval, 'elapsed': elapsed}) + def set_rpc_timeout(self, timeout): + for rpc_api in (self.plugin_rpc, self.sg_plugin_rpc, + self.state_rpc): + rpc_api.client.timeout = timeout + def main(): 
common_config.init(sys.argv[1:]) @@ -1020,11 +1050,13 @@ def main(): LOG.info(_LI("Interface mappings: %s"), interface_mappings) polling_interval = cfg.CONF.AGENT.polling_interval + quitting_rpc_timeout = cfg.CONF.AGENT.quitting_rpc_timeout agent = LinuxBridgeNeutronAgentRPC(interface_mappings, - polling_interval) + polling_interval, + quitting_rpc_timeout) LOG.info(_LI("Agent initialized successfully, now running... ")) - agent.daemon_loop() - sys.exit(0) + launcher = service.launch(agent) + launcher.wait() if __name__ == "__main__": diff --git a/neutron/plugins/linuxbridge/common/config.py b/neutron/plugins/linuxbridge/common/config.py index 90c6548dc83..fa1487c6b49 100644 --- a/neutron/plugins/linuxbridge/common/config.py +++ b/neutron/plugins/linuxbridge/common/config.py @@ -62,6 +62,10 @@ agent_opts = [ "polling for local device changes.")), cfg.BoolOpt('rpc_support_old_agents', default=False, help=_("Enable server RPC compatibility with old agents")), + cfg.IntOpt('quitting_rpc_timeout', default=10, + help=_("Set new timeout in seconds for new rpc calls after " + "agent receives SIGTERM. 
If value is set to 0, rpc " + "timeout won't be changed")), ] diff --git a/neutron/tests/unit/plugins/linuxbridge/agent/test_linuxbridge_neutron_agent.py b/neutron/tests/unit/plugins/linuxbridge/agent/test_linuxbridge_neutron_agent.py index 11e923fd3c0..e1ae8315f3a 100644 --- a/neutron/tests/unit/plugins/linuxbridge/agent/test_linuxbridge_neutron_agent.py +++ b/neutron/tests/unit/plugins/linuxbridge/agent/test_linuxbridge_neutron_agent.py @@ -90,6 +90,7 @@ class TestLinuxBridgeAgent(base.BaseTestCase): cfg.CONF.set_default('firewall_driver', 'neutron.agent.firewall.NoopFirewallDriver', group='SECURITYGROUP') + cfg.CONF.set_default('quitting_rpc_timeout', 10, 'AGENT') self.get_devices_p = mock.patch.object(ip_lib.IPWrapper, 'get_devices') self.get_devices = self.get_devices_p.start() self.get_devices.return_value = [ip_lib.IPDevice('eth77')] @@ -100,7 +101,9 @@ class TestLinuxBridgeAgent(base.BaseTestCase): with mock.patch.object(linuxbridge_neutron_agent.LinuxBridgeManager, 'get_interface_by_ip', return_value=None): self.agent = linuxbridge_neutron_agent.LinuxBridgeNeutronAgentRPC( - {}, 0) + {}, 0, cfg.CONF.AGENT.quitting_rpc_timeout) + with mock.patch.object(self.agent, "daemon_loop"): + self.agent.start() def test_treat_devices_removed_with_existed_device(self): agent = self.agent @@ -328,6 +331,20 @@ class TestLinuxBridgeAgent(base.BaseTestCase): agent.remove_port_binding.assert_called_with('net123', 'port123') self.assertFalse(agent.plugin_rpc.update_device_up.called) + def test_set_rpc_timeout(self): + self.agent.stop() + for rpc_client in (self.agent.plugin_rpc.client, + self.agent.sg_plugin_rpc.client, + self.agent.state_rpc.client): + self.assertEqual(cfg.CONF.AGENT.quitting_rpc_timeout, + rpc_client.timeout) + + def test_set_rpc_timeout_no_value(self): + self.agent.quitting_rpc_timeout = None + with mock.patch.object(self.agent, 'set_rpc_timeout') as mock_set_rpc: + self.agent.stop() + self.assertFalse(mock_set_rpc.called) + class 
TestLinuxBridgeManager(base.BaseTestCase): def setUp(self): From 45b28ddfe8ac23871e65feb4132d5f048c783222 Mon Sep 17 00:00:00 2001 From: Vincent Legoll Date: Fri, 5 Jun 2015 13:05:48 +0200 Subject: [PATCH 130/292] Fix typo in test class name Make "Redering" -> "Rendering" Change-Id: Ieedb446fa1e06705eb70293d83350d4dfd57d2db Signed-off-by: Vincent Legoll --- neutron/tests/unit/common/test_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/neutron/tests/unit/common/test_utils.py b/neutron/tests/unit/common/test_utils.py index 48e59b428b2..cee645c8d54 100644 --- a/neutron/tests/unit/common/test_utils.py +++ b/neutron/tests/unit/common/test_utils.py @@ -636,7 +636,7 @@ class TestIpVersionFromInt(base.BaseTestCase): 8) -class TestDelayedStringRederer(base.BaseTestCase): +class TestDelayedStringRenderer(base.BaseTestCase): def test_call_deferred_until_str(self): my_func = mock.MagicMock(return_value='Brie cheese!') delayed = utils.DelayedStringRenderer(my_func, 1, 2, key_arg=44) From 546cab2d62a0daf88ef192dd2fb2e185b39a7fcb Mon Sep 17 00:00:00 2001 From: Pavel Bondar Date: Fri, 5 Jun 2015 14:01:10 +0300 Subject: [PATCH 131/292] Decompose db_base_plugin_v2.py part 2 This patch contains method moving with minor modifications like updating class names. IPAM specific methods were moved into ipam_backend_mixin and ipam_non_pluggable_backend. ipam_backend_mixin - contains code common for both backends (pluggable and non-pluggable). ipam_non_pluggable_backend - contains code specific for non-pluggable IPAM implementation. 
Partially-Implements: blueprint neutron-ipam Change-Id: I6d68e4066ca472107def197c5a5afccbb6886a0e --- neutron/db/db_base_plugin_v2.py | 474 +----------------- neutron/db/ipam_backend_mixin.py | 159 ++++++ neutron/db/ipam_non_pluggable_backend.py | 373 ++++++++++++++ .../tests/unit/db/test_db_base_plugin_v2.py | 13 +- 4 files changed, 541 insertions(+), 478 deletions(-) create mode 100644 neutron/db/ipam_backend_mixin.py create mode 100644 neutron/db/ipam_non_pluggable_backend.py diff --git a/neutron/db/db_base_plugin_v2.py b/neutron/db/db_base_plugin_v2.py index 87fd1d3dac9..0395d2b3f2f 100644 --- a/neutron/db/db_base_plugin_v2.py +++ b/neutron/db/db_base_plugin_v2.py @@ -21,8 +21,6 @@ from oslo_log import log as logging from oslo_utils import excutils from sqlalchemy import and_ from sqlalchemy import event -from sqlalchemy import orm -from sqlalchemy.orm import exc from neutron.api.v2 import attributes from neutron.callbacks import events @@ -35,14 +33,13 @@ from neutron.common import ipv6_utils from neutron.common import utils from neutron import context as ctx from neutron.db import api as db_api -from neutron.db import db_base_plugin_common +from neutron.db import ipam_non_pluggable_backend from neutron.db import models_v2 from neutron.db import sqlalchemyutils from neutron.extensions import l3 from neutron.i18n import _LE, _LI from neutron import ipam from neutron.ipam import subnet_alloc -from neutron.ipam import utils as ipam_utils from neutron import manager from neutron import neutron_plugin_base_v2 from neutron.openstack.common import uuidutils @@ -70,7 +67,7 @@ def _check_subnet_not_used(context, subnet_id): raise n_exc.SubnetInUse(subnet_id=subnet_id, reason=e) -class NeutronDbPluginV2(db_base_plugin_common.DbBasePluginCommon, +class NeutronDbPluginV2(ipam_non_pluggable_backend.IpamNonPluggableBackend, neutron_plugin_base_v2.NeutronPluginBaseV2): """V2 Neutron plugin interface implementation using SQLAlchemy models. 
@@ -100,155 +97,6 @@ class NeutronDbPluginV2(db_base_plugin_common.DbBasePluginCommon, event.listen(models_v2.Port.status, 'set', self.nova_notifier.record_port_status_changed) - @staticmethod - def _generate_ip(context, subnets): - try: - return NeutronDbPluginV2._try_generate_ip(context, subnets) - except n_exc.IpAddressGenerationFailure: - NeutronDbPluginV2._rebuild_availability_ranges(context, subnets) - - return NeutronDbPluginV2._try_generate_ip(context, subnets) - - @staticmethod - def _try_generate_ip(context, subnets): - """Generate an IP address. - - The IP address will be generated from one of the subnets defined on - the network. - """ - range_qry = context.session.query( - models_v2.IPAvailabilityRange).join( - models_v2.IPAllocationPool).with_lockmode('update') - for subnet in subnets: - ip_range = range_qry.filter_by(subnet_id=subnet['id']).first() - if not ip_range: - LOG.debug("All IPs from subnet %(subnet_id)s (%(cidr)s) " - "allocated", - {'subnet_id': subnet['id'], - 'cidr': subnet['cidr']}) - continue - ip_address = ip_range['first_ip'] - if ip_range['first_ip'] == ip_range['last_ip']: - # No more free indices on subnet => delete - LOG.debug("No more free IP's in slice. Deleting " - "allocation pool.") - context.session.delete(ip_range) - else: - # increment the first free - new_first_ip = str(netaddr.IPAddress(ip_address) + 1) - ip_range['first_ip'] = new_first_ip - LOG.debug("Allocated IP - %(ip_address)s from %(first_ip)s " - "to %(last_ip)s", - {'ip_address': ip_address, - 'first_ip': ip_address, - 'last_ip': ip_range['last_ip']}) - return {'ip_address': ip_address, - 'subnet_id': subnet['id']} - raise n_exc.IpAddressGenerationFailure(net_id=subnets[0]['network_id']) - - @staticmethod - def _rebuild_availability_ranges(context, subnets): - """Rebuild availability ranges. - - This method is called only when there's no more IP available or by - _update_subnet_allocation_pools. 
Calling - _update_subnet_allocation_pools before calling this function deletes - the IPAllocationPools associated with the subnet that is updating, - which will result in deleting the IPAvailabilityRange too. - """ - ip_qry = context.session.query( - models_v2.IPAllocation).with_lockmode('update') - # PostgreSQL does not support select...for update with an outer join. - # No join is needed here. - pool_qry = context.session.query( - models_v2.IPAllocationPool).options( - orm.noload('available_ranges')).with_lockmode('update') - for subnet in sorted(subnets): - LOG.debug("Rebuilding availability ranges for subnet %s", - subnet) - - # Create a set of all currently allocated addresses - ip_qry_results = ip_qry.filter_by(subnet_id=subnet['id']) - allocations = netaddr.IPSet([netaddr.IPAddress(i['ip_address']) - for i in ip_qry_results]) - - for pool in pool_qry.filter_by(subnet_id=subnet['id']): - # Create a set of all addresses in the pool - poolset = netaddr.IPSet(netaddr.IPRange(pool['first_ip'], - pool['last_ip'])) - - # Use set difference to find free addresses in the pool - available = poolset - allocations - - # Generator compacts an ip set into contiguous ranges - def ipset_to_ranges(ipset): - first, last = None, None - for cidr in ipset.iter_cidrs(): - if last and last + 1 != cidr.first: - yield netaddr.IPRange(first, last) - first = None - first, last = first if first else cidr.first, cidr.last - if first: - yield netaddr.IPRange(first, last) - - # Write the ranges to the db - for ip_range in ipset_to_ranges(available): - available_range = models_v2.IPAvailabilityRange( - allocation_pool_id=pool['id'], - first_ip=str(netaddr.IPAddress(ip_range.first)), - last_ip=str(netaddr.IPAddress(ip_range.last))) - context.session.add(available_range) - - @staticmethod - def _allocate_specific_ip(context, subnet_id, ip_address): - """Allocate a specific IP address on the subnet.""" - ip = int(netaddr.IPAddress(ip_address)) - range_qry = context.session.query( - 
models_v2.IPAvailabilityRange).join( - models_v2.IPAllocationPool).with_lockmode('update') - results = range_qry.filter_by(subnet_id=subnet_id) - for ip_range in results: - first = int(netaddr.IPAddress(ip_range['first_ip'])) - last = int(netaddr.IPAddress(ip_range['last_ip'])) - if first <= ip <= last: - if first == last: - context.session.delete(ip_range) - return - elif first == ip: - new_first_ip = str(netaddr.IPAddress(ip_address) + 1) - ip_range['first_ip'] = new_first_ip - return - elif last == ip: - new_last_ip = str(netaddr.IPAddress(ip_address) - 1) - ip_range['last_ip'] = new_last_ip - return - else: - # Adjust the original range to end before ip_address - old_last_ip = ip_range['last_ip'] - new_last_ip = str(netaddr.IPAddress(ip_address) - 1) - ip_range['last_ip'] = new_last_ip - - # Create a new second range for after ip_address - new_first_ip = str(netaddr.IPAddress(ip_address) + 1) - new_ip_range = models_v2.IPAvailabilityRange( - allocation_pool_id=ip_range['allocation_pool_id'], - first_ip=new_first_ip, - last_ip=old_last_ip) - context.session.add(new_ip_range) - return - - @staticmethod - def _check_unique_ip(context, network_id, subnet_id, ip_address): - """Validate that the IP address on the subnet is not in use.""" - ip_qry = context.session.query(models_v2.IPAllocation) - try: - ip_qry.filter_by(network_id=network_id, - subnet_id=subnet_id, - ip_address=ip_address).one() - except exc.NoResultFound: - return True - return False - @staticmethod def _check_ip_in_allocation_pool(context, subnet_id, gateway_ip, ip_address): @@ -274,123 +122,6 @@ class NeutronDbPluginV2(db_base_plugin_common.DbBasePluginCommon, return True return False - def _test_fixed_ips_for_port(self, context, network_id, fixed_ips, - device_owner): - """Test fixed IPs for port. - - Check that configured subnets are valid prior to allocating any - IPs. Include the subnet_id in the result if only an IP address is - configured. 
- - :raises: InvalidInput, IpAddressInUse, InvalidIpForNetwork, - InvalidIpForSubnet - """ - fixed_ip_set = [] - for fixed in fixed_ips: - found = False - if 'subnet_id' not in fixed: - if 'ip_address' not in fixed: - msg = _('IP allocation requires subnet_id or ip_address') - raise n_exc.InvalidInput(error_message=msg) - - filter = {'network_id': [network_id]} - subnets = self.get_subnets(context, filters=filter) - for subnet in subnets: - if ipam_utils.check_subnet_ip(subnet['cidr'], - fixed['ip_address']): - found = True - subnet_id = subnet['id'] - break - if not found: - raise n_exc.InvalidIpForNetwork( - ip_address=fixed['ip_address']) - else: - subnet = self._get_subnet(context, fixed['subnet_id']) - if subnet['network_id'] != network_id: - msg = (_("Failed to create port on network %(network_id)s" - ", because fixed_ips included invalid subnet " - "%(subnet_id)s") % - {'network_id': network_id, - 'subnet_id': fixed['subnet_id']}) - raise n_exc.InvalidInput(error_message=msg) - subnet_id = subnet['id'] - - is_auto_addr_subnet = ipv6_utils.is_auto_address_subnet(subnet) - if 'ip_address' in fixed: - # Ensure that the IP's are unique - if not NeutronDbPluginV2._check_unique_ip(context, network_id, - subnet_id, - fixed['ip_address']): - raise n_exc.IpAddressInUse(net_id=network_id, - ip_address=fixed['ip_address']) - - # Ensure that the IP is valid on the subnet - if (not found and - not ipam_utils.check_subnet_ip(subnet['cidr'], - fixed['ip_address'])): - raise n_exc.InvalidIpForSubnet( - ip_address=fixed['ip_address']) - if (is_auto_addr_subnet and - device_owner not in - constants.ROUTER_INTERFACE_OWNERS): - msg = (_("IPv6 address %(address)s can not be directly " - "assigned to a port on subnet %(id)s since the " - "subnet is configured for automatic addresses") % - {'address': fixed['ip_address'], - 'id': subnet_id}) - raise n_exc.InvalidInput(error_message=msg) - fixed_ip_set.append({'subnet_id': subnet_id, - 'ip_address': fixed['ip_address']}) - else: - 
# A scan for auto-address subnets on the network is done - # separately so that all such subnets (not just those - # listed explicitly here by subnet ID) are associated - # with the port. - if (device_owner in constants.ROUTER_INTERFACE_OWNERS or - device_owner == constants.DEVICE_OWNER_ROUTER_SNAT or - not is_auto_addr_subnet): - fixed_ip_set.append({'subnet_id': subnet_id}) - - if len(fixed_ip_set) > cfg.CONF.max_fixed_ips_per_port: - msg = _('Exceeded maximim amount of fixed ips per port') - raise n_exc.InvalidInput(error_message=msg) - return fixed_ip_set - - def _allocate_fixed_ips(self, context, fixed_ips, mac_address): - """Allocate IP addresses according to the configured fixed_ips.""" - ips = [] - - # we need to start with entries that asked for a specific IP in case - # those IPs happen to be next in the line for allocation for ones that - # didn't ask for a specific IP - fixed_ips.sort(key=lambda x: 'ip_address' not in x) - for fixed in fixed_ips: - subnet = self._get_subnet(context, fixed['subnet_id']) - is_auto_addr = ipv6_utils.is_auto_address_subnet(subnet) - if 'ip_address' in fixed: - if not is_auto_addr: - # Remove the IP address from the allocation pool - NeutronDbPluginV2._allocate_specific_ip( - context, fixed['subnet_id'], fixed['ip_address']) - ips.append({'ip_address': fixed['ip_address'], - 'subnet_id': fixed['subnet_id']}) - # Only subnet ID is specified => need to generate IP - # from subnet - else: - if is_auto_addr: - ip_address = self._calculate_ipv6_eui64_addr(context, - subnet, - mac_address) - ips.append({'ip_address': ip_address.format(), - 'subnet_id': subnet['id']}) - else: - subnets = [subnet] - # IP address allocation - result = self._generate_ip(context, subnets) - ips.append({'ip_address': result['ip_address'], - 'subnet_id': result['subnet_id']}) - return ips - def _update_ips_for_port(self, context, network_id, port_id, original_ips, new_ips, mac_address, device_owner): """Add or remove IPs from the port.""" @@ -437,78 
+168,6 @@ class NeutronDbPluginV2(db_base_plugin_common.DbBasePluginCommon, ips = self._allocate_fixed_ips(context, to_add, mac_address) return ips, prev_ips - def _calculate_ipv6_eui64_addr(self, context, subnet, mac_addr): - prefix = subnet['cidr'] - network_id = subnet['network_id'] - ip_address = ipv6_utils.get_ipv6_addr_by_EUI64( - prefix, mac_addr).format() - if not self._check_unique_ip(context, network_id, - subnet['id'], ip_address): - raise n_exc.IpAddressInUse(net_id=network_id, - ip_address=ip_address) - return ip_address - - def _allocate_ips_for_port(self, context, port): - """Allocate IP addresses for the port. - - If port['fixed_ips'] is set to 'ATTR_NOT_SPECIFIED', allocate IP - addresses for the port. If port['fixed_ips'] contains an IP address or - a subnet_id then allocate an IP address accordingly. - """ - p = port['port'] - ips = [] - v6_stateless = [] - net_id_filter = {'network_id': [p['network_id']]} - subnets = self.get_subnets(context, filters=net_id_filter) - is_router_port = ( - p['device_owner'] in constants.ROUTER_INTERFACE_OWNERS or - p['device_owner'] == constants.DEVICE_OWNER_ROUTER_SNAT) - - fixed_configured = p['fixed_ips'] is not attributes.ATTR_NOT_SPECIFIED - if fixed_configured: - configured_ips = self._test_fixed_ips_for_port(context, - p["network_id"], - p['fixed_ips'], - p['device_owner']) - ips = self._allocate_fixed_ips(context, - configured_ips, - p['mac_address']) - - # For ports that are not router ports, implicitly include all - # auto-address subnets for address association. 
- if not is_router_port: - v6_stateless += [subnet for subnet in subnets - if ipv6_utils.is_auto_address_subnet(subnet)] - else: - # Split into v4, v6 stateless and v6 stateful subnets - v4 = [] - v6_stateful = [] - for subnet in subnets: - if subnet['ip_version'] == 4: - v4.append(subnet) - elif ipv6_utils.is_auto_address_subnet(subnet): - if not is_router_port: - v6_stateless.append(subnet) - else: - v6_stateful.append(subnet) - - version_subnets = [v4, v6_stateful] - for subnets in version_subnets: - if subnets: - result = NeutronDbPluginV2._generate_ip(context, subnets) - ips.append({'ip_address': result['ip_address'], - 'subnet_id': result['subnet_id']}) - - for subnet in v6_stateless: - # IP addresses for IPv6 SLAAC and DHCPv6-stateless subnets - # are implicitly included. - ip_address = self._calculate_ipv6_eui64_addr(context, subnet, - p['mac_address']) - ips.append({'ip_address': ip_address.format(), - 'subnet_id': subnet['id']}) - - return ips - def _validate_subnet_cidr(self, context, network, new_subnet_cidr): """Validate the CIDR for a subnet. @@ -557,73 +216,6 @@ class NeutronDbPluginV2(db_base_plugin_common.DbBasePluginCommon, new_subnetpool_id != subnet.subnetpool_id): raise n_exc.NetworkSubnetPoolAffinityError() - def _validate_allocation_pools(self, ip_pools, subnet_cidr): - """Validate IP allocation pools. - - Verify start and end address for each allocation pool are valid, - ie: constituted by valid and appropriately ordered IP addresses. - Also, verify pools do not overlap among themselves. - Finally, verify that each range fall within the subnet's CIDR. 
- """ - subnet = netaddr.IPNetwork(subnet_cidr) - subnet_first_ip = netaddr.IPAddress(subnet.first + 1) - subnet_last_ip = netaddr.IPAddress(subnet.last - 1) - - LOG.debug("Performing IP validity checks on allocation pools") - ip_sets = [] - for ip_pool in ip_pools: - try: - start_ip = netaddr.IPAddress(ip_pool['start']) - end_ip = netaddr.IPAddress(ip_pool['end']) - except netaddr.AddrFormatError: - LOG.info(_LI("Found invalid IP address in pool: " - "%(start)s - %(end)s:"), - {'start': ip_pool['start'], - 'end': ip_pool['end']}) - raise n_exc.InvalidAllocationPool(pool=ip_pool) - if (start_ip.version != subnet.version or - end_ip.version != subnet.version): - LOG.info(_LI("Specified IP addresses do not match " - "the subnet IP version")) - raise n_exc.InvalidAllocationPool(pool=ip_pool) - if end_ip < start_ip: - LOG.info(_LI("Start IP (%(start)s) is greater than end IP " - "(%(end)s)"), - {'start': ip_pool['start'], 'end': ip_pool['end']}) - raise n_exc.InvalidAllocationPool(pool=ip_pool) - if start_ip < subnet_first_ip or end_ip > subnet_last_ip: - LOG.info(_LI("Found pool larger than subnet " - "CIDR:%(start)s - %(end)s"), - {'start': ip_pool['start'], - 'end': ip_pool['end']}) - raise n_exc.OutOfBoundsAllocationPool( - pool=ip_pool, - subnet_cidr=subnet_cidr) - # Valid allocation pool - # Create an IPSet for it for easily verifying overlaps - ip_sets.append(netaddr.IPSet(netaddr.IPRange( - ip_pool['start'], - ip_pool['end']).cidrs())) - - LOG.debug("Checking for overlaps among allocation pools " - "and gateway ip") - ip_ranges = ip_pools[:] - - # Use integer cursors as an efficient way for implementing - # comparison and avoiding comparing the same pair twice - for l_cursor in range(len(ip_sets)): - for r_cursor in range(l_cursor + 1, len(ip_sets)): - if ip_sets[l_cursor] & ip_sets[r_cursor]: - l_range = ip_ranges[l_cursor] - r_range = ip_ranges[r_cursor] - LOG.info(_LI("Found overlapping ranges: %(l_range)s and " - "%(r_range)s"), - {'l_range': l_range, 
'r_range': r_range}) - raise n_exc.OverlappingAllocationPools( - pool_1=l_range, - pool_2=r_range, - subnet_cidr=subnet_cidr) - def _validate_host_route(self, route, ip_version): try: netaddr.IPNetwork(route['destination']) @@ -963,16 +555,6 @@ class NeutronDbPluginV2(db_base_plugin_common.DbBasePluginCommon, if ip_ver == 6: self._validate_ipv6_attributes(s, cur_subnet) - def _validate_gw_out_of_pools(self, gateway_ip, pools): - for allocation_pool in pools: - pool_range = netaddr.IPRange( - allocation_pool['start'], - allocation_pool['end']) - if netaddr.IPAddress(gateway_ip) in pool_range: - raise n_exc.GatewayConflictWithAllocationPools( - pool=pool_range, - ip_address=gateway_ip) - def _update_router_gw_ports(self, context, network, subnet): l3plugin = manager.NeutronManager.get_service_plugins().get( service_constants.L3_ROUTER_NAT) @@ -1245,58 +827,6 @@ class NeutronDbPluginV2(db_base_plugin_common.DbBasePluginCommon, LOG.debug("Port %s was deleted while updating it with an " "IPv6 auto-address. 
Ignoring.", port['id']) - def _update_subnet_dns_nameservers(self, context, id, s): - old_dns_list = self._get_dns_by_subnet(context, id) - new_dns_addr_set = set(s["dns_nameservers"]) - old_dns_addr_set = set([dns['address'] - for dns in old_dns_list]) - - new_dns = list(new_dns_addr_set) - for dns_addr in old_dns_addr_set - new_dns_addr_set: - for dns in old_dns_list: - if dns['address'] == dns_addr: - context.session.delete(dns) - for dns_addr in new_dns_addr_set - old_dns_addr_set: - dns = models_v2.DNSNameServer( - address=dns_addr, - subnet_id=id) - context.session.add(dns) - del s["dns_nameservers"] - return new_dns - - def _update_subnet_host_routes(self, context, id, s): - - def _combine(ht): - return ht['destination'] + "_" + ht['nexthop'] - - old_route_list = self._get_route_by_subnet(context, id) - - new_route_set = set([_combine(route) - for route in s['host_routes']]) - - old_route_set = set([_combine(route) - for route in old_route_list]) - - for route_str in old_route_set - new_route_set: - for route in old_route_list: - if _combine(route) == route_str: - context.session.delete(route) - for route_str in new_route_set - old_route_set: - route = models_v2.SubnetRoute( - destination=route_str.partition("_")[0], - nexthop=route_str.partition("_")[2], - subnet_id=id) - context.session.add(route) - - # Gather host routes for result - new_routes = [] - for route_str in new_route_set: - new_routes.append( - {'destination': route_str.partition("_")[0], - 'nexthop': route_str.partition("_")[2]}) - del s["host_routes"] - return new_routes - def _update_subnet_allocation_pools(self, context, id, s): context.session.query(models_v2.IPAllocationPool).filter_by( subnet_id=id).delete() diff --git a/neutron/db/ipam_backend_mixin.py b/neutron/db/ipam_backend_mixin.py new file mode 100644 index 00000000000..2330f1afb4d --- /dev/null +++ b/neutron/db/ipam_backend_mixin.py @@ -0,0 +1,159 @@ +# Copyright (c) 2015 OpenStack Foundation. +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import netaddr + +from oslo_log import log as logging + +from neutron.common import exceptions as n_exc +from neutron.db import db_base_plugin_common +from neutron.db import models_v2 +from neutron.i18n import _LI + +LOG = logging.getLogger(__name__) + + +class IpamBackendMixin(db_base_plugin_common.DbBasePluginCommon): + """Contains IPAM specific code which is common for both backends. + """ + + def _update_subnet_host_routes(self, context, id, s): + + def _combine(ht): + return ht['destination'] + "_" + ht['nexthop'] + + old_route_list = self._get_route_by_subnet(context, id) + + new_route_set = set([_combine(route) + for route in s['host_routes']]) + + old_route_set = set([_combine(route) + for route in old_route_list]) + + for route_str in old_route_set - new_route_set: + for route in old_route_list: + if _combine(route) == route_str: + context.session.delete(route) + for route_str in new_route_set - old_route_set: + route = models_v2.SubnetRoute( + destination=route_str.partition("_")[0], + nexthop=route_str.partition("_")[2], + subnet_id=id) + context.session.add(route) + + # Gather host routes for result + new_routes = [] + for route_str in new_route_set: + new_routes.append( + {'destination': route_str.partition("_")[0], + 'nexthop': route_str.partition("_")[2]}) + del s["host_routes"] + return new_routes + + def _update_subnet_dns_nameservers(self, context, id, s): + old_dns_list = 
self._get_dns_by_subnet(context, id) + new_dns_addr_set = set(s["dns_nameservers"]) + old_dns_addr_set = set([dns['address'] + for dns in old_dns_list]) + + new_dns = list(new_dns_addr_set) + for dns_addr in old_dns_addr_set - new_dns_addr_set: + for dns in old_dns_list: + if dns['address'] == dns_addr: + context.session.delete(dns) + for dns_addr in new_dns_addr_set - old_dns_addr_set: + dns = models_v2.DNSNameServer( + address=dns_addr, + subnet_id=id) + context.session.add(dns) + del s["dns_nameservers"] + return new_dns + + def _validate_allocation_pools(self, ip_pools, subnet_cidr): + """Validate IP allocation pools. + + Verify start and end address for each allocation pool are valid, + ie: constituted by valid and appropriately ordered IP addresses. + Also, verify pools do not overlap among themselves. + Finally, verify that each range fall within the subnet's CIDR. + """ + subnet = netaddr.IPNetwork(subnet_cidr) + subnet_first_ip = netaddr.IPAddress(subnet.first + 1) + subnet_last_ip = netaddr.IPAddress(subnet.last - 1) + + LOG.debug("Performing IP validity checks on allocation pools") + ip_sets = [] + for ip_pool in ip_pools: + try: + start_ip = netaddr.IPAddress(ip_pool['start']) + end_ip = netaddr.IPAddress(ip_pool['end']) + except netaddr.AddrFormatError: + LOG.info(_LI("Found invalid IP address in pool: " + "%(start)s - %(end)s:"), + {'start': ip_pool['start'], + 'end': ip_pool['end']}) + raise n_exc.InvalidAllocationPool(pool=ip_pool) + if (start_ip.version != subnet.version or + end_ip.version != subnet.version): + LOG.info(_LI("Specified IP addresses do not match " + "the subnet IP version")) + raise n_exc.InvalidAllocationPool(pool=ip_pool) + if end_ip < start_ip: + LOG.info(_LI("Start IP (%(start)s) is greater than end IP " + "(%(end)s)"), + {'start': ip_pool['start'], 'end': ip_pool['end']}) + raise n_exc.InvalidAllocationPool(pool=ip_pool) + if start_ip < subnet_first_ip or end_ip > subnet_last_ip: + LOG.info(_LI("Found pool larger than subnet " 
+ "CIDR:%(start)s - %(end)s"), + {'start': ip_pool['start'], + 'end': ip_pool['end']}) + raise n_exc.OutOfBoundsAllocationPool( + pool=ip_pool, + subnet_cidr=subnet_cidr) + # Valid allocation pool + # Create an IPSet for it for easily verifying overlaps + ip_sets.append(netaddr.IPSet(netaddr.IPRange( + ip_pool['start'], + ip_pool['end']).cidrs())) + + LOG.debug("Checking for overlaps among allocation pools " + "and gateway ip") + ip_ranges = ip_pools[:] + + # Use integer cursors as an efficient way for implementing + # comparison and avoiding comparing the same pair twice + for l_cursor in range(len(ip_sets)): + for r_cursor in range(l_cursor + 1, len(ip_sets)): + if ip_sets[l_cursor] & ip_sets[r_cursor]: + l_range = ip_ranges[l_cursor] + r_range = ip_ranges[r_cursor] + LOG.info(_LI("Found overlapping ranges: %(l_range)s and " + "%(r_range)s"), + {'l_range': l_range, 'r_range': r_range}) + raise n_exc.OverlappingAllocationPools( + pool_1=l_range, + pool_2=r_range, + subnet_cidr=subnet_cidr) + + def _validate_gw_out_of_pools(self, gateway_ip, pools): + for allocation_pool in pools: + pool_range = netaddr.IPRange( + allocation_pool['start'], + allocation_pool['end']) + if netaddr.IPAddress(gateway_ip) in pool_range: + raise n_exc.GatewayConflictWithAllocationPools( + pool=pool_range, + ip_address=gateway_ip) diff --git a/neutron/db/ipam_non_pluggable_backend.py b/neutron/db/ipam_non_pluggable_backend.py new file mode 100644 index 00000000000..ee143667f3a --- /dev/null +++ b/neutron/db/ipam_non_pluggable_backend.py @@ -0,0 +1,373 @@ +# Copyright (c) 2015 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import netaddr +from oslo_config import cfg +from oslo_log import log as logging +from sqlalchemy import orm +from sqlalchemy.orm import exc + +from neutron.api.v2 import attributes +from neutron.common import constants +from neutron.common import exceptions as n_exc +from neutron.common import ipv6_utils +from neutron.db import ipam_backend_mixin +from neutron.db import models_v2 +from neutron.ipam import utils as ipam_utils + +LOG = logging.getLogger(__name__) + + +class IpamNonPluggableBackend(ipam_backend_mixin.IpamBackendMixin): + + @staticmethod + def _generate_ip(context, subnets): + try: + return IpamNonPluggableBackend._try_generate_ip(context, subnets) + except n_exc.IpAddressGenerationFailure: + IpamNonPluggableBackend._rebuild_availability_ranges(context, + subnets) + + return IpamNonPluggableBackend._try_generate_ip(context, subnets) + + @staticmethod + def _try_generate_ip(context, subnets): + """Generate an IP address. + + The IP address will be generated from one of the subnets defined on + the network. 
+ """ + range_qry = context.session.query( + models_v2.IPAvailabilityRange).join( + models_v2.IPAllocationPool).with_lockmode('update') + for subnet in subnets: + ip_range = range_qry.filter_by(subnet_id=subnet['id']).first() + if not ip_range: + LOG.debug("All IPs from subnet %(subnet_id)s (%(cidr)s) " + "allocated", + {'subnet_id': subnet['id'], + 'cidr': subnet['cidr']}) + continue + ip_address = ip_range['first_ip'] + if ip_range['first_ip'] == ip_range['last_ip']: + # No more free indices on subnet => delete + LOG.debug("No more free IP's in slice. Deleting " + "allocation pool.") + context.session.delete(ip_range) + else: + # increment the first free + new_first_ip = str(netaddr.IPAddress(ip_address) + 1) + ip_range['first_ip'] = new_first_ip + LOG.debug("Allocated IP - %(ip_address)s from %(first_ip)s " + "to %(last_ip)s", + {'ip_address': ip_address, + 'first_ip': ip_address, + 'last_ip': ip_range['last_ip']}) + return {'ip_address': ip_address, + 'subnet_id': subnet['id']} + raise n_exc.IpAddressGenerationFailure(net_id=subnets[0]['network_id']) + + @staticmethod + def _rebuild_availability_ranges(context, subnets): + """Rebuild availability ranges. + + This method is called only when there's no more IP available or by + _update_subnet_allocation_pools. Calling + _update_subnet_allocation_pools before calling this function deletes + the IPAllocationPools associated with the subnet that is updating, + which will result in deleting the IPAvailabilityRange too. + """ + ip_qry = context.session.query( + models_v2.IPAllocation).with_lockmode('update') + # PostgreSQL does not support select...for update with an outer join. + # No join is needed here. 
+ pool_qry = context.session.query( + models_v2.IPAllocationPool).options( + orm.noload('available_ranges')).with_lockmode('update') + for subnet in sorted(subnets): + LOG.debug("Rebuilding availability ranges for subnet %s", + subnet) + + # Create a set of all currently allocated addresses + ip_qry_results = ip_qry.filter_by(subnet_id=subnet['id']) + allocations = netaddr.IPSet([netaddr.IPAddress(i['ip_address']) + for i in ip_qry_results]) + + for pool in pool_qry.filter_by(subnet_id=subnet['id']): + # Create a set of all addresses in the pool + poolset = netaddr.IPSet(netaddr.IPRange(pool['first_ip'], + pool['last_ip'])) + + # Use set difference to find free addresses in the pool + available = poolset - allocations + + # Generator compacts an ip set into contiguous ranges + def ipset_to_ranges(ipset): + first, last = None, None + for cidr in ipset.iter_cidrs(): + if last and last + 1 != cidr.first: + yield netaddr.IPRange(first, last) + first = None + first, last = first if first else cidr.first, cidr.last + if first: + yield netaddr.IPRange(first, last) + + # Write the ranges to the db + for ip_range in ipset_to_ranges(available): + available_range = models_v2.IPAvailabilityRange( + allocation_pool_id=pool['id'], + first_ip=str(netaddr.IPAddress(ip_range.first)), + last_ip=str(netaddr.IPAddress(ip_range.last))) + context.session.add(available_range) + + @staticmethod + def _allocate_specific_ip(context, subnet_id, ip_address): + """Allocate a specific IP address on the subnet.""" + ip = int(netaddr.IPAddress(ip_address)) + range_qry = context.session.query( + models_v2.IPAvailabilityRange).join( + models_v2.IPAllocationPool).with_lockmode('update') + results = range_qry.filter_by(subnet_id=subnet_id) + for ip_range in results: + first = int(netaddr.IPAddress(ip_range['first_ip'])) + last = int(netaddr.IPAddress(ip_range['last_ip'])) + if first <= ip <= last: + if first == last: + context.session.delete(ip_range) + return + elif first == ip: + new_first_ip = 
str(netaddr.IPAddress(ip_address) + 1) + ip_range['first_ip'] = new_first_ip + return + elif last == ip: + new_last_ip = str(netaddr.IPAddress(ip_address) - 1) + ip_range['last_ip'] = new_last_ip + return + else: + # Adjust the original range to end before ip_address + old_last_ip = ip_range['last_ip'] + new_last_ip = str(netaddr.IPAddress(ip_address) - 1) + ip_range['last_ip'] = new_last_ip + + # Create a new second range for after ip_address + new_first_ip = str(netaddr.IPAddress(ip_address) + 1) + new_ip_range = models_v2.IPAvailabilityRange( + allocation_pool_id=ip_range['allocation_pool_id'], + first_ip=new_first_ip, + last_ip=old_last_ip) + context.session.add(new_ip_range) + return + + @staticmethod + def _check_unique_ip(context, network_id, subnet_id, ip_address): + """Validate that the IP address on the subnet is not in use.""" + ip_qry = context.session.query(models_v2.IPAllocation) + try: + ip_qry.filter_by(network_id=network_id, + subnet_id=subnet_id, + ip_address=ip_address).one() + except exc.NoResultFound: + return True + return False + + def _test_fixed_ips_for_port(self, context, network_id, fixed_ips, + device_owner): + """Test fixed IPs for port. + + Check that configured subnets are valid prior to allocating any + IPs. Include the subnet_id in the result if only an IP address is + configured. 
+ + :raises: InvalidInput, IpAddressInUse, InvalidIpForNetwork, + InvalidIpForSubnet + """ + fixed_ip_set = [] + for fixed in fixed_ips: + found = False + if 'subnet_id' not in fixed: + if 'ip_address' not in fixed: + msg = _('IP allocation requires subnet_id or ip_address') + raise n_exc.InvalidInput(error_message=msg) + + filter = {'network_id': [network_id]} + subnets = self.get_subnets(context, filters=filter) + for subnet in subnets: + if ipam_utils.check_subnet_ip(subnet['cidr'], + fixed['ip_address']): + found = True + subnet_id = subnet['id'] + break + if not found: + raise n_exc.InvalidIpForNetwork( + ip_address=fixed['ip_address']) + else: + subnet = self._get_subnet(context, fixed['subnet_id']) + if subnet['network_id'] != network_id: + msg = (_("Failed to create port on network %(network_id)s" + ", because fixed_ips included invalid subnet " + "%(subnet_id)s") % + {'network_id': network_id, + 'subnet_id': fixed['subnet_id']}) + raise n_exc.InvalidInput(error_message=msg) + subnet_id = subnet['id'] + + is_auto_addr_subnet = ipv6_utils.is_auto_address_subnet(subnet) + if 'ip_address' in fixed: + # Ensure that the IP's are unique + if not IpamNonPluggableBackend._check_unique_ip( + context, network_id, + subnet_id, fixed['ip_address']): + raise n_exc.IpAddressInUse(net_id=network_id, + ip_address=fixed['ip_address']) + + # Ensure that the IP is valid on the subnet + if (not found and + not ipam_utils.check_subnet_ip(subnet['cidr'], + fixed['ip_address'])): + raise n_exc.InvalidIpForSubnet( + ip_address=fixed['ip_address']) + if (is_auto_addr_subnet and + device_owner not in + constants.ROUTER_INTERFACE_OWNERS): + msg = (_("IPv6 address %(address)s can not be directly " + "assigned to a port on subnet %(id)s since the " + "subnet is configured for automatic addresses") % + {'address': fixed['ip_address'], + 'id': subnet_id}) + raise n_exc.InvalidInput(error_message=msg) + fixed_ip_set.append({'subnet_id': subnet_id, + 'ip_address': fixed['ip_address']}) + 
else:
+                # A scan for auto-address subnets on the network is done
+                # separately so that all such subnets (not just those
+                # listed explicitly here by subnet ID) are associated
+                # with the port.
+                if (device_owner in constants.ROUTER_INTERFACE_OWNERS or
+                    device_owner == constants.DEVICE_OWNER_ROUTER_SNAT or
+                    not is_auto_addr_subnet):
+                    fixed_ip_set.append({'subnet_id': subnet_id})
+
+        if len(fixed_ip_set) > cfg.CONF.max_fixed_ips_per_port:
+            msg = _('Exceeded maximum amount of fixed ips per port')
+            raise n_exc.InvalidInput(error_message=msg)
+        return fixed_ip_set
+
+    def _allocate_fixed_ips(self, context, fixed_ips, mac_address):
+        """Allocate IP addresses according to the configured fixed_ips."""
+        ips = []
+
+        # we need to start with entries that asked for a specific IP in case
+        # those IPs happen to be next in the line for allocation for ones that
+        # didn't ask for a specific IP
+        fixed_ips.sort(key=lambda x: 'ip_address' not in x)
+        for fixed in fixed_ips:
+            subnet = self._get_subnet(context, fixed['subnet_id'])
+            is_auto_addr = ipv6_utils.is_auto_address_subnet(subnet)
+            if 'ip_address' in fixed:
+                if not is_auto_addr:
+                    # Remove the IP address from the allocation pool
+                    IpamNonPluggableBackend._allocate_specific_ip(
+                        context, fixed['subnet_id'], fixed['ip_address'])
+                ips.append({'ip_address': fixed['ip_address'],
+                            'subnet_id': fixed['subnet_id']})
+            # Only subnet ID is specified => need to generate IP
+            # from subnet
+            else:
+                if is_auto_addr:
+                    ip_address = self._calculate_ipv6_eui64_addr(context,
+                                                                 subnet,
+                                                                 mac_address)
+                    ips.append({'ip_address': ip_address.format(),
+                                'subnet_id': subnet['id']})
+                else:
+                    subnets = [subnet]
+                    # IP address allocation
+                    result = self._generate_ip(context, subnets)
+                    ips.append({'ip_address': result['ip_address'],
+                                'subnet_id': result['subnet_id']})
+        return ips
+
+    def _allocate_ips_for_port(self, context, port):
+        """Allocate IP addresses for the port.
+ + If port['fixed_ips'] is set to 'ATTR_NOT_SPECIFIED', allocate IP + addresses for the port. If port['fixed_ips'] contains an IP address or + a subnet_id then allocate an IP address accordingly. + """ + p = port['port'] + ips = [] + v6_stateless = [] + net_id_filter = {'network_id': [p['network_id']]} + subnets = self.get_subnets(context, filters=net_id_filter) + is_router_port = ( + p['device_owner'] in constants.ROUTER_INTERFACE_OWNERS or + p['device_owner'] == constants.DEVICE_OWNER_ROUTER_SNAT) + + fixed_configured = p['fixed_ips'] is not attributes.ATTR_NOT_SPECIFIED + if fixed_configured: + configured_ips = self._test_fixed_ips_for_port(context, + p["network_id"], + p['fixed_ips'], + p['device_owner']) + ips = self._allocate_fixed_ips(context, + configured_ips, + p['mac_address']) + + # For ports that are not router ports, implicitly include all + # auto-address subnets for address association. + if not is_router_port: + v6_stateless += [subnet for subnet in subnets + if ipv6_utils.is_auto_address_subnet(subnet)] + else: + # Split into v4, v6 stateless and v6 stateful subnets + v4 = [] + v6_stateful = [] + for subnet in subnets: + if subnet['ip_version'] == 4: + v4.append(subnet) + elif ipv6_utils.is_auto_address_subnet(subnet): + if not is_router_port: + v6_stateless.append(subnet) + else: + v6_stateful.append(subnet) + + version_subnets = [v4, v6_stateful] + for subnets in version_subnets: + if subnets: + result = IpamNonPluggableBackend._generate_ip(context, + subnets) + ips.append({'ip_address': result['ip_address'], + 'subnet_id': result['subnet_id']}) + + for subnet in v6_stateless: + # IP addresses for IPv6 SLAAC and DHCPv6-stateless subnets + # are implicitly included. 
+ ip_address = self._calculate_ipv6_eui64_addr(context, subnet, + p['mac_address']) + ips.append({'ip_address': ip_address.format(), + 'subnet_id': subnet['id']}) + + return ips + + def _calculate_ipv6_eui64_addr(self, context, subnet, mac_addr): + prefix = subnet['cidr'] + network_id = subnet['network_id'] + ip_address = ipv6_utils.get_ipv6_addr_by_EUI64( + prefix, mac_addr).format() + if not self._check_unique_ip(context, network_id, + subnet['id'], ip_address): + raise n_exc.IpAddressInUse(net_id=network_id, + ip_address=ip_address) + return ip_address diff --git a/neutron/tests/unit/db/test_db_base_plugin_v2.py b/neutron/tests/unit/db/test_db_base_plugin_v2.py index 21989c0bfde..06e72e05f3c 100644 --- a/neutron/tests/unit/db/test_db_base_plugin_v2.py +++ b/neutron/tests/unit/db/test_db_base_plugin_v2.py @@ -40,6 +40,7 @@ from neutron.common import test_lib from neutron.common import utils from neutron import context from neutron.db import db_base_plugin_v2 +from neutron.db import ipam_non_pluggable_backend as non_ipam from neutron.db import models_v2 from neutron import manager from neutron.tests import base @@ -5306,26 +5307,26 @@ class TestNeutronDbPluginV2(base.BaseTestCase): """Unit Tests for NeutronDbPluginV2 IPAM Logic.""" def test_generate_ip(self): - with mock.patch.object(db_base_plugin_v2.NeutronDbPluginV2, + with mock.patch.object(non_ipam.IpamNonPluggableBackend, '_try_generate_ip') as generate: - with mock.patch.object(db_base_plugin_v2.NeutronDbPluginV2, + with mock.patch.object(non_ipam.IpamNonPluggableBackend, '_rebuild_availability_ranges') as rebuild: - db_base_plugin_v2.NeutronDbPluginV2._generate_ip('c', 's') + non_ipam.IpamNonPluggableBackend._generate_ip('c', 's') generate.assert_called_once_with('c', 's') self.assertEqual(0, rebuild.call_count) def test_generate_ip_exhausted_pool(self): - with mock.patch.object(db_base_plugin_v2.NeutronDbPluginV2, + with mock.patch.object(non_ipam.IpamNonPluggableBackend, '_try_generate_ip') as generate: 
- with mock.patch.object(db_base_plugin_v2.NeutronDbPluginV2, + with mock.patch.object(non_ipam.IpamNonPluggableBackend, '_rebuild_availability_ranges') as rebuild: exception = n_exc.IpAddressGenerationFailure(net_id='n') # fail first call but not second generate.side_effect = [exception, None] - db_base_plugin_v2.NeutronDbPluginV2._generate_ip('c', 's') + non_ipam.IpamNonPluggableBackend._generate_ip('c', 's') self.assertEqual(2, generate.call_count) rebuild.assert_called_once_with('c', 's') From 62d37f91872aaaefdee50e5cd1ed87456b8a6532 Mon Sep 17 00:00:00 2001 From: rossella Date: Fri, 5 Jun 2015 13:09:35 +0200 Subject: [PATCH 132/292] Make _val_to_py and _py_to_val not private Move _val_to_py and _py_to_val to neutron.agent.ovsdb.api and remove the underscore since they will be used by other classes. Change-Id: I3a469ab3b1c1d83ad20dc6c77f1072fc6d546419 --- neutron/agent/ovsdb/api.py | 24 +++++++++++++++++++++++ neutron/agent/ovsdb/impl_vsctl.py | 32 +++++-------------------------- 2 files changed, 29 insertions(+), 27 deletions(-) diff --git a/neutron/agent/ovsdb/api.py b/neutron/agent/ovsdb/api.py index 2d6634ae695..e696f8e85d6 100644 --- a/neutron/agent/ovsdb/api.py +++ b/neutron/agent/ovsdb/api.py @@ -13,6 +13,8 @@ # under the License. 
import abc +import collections +import uuid from oslo_config import cfg from oslo_utils import importutils @@ -312,3 +314,25 @@ class API(object): :type bridge: string :returns: :class:`Command` with list of port names result """ + + +def val_to_py(val): + """Convert a json ovsdb return value to native python object""" + if isinstance(val, collections.Sequence) and len(val) == 2: + if val[0] == "uuid": + return uuid.UUID(val[1]) + elif val[0] == "set": + return [val_to_py(x) for x in val[1]] + elif val[0] == "map": + return {val_to_py(x): val_to_py(y) for x, y in val[1]} + return val + + +def py_to_val(pyval): + """Convert python value to ovs-vsctl value argument""" + if isinstance(pyval, bool): + return 'true' if pyval is True else 'false' + elif pyval == '': + return '""' + else: + return pyval diff --git a/neutron/agent/ovsdb/impl_vsctl.py b/neutron/agent/ovsdb/impl_vsctl.py index 4fd8937d346..4a1339f8329 100644 --- a/neutron/agent/ovsdb/impl_vsctl.py +++ b/neutron/agent/ovsdb/impl_vsctl.py @@ -14,7 +14,6 @@ import collections import itertools -import uuid from oslo_log import log as logging from oslo_serialization import jsonutils @@ -132,7 +131,7 @@ class DbCommand(BaseCommand): for record in data: obj = {} for pos, heading in enumerate(headings): - obj[heading] = _val_to_py(record[pos]) + obj[heading] = ovsdb.val_to_py(record[pos]) results.append(obj) self._result = results @@ -254,32 +253,11 @@ def _set_colval_args(*col_values): col, op, val = entry if isinstance(val, collections.Mapping): args += ["%s:%s%s%s" % ( - col, k, op, _py_to_val(v)) for k, v in val.items()] + col, k, op, ovsdb.py_to_val(v)) for k, v in val.items()] elif (isinstance(val, collections.Sequence) and not isinstance(val, six.string_types)): - args.append("%s%s%s" % (col, op, ",".join(map(_py_to_val, val)))) + args.append( + "%s%s%s" % (col, op, ",".join(map(ovsdb.py_to_val, val)))) else: - args.append("%s%s%s" % (col, op, _py_to_val(val))) + args.append("%s%s%s" % (col, op, 
ovsdb.py_to_val(val))) return args - - -def _val_to_py(val): - """Convert a json ovsdb return value to native python object""" - if isinstance(val, collections.Sequence) and len(val) == 2: - if val[0] == "uuid": - return uuid.UUID(val[1]) - elif val[0] == "set": - return [_val_to_py(x) for x in val[1]] - elif val[0] == "map": - return {_val_to_py(x): _val_to_py(y) for x, y in val[1]} - return val - - -def _py_to_val(pyval): - """Convert python value to ovs-vsctl value argument""" - if isinstance(pyval, bool): - return 'true' if pyval is True else 'false' - elif pyval == '': - return '""' - else: - return pyval From 101ff1a81f15314cb9a07a6b65787c4324913c8a Mon Sep 17 00:00:00 2001 From: Salvatore Orlando Date: Tue, 19 May 2015 09:42:51 -0700 Subject: [PATCH 133/292] Add a "light" base test class for DB tests This newly added class simply extends DietTestCase with support for SQL operations. Related-Blueprint: better-quotas Change-Id: I2debc6a144ee93155c8e36f06aecb9735dbf55d4 --- neutron/tests/unit/testlib_api.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/neutron/tests/unit/testlib_api.py b/neutron/tests/unit/testlib_api.py index bbfa1bbbed3..90e5bbeeba0 100644 --- a/neutron/tests/unit/testlib_api.py +++ b/neutron/tests/unit/testlib_api.py @@ -75,6 +75,14 @@ class SqlFixture(fixtures.Fixture): self.addCleanup(clear_tables) +class SqlTestCaseLight(base.DietTestCase): + """All SQL taste, zero plugin/rpc sugar""" + + def setUp(self): + super(SqlTestCaseLight, self).setUp() + self.useFixture(SqlFixture()) + + class SqlTestCase(base.BaseTestCase): def setUp(self): From 713ba0e8d7ce59eaff41518360530b2e7831c322 Mon Sep 17 00:00:00 2001 From: Carl Baldwin Date: Thu, 4 Jun 2015 22:25:44 +0000 Subject: [PATCH 134/292] Refactor awkward logic in setup_dhcp_port I noticed this logic as I was reviewing another patch set [1]. 
I didn't like removing subnet ids from dhcp_enabled_subnet_ids and I wasn't too keen on the ips_need_removal semantics that were kind of forced by the existing structure of the code. I hope you find this alternative much clearer. I like straight-forward code with less indentation that doesn't use awkward booleans like ips_needs_removal. [1] https://review.openstack.org/#/c/157697/6 Change-Id: I8bd3d6924a855ea08f8096e66bd3bfbb165a4da3 --- neutron/agent/linux/dhcp.py | 25 ++++++++++--------------- 1 file changed, 10 insertions(+), 15 deletions(-) diff --git a/neutron/agent/linux/dhcp.py b/neutron/agent/linux/dhcp.py index 70453d265f7..ae6fd43fadb 100644 --- a/neutron/agent/linux/dhcp.py +++ b/neutron/agent/linux/dhcp.py @@ -849,33 +849,28 @@ class DeviceManager(object): """Create/update DHCP port for the host if needed and return port.""" device_id = self.get_device_id(network) - subnets = {} - dhcp_enabled_subnet_ids = [] - for subnet in network.subnets: - if subnet.enable_dhcp: - dhcp_enabled_subnet_ids.append(subnet.id) - subnets[subnet.id] = subnet + subnets = {subnet.id: subnet for subnet in network.subnets + if subnet.enable_dhcp} dhcp_port = None for port in network.ports: port_device_id = getattr(port, 'device_id', None) if port_device_id == device_id: + dhcp_enabled_subnet_ids = set(subnets) port_fixed_ips = [] - ips_needs_removal = False for fixed_ip in port.fixed_ips: if fixed_ip.subnet_id in dhcp_enabled_subnet_ids: port_fixed_ips.append( {'subnet_id': fixed_ip.subnet_id, 'ip_address': fixed_ip.ip_address}) - dhcp_enabled_subnet_ids.remove(fixed_ip.subnet_id) - else: - ips_needs_removal = True - # If there are dhcp_enabled_subnet_ids here that means that - # we need to add those to the port and call update. - if dhcp_enabled_subnet_ids or ips_needs_removal: + port_subnet_ids = set(ip.subnet_id for ip in port.fixed_ips) + # If there is a new dhcp enabled subnet or a port that is no + # longer on a dhcp enabled subnet, we need to call update. 
+ if dhcp_enabled_subnet_ids != port_subnet_ids: port_fixed_ips.extend( - [dict(subnet_id=s) for s in dhcp_enabled_subnet_ids]) + dict(subnet_id=s) + for s in dhcp_enabled_subnet_ids - port_subnet_ids) dhcp_port = self.plugin.update_dhcp_port( port.id, {'port': {'network_id': network.id, 'fixed_ips': port_fixed_ips}}) @@ -911,7 +906,7 @@ class DeviceManager(object): device_id=device_id, network_id=network.id, tenant_id=network.tenant_id, - fixed_ips=[dict(subnet_id=s) for s in dhcp_enabled_subnet_ids]) + fixed_ips=[dict(subnet_id=s) for s in subnets]) dhcp_port = self.plugin.create_dhcp_port({'port': port_dict}) if not dhcp_port: From 127de06c7e09e1468f2855a3033fb6193a6b9365 Mon Sep 17 00:00:00 2001 From: Cedric Brandily Date: Wed, 6 May 2015 22:40:39 +0200 Subject: [PATCH 135/292] Clean only floating-ip related connection states Currently init_l3 deletes connection states related to ALL ips deleted in init_l3 but it's required only when floating-ips are deleted[1]. This change deletes only connection states related to floating-ips deleted in init_l3 ... it avoids to delete connection states in dhcp agents and on router internal ports! 
[1] look at change Ia9bd7ae243a0859dcb97e2fa939f7d16f9c2456c Closes-Bug: #1452434 Related-Bug: #1334926 Change-Id: Icfcfc585df6fd41de1e1345fd731e4631a6950ce --- neutron/agent/l3/dvr_fip_ns.py | 3 +- neutron/agent/l3/router_info.py | 3 +- neutron/agent/linux/interface.py | 9 ++++-- neutron/tests/unit/agent/l3/test_agent.py | 9 ++++-- .../tests/unit/agent/linux/test_interface.py | 32 ++++++++++++++++--- 5 files changed, 44 insertions(+), 12 deletions(-) diff --git a/neutron/agent/l3/dvr_fip_ns.py b/neutron/agent/l3/dvr_fip_ns.py index e2e63eb2700..9b7eee99a88 100644 --- a/neutron/agent/l3/dvr_fip_ns.py +++ b/neutron/agent/l3/dvr_fip_ns.py @@ -103,7 +103,8 @@ class FipNamespace(namespaces.Namespace): prefix=FIP_EXT_DEV_PREFIX) ip_cidrs = common_utils.fixed_ip_cidrs(ex_gw_port['fixed_ips']) - self.driver.init_l3(interface_name, ip_cidrs, namespace=ns_name) + self.driver.init_l3(interface_name, ip_cidrs, namespace=ns_name, + clean_connections=True) for fixed_ip in ex_gw_port['fixed_ips']: ip_lib.send_gratuitous_arp(ns_name, diff --git a/neutron/agent/l3/router_info.py b/neutron/agent/l3/router_info.py index 6e213d2f24f..0dfbc13ef58 100644 --- a/neutron/agent/l3/router_info.py +++ b/neutron/agent/l3/router_info.py @@ -462,7 +462,8 @@ class RouterInfo(object): gateway_ips=gateway_ips, extra_subnets=ex_gw_port.get('extra_subnets', []), preserve_ips=preserve_ips, - enable_ra_on_gw=enable_ra_on_gw) + enable_ra_on_gw=enable_ra_on_gw, + clean_connections=True) for fixed_ip in ex_gw_port['fixed_ips']: ip_lib.send_gratuitous_arp(ns_name, interface_name, diff --git a/neutron/agent/linux/interface.py b/neutron/agent/linux/interface.py index ed1e91e98f7..470e8f34f25 100644 --- a/neutron/agent/linux/interface.py +++ b/neutron/agent/linux/interface.py @@ -79,13 +79,15 @@ class LinuxInterfaceDriver(object): def init_l3(self, device_name, ip_cidrs, namespace=None, preserve_ips=[], gateway_ips=None, extra_subnets=[], - enable_ra_on_gw=False): + enable_ra_on_gw=False, 
clean_connections=False): """Set the L3 settings for the interface using data from the port. ip_cidrs: list of 'X.X.X.X/YY' strings preserve_ips: list of ip cidrs that should not be removed from device gateway_ips: For gateway ports, list of external gateway ip addresses enable_ra_on_gw: Boolean to indicate configuring acceptance of IPv6 RA + clean_connections: Boolean to indicate if we should cleanup connections + associated to removed ips """ device = ip_lib.IPDevice(device_name, namespace=namespace) @@ -113,7 +115,10 @@ class LinuxInterfaceDriver(object): # clean up any old addresses for ip_cidr in previous: if ip_cidr not in preserve_ips: - device.delete_addr_and_conntrack_state(ip_cidr) + if clean_connections: + device.delete_addr_and_conntrack_state(ip_cidr) + else: + device.addr.delete(ip_cidr) for gateway_ip in gateway_ips or []: device.route.add_gateway(gateway_ip) diff --git a/neutron/tests/unit/agent/l3/test_agent.py b/neutron/tests/unit/agent/l3/test_agent.py index 52dbb073fb6..5e67dda6ae0 100644 --- a/neutron/tests/unit/agent/l3/test_agent.py +++ b/neutron/tests/unit/agent/l3/test_agent.py @@ -611,7 +611,8 @@ class TestBasicRouterOperations(BasicRouterOperationsFramework): 'gateway_ips': gateway_ips, 'namespace': 'qrouter-' + router['id'], 'extra_subnets': [], - 'enable_ra_on_gw': enable_ra_on_gw} + 'enable_ra_on_gw': enable_ra_on_gw, + 'clean_connections': True} else: exp_arp_calls = [mock.call(ri.ns_name, interface_name, '20.0.0.30', mock.ANY)] @@ -632,7 +633,8 @@ class TestBasicRouterOperations(BasicRouterOperationsFramework): 'gateway_ips': gateway_ips, 'namespace': 'qrouter-' + router['id'], 'extra_subnets': [{'cidr': '172.16.0.0/24'}], - 'enable_ra_on_gw': enable_ra_on_gw} + 'enable_ra_on_gw': enable_ra_on_gw, + 'clean_connections': True} self.mock_driver.init_l3.assert_called_with(interface_name, ip_cidrs, **kwargs) @@ -799,7 +801,8 @@ class TestBasicRouterOperations(BasicRouterOperationsFramework): 'gateway_ips': gateway_ips, 'namespace': 
'qrouter-' + router['id'], 'extra_subnets': [{'cidr': '172.16.0.0/24'}], - 'enable_ra_on_gw': False} + 'enable_ra_on_gw': False, + 'clean_connections': True} self.mock_driver.init_l3.assert_called_with(interface_name, ip_cidrs, **kwargs) diff --git a/neutron/tests/unit/agent/linux/test_interface.py b/neutron/tests/unit/agent/linux/test_interface.py index 8bbc210dd9b..1834524e899 100644 --- a/neutron/tests/unit/agent/linux/test_interface.py +++ b/neutron/tests/unit/agent/linux/test_interface.py @@ -94,7 +94,7 @@ class TestABCDriver(TestBase): [mock.call('tap0', namespace=ns), mock.call().addr.list(filters=['permanent']), mock.call().addr.add('192.168.1.2/24'), - mock.call().delete_addr_and_conntrack_state('172.16.77.240/24'), + mock.call().addr.delete('172.16.77.240/24'), mock.call().route.list_onlink_routes(constants.IP_VERSION_4), mock.call().route.list_onlink_routes(constants.IP_VERSION_6), mock.call().route.add_onlink_route('172.20.0.0/24')]) @@ -129,6 +129,29 @@ class TestABCDriver(TestBase): self.assertFalse(self.ip_dev().addr.delete.called) self.assertFalse(self.ip_dev().delete_addr_and_conntrack_state.called) + def _test_l3_init_clean_connections(self, clean_connections): + addresses = [ + dict(scope='global', dynamic=False, cidr='10.0.0.1/24'), + dict(scope='global', dynamic=False, cidr='10.0.0.3/32')] + self.ip_dev().addr.list = mock.Mock(return_value=addresses) + + bc = BaseChild(self.conf) + ns = '12345678-1234-5678-90ab-ba0987654321' + bc.init_l3('tap0', ['10.0.0.1/24'], namespace=ns, + clean_connections=clean_connections) + + delete = self.ip_dev().delete_addr_and_conntrack_state + if clean_connections: + delete.assert_called_once_with('10.0.0.3/32') + else: + self.assertFalse(delete.called) + + def test_l3_init_with_clean_connections(self): + self._test_l3_init_clean_connections(True) + + def test_l3_init_without_clean_connections(self): + self._test_l3_init_clean_connections(False) + def _test_l3_init_with_ipv6(self, include_gw_ip): addresses = 
[dict(scope='global', dynamic=False, @@ -148,8 +171,7 @@ class TestABCDriver(TestBase): [mock.call('tap0', namespace=ns), mock.call().addr.list(filters=['permanent']), mock.call().addr.add('2001:db8:a::124/64'), - mock.call().delete_addr_and_conntrack_state( - '2001:db8:a::123/64')]) + mock.call().addr.delete('2001:db8:a::123/64')]) if include_gw_ip: expected_calls += ( [mock.call().route.add_gateway('2001:db8:a::1')]) @@ -182,8 +204,8 @@ class TestABCDriver(TestBase): mock.call().addr.list(filters=['permanent']), mock.call().addr.add('192.168.1.2/24'), mock.call().addr.add('2001:db8:a::124/64'), - mock.call().delete_addr_and_conntrack_state('172.16.77.240/24'), - mock.call().delete_addr_and_conntrack_state('2001:db8:a::123/64'), + mock.call().addr.delete('172.16.77.240/24'), + mock.call().addr.delete('2001:db8:a::123/64'), mock.call().route.list_onlink_routes(constants.IP_VERSION_4), mock.call().route.list_onlink_routes(constants.IP_VERSION_6), mock.call().route.add_onlink_route('172.20.0.0/24')], From b649b9c871d0734745da5a201eca83a6b407a1c5 Mon Sep 17 00:00:00 2001 From: Stephen Ma Date: Thu, 4 Jun 2015 20:09:23 +0000 Subject: [PATCH 136/292] L3 agent should do report state before full sync at start Sometimes the AgentNotFoundByTypeHost exception is reported during L3-agent startup. The exception is generated when the first get_routers RPC call is made. When the neutron server gets this RPC call, it might not have handled the report state RPC call yet. So the L3-agent hasn't been registered in the API server. The result is a RPC Error exception. By the time the next get_routers RPC call is made, the report state RPC call has already been done and agent registered. This patch modifies the L3 agent startup behavior to have the report state done before the agent do the sync routers RPC call. 
Closes-bug: 1456822 Change-Id: Id40cfd8466f45e20fea0e9df6fd57bf9c9e59da7 --- neutron/agent/l3/agent.py | 16 +++++++++++++++- neutron/tests/unit/agent/l3/test_agent.py | 20 ++++++++++++++++++++ 2 files changed, 35 insertions(+), 1 deletion(-) diff --git a/neutron/agent/l3/agent.py b/neutron/agent/l3/agent.py index 01395d34ccf..680401979ce 100644 --- a/neutron/agent/l3/agent.py +++ b/neutron/agent/l3/agent.py @@ -546,6 +546,11 @@ class L3NATAgent(firewall_l3_agent.FWaaSL3AgentRpcCallback, self._queue.add(update) def after_start(self): + # Note: the FWaaS' vArmourL3NATAgent is a subclass of L3NATAgent. It + # calls this method here. So Removing this after_start() would break + # vArmourL3NATAgent. We need to find out whether vArmourL3NATAgent + # can have L3NATAgentWithStateReport as its base class instead of + # L3NATAgent. eventlet.spawn_n(self._process_routers_loop) LOG.info(_LI("L3 agent started")) # When L3 agent is ready, we immediately do a full sync @@ -555,6 +560,7 @@ class L3NATAgent(firewall_l3_agent.FWaaSL3AgentRpcCallback, class L3NATAgentWithStateReport(L3NATAgent): def __init__(self, host, conf=None): + self.use_call = True super(L3NATAgentWithStateReport, self).__init__(host=host, conf=conf) self.state_rpc = agent_rpc.PluginReportStateAPI(topics.PLUGIN) self.agent_state = { @@ -574,7 +580,6 @@ class L3NATAgentWithStateReport(L3NATAgent): 'start_flag': True, 'agent_type': l3_constants.AGENT_TYPE_L3} report_interval = self.conf.AGENT.report_interval - self.use_call = True if report_interval: self.heartbeat = loopingcall.FixedIntervalLoopingCall( self._report_state) @@ -613,6 +618,15 @@ class L3NATAgentWithStateReport(L3NATAgent): except Exception: LOG.exception(_LE("Failed reporting state!")) + def after_start(self): + eventlet.spawn_n(self._process_routers_loop) + LOG.info(_LI("L3 agent started")) + # Do the report state before we do the first full sync. 
+ self._report_state() + + # When L3 agent is ready, we immediately do a full sync + self.periodic_sync_routers_task(self.context) + def agent_updated(self, context, payload): """Handle the agent_updated notification event.""" self.fullsync = True diff --git a/neutron/tests/unit/agent/l3/test_agent.py b/neutron/tests/unit/agent/l3/test_agent.py index e628cd465c7..627088f3d57 100644 --- a/neutron/tests/unit/agent/l3/test_agent.py +++ b/neutron/tests/unit/agent/l3/test_agent.py @@ -39,6 +39,7 @@ from neutron.agent.linux import external_process from neutron.agent.linux import interface from neutron.agent.linux import ra from neutron.agent.metadata import driver as metadata_driver +from neutron.agent import rpc as agent_rpc from neutron.callbacks import manager from neutron.callbacks import registry from neutron.common import config as base_config @@ -283,6 +284,7 @@ class BasicRouterOperationsFramework(base.BaseTestCase): self.conf = agent_config.setup_conf() self.conf.register_opts(base_config.core_opts) log.register_options(self.conf) + self.conf.register_opts(agent_config.AGENT_STATE_OPTS, 'AGENT') self.conf.register_opts(l3_config.OPTS) self.conf.register_opts(ha.OPTS) agent_config.register_interface_driver_opts_helper(self.conf) @@ -420,6 +422,24 @@ class TestBasicRouterOperations(BasicRouterOperationsFramework): agent.after_start() router_sync.assert_called_once_with(agent.context) + def test_l3_initial_report_state_done(self): + with mock.patch.object(l3_agent.L3NATAgentWithStateReport, + 'periodic_sync_routers_task'),\ + mock.patch.object(agent_rpc.PluginReportStateAPI, + 'report_state') as report_state,\ + mock.patch.object(eventlet, 'spawn_n'): + + agent = l3_agent.L3NATAgentWithStateReport(host=HOSTNAME, + conf=self.conf) + + self.assertEqual(agent.agent_state['start_flag'], True) + use_call_arg = agent.use_call + agent.after_start() + report_state.assert_called_once_with(agent.context, + agent.agent_state, + use_call_arg) + 
self.assertTrue(agent.agent_state.get('start_flag') is None) + def test_periodic_sync_routers_task_call_clean_stale_namespaces(self): agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) self.plugin_api.get_routers.return_value = [] From e383378507d0d144de468e579f2e2defd41540b0 Mon Sep 17 00:00:00 2001 From: YAMAMOTO Takashi Date: Sun, 7 Jun 2015 02:50:35 +0900 Subject: [PATCH 137/292] ML2: Remove TYPE_MULTI_SEGMENT It's a leftover from the pre-multiprovider world. Change-Id: Iaf93292d698032984dfc392fae3701c49e18adc8 --- neutron/plugins/ml2/plugin.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/neutron/plugins/ml2/plugin.py b/neutron/plugins/ml2/plugin.py index aed19ca7b9b..09479868595 100644 --- a/neutron/plugins/ml2/plugin.py +++ b/neutron/plugins/ml2/plugin.py @@ -79,10 +79,6 @@ LOG = log.getLogger(__name__) MAX_BIND_TRIES = 10 -# REVISIT(rkukura): Move this and other network_type constants to -# providernet.py? -TYPE_MULTI_SEGMENT = 'multi-segment' - class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2, dvr_mac_db.DVRDbMixin, From e2cfd87ab4abab95d713fa39a576ca7e41bce5c2 Mon Sep 17 00:00:00 2001 From: Assaf Muller Date: Sat, 6 Jun 2015 18:41:39 -0400 Subject: [PATCH 138/292] Remove full stack log noise "neutron-server isn't up yet" logs are useless because if you time out when waiting for the server to start that information will be in the trace. When you don't time out, the log is just spam. Same reasoning for the "There are %d agents running!" log. Also made the agents_count parameter mandatory for the wait_until_env_is_up method because having a default of 0, or any other default makes no sense. There's no reason to ever call that method without specifying the agents_count. This method used to be used with agents_count == 0 by the server to make sure its up (And responding to REST calls), but the Neutron server fixture now uses server_is_live method (Which calls list_networks) instead. 
Change-Id: Ifd6abd04ddaacc9976cb2a75269443f870b47c5b --- neutron/tests/fullstack/fullstack_fixtures.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/neutron/tests/fullstack/fullstack_fixtures.py b/neutron/tests/fullstack/fullstack_fixtures.py index e1959f86771..d6b199f3ed0 100644 --- a/neutron/tests/fullstack/fullstack_fixtures.py +++ b/neutron/tests/fullstack/fullstack_fixtures.py @@ -105,17 +105,15 @@ class FullstackFixture(fixtures.Fixture): NeutronServerFixture( self.test_name, self.temp_dir, rabbitmq_environment)) - def wait_until_env_is_up(self, agents_count=0): + def wait_until_env_is_up(self, agents_count): utils.wait_until_true( functools.partial(self._processes_are_ready, agents_count)) def _processes_are_ready(self, agents_count): try: running_agents = self.neutron_server.client.list_agents()['agents'] - LOG.warn("There are %d agents running!", len(running_agents)) return len(running_agents) == agents_count except nc_exc.NeutronClientException: - LOG.warn("neutron-server isn't up yet (cannot contact REST API).") return False @@ -159,7 +157,6 @@ class NeutronServerFixture(fixtures.Fixture): self.client.list_networks() return True except nc_exc.NeutronClientException: - LOG.warn("neutron-server isn't up yet (cannot contact REST API).") return False @property From b17ff81ef1b18395c0b0671a487f68201d739f43 Mon Sep 17 00:00:00 2001 From: shihanzhang Date: Fri, 24 Apr 2015 18:28:17 +0800 Subject: [PATCH 139/292] Send 'security_groups_member_updated' when port changes With ml2 plugin, when a port's IP or security group changes, it should send 'security_groups_member_updated' message to other l2 agents which have same security group with this changed port. 
Change-Id: I2e7622d2db4c173ac879a95a6e0adf92b858fe82 Closes-bug: #1448022 --- neutron/db/securitygroups_rpc_base.py | 11 +++++++++++ neutron/plugins/ml2/plugin.py | 2 ++ neutron/tests/unit/plugins/ml2/test_plugin.py | 10 ++++++++++ 3 files changed, 23 insertions(+) diff --git a/neutron/db/securitygroups_rpc_base.py b/neutron/db/securitygroups_rpc_base.py index af0b8a8e109..c3995980512 100644 --- a/neutron/db/securitygroups_rpc_base.py +++ b/neutron/db/securitygroups_rpc_base.py @@ -120,6 +120,17 @@ class SecurityGroupServerRpcMixin(sg_db.SecurityGroupDbMixin): original_port[ext_sg.SECURITYGROUPS]) return need_notify + def check_and_notify_security_group_member_changed( + self, context, original_port, updated_port): + sg_change = not utils.compare_elements( + original_port.get(ext_sg.SECURITYGROUPS), + updated_port.get(ext_sg.SECURITYGROUPS)) + if sg_change: + self.notify_security_groups_member_updated_bulk( + context, [original_port, updated_port]) + elif original_port['fixed_ips'] != updated_port['fixed_ips']: + self.notify_security_groups_member_updated(context, updated_port) + def is_security_group_member_updated(self, context, original_port, updated_port): """Check security group member updated or not. diff --git a/neutron/plugins/ml2/plugin.py b/neutron/plugins/ml2/plugin.py index 09479868595..df831fdc81f 100644 --- a/neutron/plugins/ml2/plugin.py +++ b/neutron/plugins/ml2/plugin.py @@ -1157,6 +1157,8 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2, # either undo/retry the operation or delete the resource. 
self.mechanism_manager.update_port_postcommit(mech_context) + self.check_and_notify_security_group_member_changed( + context, original_port, updated_port) need_port_update_notify |= self.is_security_group_member_updated( context, original_port, updated_port) diff --git a/neutron/tests/unit/plugins/ml2/test_plugin.py b/neutron/tests/unit/plugins/ml2/test_plugin.py index 73c4a1134da..50dfd319641 100644 --- a/neutron/tests/unit/plugins/ml2/test_plugin.py +++ b/neutron/tests/unit/plugins/ml2/test_plugin.py @@ -402,6 +402,16 @@ class TestMl2PortsV2(test_plugin.TestPortsV2, Ml2PluginV2TestCase): plugin.update_port_status(ctx, short_id, 'UP') mock_gbl.assert_called_once_with(mock.ANY, port_id, mock.ANY) + def test_update_port_fixed_ip_changed(self): + ctx = context.get_admin_context() + plugin = manager.NeutronManager.get_plugin() + with self.port() as port, mock.patch.object( + plugin.notifier, + 'security_groups_member_updated') as sg_member_update: + port['port']['fixed_ips'][0]['ip_address'] = '10.0.0.3' + plugin.update_port(ctx, port['port']['id'], port) + self.assertTrue(sg_member_update.called) + def test_update_port_mac(self): self.check_update_port_mac( host_arg={portbindings.HOST_ID: HOST}, From b239f75644bfdfec86f8a8efdabd6b11b766e822 Mon Sep 17 00:00:00 2001 From: shihanzhang Date: Tue, 26 May 2015 16:42:44 +0800 Subject: [PATCH 140/292] Update ipset members when corresponding sg member is empty if a security group has a rule with 'remote-group-id', the ports in this security group should update its relevant ipset member when the remote-group members is empty. 
Change-Id: I980ebfd8f6537f803d9d5cbf21ca33f727fea3b3 Closes-bug: #1458786 --- neutron/agent/linux/iptables_firewall.py | 3 +-- neutron/tests/unit/agent/linux/test_iptables_firewall.py | 8 ++++++++ 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/neutron/agent/linux/iptables_firewall.py b/neutron/agent/linux/iptables_firewall.py index 4dd988fde8b..1cae8f6429f 100644 --- a/neutron/agent/linux/iptables_firewall.py +++ b/neutron/agent/linux/iptables_firewall.py @@ -458,8 +458,7 @@ class IptablesFirewallDriver(firewall.FirewallDriver): for ip_version, sg_ids in security_group_ids.items(): for sg_id in sg_ids: current_ips = self.sg_members[sg_id][ip_version] - if current_ips: - self.ipset.set_members(sg_id, ip_version, current_ips) + self.ipset.set_members(sg_id, ip_version, current_ips) def _generate_ipset_rule_args(self, sg_rule, remote_gid): ethertype = sg_rule.get('ethertype') diff --git a/neutron/tests/unit/agent/linux/test_iptables_firewall.py b/neutron/tests/unit/agent/linux/test_iptables_firewall.py index 53726f81c73..7491d5a8740 100644 --- a/neutron/tests/unit/agent/linux/test_iptables_firewall.py +++ b/neutron/tests/unit/agent/linux/test_iptables_firewall.py @@ -1695,3 +1695,11 @@ class IptablesFirewallEnhancedIpsetTestCase(BaseIptablesFirewallTestCase): self.firewall._build_ipv4v6_mac_ip_list(mac_oth, ipv6, mac_ipv4_pairs, mac_ipv6_pairs) self.assertEqual(fake_ipv6_pair, mac_ipv6_pairs) + + def test_update_ipset_members(self): + self.firewall.sg_members[FAKE_SGID][_IPv4] = [] + self.firewall.sg_members[FAKE_SGID][_IPv6] = [] + sg_info = {constants.IPv4: [FAKE_SGID]} + self.firewall._update_ipset_members(sg_info) + calls = [mock.call.set_members(FAKE_SGID, constants.IPv4, [])] + self.firewall.ipset.assert_has_calls(calls) From 725543684cbe0df0edc4b6924f85e63e1628fa92 Mon Sep 17 00:00:00 2001 From: rossella Date: Thu, 5 Mar 2015 09:24:10 +0000 Subject: [PATCH 141/292] Add get_events to OVSDB monitor OVSDB monitor can generate the events that the OVS 
agent needs to process (device added or updated). Instead of notifying only that a change occurred and that polling is needed, pass the events to the agent Change-Id: I3d17bf995ad4508c4c6d089de550148da1465fa1 Partially-Implements: blueprint restructure-l2-agent --- neutron/agent/linux/ovsdb_monitor.py | 46 ++++++++++++++--- neutron/agent/linux/polling.py | 3 ++ neutron/tests/common/net_helpers.py | 9 +++- .../agent/linux/test_ovsdb_monitor.py | 50 +++++++++++++++++-- .../unit/agent/linux/test_ovsdb_monitor.py | 12 +++-- 5 files changed, 106 insertions(+), 14 deletions(-) diff --git a/neutron/agent/linux/ovsdb_monitor.py b/neutron/agent/linux/ovsdb_monitor.py index 7e0ef251184..f992bca25b5 100644 --- a/neutron/agent/linux/ovsdb_monitor.py +++ b/neutron/agent/linux/ovsdb_monitor.py @@ -14,13 +14,19 @@ import eventlet from oslo_log import log as logging +from oslo_serialization import jsonutils from neutron.agent.linux import async_process +from neutron.agent.ovsdb import api as ovsdb from neutron.i18n import _LE LOG = logging.getLogger(__name__) +OVSDB_ACTION_INITIAL = 'initial' +OVSDB_ACTION_INSERT = 'insert' +OVSDB_ACTION_DELETE = 'delete' + class OvsdbMonitor(async_process.AsyncProcess): """Manages an invocation of 'ovsdb-client monitor'.""" @@ -63,22 +69,50 @@ class SimpleInterfaceMonitor(OvsdbMonitor): def __init__(self, respawn_interval=None): super(SimpleInterfaceMonitor, self).__init__( 'Interface', - columns=['name', 'ofport'], + columns=['name', 'ofport', 'external_ids'], format='json', respawn_interval=respawn_interval, ) self.data_received = False + self.new_events = {'added': [], 'removed': []} @property def has_updates(self): """Indicate whether the ovsdb Interface table has been updated. - True will be returned if the monitor process is not active. - This 'failing open' minimizes the risk of falsely indicating - the absence of updates at the expense of potential false - positives. 
+ If the monitor process is not active an error will be logged since + it won't be able to communicate any update. This situation should be + temporary if respawn_interval is set. """ - return bool(list(self.iter_stdout())) or not self.is_active() + if not self.is_active(): + LOG.error(_LE("Interface monitor is not active")) + else: + self.process_events() + return bool(self.new_events['added'] or self.new_events['removed']) + + def get_events(self): + self.process_events() + events = self.new_events + self.new_events = {'added': [], 'removed': []} + return events + + def process_events(self): + devices_added = [] + devices_removed = [] + for row in self.iter_stdout(): + json = jsonutils.loads(row).get('data') + for ovs_id, action, name, ofport, external_ids in json: + if external_ids: + external_ids = ovsdb.val_to_py(external_ids) + device = {'name': name, + 'ofport': ofport, + 'external_ids': external_ids} + if action in (OVSDB_ACTION_INITIAL, OVSDB_ACTION_INSERT): + devices_added.append(device) + elif action == OVSDB_ACTION_DELETE: + devices_removed.append(device) + self.new_events['added'].extend(devices_added) + self.new_events['removed'].extend(devices_removed) def start(self, block=False, timeout=5): super(SimpleInterfaceMonitor, self).start() diff --git a/neutron/agent/linux/polling.py b/neutron/agent/linux/polling.py index dffabf34030..ac3a4a620c2 100644 --- a/neutron/agent/linux/polling.py +++ b/neutron/agent/linux/polling.py @@ -60,3 +60,6 @@ class InterfacePollingMinimizer(base_polling.BasePollingManager): # collect output. 
eventlet.sleep() return self._monitor.has_updates + + def get_events(self): + return self._monitor.get_events() diff --git a/neutron/tests/common/net_helpers.py b/neutron/tests/common/net_helpers.py index 5d665f7f9ce..ae494f5f358 100644 --- a/neutron/tests/common/net_helpers.py +++ b/neutron/tests/common/net_helpers.py @@ -181,6 +181,12 @@ class OVSBridgeFixture(fixtures.Fixture): class OVSPortFixture(PortFixture): + def __init__(self, bridge=None, namespace=None, attrs=None): + super(OVSPortFixture, self).__init__(bridge, namespace) + if attrs is None: + attrs = [] + self.attrs = attrs + def _create_bridge_fixture(self): return OVSBridgeFixture() @@ -196,7 +202,8 @@ class OVSPortFixture(PortFixture): self.port.link.set_up() def create_port(self, name): - self.bridge.add_port(name, ('type', 'internal')) + self.attrs.insert(0, ('type', 'internal')) + self.bridge.add_port(name, *self.attrs) return name diff --git a/neutron/tests/functional/agent/linux/test_ovsdb_monitor.py b/neutron/tests/functional/agent/linux/test_ovsdb_monitor.py index a9ae8c2365e..fc49b1ae4d1 100644 --- a/neutron/tests/functional/agent/linux/test_ovsdb_monitor.py +++ b/neutron/tests/functional/agent/linux/test_ovsdb_monitor.py @@ -107,9 +107,51 @@ class TestSimpleInterfaceMonitor(BaseMonitorTest): utils.wait_until_true(lambda: self.monitor.data_received is True) self.assertTrue(self.monitor.has_updates, 'Initial call should always be true') - self.assertFalse(self.monitor.has_updates, - 'has_updates without port addition should be False') + # clear the event list + self.monitor.get_events() self.useFixture(net_helpers.OVSPortFixture()) # has_updates after port addition should become True - while not self.monitor.has_updates: - eventlet.sleep(0.01) + utils.wait_until_true(lambda: self.monitor.has_updates is True) + + def _expected_devices_events(self, devices, state): + """Helper to check that events are received for expected devices. + + :param devices: The list of expected devices. 
WARNING: This list + is modified by this method + :param state: The state of the devices (added or removed) + """ + events = self.monitor.get_events() + event_devices = [ + (dev['name'], dev['external_ids']) for dev in events.get(state)] + for dev in event_devices: + if dev[0] in devices: + devices.remove(dev[0]) + self.assertEqual(dev[1].get('iface-status'), 'active') + if not devices: + return True + + def test_get_events(self): + utils.wait_until_true(lambda: self.monitor.data_received is True) + devices = self.monitor.get_events() + self.assertTrue(devices.get('added'), + 'Initial call should always be true') + p_attrs = [('external_ids', {'iface-status': 'active'})] + br = self.useFixture(net_helpers.OVSBridgeFixture()) + p1 = self.useFixture(net_helpers.OVSPortFixture( + br.bridge, None, p_attrs)) + p2 = self.useFixture(net_helpers.OVSPortFixture( + br.bridge, None, p_attrs)) + added_devices = [p1.port.name, p2.port.name] + utils.wait_until_true( + lambda: self._expected_devices_events(added_devices, 'added')) + br.bridge.delete_port(p1.port.name) + br.bridge.delete_port(p2.port.name) + removed_devices = [p1.port.name, p2.port.name] + utils.wait_until_true( + lambda: self._expected_devices_events(removed_devices, 'removed')) + # restart + self.monitor.stop(block=True) + self.monitor.start(block=True, timeout=60) + devices = self.monitor.get_events() + self.assertTrue(devices.get('added'), + 'Initial call should always be true') diff --git a/neutron/tests/unit/agent/linux/test_ovsdb_monitor.py b/neutron/tests/unit/agent/linux/test_ovsdb_monitor.py index 9b8b9768706..604d6cc4ad8 100644 --- a/neutron/tests/unit/agent/linux/test_ovsdb_monitor.py +++ b/neutron/tests/unit/agent/linux/test_ovsdb_monitor.py @@ -55,9 +55,6 @@ class TestSimpleInterfaceMonitor(base.BaseTestCase): super(TestSimpleInterfaceMonitor, self).setUp() self.monitor = ovsdb_monitor.SimpleInterfaceMonitor() - def test_has_updates_is_true_by_default(self): - 
self.assertTrue(self.monitor.has_updates) - def test_has_updates_is_false_if_active_with_no_output(self): target = ('neutron.agent.linux.ovsdb_monitor.SimpleInterfaceMonitor' '.is_active') @@ -87,3 +84,12 @@ class TestSimpleInterfaceMonitor(base.BaseTestCase): return_value=output): self.monitor._read_stdout() self.assertFalse(self.monitor.data_received) + + def test_has_updates_after_calling_get_events_is_false(self): + with mock.patch.object( + self.monitor, 'process_events') as process_events: + self.monitor.new_events = {'added': ['foo'], 'removed': ['foo1']} + self.assertTrue(self.monitor.has_updates) + self.monitor.get_events() + self.assertTrue(process_events.called) + self.assertFalse(self.monitor.has_updates) From 00899b56213753d523842f29d50353a067df6064 Mon Sep 17 00:00:00 2001 From: Cyril Roelandt Date: Mon, 8 Jun 2015 14:42:18 +0000 Subject: [PATCH 142/292] Python3: Enable all working tests in tox.ini Thanks to the recent Python3-related changes, these tests can now be run on Python 3. 
Change-Id: I7f689e221e59128012d46da2c90e61d5206fe828 Blueprint: neutron-python3 --- tox.ini | 90 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 89 insertions(+), 1 deletion(-) diff --git a/tox.ini b/tox.ini index bd28ed3e269..636a2a37739 100644 --- a/tox.ini +++ b/tox.ini @@ -89,7 +89,95 @@ commands = sphinx-build -W -b html doc/source doc/build [testenv:py34] commands = python -m testtools.run \ - neutron.tests.unit.common.test_rpc + neutron.tests.unit.services.metering.drivers.test_iptables \ + neutron.tests.unit.services.l3_router.test_l3_apic \ + neutron.tests.unit.plugins.sriovnicagent.test_sriov_nic_agent \ + neutron.tests.unit.plugins.sriovnicagent.test_pci_lib \ + neutron.tests.unit.plugins.openvswitch.agent.ovs_test_base \ + neutron.tests.unit.plugins.openvswitch.agent.openflow.ovs_ofctl.test_br_phys \ + neutron.tests.unit.plugins.openvswitch.agent.openflow.ovs_ofctl.test_br_int \ + neutron.tests.unit.plugins.openvswitch.agent.openflow.ovs_ofctl.test_br_tun \ + neutron.tests.unit.plugins.brocade.test_brocade_db \ + neutron.tests.unit.plugins.brocade.test_brocade_vlan \ + neutron.tests.unit.plugins.oneconvergence.test_nvsd_agent \ + neutron.tests.unit.plugins.oneconvergence.test_plugin_helper \ + neutron.tests.unit.plugins.oneconvergence.test_nvsdlib \ + neutron.tests.unit.plugins.ibm.test_sdnve_api \ + neutron.tests.unit.plugins.ml2.test_db \ + neutron.tests.unit.plugins.ml2.test_driver_context \ + neutron.tests.unit.plugins.ml2.test_rpc \ + neutron.tests.unit.plugins.ml2.drivers.mlnx.test_mech_mlnx \ + neutron.tests.unit.plugins.ml2.drivers.test_mech_openvswitch \ + neutron.tests.unit.plugins.ml2.drivers.test_mech_linuxbridge \ + neutron.tests.unit.plugins.ml2.drivers.base_type_tunnel \ + neutron.tests.unit.plugins.ml2.drivers.ext_test \ + neutron.tests.unit.plugins.ml2.drivers.mech_sriov.test_mech_sriov_nic_switch \ + neutron.tests.unit.plugins.ml2.drivers.mech_fake_agent \ + 
neutron.tests.unit.plugins.ml2.drivers.arista.test_mechanism_arista \ + neutron.tests.unit.plugins.ml2.drivers.test_type_local \ + neutron.tests.unit.plugins.ml2.drivers.mechanism_logger \ + neutron.tests.unit.plugins.ml2.drivers.cisco.apic.test_apic_sync \ + neutron.tests.unit.plugins.ml2.drivers.cisco.apic.base \ + neutron.tests.unit.plugins.ml2.drivers.cisco.apic.test_apic_topology \ + neutron.tests.unit.plugins.ml2.drivers.test_type_flat \ + neutron.tests.unit.plugins.ml2.drivers.mechanism_test \ + neutron.tests.unit.plugins.ml2.extensions.fake_extension \ + neutron.tests.unit.plugins.cisco.n1kv.fake_client \ + neutron.tests.unit.plugins.cisco.test_network_db \ + neutron.tests.unit.db.test_l3_dvr_db \ + neutron.tests.unit.db.test_agents_db \ + neutron.tests.unit.db.test_dvr_mac_db \ + neutron.tests.unit.debug.test_commands \ + neutron.tests.unit.tests.test_base \ + neutron.tests.unit.database_stubs \ + neutron.tests.unit.dummy_plugin \ + neutron.tests.unit.extension_stubs \ + neutron.tests.unit.testlib_api \ + neutron.tests.unit.api.test_api_common \ + neutron.tests.unit.api.rpc.handlers.test_dhcp_rpc \ + neutron.tests.unit.api.rpc.handlers.test_securitygroups_rpc \ + neutron.tests.unit.api.rpc.handlers.test_dvr_rpc \ + neutron.tests.unit.api.rpc.agentnotifiers.test_dhcp_rpc_agent_api \ + neutron.tests.unit.agent.metadata.test_driver \ + neutron.tests.unit.agent.l2population_rpc_base \ + neutron.tests.unit.agent.test_rpc \ + neutron.tests.unit.agent.test_l2population_rpc \ + neutron.tests.unit.agent.l3.test_link_local_allocator \ + neutron.tests.unit.agent.l3.test_ha_router \ + neutron.tests.unit.agent.l3.test_legacy_router \ + neutron.tests.unit.agent.l3.test_router_info \ + neutron.tests.unit.agent.l3.test_router_processing_queue \ + neutron.tests.unit.agent.l3.test_namespace_manager \ + neutron.tests.unit.agent.l3.test_dvr_fip_ns \ + neutron.tests.unit.agent.common.test_config \ + neutron.tests.unit.agent.common.test_polling \ + 
neutron.tests.unit.agent.linux.test_keepalived \ + neutron.tests.unit.agent.linux.test_ipset_manager \ + neutron.tests.unit.agent.linux.test_ebtables_manager \ + neutron.tests.unit.agent.linux.test_ebtables_driver \ + neutron.tests.unit.agent.linux.test_polling \ + neutron.tests.unit.agent.linux.test_ip_monitor \ + neutron.tests.unit.agent.linux.test_iptables_manager \ + neutron.tests.unit.agent.linux.test_ovsdb_monitor \ + neutron.tests.unit.agent.linux.test_bridge_lib \ + neutron.tests.unit.agent.linux.test_ip_link_support \ + neutron.tests.unit.agent.linux.test_interface \ + neutron.tests.unit.test_auth \ + neutron.tests.unit.extensions.v2attributes \ + neutron.tests.unit.extensions.extendedattribute \ + neutron.tests.unit.extensions.base \ + neutron.tests.unit.extensions.foxinsocks \ + neutron.tests.unit.extensions.extensionattribute \ + neutron.tests.unit.callbacks.test_manager \ + neutron.tests.unit.hacking.test_checks \ + neutron.tests.unit.common.test_config \ + neutron.tests.unit.common.test_rpc \ + neutron.tests.unit.cmd.test_ovs_cleanup \ + neutron.tests.unit.cmd.test_netns_cleanup \ + neutron.tests.unit.ipam.drivers.neutrondb_ipam.test_db_api \ + neutron.tests.unit.ipam.drivers.neutrondb_ipam.test_driver \ + neutron.tests.unit.notifiers.test_nova \ + neutron.tests.unit.notifiers.test_batch_notifier [flake8] # E125 continuation line does not distinguish itself from next logical line From 3a5a8a62c372f3a516caa59fd655dcf923a82519 Mon Sep 17 00:00:00 2001 From: Kyle Mestery Date: Mon, 8 Jun 2015 15:27:23 +0000 Subject: [PATCH 143/292] Add Neutron PTL Office Hours To ensure a weekly oppurtunity to sync between the PTL and the Lieutenants, officially setup Neutron PTL Office Hours. 
Depends-On: Ia5c8090e90939097104cb95c0aa3b883f7b4dd9b Change-Id: Iab3c21764937ebb3a1d0553b3a3d42b5c44bf3cc Signed-off-by: Kyle Mestery --- doc/source/policies/index.rst | 1 + doc/source/policies/office-hours.rst | 21 +++++++++++++++++++++ 2 files changed, 22 insertions(+) create mode 100644 doc/source/policies/office-hours.rst diff --git a/doc/source/policies/index.rst b/doc/source/policies/index.rst index 9a780eeb56e..6a766ac9159 100644 --- a/doc/source/policies/index.rst +++ b/doc/source/policies/index.rst @@ -32,6 +32,7 @@ Policies core-reviewers gate-failure-triage code-reviews + office-hours Indices and tables ------------------ diff --git a/doc/source/policies/office-hours.rst b/doc/source/policies/office-hours.rst new file mode 100644 index 00000000000..fd50012c99c --- /dev/null +++ b/doc/source/policies/office-hours.rst @@ -0,0 +1,21 @@ +Neutron PTL Office Hours +------------------------ + +Neutron has evolved into a platform. As part of the broader ["Big Tent"][1] +initiative, Neutron has also opened it's doors to the Neutron Stadium +effort. This, combined with the new [Lieutenant System][2], means the PTL is +now responsible for leading an increasingly large and diverse group of +contributors. To ensure weekly syncs between the PTL and the Lieutenants, +as well as to allow for projects under the Neutron Stadium to have a sync +point with the PTL, the project is setting up office hours in the +#openstack-neutron-release IRC channel. The PTL will use these office hours +to allow for questions and syncing with Lieutenants. + +The current office hours can be seen on the [OpenStack eavesdrop][3] page. + +Please note the #openstack-neutron-release channel is logged to allow the +consumption of these discussion by those who cannot make the times above. 
+ +[1]: http://superuser.openstack.org/articles/openstack-as-layers-but-also-a-big-tent-but-also-a-bunch-of-cats +[2]: http://docs.openstack.org/developer/neutron/policies/core-reviewers.html#core-review-hierarchy +[3]: http://eavesdrop.openstack.org/ From b322ebae09cc59ed0a860ea6e39ed9b6fa6c5c12 Mon Sep 17 00:00:00 2001 From: yuyangbj Date: Wed, 13 May 2015 14:07:36 +0800 Subject: [PATCH 144/292] Fixes bulk insertion of data to ml2_port_binding We should use schema definition to insert bulk of data to table. Closes-Bug: #1454566 Change-Id: I66b3ee8c2f9fa6f04b9e89dc49d1a3d277d63191 --- .../2b801560a332_remove_hypervneutronplugin_tables.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/neutron/db/migration/alembic_migrations/versions/2b801560a332_remove_hypervneutronplugin_tables.py b/neutron/db/migration/alembic_migrations/versions/2b801560a332_remove_hypervneutronplugin_tables.py index 31515849956..711ebe05e3e 100644 --- a/neutron/db/migration/alembic_migrations/versions/2b801560a332_remove_hypervneutronplugin_tables.py +++ b/neutron/db/migration/alembic_migrations/versions/2b801560a332_remove_hypervneutronplugin_tables.py @@ -32,6 +32,7 @@ revision = '2b801560a332' down_revision = '2d2a8a565438' from alembic import op +import sqlalchemy as sa from sqlalchemy.sql import expression as sa_expr from neutron.extensions import portbindings @@ -119,8 +120,9 @@ def _migrate_port_bindings(engine): if segment: binding['segment'] = segment if ml2_bindings: - ml2_port_bindings = sa_expr.table('ml2_port_bindings') - op.execute(ml2_port_bindings.insert(), ml2_bindings) + md = sa.MetaData() + sa.Table('ml2_port_bindings', md, autoload=True, autoload_with=engine) + op.bulk_insert(md.tables['ml2_port_bindings'], ml2_bindings) def upgrade(): From d0bbfc090bb25f1e05b98f0ad70c18209b87ed6b Mon Sep 17 00:00:00 2001 From: Zhenguo Niu Date: Tue, 9 Jun 2015 08:28:45 +0800 Subject: [PATCH 145/292] Fix typos in docs Change-Id: 
I71aeb8f1e5fc5f3e330e593a463858dd65e6093b --- doc/source/devref/callbacks.rst | 2 +- doc/source/devref/contribute.rst | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/source/devref/callbacks.rst b/doc/source/devref/callbacks.rst index 4c9f5488447..baaa98a83b7 100644 --- a/doc/source/devref/callbacks.rst +++ b/doc/source/devref/callbacks.rst @@ -223,7 +223,7 @@ There are a few options to unsubscribe registered callbacks: resource R, any notification of events related to resource R will no longer be handed over to C, after the unsubscribe_by_resource() invocation. * unsubscribe_all(): say that callback C has subscribed to events A, B for resource R1, - and events C, D for resource R2, any notification of events pertaining resouces R1 and + and events C, D for resource R2, any notification of events pertaining resources R1 and R2 will no longer be handed over to C, after the unsubscribe_all() invocation. The snippet below shows these concepts in action: diff --git a/doc/source/devref/contribute.rst b/doc/source/devref/contribute.rst index 932474e7500..8f90e4d757d 100644 --- a/doc/source/devref/contribute.rst +++ b/doc/source/devref/contribute.rst @@ -391,7 +391,7 @@ will be removed. The following aspects are captured: effort is not considered justified. Assessment may change in the future. - Absense of an entry for an existing plugin or driver means no active effort + Absence of an entry for an existing plugin or driver means no active effort has been observed or potentially not required. * Completed in: the release in which the effort is considered completed. 
Code completion can be deemed as such, if there is no overlap/duplication between From e61865807c4c8ff959a7746fe3e17f1ae574c9d0 Mon Sep 17 00:00:00 2001 From: Kevin Benton Date: Wed, 3 Jun 2015 19:03:29 -0700 Subject: [PATCH 146/292] Defer segment lookup in NetworkContext object Avoid call to get network segments for network context objects until a caller actually tries to lookup the segments. This optimizes cases where the user of a port context never looks at the segments of the associated network context (e.g. update_port_status). Closes-Bug: #1463254 Change-Id: I7e95f81d9a3ef26ccdb18c6bfdf9adc29523aa79 --- neutron/plugins/ml2/driver_context.py | 7 +- .../unit/plugins/ml2/test_driver_context.py | 69 ++++++++++++------- 2 files changed, 51 insertions(+), 25 deletions(-) diff --git a/neutron/plugins/ml2/driver_context.py b/neutron/plugins/ml2/driver_context.py index ef418fe16e2..6e9b295b594 100644 --- a/neutron/plugins/ml2/driver_context.py +++ b/neutron/plugins/ml2/driver_context.py @@ -42,8 +42,8 @@ class NetworkContext(MechanismDriverContext, api.NetworkContext): super(NetworkContext, self).__init__(plugin, plugin_context) self._network = network self._original_network = original_network - self._segments = db.get_network_segments(plugin_context.session, - network['id']) + self._segments = None + self._session = plugin_context.session @property def current(self): @@ -55,6 +55,9 @@ class NetworkContext(MechanismDriverContext, api.NetworkContext): @property def network_segments(self): + if not self._segments: + self._segments = db.get_network_segments(self._session, + self._network['id']) return self._segments diff --git a/neutron/tests/unit/plugins/ml2/test_driver_context.py b/neutron/tests/unit/plugins/ml2/test_driver_context.py index e30349c9c6c..8171071b6c2 100644 --- a/neutron/tests/unit/plugins/ml2/test_driver_context.py +++ b/neutron/tests/unit/plugins/ml2/test_driver_context.py @@ -37,13 +37,12 @@ class TestPortContext(base.BaseTestCase): port = 
{'device_owner': constants.DEVICE_OWNER_DVR_INTERFACE} binding.host = 'foohost' - with mock.patch.object(driver_context.db, 'get_network_segments'): - ctx = driver_context.PortContext(plugin, - plugin_context, - port, - network, - binding, - None) + ctx = driver_context.PortContext(plugin, + plugin_context, + port, + network, + binding, + None) self.assertEqual('foohost', ctx.host) def test_host_super(self): @@ -56,13 +55,12 @@ class TestPortContext(base.BaseTestCase): portbindings.HOST_ID: 'host'} binding.host = 'foohost' - with mock.patch.object(driver_context.db, 'get_network_segments'): - ctx = driver_context.PortContext(plugin, - plugin_context, - port, - network, - binding, - None) + ctx = driver_context.PortContext(plugin, + plugin_context, + port, + network, + binding, + None) self.assertEqual('host', ctx.host) def test_status(self): @@ -74,13 +72,12 @@ class TestPortContext(base.BaseTestCase): port = {'device_owner': constants.DEVICE_OWNER_DVR_INTERFACE} binding.status = 'foostatus' - with mock.patch.object(driver_context.db, 'get_network_segments'): - ctx = driver_context.PortContext(plugin, - plugin_context, - port, - network, - binding, - None) + ctx = driver_context.PortContext(plugin, + plugin_context, + port, + network, + binding, + None) self.assertEqual('foostatus', ctx.status) def test_status_super(self): @@ -93,11 +90,37 @@ class TestPortContext(base.BaseTestCase): 'status': 'status'} binding.status = 'foostatus' - with mock.patch.object(driver_context.db, 'get_network_segments'): + ctx = driver_context.PortContext(plugin, + plugin_context, + port, + network, + binding, + None) + self.assertEqual('status', ctx.status) + + def test_segments_lazy_lookup(self): + plugin = mock.Mock() + plugin_context = mock.Mock() + network = mock.MagicMock() + binding = mock.Mock() + + port = {'device_owner': 'compute', + 'status': 'status'} + binding.status = 'foostatus' + + with mock.patch.object(driver_context.db, + 'get_network_segments') as gs: ctx = 
driver_context.PortContext(plugin, plugin_context, port, network, binding, None) - self.assertEqual('status', ctx.status) + self.assertFalse(gs.called) + # accessing the network_segments property should trigger + # a lookup the first time + seg = ctx.network.network_segments + self.assertTrue(gs.called) + gs.reset_mock() + self.assertEqual(seg, ctx.network.network_segments) + self.assertFalse(gs.called) From 89c0875178f22651109a85d3c522d80324368caf Mon Sep 17 00:00:00 2001 From: Gal Sagie Date: Mon, 8 Jun 2015 14:27:47 +0300 Subject: [PATCH 147/292] Add documentations for VXLAN Tunnels The VXLAN type driver is currently supported, this patch add description and links for more information to the user. Change-Id: Idb221ca4cce1a3a27bebe5ae6fc1e6ab5d030836 --- doc/source/devref/openvswitch_agent.rst | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/doc/source/devref/openvswitch_agent.rst b/doc/source/devref/openvswitch_agent.rst index c6f165064a7..ae8660af3c5 100644 --- a/doc/source/devref/openvswitch_agent.rst +++ b/doc/source/devref/openvswitch_agent.rst @@ -6,7 +6,10 @@ This Agent uses the `OpenVSwitch`_ virtual switch to create L2 connectivity for instances, along with bridges created in conjunction with OpenStack Nova for filtering. -ovs-neutron-agent can be configured to use two different networking technologies to create tenant isolation, either GRE tunnels or VLAN tags. +ovs-neutron-agent can be configured to use different networking technologies +to create tenant isolation. +These technologies are implemented as ML2 type drivers which are used in +conjunction with the OpenVSwitch mechanism driver. VLAN Tags --------- @@ -23,6 +26,16 @@ GRE Tunneling is documented in depth in the `Networking in too much detail `_ by RedHat. + +VXLAN Tunnels +------------- + +VXLAN is an overlay technology which encapsulates MAC frames +at layer 2 into a UDP header. +More information can be found in `The VXLAN wiki page. 
+ `_ + + Further Reading --------------- From 734e77365b0f241a3cea0f3c9dfb1d5fcf6eac8c Mon Sep 17 00:00:00 2001 From: Salvatore Orlando Date: Fri, 17 Apr 2015 15:00:20 -0700 Subject: [PATCH 148/292] Remove get_admin_roles and associated logic get_admin_roles was introduced so that contextes generated from within plugins could be used for policy checks. This was the case up to the Havana release as several plugins invoked the policy engine directly to authorize requests. This was an incorrect behaviour and has now been fixed, meaning that get_admin_roles is no longer need and can be safely removed. This will result in a leaner and more reliable codebase. Indeed the function being removed here was the cause of several bugs where the policy engine was initialized too early in the server bootstrap process. While this patch removes the feature it does not remove the load_admin_roles parameter from context.get_admin_context. Doing so will break other projects such as neutron-lbaas. The parameter is deprecated by this patch and an appropriate warning emitted. As a consequence neutron's will now no longer perform policy checks when context.is_admin=True. This flag is instead set either when a context is explicitly created for granting admin privileges, or when Neutron is operating in noauth mode. In the latter case every request is treated by neutron as an admin request, and get_admin_roles is simply ensuring the appropriate roles get pushed into the context so that the policy engine will grant admin rights to the request. This behaviour is probably just a waste of resource; also it is not adding anything from a security perspective. On the other hand not performing checks when context.is_admin is True should not pose a security threat either in noauth mode or with the keystone middleware. 
In the former case the software keeps operating assuming admin rights for every requests, whereas in the latter case the keystone middleware will always supply a context with the appropriate roles, and there is no way for an attacker to trick keystonemiddleware into generating a context for which is_admin=True. Finally, this patch also does some non-trivial changes in test_l3.py as some tests were mocking context.to_dict ignoring the is_admin flag. Closes-Bug: #1446021 Change-Id: I8a5c02712a0b43f3e36a4f14620ebbd73fbfb03f --- neutron/common/rpc.py | 3 +- neutron/context.py | 13 +-- neutron/policy.py | 30 ++---- neutron/tests/functional/db/test_ipam.py | 1 - .../tests/unit/db/test_db_base_plugin_v2.py | 22 ++-- neutron/tests/unit/extensions/test_l3.py | 102 +++++++++--------- .../tests/unit/extensions/test_quotasv2.py | 2 +- .../opencontrail/test_contrail_plugin.py | 3 +- neutron/tests/unit/test_context.py | 8 -- neutron/tests/unit/test_policy.py | 41 +------ requirements.txt | 1 + 11 files changed, 81 insertions(+), 145 deletions(-) diff --git a/neutron/common/rpc.py b/neutron/common/rpc.py index 732f0527f2a..8c4df963fbc 100644 --- a/neutron/common/rpc.py +++ b/neutron/common/rpc.py @@ -130,8 +130,7 @@ class RequestContextSerializer(om_serializer.Serializer): tenant_id = rpc_ctxt_dict.pop('tenant_id', None) if not tenant_id: tenant_id = rpc_ctxt_dict.pop('project_id', None) - return context.Context(user_id, tenant_id, - load_admin_roles=False, **rpc_ctxt_dict) + return context.Context(user_id, tenant_id, **rpc_ctxt_dict) class Service(service.Service): diff --git a/neutron/context.py b/neutron/context.py index 4847d06fa21..ee6ca8ed7d4 100644 --- a/neutron/context.py +++ b/neutron/context.py @@ -18,6 +18,7 @@ import copy import datetime +from debtcollector import removals from oslo_context import context as oslo_context from oslo_log import log as logging @@ -36,9 +37,8 @@ class ContextBase(oslo_context.RequestContext): """ def __init__(self, user_id, tenant_id, 
is_admin=None, read_deleted="no", - roles=None, timestamp=None, load_admin_roles=True, - request_id=None, tenant_name=None, user_name=None, - overwrite=True, auth_token=None, **kwargs): + roles=None, timestamp=None, request_id=None, tenant_name=None, + user_name=None, overwrite=True, auth_token=None, **kwargs): """Object initialization. :param read_deleted: 'no' indicates deleted records are hidden, 'yes' @@ -68,11 +68,6 @@ class ContextBase(oslo_context.RequestContext): self.is_advsvc = self.is_admin or policy.check_is_advsvc(self) if self.is_admin is None: self.is_admin = policy.check_is_admin(self) - elif self.is_admin and load_admin_roles: - # Ensure context is populated with admin roles - admin_roles = policy.get_admin_roles() - if admin_roles: - self.roles = list(set(self.roles) | set(admin_roles)) @property def project_id(self): @@ -150,12 +145,12 @@ class Context(ContextBase): return self._session +@removals.removed_kwarg('load_admin_roles') def get_admin_context(read_deleted="no", load_admin_roles=True): return Context(user_id=None, tenant_id=None, is_admin=True, read_deleted=read_deleted, - load_admin_roles=load_admin_roles, overwrite=False) diff --git a/neutron/policy.py b/neutron/policy.py index 9352a00a1b9..a2d099f6761 100644 --- a/neutron/policy.py +++ b/neutron/policy.py @@ -384,6 +384,10 @@ def check(context, action, target, plugin=None, might_not_exist=False, :return: Returns True if access is permitted else False. """ + # If we already know the context has admin rights do not perform an + # additional check and authorize the operation + if context.is_admin: + return True if might_not_exist and not (_ENFORCER.rules and action in _ENFORCER.rules): return True match_rule, target, credentials = _prepare_check(context, @@ -417,6 +421,10 @@ def enforce(context, action, target, plugin=None, pluralized=None): :raises neutron.openstack.common.policy.PolicyNotAuthorized: if verification fails. 
""" + # If we already know the context has admin rights do not perform an + # additional check and authorize the operation + if context.is_admin: + return True rule, target, credentials = _prepare_check(context, action, target, @@ -459,25 +467,3 @@ def _extract_roles(rule, roles): elif hasattr(rule, 'rules'): for rule in rule.rules: _extract_roles(rule, roles) - - -def get_admin_roles(): - """Return a list of roles which are granted admin rights according - to policy settings. - """ - # NOTE(salvatore-orlando): This function provides a solution for - # populating implicit contexts with the appropriate roles so that - # they correctly pass policy checks, and will become superseded - # once all explicit policy checks are removed from db logic and - # plugin modules. For backward compatibility it returns the literal - # admin if ADMIN_CTX_POLICY is not defined - init() - if not _ENFORCER.rules or ADMIN_CTX_POLICY not in _ENFORCER.rules: - return ['admin'] - try: - admin_ctx_rule = _ENFORCER.rules[ADMIN_CTX_POLICY] - except (KeyError, TypeError): - return - roles = [] - _extract_roles(admin_ctx_rule, roles) - return roles diff --git a/neutron/tests/functional/db/test_ipam.py b/neutron/tests/functional/db/test_ipam.py index 8886947ee05..3c3a9d163a4 100644 --- a/neutron/tests/functional/db/test_ipam.py +++ b/neutron/tests/functional/db/test_ipam.py @@ -37,7 +37,6 @@ def get_admin_test_context(db_url): tenant_id=None, is_admin=True, read_deleted="no", - load_admin_roles=True, overwrite=False) facade = session.EngineFacade(db_url, mysql_sql_mode='STRICT_ALL_TABLES') ctx._session = facade.get_session(autocommit=False, expire_on_commit=True) diff --git a/neutron/tests/unit/db/test_db_base_plugin_v2.py b/neutron/tests/unit/db/test_db_base_plugin_v2.py index ff566b3d383..6ef20399faf 100644 --- a/neutron/tests/unit/db/test_db_base_plugin_v2.py +++ b/neutron/tests/unit/db/test_db_base_plugin_v2.py @@ -436,12 +436,14 @@ class NeutronDbPluginV2TestCase(testlib_api.WebTestCase): 
def _make_subnet(self, fmt, network, gateway, cidr, allocation_pools=None, ip_version=4, enable_dhcp=True, dns_nameservers=None, host_routes=None, shared=None, - ipv6_ra_mode=None, ipv6_address_mode=None): + ipv6_ra_mode=None, ipv6_address_mode=None, + tenant_id=None, set_context=False): res = self._create_subnet(fmt, net_id=network['network']['id'], cidr=cidr, gateway_ip=gateway, - tenant_id=network['network']['tenant_id'], + tenant_id=(tenant_id or + network['network']['tenant_id']), allocation_pools=allocation_pools, ip_version=ip_version, enable_dhcp=enable_dhcp, @@ -449,7 +451,8 @@ class NeutronDbPluginV2TestCase(testlib_api.WebTestCase): host_routes=host_routes, shared=shared, ipv6_ra_mode=ipv6_ra_mode, - ipv6_address_mode=ipv6_address_mode) + ipv6_address_mode=ipv6_address_mode, + set_context=set_context) # Things can go wrong - raise HTTP exc with res code only # so it can be caught by unit tests if res.status_int >= webob.exc.HTTPClientError.code: @@ -583,7 +586,9 @@ class NeutronDbPluginV2TestCase(testlib_api.WebTestCase): host_routes=None, shared=None, ipv6_ra_mode=None, - ipv6_address_mode=None): + ipv6_address_mode=None, + tenant_id=None, + set_context=False): with optional_ctx(network, self.network) as network_to_use: subnet = self._make_subnet(fmt or self.fmt, network_to_use, @@ -596,7 +601,9 @@ class NeutronDbPluginV2TestCase(testlib_api.WebTestCase): host_routes, shared=shared, ipv6_ra_mode=ipv6_ra_mode, - ipv6_address_mode=ipv6_address_mode) + ipv6_address_mode=ipv6_address_mode, + tenant_id=tenant_id, + set_context=set_context) yield subnet @contextlib.contextmanager @@ -3664,7 +3671,7 @@ class TestSubnetsV2(NeutronDbPluginV2TestCase): def _test_validate_subnet_ipv6_modes(self, cur_subnet=None, expect_success=True, **modes): plugin = manager.NeutronManager.get_plugin() - ctx = context.get_admin_context(load_admin_roles=False) + ctx = context.get_admin_context() new_subnet = {'ip_version': 6, 'cidr': 'fe80::/64', 'enable_dhcp': True, @@ -4579,8 
+4586,7 @@ class TestSubnetsV2(NeutronDbPluginV2TestCase): plugin = manager.NeutronManager.get_plugin() e = self.assertRaises(exception, plugin._validate_subnet, - context.get_admin_context( - load_admin_roles=False), + context.get_admin_context(), subnet) self.assertThat( str(e), diff --git a/neutron/tests/unit/extensions/test_l3.py b/neutron/tests/unit/extensions/test_l3.py index 2392adc03bb..7d340cfc3eb 100644 --- a/neutron/tests/unit/extensions/test_l3.py +++ b/neutron/tests/unit/extensions/test_l3.py @@ -1120,34 +1120,28 @@ class L3NatTestCaseBase(L3NatTestCaseMixin): expected_code=error_code) def test_router_add_interface_subnet_with_bad_tenant_returns_404(self): - with mock.patch('neutron.context.Context.to_dict') as tdict: - tenant_id = _uuid() - admin_context = {'roles': ['admin']} - tenant_context = {'tenant_id': 'bad_tenant', - 'roles': []} - tdict.return_value = admin_context - with self.router(tenant_id=tenant_id) as r: - with self.network(tenant_id=tenant_id) as n: - with self.subnet(network=n) as s: - tdict.return_value = tenant_context - err_code = exc.HTTPNotFound.code - self._router_interface_action('add', - r['router']['id'], - s['subnet']['id'], - None, - err_code) - tdict.return_value = admin_context - body = self._router_interface_action('add', - r['router']['id'], - s['subnet']['id'], - None) - self.assertIn('port_id', body) - tdict.return_value = tenant_context - self._router_interface_action('remove', - r['router']['id'], - s['subnet']['id'], - None, - err_code) + tenant_id = _uuid() + with self.router(tenant_id=tenant_id, set_context=True) as r: + with self.network(tenant_id=tenant_id, set_context=True) as n: + with self.subnet(network=n, set_context=True) as s: + err_code = exc.HTTPNotFound.code + self._router_interface_action('add', + r['router']['id'], + s['subnet']['id'], + None, + expected_code=err_code, + tenant_id='bad_tenant') + body = self._router_interface_action('add', + r['router']['id'], + s['subnet']['id'], + None) + 
self.assertIn('port_id', body) + self._router_interface_action('remove', + r['router']['id'], + s['subnet']['id'], + None, + expected_code=err_code, + tenant_id='bad_tenant') def test_router_add_interface_subnet_with_port_from_other_tenant(self): tenant_id = _uuid() @@ -1270,33 +1264,33 @@ class L3NatTestCaseBase(L3NatTestCaseMixin): HTTPBadRequest.code) def test_router_add_interface_port_bad_tenant_returns_404(self): - with mock.patch('neutron.context.Context.to_dict') as tdict: - admin_context = {'roles': ['admin']} - tenant_context = {'tenant_id': 'bad_tenant', - 'roles': []} - tdict.return_value = admin_context - with self.router() as r: - with self.port() as p: - tdict.return_value = tenant_context - err_code = exc.HTTPNotFound.code - self._router_interface_action('add', - r['router']['id'], - None, - p['port']['id'], - err_code) - tdict.return_value = admin_context - self._router_interface_action('add', - r['router']['id'], - None, - p['port']['id']) + tenant_id = _uuid() + with self.router(tenant_id=tenant_id, set_context=True) as r: + with self.network(tenant_id=tenant_id, set_context=True) as n: + with self.subnet(tenant_id=tenant_id, network=n, + set_context=True) as s: + with self.port(tenant_id=tenant_id, subnet=s, + set_context=True) as p: + err_code = exc.HTTPNotFound.code + self._router_interface_action('add', + r['router']['id'], + None, + p['port']['id'], + expected_code=err_code, + tenant_id='bad_tenant') + self._router_interface_action('add', + r['router']['id'], + None, + p['port']['id'], + tenant_id=tenant_id) - tdict.return_value = tenant_context - # clean-up - self._router_interface_action('remove', - r['router']['id'], - None, - p['port']['id'], - err_code) + # clean-up should fail as well + self._router_interface_action('remove', + r['router']['id'], + None, + p['port']['id'], + expected_code=err_code, + tenant_id='bad_tenant') def test_router_add_interface_dup_subnet1_returns_400(self): with self.router() as r: diff --git 
a/neutron/tests/unit/extensions/test_quotasv2.py b/neutron/tests/unit/extensions/test_quotasv2.py index 7c1b51866a9..6f8fd6b0a2a 100644 --- a/neutron/tests/unit/extensions/test_quotasv2.py +++ b/neutron/tests/unit/extensions/test_quotasv2.py @@ -322,7 +322,7 @@ class QuotaExtensionDbTestCase(QuotaExtensionTestCase): tenant_id = 'tenant_id1' self.assertRaises(exceptions.QuotaResourceUnknown, quota.QUOTAS.limit_check, - context.get_admin_context(load_admin_roles=False), + context.get_admin_context(), tenant_id, foobar=1) diff --git a/neutron/tests/unit/plugins/opencontrail/test_contrail_plugin.py b/neutron/tests/unit/plugins/opencontrail/test_contrail_plugin.py index e7fa6694694..17df9fcab35 100644 --- a/neutron/tests/unit/plugins/opencontrail/test_contrail_plugin.py +++ b/neutron/tests/unit/plugins/opencontrail/test_contrail_plugin.py @@ -231,8 +231,7 @@ class TestContrailSubnetsV2(test_plugin.TestSubnetsV2, 'nexthop': '1.2.3.4'}]} error = self.assertRaises(exception, FAKE_SERVER._validate_subnet, - neutron_context.get_admin_context( - load_admin_roles=False), + neutron_context.get_admin_context(), subnet) self.assertThat( str(error), diff --git a/neutron/tests/unit/test_context.py b/neutron/tests/unit/test_context.py index a53cd111cf7..1ecf338a22f 100644 --- a/neutron/tests/unit/test_context.py +++ b/neutron/tests/unit/test_context.py @@ -105,14 +105,6 @@ class TestNeutronContext(base.BaseTestCase): self.assertIsNone(ctx_dict['auth_token']) self.assertFalse(hasattr(ctx, 'session')) - def test_neutron_context_with_load_roles_true(self): - ctx = context.get_admin_context() - self.assertIn('admin', ctx.roles) - - def test_neutron_context_with_load_roles_false(self): - ctx = context.get_admin_context(load_admin_roles=False) - self.assertFalse(ctx.roles) - def test_neutron_context_elevated_retains_request_id(self): ctx = context.Context('user_id', 'tenant_id') self.assertFalse(ctx.is_admin) diff --git a/neutron/tests/unit/test_policy.py 
b/neutron/tests/unit/test_policy.py index 4d732d32588..cab94f24b36 100644 --- a/neutron/tests/unit/test_policy.py +++ b/neutron/tests/unit/test_policy.py @@ -359,6 +359,9 @@ class NeutronPolicyTestCase(base.BaseTestCase): def test_check_is_admin_with_admin_context_succeeds(self): admin_context = context.get_admin_context() + # explicitly set roles as this test verifies user credentials + # with the policy engine + admin_context.roles = ['admin'] self.assertTrue(policy.check_is_admin(admin_context)) def test_check_is_admin_with_user_context_fails(self): @@ -559,44 +562,6 @@ class NeutronPolicyTestCase(base.BaseTestCase): def test_enforce_tenant_id_check_invalid_parent_resource_raises(self): self._test_enforce_tenant_id_raises('tenant_id:%(foobaz_tenant_id)s') - def test_get_roles_context_is_admin_rule_missing(self): - rules = dict((k, common_policy.parse_rule(v)) for k, v in { - "some_other_rule": "role:admin", - }.items()) - policy.set_rules(common_policy.Rules(rules)) - # 'admin' role is expected for bw compatibility - self.assertEqual(['admin'], policy.get_admin_roles()) - - def test_get_roles_with_role_check(self): - rules = dict((k, common_policy.parse_rule(v)) for k, v in { - policy.ADMIN_CTX_POLICY: "role:admin", - }.items()) - policy.set_rules(common_policy.Rules(rules)) - self.assertEqual(['admin'], policy.get_admin_roles()) - - def test_get_roles_with_rule_check(self): - rules = dict((k, common_policy.parse_rule(v)) for k, v in { - policy.ADMIN_CTX_POLICY: "rule:some_other_rule", - "some_other_rule": "role:admin", - }.items()) - policy.set_rules(common_policy.Rules(rules)) - self.assertEqual(['admin'], policy.get_admin_roles()) - - def test_get_roles_with_or_check(self): - self.rules = dict((k, common_policy.parse_rule(v)) for k, v in { - policy.ADMIN_CTX_POLICY: "rule:rule1 or rule:rule2", - "rule1": "role:admin_1", - "rule2": "role:admin_2" - }.items()) - self.assertEqual(['admin_1', 'admin_2'], - policy.get_admin_roles()) - - def 
test_get_roles_with_other_rules(self): - self.rules = dict((k, common_policy.parse_rule(v)) for k, v in { - policy.ADMIN_CTX_POLICY: "role:xxx or other:value", - }.items()) - self.assertEqual(['xxx'], policy.get_admin_roles()) - def _test_set_rules_with_deprecated_policy(self, input_rules, expected_rules): policy.set_rules(input_rules.copy()) diff --git a/requirements.txt b/requirements.txt index 47fa0316d1f..101d0e31dd2 100644 --- a/requirements.txt +++ b/requirements.txt @@ -6,6 +6,7 @@ pbr>=0.11,<2.0 Paste PasteDeploy>=1.5.0 Routes>=1.12.3,!=2.0 +debtcollector>=0.3.0 # Apache-2.0 eventlet>=0.17.3 greenlet>=0.3.2 httplib2>=0.7.5 From 6b13cc5275df53c765c450d570521c425c3345d9 Mon Sep 17 00:00:00 2001 From: Ihar Hrachyshka Date: Tue, 9 Jun 2015 10:57:29 +0200 Subject: [PATCH 149/292] Enable all deprecation warnings for test runs We would like to catch all deprecation warnings during test runs to be notified in advance about potential problems with next library releases we depend on. Change-Id: I876d8c4de88618b01898ab537a44920789d8178e --- neutron/tests/base.py | 4 ++++ neutron/tests/tools.py | 17 +++++++++++++++++ 2 files changed, 21 insertions(+) diff --git a/neutron/tests/base.py b/neutron/tests/base.py index 7b18901044b..a9cf779ee8b 100644 --- a/neutron/tests/base.py +++ b/neutron/tests/base.py @@ -45,6 +45,7 @@ from neutron import manager from neutron import policy from neutron.tests import fake_notifier from neutron.tests import post_mortem_debug +from neutron.tests import tools CONF = cfg.CONF @@ -126,6 +127,9 @@ class DietTestCase(testtools.TestCase): self.addOnException(post_mortem_debug.get_exception_handler( debugger)) + # Make sure we see all relevant deprecation warnings when running tests + self.useFixture(tools.WarningsFixture()) + if bool_from_env('OS_DEBUG'): _level = std_logging.DEBUG else: diff --git a/neutron/tests/tools.py b/neutron/tests/tools.py index fd53793fee6..40c308d5996 100644 --- a/neutron/tests/tools.py +++ b/neutron/tests/tools.py @@ 
-13,6 +13,8 @@ # License for the specific language governing permissions and limitations # under the License. +import warnings + import fixtures import six @@ -49,6 +51,21 @@ class AttributeMapMemento(fixtures.Fixture): attributes.RESOURCE_ATTRIBUTE_MAP = self.contents_backup +class WarningsFixture(fixtures.Fixture): + """Filters out warnings during test runs.""" + + warning_types = ( + DeprecationWarning, PendingDeprecationWarning, ImportWarning + ) + + def setUp(self): + super(WarningsFixture, self).setUp() + for wtype in self.warning_types: + warnings.filterwarnings( + "always", category=wtype, module='^neutron\\.') + self.addCleanup(warnings.resetwarnings) + + """setup_mock_calls and verify_mock_calls are convenient methods to setup a sequence of mock calls. From 826428dc8aeef124c2251624ae34fdc003e69ca4 Mon Sep 17 00:00:00 2001 From: YAMAMOTO Takashi Date: Tue, 9 Jun 2015 19:00:40 +0900 Subject: [PATCH 150/292] Add a comment on _check_update_has_security_groups Despite of its name, _check_update_has_security_groups can handle create requests as well. There are plugins actually using it for create. eg. ml2, vmware Change-Id: I3c26ad0ac00b12ce24096bfc27606797af2d9098 --- neutron/db/securitygroups_db.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/neutron/db/securitygroups_db.py b/neutron/db/securitygroups_db.py index 1fec3f2239b..3caca9bbc91 100644 --- a/neutron/db/securitygroups_db.py +++ b/neutron/db/securitygroups_db.py @@ -709,8 +709,9 @@ class SecurityGroupDbMixin(ext_sg.SecurityGroupPluginBase): return False def _check_update_has_security_groups(self, port): - """Return True if port has as a security group and False if the - security_group field is is_attr_set or []. + """Return True if port has security_groups attribute set and + its not empty, or False otherwise. + This method is called both for port create and port update. 
""" if (ext_sg.SECURITYGROUPS in port['port'] and (attributes.is_attr_set(port['port'][ext_sg.SECURITYGROUPS]) and From d0be7bc57f573d5696108b571c731decfbde9f0b Mon Sep 17 00:00:00 2001 From: Ihar Hrachyshka Date: Tue, 9 Jun 2015 12:46:54 +0200 Subject: [PATCH 151/292] Make pep8 job succeed when /etc/neutron/neutron.conf is not installed Currently, if /etc/neutron/neutron.conf is not installed in the system, neutron-db-manage fails in oslo.config code when trying to determine the default configuration file to use. Test job should not rely on any contents inside /etc/. Instead, pass --config-file with test-only configuration explicitly into the utility. neutron.conf.test was renamed into neutron.conf since for some reason oslo.config does not support a name that does not have .conf at its filename end. Change-Id: I719829fc83a7b20a49c338aaf1dbef916dcc768c --- neutron/tests/base.py | 4 ++-- neutron/tests/etc/{neutron.conf.test => neutron.conf} | 0 neutron/tests/unit/agent/dhcp/test_agent.py | 4 ++-- neutron/tests/unit/api/test_extensions.py | 2 +- neutron/tests/unit/db/test_db_base_plugin_v2.py | 2 +- tox.ini | 2 +- 6 files changed, 7 insertions(+), 7 deletions(-) rename neutron/tests/etc/{neutron.conf.test => neutron.conf} (100%) diff --git a/neutron/tests/base.py b/neutron/tests/base.py index 7b18901044b..f55dbeab3b1 100644 --- a/neutron/tests/base.py +++ b/neutron/tests/base.py @@ -238,10 +238,10 @@ class BaseTestCase(DietTestCase): @staticmethod def config_parse(conf=None, args=None): """Create the default configurations.""" - # neutron.conf.test includes rpc_backend which needs to be cleaned up + # neutron.conf includes rpc_backend which needs to be cleaned up if args is None: args = [] - args += ['--config-file', etcdir('neutron.conf.test')] + args += ['--config-file', etcdir('neutron.conf')] if conf is None: config.init(args=args) else: diff --git a/neutron/tests/etc/neutron.conf.test b/neutron/tests/etc/neutron.conf similarity index 100% rename from 
neutron/tests/etc/neutron.conf.test rename to neutron/tests/etc/neutron.conf diff --git a/neutron/tests/unit/agent/dhcp/test_agent.py b/neutron/tests/unit/agent/dhcp/test_agent.py index 64da28398f1..a068ea01634 100644 --- a/neutron/tests/unit/agent/dhcp/test_agent.py +++ b/neutron/tests/unit/agent/dhcp/test_agent.py @@ -236,7 +236,7 @@ class TestDhcpAgent(base.BaseTestCase): with mock.patch.object(sys, 'argv') as sys_argv: sys_argv.return_value = [ 'dhcp', '--config-file', - base.etcdir('neutron.conf.test')] + base.etcdir('neutron.conf')] cfg.CONF.register_opts(dhcp_config.DHCP_AGENT_OPTS) config.register_interface_driver_opts_helper(cfg.CONF) config.register_agent_state_opts_helper(cfg.CONF) @@ -260,7 +260,7 @@ class TestDhcpAgent(base.BaseTestCase): with mock.patch.object(sys, 'argv') as sys_argv: with mock.patch(launcher_str) as launcher: sys_argv.return_value = ['dhcp', '--config-file', - base.etcdir('neutron.conf.test')] + base.etcdir('neutron.conf')] entry.main() launcher.assert_has_calls( [mock.call(), mock.call().launch_service(mock.ANY), diff --git a/neutron/tests/unit/api/test_extensions.py b/neutron/tests/unit/api/test_extensions.py index aabece09d4c..9a32e865f94 100644 --- a/neutron/tests/unit/api/test_extensions.py +++ b/neutron/tests/unit/api/test_extensions.py @@ -726,7 +726,7 @@ class ExtensionExtendedAttributeTestCase(base.BaseTestCase): "ExtensionExtendedAttributeTestPlugin" ) - # point config file to: neutron/tests/etc/neutron.conf.test + # point config file to: neutron/tests/etc/neutron.conf self.config_parse() self.setup_coreplugin(plugin) diff --git a/neutron/tests/unit/db/test_db_base_plugin_v2.py b/neutron/tests/unit/db/test_db_base_plugin_v2.py index b215ab3de39..215cca0de1d 100644 --- a/neutron/tests/unit/db/test_db_base_plugin_v2.py +++ b/neutron/tests/unit/db/test_db_base_plugin_v2.py @@ -164,7 +164,7 @@ class NeutronDbPluginV2TestCase(testlib_api.WebTestCase): def setup_config(self): # Create the default configurations - args = 
['--config-file', base.etcdir('neutron.conf.test')] + args = ['--config-file', base.etcdir('neutron.conf')] # If test_config specifies some config-file, use it, as well for config_file in test_lib.test_config.get('config_files', []): args.extend(['--config-file', config_file]) diff --git a/tox.ini b/tox.ini index 636a2a37739..a33e89a23b2 100644 --- a/tox.ini +++ b/tox.ini @@ -72,7 +72,7 @@ commands= # Checks for coding and style guidelines flake8 sh ./tools/coding-checks.sh --pylint '{posargs}' - neutron-db-manage check_migration + neutron-db-manage --config-file neutron/tests/etc/neutron.conf check_migration whitelist_externals = sh bash From ea35b299f06050608f3e7bb6fbc880006ed31024 Mon Sep 17 00:00:00 2001 From: Kevin Benton Date: Wed, 3 Jun 2015 18:25:14 -0700 Subject: [PATCH 152/292] Allow update_port_status to take network param Allow the update_port_status function to take a network as an optional parameter to skip calling get_network again if the caller has already done so. Closes-Bug: #1463656 Change-Id: I994f3abdb1b0ad3b2766f409b206ad4a8b2309b6 --- neutron/plugins/ml2/plugin.py | 13 ++++++++----- neutron/plugins/ml2/rpc.py | 3 ++- neutron/tests/unit/plugins/ml2/test_plugin.py | 10 ++++++++++ 3 files changed, 20 insertions(+), 6 deletions(-) diff --git a/neutron/plugins/ml2/plugin.py b/neutron/plugins/ml2/plugin.py index df831fdc81f..ba8054b9989 100644 --- a/neutron/plugins/ml2/plugin.py +++ b/neutron/plugins/ml2/plugin.py @@ -1375,10 +1375,13 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2, return self._bind_port_if_needed(port_context) - def update_port_status(self, context, port_id, status, host=None): + def update_port_status(self, context, port_id, status, host=None, + network=None): """ Returns port_id (non-truncated uuid) if the port exists. Otherwise returns None. + network can be passed in to avoid another get_network call if + one was already performed by the caller. 
""" updated = False session = context.session @@ -1398,8 +1401,8 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2, original_port = self._make_port_dict(port) port.status = status updated_port = self._make_port_dict(port) - network = self.get_network(context, - original_port['network_id']) + network = network or self.get_network( + context, original_port['network_id']) levels = db.get_binding_levels(session, port.id, port.port_binding.host) mech_context = driver_context.PortContext( @@ -1426,8 +1429,8 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2, port_id) return original_port = self._make_port_dict(port) - network = self.get_network(context, - original_port['network_id']) + network = network or self.get_network( + context, original_port['network_id']) port.status = db.generate_dvr_port_status(session, port['id']) updated_port = self._make_port_dict(port) levels = db.get_binding_levels(session, port_id, host) diff --git a/neutron/plugins/ml2/rpc.py b/neutron/plugins/ml2/rpc.py index b9d478cd51a..eeccde6a0e9 100644 --- a/neutron/plugins/ml2/rpc.py +++ b/neutron/plugins/ml2/rpc.py @@ -103,7 +103,8 @@ class RpcCallbacks(type_tunnel.TunnelRpcCallbackMixin): plugin.update_port_status(rpc_context, port_id, new_status, - host) + host, + port_context.network.current) entry = {'device': device, 'network_id': port['network_id'], diff --git a/neutron/tests/unit/plugins/ml2/test_plugin.py b/neutron/tests/unit/plugins/ml2/test_plugin.py index 50dfd319641..aa7de37213a 100644 --- a/neutron/tests/unit/plugins/ml2/test_plugin.py +++ b/neutron/tests/unit/plugins/ml2/test_plugin.py @@ -412,6 +412,16 @@ class TestMl2PortsV2(test_plugin.TestPortsV2, Ml2PluginV2TestCase): plugin.update_port(ctx, port['port']['id'], port) self.assertTrue(sg_member_update.called) + def test_update_port_status_with_network(self): + ctx = context.get_admin_context() + plugin = manager.NeutronManager.get_plugin() + with self.port() as port: + net = plugin.get_network(ctx, 
port['port']['network_id']) + with mock.patch.object(plugin, 'get_network') as get_net: + plugin.update_port_status(ctx, port['port']['id'], 'UP', + network=net) + self.assertFalse(get_net.called) + def test_update_port_mac(self): self.check_update_port_mac( host_arg={portbindings.HOST_ID: HOST}, From 6d0d72973152bb45587437c80d4ffe0fe7bba761 Mon Sep 17 00:00:00 2001 From: Elena Ezhova Date: Tue, 7 Apr 2015 14:58:13 +0300 Subject: [PATCH 153/292] Handle SIGHUP: neutron-server (multiprocess) and metadata agent All launchers implemented in common.service require each service to implement reset method because it is called in case a process receives a SIGHUP. This change adds the reset method to neutron.service.RpcWorker and neutron.wsgi.WorkerService which are used to wrap rpc and api workers correspondingly. Now neutron-server running in multiprocess mode (api_workers > 0 and rpc_workers > 0) and metadata agent don't die on receiving SIGHUP and support reloading policy_path and logging options in config. Note that reset is called only in case a service is running in daemon mode. Other changes made in the scope of this patch that need to be mentioned: * Don't empty self._servers list in RpcWorker's stop method When a service is restarted all services are gracefully shutdowned, resetted and started again (see openstack.common.service code). As graceful shutdown implies calling service.stop() and then service.wait() we don't want to clean self._servers list because it would be impossible to wait for them to stop processing requests and cleaning up their resources. Otherwise, this would lead to problems with rpc after starting the rpc server again. * Create a duplicate socket each time WorkerService starts When api worker is stopped it kills the eventlet wsgi server which internally closes the wsgi server socket object. This server socket object becomes not usable which leads to "Bad file descriptor" errors on service restart. Added functional and unit tests. 
DocImpact Partial-Bug: #1276694 Change-Id: I75b00946b7cae891c6eb192e853118e7d49e4a24 --- neutron/common/config.py | 9 + neutron/service.py | 9 +- neutron/tests/functional/requirements.txt | 1 + neutron/tests/functional/test_server.py | 247 ++++++++++++++++++++++ neutron/tests/unit/test_service.py | 33 +++ neutron/tests/unit/test_wsgi.py | 14 +- neutron/wsgi.py | 16 +- 7 files changed, 321 insertions(+), 8 deletions(-) create mode 100644 neutron/tests/functional/test_server.py create mode 100644 neutron/tests/unit/test_service.py diff --git a/neutron/common/config.py b/neutron/common/config.py index 93f57159f3e..c8e4eebf52c 100644 --- a/neutron/common/config.py +++ b/neutron/common/config.py @@ -31,6 +31,7 @@ from paste import deploy from neutron.api.v2 import attributes from neutron.common import utils from neutron.i18n import _LI +from neutron import policy from neutron import version @@ -210,6 +211,14 @@ def setup_logging(): LOG.debug("command line: %s", " ".join(sys.argv)) +def reset_service(): + # Reset worker in case SIGHUP is called. + # Note that this is called only in case a service is running in + # daemon mode. + setup_logging() + policy.refresh() + + def load_paste_app(app_name): """Builds and returns a WSGI app from a paste config file. 
diff --git a/neutron/service.py b/neutron/service.py index 708882b7312..e27dd5cdc2f 100644 --- a/neutron/service.py +++ b/neutron/service.py @@ -32,7 +32,6 @@ from neutron.i18n import _LE, _LI from neutron import manager from neutron.openstack.common import loopingcall from neutron.openstack.common import service as common_service -from neutron import policy from neutron import wsgi @@ -128,7 +127,10 @@ class RpcWorker(object): for server in self._servers: if isinstance(server, rpc_server.MessageHandlingServer): server.stop() - self._servers = [] + + @staticmethod + def reset(): + config.reset_service() def serve_rpc(): @@ -288,8 +290,7 @@ class Service(n_rpc.Service): LOG.exception(_LE("Exception occurs when waiting for timer")) def reset(self): - config.setup_logging() - policy.refresh() + config.reset_service() def periodic_tasks(self, raise_on_error=False): """Tasks to be run at a periodic interval.""" diff --git a/neutron/tests/functional/requirements.txt b/neutron/tests/functional/requirements.txt index 0c5f2215b44..f98f475bc61 100644 --- a/neutron/tests/functional/requirements.txt +++ b/neutron/tests/functional/requirements.txt @@ -4,5 +4,6 @@ # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. +psutil>=1.1.1,<2.0.0 psycopg2 MySQL-python diff --git a/neutron/tests/functional/test_server.py b/neutron/tests/functional/test_server.py new file mode 100644 index 00000000000..8f81f684956 --- /dev/null +++ b/neutron/tests/functional/test_server.py @@ -0,0 +1,247 @@ +# Copyright 2015 Mirantis Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import httplib2 +import mock +import os +import signal +import socket +import time +import traceback + +from oslo_config import cfg +import psutil + +from neutron.agent.linux import utils +from neutron import service +from neutron.tests import base +from neutron import wsgi + + +CONF = cfg.CONF + +# This message will be written to temporary file each time +# reset method is called. +FAKE_RESET_MSG = "reset".encode("utf-8") + +TARGET_PLUGIN = 'neutron.plugins.ml2.plugin.Ml2Plugin' + + +class TestNeutronServer(base.BaseTestCase): + def setUp(self): + super(TestNeutronServer, self).setUp() + self.service_pid = None + self.workers = None + self.temp_file = self.get_temp_file_path("test_server.tmp") + self.health_checker = None + self.pipein, self.pipeout = os.pipe() + self.addCleanup(self._destroy_workers) + + def _destroy_workers(self): + if self.service_pid: + # Make sure all processes are stopped + os.kill(self.service_pid, signal.SIGKILL) + + def _start_server(self, callback, workers): + """Run a given service. 
+ + :param callback: callback that will start the required service + :param workers: number of service workers + :returns: list of spawned workers' pids + """ + + self.workers = workers + + # Fork a new process in which server will be started + pid = os.fork() + if pid == 0: + status = 0 + try: + callback(workers) + except SystemExit as exc: + status = exc.code + except BaseException: + traceback.print_exc() + status = 2 + + # Really exit + os._exit(status) + + self.service_pid = pid + + if self.workers > 0: + # Wait at most 10 seconds to spawn workers + condition = lambda: self.workers == len(self._get_workers()) + + utils.wait_until_true( + condition, timeout=10, sleep=0.1, + exception=RuntimeError( + "Failed to start %d workers." % self.workers)) + + workers = self._get_workers() + self.assertEqual(len(workers), self.workers) + return workers + + # Wait for a service to start. + utils.wait_until_true(self.health_checker, timeout=10, sleep=0.1, + exception=RuntimeError( + "Failed to start service.")) + + return [self.service_pid] + + def _get_workers(self): + """Get the list of processes in which WSGI server is running.""" + + if self.workers > 0: + return [proc.pid for proc in psutil.process_iter() + if proc.ppid == self.service_pid] + else: + return [proc.pid for proc in psutil.process_iter() + if proc.pid == self.service_pid] + + def _fake_reset(self): + """Writes FAKE_RESET_MSG to temporary file on each call.""" + + with open(self.temp_file, 'a') as f: + f.write(FAKE_RESET_MSG) + + def _test_restart_service_on_sighup(self, service, workers=0): + """Test that a service correctly restarts on receiving SIGHUP. + + 1. Start a service with a given number of workers. + 2. Send SIGHUP to the service. + 3. Wait for workers (if any) to restart. + 4. Assert that the pids of the workers didn't change after restart. 
+ """ + + start_workers = self._start_server(callback=service, workers=workers) + + os.kill(self.service_pid, signal.SIGHUP) + + # Wait for temp file to be created and its size become equal + # to size of FAKE_RESET_MSG repeated (workers + 1) times. + expected_size = len(FAKE_RESET_MSG) * (workers + 1) + condition = lambda: (os.path.isfile(self.temp_file) + and os.stat(self.temp_file).st_size == + expected_size) + + utils.wait_until_true( + condition, timeout=5, sleep=0.1, + exception=RuntimeError( + "Timed out waiting for file %(filename)s to be created and " + "its size become equal to %(size)s." % + {'filename': self.temp_file, + 'size': expected_size})) + + # Verify that reset has been called for parent process in which + # a service was started and for each worker by checking that + # FAKE_RESET_MSG has been written to temp file workers + 1 times. + with open(self.temp_file, 'r') as f: + res = f.readline() + self.assertEqual(FAKE_RESET_MSG * (workers + 1), res) + + # Make sure worker pids don't change + end_workers = self._get_workers() + self.assertEqual(start_workers, end_workers) + + +class TestWsgiServer(TestNeutronServer): + """Tests for neutron.wsgi.Server.""" + + def setUp(self): + super(TestWsgiServer, self).setUp() + self.health_checker = self._check_active + self.port = None + + @staticmethod + def application(environ, start_response): + """A primitive test application.""" + + response_body = 'Response' + status = '200 OK' + response_headers = [('Content-Type', 'text/plain'), + ('Content-Length', str(len(response_body)))] + start_response(status, response_headers) + return [response_body] + + def _check_active(self): + """Check a wsgi service is active by making a GET request.""" + port = int(os.read(self.pipein, 5)) + conn = httplib2.HTTPConnectionWithTimeout("localhost", port) + try: + conn.request("GET", "/") + resp = conn.getresponse() + return resp.status == 200 + except socket.error: + return False + + def _run_wsgi(self, workers=0): + """Start 
WSGI server with a test application.""" + + # Mock reset method to check that it is being called + # on receiving SIGHUP. + with mock.patch("neutron.wsgi.WorkerService.reset") as reset_method: + reset_method.side_effect = self._fake_reset + + server = wsgi.Server("Test") + server.start(self.application, 0, "0.0.0.0", + workers=workers) + + # Memorize a port that was chosen for the service + self.port = server.port + os.write(self.pipeout, str(self.port)) + + server.wait() + + def test_restart_wsgi_on_sighup_multiple_workers(self): + self._test_restart_service_on_sighup(service=self._run_wsgi, + workers=2) + + +class TestRPCServer(TestNeutronServer): + """Tests for neutron RPC server.""" + + def setUp(self): + super(TestRPCServer, self).setUp() + self.setup_coreplugin(TARGET_PLUGIN) + self._plugin_patcher = mock.patch(TARGET_PLUGIN, autospec=True) + self.plugin = self._plugin_patcher.start() + self.plugin.return_value.rpc_workers_supported = True + self.health_checker = self._check_active + + def _check_active(self): + time.sleep(5) + return True + + def _serve_rpc(self, workers=0): + """Start RPC server with a given number of workers.""" + + # Mock reset method to check that it is being called + # on receiving SIGHUP. + with mock.patch("neutron.service.RpcWorker.reset") as reset_method: + with mock.patch( + "neutron.manager.NeutronManager.get_plugin" + ) as get_plugin: + reset_method.side_effect = self._fake_reset + get_plugin.return_value = self.plugin + + CONF.set_override("rpc_workers", workers) + + launcher = service.serve_rpc() + launcher.wait() + + def test_restart_rpc_on_sighup_multiple_workers(self): + self._test_restart_service_on_sighup(service=self._serve_rpc, + workers=2) diff --git a/neutron/tests/unit/test_service.py b/neutron/tests/unit/test_service.py new file mode 100644 index 00000000000..582449f5a36 --- /dev/null +++ b/neutron/tests/unit/test_service.py @@ -0,0 +1,33 @@ +# Copyright 2015 Mirantis Inc. +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import mock + +from neutron import service +from neutron.tests import base + + +class TestRpcWorker(base.BaseTestCase): + + @mock.patch("neutron.policy.refresh") + @mock.patch("neutron.common.config.setup_logging") + def test_reset(self, setup_logging_mock, refresh_mock): + _plugin = mock.Mock() + + rpc_worker = service.RpcWorker(_plugin) + rpc_worker.reset() + + setup_logging_mock.assert_called_once_with() + refresh_mock.assert_called_once_with() diff --git a/neutron/tests/unit/test_wsgi.py b/neutron/tests/unit/test_wsgi.py index 584a66610ee..3331f450f80 100644 --- a/neutron/tests/unit/test_wsgi.py +++ b/neutron/tests/unit/test_wsgi.py @@ -65,6 +65,18 @@ class TestWorkerService(base.BaseTestCase): workerservice.start() self.assertFalse(apimock.called) + @mock.patch("neutron.policy.refresh") + @mock.patch("neutron.common.config.setup_logging") + def test_reset(self, setup_logging_mock, refresh_mock): + _service = mock.Mock() + _app = mock.Mock() + + worker_service = wsgi.WorkerService(_service, _app) + worker_service.reset() + + setup_logging_mock.assert_called_once_with() + refresh_mock.assert_called_once_with() + class TestWSGIServer(base.BaseTestCase): """WSGI server tests.""" @@ -132,7 +144,7 @@ class TestWSGIServer(base.BaseTestCase): mock.call( server._run, None, - mock_listen.return_value) + mock_listen.return_value.dup.return_value) ]) def test_app(self): diff --git a/neutron/wsgi.py b/neutron/wsgi.py index 
437e57b0984..0aecc8069df 100644 --- a/neutron/wsgi.py +++ b/neutron/wsgi.py @@ -37,6 +37,7 @@ import six import webob.dec import webob.exc +from neutron.common import config from neutron.common import exceptions as exception from neutron import context from neutron.db import api @@ -99,12 +100,17 @@ class WorkerService(object): self._server = None def start(self): + # When api worker is stopped it kills the eventlet wsgi server which + # internally closes the wsgi server socket object. This server socket + # object becomes not usable which leads to "Bad file descriptor" + # errors on service restart. + # Duplicate a socket object to keep a file descriptor usable. + dup_sock = self._service._socket.dup() if CONF.use_ssl: - self._service._socket = self._service.wrap_ssl( - self._service._socket) + dup_sock = self._service.wrap_ssl(dup_sock) self._server = self._service.pool.spawn(self._service._run, self._application, - self._service._socket) + dup_sock) def wait(self): if isinstance(self._server, eventlet.greenthread.GreenThread): @@ -115,6 +121,10 @@ class WorkerService(object): self._server.kill() self._server = None + @staticmethod + def reset(): + config.reset_service() + class Server(object): """Server class to manage multiple WSGI sockets and applications.""" From 753196480d9cca10c5b91dfa8221e89f658fa110 Mon Sep 17 00:00:00 2001 From: Jakub Libosvar Date: Wed, 27 May 2015 13:54:06 +0000 Subject: [PATCH 154/292] Break Pinger class to functions As the class served only for storing parameters that can be passed as actual function parameters, there is no reason for class. 
Change-Id: I553b4d6daeb78d495cda09894582a3d885b5d1b5 --- neutron/tests/common/machine_fixtures.py | 35 ++----------------- neutron/tests/common/net_helpers.py | 19 ++++++++++ .../tests/functional/agent/test_ovs_flows.py | 13 +++---- 3 files changed, 26 insertions(+), 41 deletions(-) diff --git a/neutron/tests/common/machine_fixtures.py b/neutron/tests/common/machine_fixtures.py index bc097d31fd8..da548beb62a 100644 --- a/neutron/tests/common/machine_fixtures.py +++ b/neutron/tests/common/machine_fixtures.py @@ -12,39 +12,10 @@ # License for the specific language governing permissions and limitations # under the License. # - import fixtures -import netaddr from neutron.agent.linux import ip_lib from neutron.tests.common import net_helpers -from neutron.tests import tools - - -class Pinger(object): - def __init__(self, namespace, timeout=1, max_attempts=1): - self.namespace = namespace - self._timeout = timeout - self._max_attempts = max_attempts - - def _ping_destination(self, dest_address): - ns_ip_wrapper = ip_lib.IPWrapper(self.namespace) - ipversion = netaddr.IPAddress(dest_address).version - ping_command = 'ping' if ipversion == 4 else 'ping6' - ns_ip_wrapper.netns.execute([ping_command, '-c', self._max_attempts, - '-W', self._timeout, dest_address]) - - def assert_ping(self, dst_ip): - self._ping_destination(dst_ip) - - def assert_no_ping(self, dst_ip): - try: - self._ping_destination(dst_ip) - tools.fail("destination ip %(dst_ip)s is replying to ping " - "from namespace %(ns)s, but it shouldn't" % - {'ns': self.namespace, 'dst_ip': dst_ip}) - except RuntimeError: - pass class FakeMachine(fixtures.Fixture): @@ -89,12 +60,10 @@ class FakeMachine(fixtures.Fixture): return ns_ip_wrapper.netns.execute(*args, **kwargs) def assert_ping(self, dst_ip): - pinger = Pinger(self.namespace) - pinger.assert_ping(dst_ip) + net_helpers.assert_ping(self.namespace, dst_ip) def assert_no_ping(self, dst_ip): - pinger = Pinger(self.namespace) - pinger.assert_no_ping(dst_ip) + 
net_helpers.assert_no_ping(self.namespace, dst_ip) class PeerMachines(fixtures.Fixture): diff --git a/neutron/tests/common/net_helpers.py b/neutron/tests/common/net_helpers.py index 5d665f7f9ce..8884c66966c 100644 --- a/neutron/tests/common/net_helpers.py +++ b/neutron/tests/common/net_helpers.py @@ -62,6 +62,25 @@ def set_namespace_gateway(port_dev, gateway_ip): port_dev.route.add_gateway(gateway_ip) +def assert_ping(src_namespace, dst_ip, timeout=1, count=1): + ipversion = netaddr.IPAddress(dst_ip).version + ping_command = 'ping' if ipversion == 4 else 'ping6' + ns_ip_wrapper = ip_lib.IPWrapper(src_namespace) + ns_ip_wrapper.netns.execute([ping_command, '-c', count, '-W', timeout, + dst_ip]) + + +def assert_no_ping(src_namespace, dst_ip, timeout=1, count=1): + try: + assert_ping(src_namespace, dst_ip, timeout, count) + except RuntimeError: + pass + else: + tools.fail("destination ip %(destination)s is replying to ping from " + "namespace %(ns)s, but it shouldn't" % + {'ns': src_namespace, 'destination': dst_ip}) + + class NamespaceFixture(fixtures.Fixture): """Create a namespace. 
diff --git a/neutron/tests/functional/agent/test_ovs_flows.py b/neutron/tests/functional/agent/test_ovs_flows.py index bf9936d633a..90107d85552 100644 --- a/neutron/tests/functional/agent/test_ovs_flows.py +++ b/neutron/tests/functional/agent/test_ovs_flows.py @@ -23,7 +23,6 @@ from neutron.agent.linux import ip_lib from neutron.cmd.sanity import checks from neutron.plugins.openvswitch.agent import ovs_neutron_agent as ovsagt from neutron.plugins.openvswitch.common import constants -from neutron.tests.common import machine_fixtures from neutron.tests.common import net_helpers from neutron.tests.functional.agent import test_ovs_lib from neutron.tests.functional import base @@ -98,8 +97,6 @@ class _ARPSpoofTestCase(object): net_helpers.NamespaceFixture()).name self.dst_namespace = self.useFixture( net_helpers.NamespaceFixture()).name - self.pinger = machine_fixtures.Pinger( - self.src_namespace, max_attempts=2) self.src_p = self.useFixture( net_helpers.OVSPortFixture(self.br, self.src_namespace)).port self.dst_p = self.useFixture( @@ -112,7 +109,7 @@ class _ARPSpoofTestCase(object): self._setup_arp_spoof_for_port(self.dst_p.name, [self.dst_addr]) self.src_p.addr.add('%s/24' % self.src_addr) self.dst_p.addr.add('%s/24' % self.dst_addr) - self.pinger.assert_ping(self.dst_addr) + net_helpers.assert_ping(self.src_namespace, self.dst_addr, count=2) def test_arp_spoof_doesnt_block_ipv6(self): self.src_addr = '2000::1' @@ -124,7 +121,7 @@ class _ARPSpoofTestCase(object): # make sure the IPv6 addresses are ready before pinging self.src_p.addr.wait_until_address_ready(self.src_addr) self.dst_p.addr.wait_until_address_ready(self.dst_addr) - self.pinger.assert_ping(self.dst_addr) + net_helpers.assert_ping(self.src_namespace, self.dst_addr, count=2) def test_arp_spoof_blocks_response(self): # this will prevent the destination from responding to the ARP @@ -132,7 +129,7 @@ class _ARPSpoofTestCase(object): self._setup_arp_spoof_for_port(self.dst_p.name, ['192.168.0.3']) 
self.src_p.addr.add('%s/24' % self.src_addr) self.dst_p.addr.add('%s/24' % self.dst_addr) - self.pinger.assert_no_ping(self.dst_addr) + net_helpers.assert_no_ping(self.src_namespace, self.dst_addr, count=2) def test_arp_spoof_blocks_request(self): # this will prevent the source from sending an ARP @@ -154,7 +151,7 @@ class _ARPSpoofTestCase(object): self.dst_addr]) self.src_p.addr.add('%s/24' % self.src_addr) self.dst_p.addr.add('%s/24' % self.dst_addr) - self.pinger.assert_ping(self.dst_addr) + net_helpers.assert_ping(self.src_namespace, self.dst_addr, count=2) def test_arp_spoof_disable_port_security(self): # block first and then disable port security to make sure old rules @@ -164,7 +161,7 @@ class _ARPSpoofTestCase(object): psec=False) self.src_p.addr.add('%s/24' % self.src_addr) self.dst_p.addr.add('%s/24' % self.dst_addr) - self.pinger.assert_ping(self.dst_addr) + net_helpers.assert_ping(self.src_namespace, self.dst_addr, count=2) def _setup_arp_spoof_for_port(self, port, addrs, psec=True): of_port_map = self.br.get_vif_port_to_ofport_map() From 53ec63c430d123cd1ed4acd3b94537e9cb380bcd Mon Sep 17 00:00:00 2001 From: Romil Gupta Date: Thu, 4 Jun 2015 04:21:14 -0700 Subject: [PATCH 155/292] Fix a regression in "Separate ovs-ofctl using code as a driver" change The tunnels are not getting established between Network Node and Compute Nodes in non DVR mode with l2pop enabled and throws the AttributeError: add_tunnel_port. This fixes a regression in change Ie1224f8a1c17268cd7d1c474ed82fdfb8852eaa8. 
Co-Authored-By: YAMAMOTO Takashi Closes-Bug: #1461486 Change-Id: I1106fd3dd32f6f827eb25dec4815ff1120af96f0 --- .../agent/openflow/ovs_ofctl/br_tun.py | 20 +++++--- .../agent/openflow/ovs_ofctl/test_br_tun.py | 51 +++++++++++++++++++ 2 files changed, 63 insertions(+), 8 deletions(-) diff --git a/neutron/plugins/openvswitch/agent/openflow/ovs_ofctl/br_tun.py b/neutron/plugins/openvswitch/agent/openflow/ovs_ofctl/br_tun.py index 58301dfefe5..eeaf6ee8f05 100644 --- a/neutron/plugins/openvswitch/agent/openflow/ovs_ofctl/br_tun.py +++ b/neutron/plugins/openvswitch/agent/openflow/ovs_ofctl/br_tun.py @@ -203,14 +203,16 @@ class OVSTunnelBridge(ovs_bridge.OVSAgentBridge, dl_vlan=vlan, nw_dst='%s' % ip) - def setup_tunnel_port(self, network_type, port): - self.add_flow(priority=1, - in_port=port, - actions="resubmit(,%s)" % - constants.TUN_TABLE[network_type]) + def setup_tunnel_port(self, network_type, port, deferred_br=None): + br = deferred_br if deferred_br else self + br.add_flow(priority=1, + in_port=port, + actions="resubmit(,%s)" % + constants.TUN_TABLE[network_type]) - def cleanup_tunnel_port(self, port): - self.delete_flows(in_port=port) + def cleanup_tunnel_port(self, port, deferred_br=None): + br = deferred_br if deferred_br else self + br.delete_flows(in_port=port) def add_dvr_mac_tun(self, mac, port): # Table DVR_NOT_LEARN ensures unique dvr macs in the cloud @@ -237,10 +239,12 @@ class DeferredOVSTunnelBridge(ovs_lib.DeferredOVSBridge): 'delete_flood_to_tun', 'install_arp_responder', 'delete_arp_responder', + 'setup_tunnel_port', + 'cleanup_tunnel_port', ] def __getattr__(self, name): if name in self._METHODS: m = getattr(self.br, name) return functools.partial(m, deferred_br=self) - raise AttributeError(name) + return super(DeferredOVSTunnelBridge, self).__getattr__(name) diff --git a/neutron/tests/unit/plugins/openvswitch/agent/openflow/ovs_ofctl/test_br_tun.py b/neutron/tests/unit/plugins/openvswitch/agent/openflow/ovs_ofctl/test_br_tun.py index 
27a046d0a4b..1a6ac5816c7 100644 --- a/neutron/tests/unit/plugins/openvswitch/agent/openflow/ovs_ofctl/test_br_tun.py +++ b/neutron/tests/unit/plugins/openvswitch/agent/openflow/ovs_ofctl/test_br_tun.py @@ -257,3 +257,54 @@ class OVSTunnelBridgeTest(ovs_bridge_test_base.OVSBridgeTestBase, call.delete_flows(eth_src=mac, table_id=9), ] self.assertEqual(expected, self.mock.mock_calls) + + def _mock_add_tunnel_port(self, deferred_br=False): + port_name = 'fake_port' + remote_ip = '192.168.1.3' + local_ip = '192.168.1.2' + tunnel_type = 'vxlan' + vxlan_udp_port = '4789' + dont_fragment = True + if deferred_br: + with mock.patch('neutron.agent.common.ovs_lib.OVSBridge.add_port', + return_value=9999) as add_port, \ + self.br.deferred() as deferred_br: + ofport = deferred_br.add_tunnel_port(port_name, remote_ip, + local_ip, tunnel_type, + vxlan_udp_port, + dont_fragment) + else: + with mock.patch('neutron.agent.common.ovs_lib.OVSBridge.add_port', + return_value=9999) as add_port: + ofport = self.br.add_tunnel_port(port_name, remote_ip, + local_ip, tunnel_type, + vxlan_udp_port, + dont_fragment) + self.assertEqual(9999, ofport) + self.assertEqual(1, add_port.call_count) + self.assertEqual(port_name, add_port.call_args[0][0]) + + def _mock_delete_port(self, deferred_br=False): + port_name = 'fake_port' + if deferred_br: + with mock.patch('neutron.agent.common.ovs_lib.OVSBridge.' + 'delete_port') as delete_port, \ + self.br.deferred() as deferred_br: + deferred_br.delete_port(port_name) + else: + with mock.patch('neutron.agent.common.ovs_lib.OVSBridge.' 
+ 'delete_port') as delete_port: + self.br.delete_port(port_name) + self.assertEqual([call(port_name)], delete_port.mock_calls) + + def test_add_tunnel_port(self): + self._mock_add_tunnel_port() + + def test_delete_port(self): + self._mock_delete_port() + + def test_deferred_br_add_tunnel_port(self): + self._mock_add_tunnel_port(True) + + def test_deferred_br_delete_port(self): + self._mock_delete_port(True) From 66fece4f84e62f14fb59a721b37986784976d0c4 Mon Sep 17 00:00:00 2001 From: Ihar Hrachyshka Date: Thu, 23 Apr 2015 14:03:52 +0200 Subject: [PATCH 156/292] policy: cleanup deprecation code to handle old extension:xxx rules It served and warned users for enough time (since Icehouse) to be sure everyone was notified about the need to update their policy file. Change-Id: I240b935741e49fbf65c0b95715af04af4b2a73e7 --- neutron/policy.py | 48 +------------------------------ neutron/tests/unit/test_policy.py | 30 ------------------- 2 files changed, 1 insertion(+), 77 deletions(-) diff --git a/neutron/policy.py b/neutron/policy.py index a2d099f6761..7c21559c6bf 100644 --- a/neutron/policy.py +++ b/neutron/policy.py @@ -18,7 +18,6 @@ Policy engine for neutron. Largely copied from nova. 
""" import collections -import itertools import logging as std_logging import re @@ -30,7 +29,7 @@ import six from neutron.api.v2 import attributes from neutron.common import constants as const from neutron.common import exceptions -from neutron.i18n import _LE, _LI, _LW +from neutron.i18n import _LE, _LW from neutron.openstack.common import policy @@ -39,22 +38,6 @@ LOG = logging.getLogger(__name__) _ENFORCER = None ADMIN_CTX_POLICY = 'context_is_admin' ADVSVC_CTX_POLICY = 'context_is_advsvc' -# Maps deprecated 'extension' policies to new-style policies -DEPRECATED_POLICY_MAP = { - 'extension:provider_network': - ['network:provider:network_type', - 'network:provider:physical_network', - 'network:provider:segmentation_id'], - 'extension:router': - ['network:router:external'], - 'extension:port_binding': - ['port:binding:vif_type', 'port:binding:vif_details', - 'port:binding:profile', 'port:binding:host_id'] -} -DEPRECATED_ACTION_MAP = { - 'view': ['get'], - 'set': ['create', 'update'] -} def reset(): @@ -95,35 +78,6 @@ def set_rules(policies, overwrite=True): """ LOG.debug("Loading policies from file: %s", _ENFORCER.policy_path) - # Ensure backward compatibility with folsom/grizzly convention - # for extension rules - for pol in policies.keys(): - if any([pol.startswith(depr_pol) for depr_pol in - DEPRECATED_POLICY_MAP.keys()]): - LOG.warn(_LW("Found deprecated policy rule:%s. 
Please consider " - "upgrading your policy configuration file"), pol) - pol_name, action = pol.rsplit(':', 1) - try: - new_actions = DEPRECATED_ACTION_MAP[action] - new_policies = DEPRECATED_POLICY_MAP[pol_name] - # bind new actions and policies together - for actual_policy in ['_'.join(item) for item in - itertools.product(new_actions, - new_policies)]: - if actual_policy not in policies: - # New policy, same rule - LOG.info(_LI("Inserting policy:%(new_policy)s in " - "place of deprecated " - "policy:%(old_policy)s"), - {'new_policy': actual_policy, - 'old_policy': pol}) - policies[actual_policy] = policies[pol] - # Remove old-style policy - del policies[pol] - except KeyError: - LOG.error(_LE("Backward compatibility unavailable for " - "deprecated policy %s. The policy will " - "not be enforced"), pol) init() _ENFORCER.set_rules(policies, overwrite) diff --git a/neutron/tests/unit/test_policy.py b/neutron/tests/unit/test_policy.py index cab94f24b36..a20e531f743 100644 --- a/neutron/tests/unit/test_policy.py +++ b/neutron/tests/unit/test_policy.py @@ -562,36 +562,6 @@ class NeutronPolicyTestCase(base.BaseTestCase): def test_enforce_tenant_id_check_invalid_parent_resource_raises(self): self._test_enforce_tenant_id_raises('tenant_id:%(foobaz_tenant_id)s') - def _test_set_rules_with_deprecated_policy(self, input_rules, - expected_rules): - policy.set_rules(input_rules.copy()) - # verify deprecated policy has been removed - for pol in input_rules.keys(): - self.assertNotIn(pol, policy._ENFORCER.rules) - # verify deprecated policy was correctly translated. 
Iterate - # over items for compatibility with unittest2 in python 2.6 - for rule in expected_rules: - self.assertIn(rule, policy._ENFORCER.rules) - self.assertEqual(str(policy._ENFORCER.rules[rule]), - expected_rules[rule]) - - def test_set_rules_with_deprecated_view_policy(self): - self._test_set_rules_with_deprecated_policy( - {'extension:router:view': 'rule:admin_or_owner'}, - {'get_network:router:external': 'rule:admin_or_owner'}) - - def test_set_rules_with_deprecated_set_policy(self): - expected_policies = ['create_network:provider:network_type', - 'create_network:provider:physical_network', - 'create_network:provider:segmentation_id', - 'update_network:provider:network_type', - 'update_network:provider:physical_network', - 'update_network:provider:segmentation_id'] - self._test_set_rules_with_deprecated_policy( - {'extension:provider_network:set': 'rule:admin_only'}, - dict((policy, 'rule:admin_only') for policy in - expected_policies)) - def test_process_rules(self): action = "create_" + FAKE_RESOURCE_NAME # Construct RuleChecks for an action, attribute and subattribute From 9143ce10e422bd17c4817dfe08163879e0e5a4ca Mon Sep 17 00:00:00 2001 From: Ihar Hrachyshka Date: Thu, 23 Apr 2015 12:12:52 +0200 Subject: [PATCH 157/292] Consume oslo.policy Some non intrusive changes to tests are needed, so that we don't rely on library symbols that are now private (f.e. parse_rule). 
Closes-Bug: #1458945 Change-Id: I90326479e908042fec9ecb25fa19a8dd5b15e7d8 --- neutron/api/v2/base.py | 12 +- neutron/api/v2/resource.py | 4 +- neutron/openstack/common/policy.py | 963 ------------------------- neutron/policy.py | 13 +- neutron/tests/unit/api/v2/test_base.py | 6 +- neutron/tests/unit/test_policy.py | 119 ++- openstack-common.conf | 1 - requirements.txt | 1 + 8 files changed, 78 insertions(+), 1041 deletions(-) delete mode 100644 neutron/openstack/common/policy.py diff --git a/neutron/api/v2/base.py b/neutron/api/v2/base.py index ea4d45b2cb4..8237905d26b 100644 --- a/neutron/api/v2/base.py +++ b/neutron/api/v2/base.py @@ -18,6 +18,7 @@ import copy import netaddr from oslo_config import cfg from oslo_log import log as logging +from oslo_policy import policy as oslo_policy from oslo_utils import excutils import six import webob.exc @@ -30,7 +31,6 @@ from neutron.common import constants as const from neutron.common import exceptions from neutron.common import rpc as n_rpc from neutron.i18n import _LE, _LI -from neutron.openstack.common import policy as common_policy from neutron import policy from neutron import quota @@ -44,7 +44,7 @@ FAULT_MAP = {exceptions.NotFound: webob.exc.HTTPNotFound, exceptions.ServiceUnavailable: webob.exc.HTTPServiceUnavailable, exceptions.NotAuthorized: webob.exc.HTTPForbidden, netaddr.AddrFormatError: webob.exc.HTTPBadRequest, - common_policy.PolicyNotAuthorized: webob.exc.HTTPForbidden + oslo_policy.PolicyNotAuthorized: webob.exc.HTTPForbidden } @@ -192,7 +192,7 @@ class Controller(object): # Fetch the resource and verify if the user can access it try: resource = self._item(request, id, True) - except common_policy.PolicyNotAuthorized: + except oslo_policy.PolicyNotAuthorized: msg = _('The resource could not be found.') raise webob.exc.HTTPNotFound(msg) body = kwargs.pop('body', None) @@ -338,7 +338,7 @@ class Controller(object): field_list=field_list, parent_id=parent_id), fields_to_strip=added_fields)} - except 
common_policy.PolicyNotAuthorized: + except oslo_policy.PolicyNotAuthorized: # To avoid giving away information, pretend that it # doesn't exist msg = _('The resource could not be found.') @@ -481,7 +481,7 @@ class Controller(object): action, obj, pluralized=self._collection) - except common_policy.PolicyNotAuthorized: + except oslo_policy.PolicyNotAuthorized: # To avoid giving away information, pretend that it # doesn't exist msg = _('The resource could not be found.') @@ -537,7 +537,7 @@ class Controller(object): action, orig_obj, pluralized=self._collection) - except common_policy.PolicyNotAuthorized: + except oslo_policy.PolicyNotAuthorized: with excutils.save_and_reraise_exception() as ctxt: # If a tenant is modifying it's own object, it's safe to return # a 403. Otherwise, pretend that it doesn't exist to avoid diff --git a/neutron/api/v2/resource.py b/neutron/api/v2/resource.py index 09919b93815..dec23b00a2e 100644 --- a/neutron/api/v2/resource.py +++ b/neutron/api/v2/resource.py @@ -22,13 +22,13 @@ import sys import netaddr import oslo_i18n from oslo_log import log as logging +from oslo_policy import policy as oslo_policy import six import webob.dec import webob.exc from neutron.common import exceptions from neutron.i18n import _LE, _LI -from neutron.openstack.common import policy as common_policy from neutron import wsgi @@ -83,7 +83,7 @@ def Resource(controller, faults=None, deserializers=None, serializers=None): result = method(request=request, **args) except (exceptions.NeutronException, netaddr.AddrFormatError, - common_policy.PolicyNotAuthorized) as e: + oslo_policy.PolicyNotAuthorized) as e: for fault in faults: if isinstance(e, fault): mapped_exc = faults[fault] diff --git a/neutron/openstack/common/policy.py b/neutron/openstack/common/policy.py deleted file mode 100644 index f5abde3682a..00000000000 --- a/neutron/openstack/common/policy.py +++ /dev/null @@ -1,963 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright (c) 2012 OpenStack Foundation. 
-# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Common Policy Engine Implementation - -Policies can be expressed in one of two forms: A list of lists, or a -string written in the new policy language. - -In the list-of-lists representation, each check inside the innermost -list is combined as with an "and" conjunction--for that check to pass, -all the specified checks must pass. These innermost lists are then -combined as with an "or" conjunction. As an example, take the following -rule, expressed in the list-of-lists representation:: - - [["role:admin"], ["project_id:%(project_id)s", "role:projectadmin"]] - -This is the original way of expressing policies, but there now exists a -new way: the policy language. 
- -In the policy language, each check is specified the same way as in the -list-of-lists representation: a simple "a:b" pair that is matched to -the correct class to perform that check:: - - +===========================================================================+ - | TYPE | SYNTAX | - +===========================================================================+ - |User's Role | role:admin | - +---------------------------------------------------------------------------+ - |Rules already defined on policy | rule:admin_required | - +---------------------------------------------------------------------------+ - |Against URL's¹ | http://my-url.org/check | - +---------------------------------------------------------------------------+ - |User attributes² | project_id:%(target.project.id)s | - +---------------------------------------------------------------------------+ - |Strings | :'xpto2035abc' | - | | 'myproject': | - +---------------------------------------------------------------------------+ - | | project_id:xpto2035abc | - |Literals | domain_id:20 | - | | True:%(user.enabled)s | - +===========================================================================+ - -¹URL checking must return 'True' to be valid -²User attributes (obtained through the token): user_id, domain_id or project_id - -Conjunction operators are available, allowing for more expressiveness -in crafting policies. So, in the policy language, the previous check in -list-of-lists becomes:: - - role:admin or (project_id:%(project_id)s and role:projectadmin) - -The policy language also has the "not" operator, allowing a richer -policy rule:: - - project_id:%(project_id)s and not role:dunce - -Attributes sent along with API calls can be used by the policy engine -(on the right side of the expression), by using the following syntax:: - - :%(user.id)s - -Contextual attributes of objects identified by their IDs are loaded -from the database. 
They are also available to the policy engine and -can be checked through the `target` keyword:: - - :%(target.role.name)s - -Finally, two special policy checks should be mentioned; the policy -check "@" will always accept an access, and the policy check "!" will -always reject an access. (Note that if a rule is either the empty -list ("[]") or the empty string, this is equivalent to the "@" policy -check.) Of these, the "!" policy check is probably the most useful, -as it allows particular rules to be explicitly disabled. -""" - -import abc -import ast -import copy -import logging -import os -import re - -from oslo_config import cfg -from oslo_serialization import jsonutils -import six -import six.moves.urllib.parse as urlparse -import six.moves.urllib.request as urlrequest - -from neutron.openstack.common import fileutils -from neutron.openstack.common._i18n import _, _LE - - -policy_opts = [ - cfg.StrOpt('policy_file', - default='policy.json', - help=_('The JSON file that defines policies.')), - cfg.StrOpt('policy_default_rule', - default='default', - help=_('Default rule. Enforced when a requested rule is not ' - 'found.')), - cfg.MultiStrOpt('policy_dirs', - default=['policy.d'], - help=_('Directories where policy configuration files are ' - 'stored. They can be relative to any directory ' - 'in the search path defined by the config_dir ' - 'option, or absolute paths. The file defined by ' - 'policy_file must exist for these directories to ' - 'be searched. Missing or empty directories are ' - 'ignored.')), -] - -CONF = cfg.CONF -CONF.register_opts(policy_opts) - -LOG = logging.getLogger(__name__) - -_checks = {} - - -def list_opts(): - """Entry point for oslo-config-generator.""" - return [(None, copy.deepcopy(policy_opts))] - - -class PolicyNotAuthorized(Exception): - - def __init__(self, rule): - msg = _("Policy doesn't allow %s to be performed.") % rule - super(PolicyNotAuthorized, self).__init__(msg) - - -class Rules(dict): - """A store for rules. 
Handles the default_rule setting directly.""" - - @classmethod - def load_json(cls, data, default_rule=None): - """Allow loading of JSON rule data.""" - - # Suck in the JSON data and parse the rules - rules = dict((k, parse_rule(v)) for k, v in - jsonutils.loads(data).items()) - - return cls(rules, default_rule) - - def __init__(self, rules=None, default_rule=None): - """Initialize the Rules store.""" - - super(Rules, self).__init__(rules or {}) - self.default_rule = default_rule - - def __missing__(self, key): - """Implements the default rule handling.""" - - if isinstance(self.default_rule, dict): - raise KeyError(key) - - # If the default rule isn't actually defined, do something - # reasonably intelligent - if not self.default_rule: - raise KeyError(key) - - if isinstance(self.default_rule, BaseCheck): - return self.default_rule - - # We need to check this or we can get infinite recursion - if self.default_rule not in self: - raise KeyError(key) - - elif isinstance(self.default_rule, six.string_types): - return self[self.default_rule] - - def __str__(self): - """Dumps a string representation of the rules.""" - - # Start by building the canonical strings for the rules - out_rules = {} - for key, value in self.items(): - # Use empty string for singleton TrueCheck instances - if isinstance(value, TrueCheck): - out_rules[key] = '' - else: - out_rules[key] = str(value) - - # Dump a pretty-printed JSON representation - return jsonutils.dumps(out_rules, indent=4) - - -class Enforcer(object): - """Responsible for loading and enforcing rules. - - :param policy_file: Custom policy file to use, if none is - specified, `CONF.policy_file` will be - used. - :param rules: Default dictionary / Rules to use. It will be - considered just in the first instantiation. If - `load_rules(True)`, `clear()` or `set_rules(True)` - is called this will be overwritten. - :param default_rule: Default rule to use, CONF.default_rule will - be used if none is specified. 
- :param use_conf: Whether to load rules from cache or config file. - :param overwrite: Whether to overwrite existing rules when reload rules - from config file. - """ - - def __init__(self, policy_file=None, rules=None, - default_rule=None, use_conf=True, overwrite=True): - self.default_rule = default_rule or CONF.policy_default_rule - self.rules = Rules(rules, self.default_rule) - - self.policy_path = None - self.policy_file = policy_file or CONF.policy_file - self.use_conf = use_conf - self.overwrite = overwrite - - def set_rules(self, rules, overwrite=True, use_conf=False): - """Create a new Rules object based on the provided dict of rules. - - :param rules: New rules to use. It should be an instance of dict. - :param overwrite: Whether to overwrite current rules or update them - with the new rules. - :param use_conf: Whether to reload rules from cache or config file. - """ - - if not isinstance(rules, dict): - raise TypeError(_("Rules must be an instance of dict or Rules, " - "got %s instead") % type(rules)) - self.use_conf = use_conf - if overwrite: - self.rules = Rules(rules, self.default_rule) - else: - self.rules.update(rules) - - def clear(self): - """Clears Enforcer rules, policy's cache and policy's path.""" - self.set_rules({}) - fileutils.delete_cached_file(self.policy_path) - self.default_rule = None - self.policy_path = None - - def load_rules(self, force_reload=False): - """Loads policy_path's rules. - - Policy file is cached and will be reloaded if modified. - - :param force_reload: Whether to reload rules from config file. 
- """ - - if force_reload: - self.use_conf = force_reload - - if self.use_conf: - if not self.policy_path: - self.policy_path = self._get_policy_path(self.policy_file) - - self._load_policy_file(self.policy_path, force_reload, - overwrite=self.overwrite) - for path in CONF.policy_dirs: - try: - path = self._get_policy_path(path) - except cfg.ConfigFilesNotFoundError: - continue - self._walk_through_policy_directory(path, - self._load_policy_file, - force_reload, False) - - @staticmethod - def _walk_through_policy_directory(path, func, *args): - # We do not iterate over sub-directories. - policy_files = next(os.walk(path))[2] - policy_files.sort() - for policy_file in [p for p in policy_files if not p.startswith('.')]: - func(os.path.join(path, policy_file), *args) - - def _load_policy_file(self, path, force_reload, overwrite=True): - reloaded, data = fileutils.read_cached_file( - path, force_reload=force_reload) - if reloaded or not self.rules or not overwrite: - rules = Rules.load_json(data, self.default_rule) - self.set_rules(rules, overwrite=overwrite, use_conf=True) - LOG.debug("Reloaded policy file: %(path)s", - {'path': path}) - - def _get_policy_path(self, path): - """Locate the policy json data file/path. - - :param path: It's value can be a full path or related path. When - full path specified, this function just returns the full - path. When related path specified, this function will - search configuration directories to find one that exists. - - :returns: The policy path - - :raises: ConfigFilesNotFoundError if the file/path couldn't - be located. - """ - policy_path = CONF.find_file(path) - - if policy_path: - return policy_path - - raise cfg.ConfigFilesNotFoundError((path,)) - - def enforce(self, rule, target, creds, do_raise=False, - exc=None, *args, **kwargs): - """Checks authorization of a rule against the target and credentials. - - :param rule: A string or BaseCheck instance specifying the rule - to evaluate. 
- :param target: As much information about the object being operated - on as possible, as a dictionary. - :param creds: As much information about the user performing the - action as possible, as a dictionary. - :param do_raise: Whether to raise an exception or not if check - fails. - :param exc: Class of the exception to raise if the check fails. - Any remaining arguments passed to enforce() (both - positional and keyword arguments) will be passed to - the exception class. If not specified, PolicyNotAuthorized - will be used. - - :return: Returns False if the policy does not allow the action and - exc is not provided; otherwise, returns a value that - evaluates to True. Note: for rules using the "case" - expression, this True value will be the specified string - from the expression. - """ - - self.load_rules() - - # Allow the rule to be a Check tree - if isinstance(rule, BaseCheck): - result = rule(target, creds, self) - elif not self.rules: - # No rules to reference means we're going to fail closed - result = False - else: - try: - # Evaluate the rule - result = self.rules[rule](target, creds, self) - except KeyError: - LOG.debug("Rule [%s] doesn't exist" % rule) - # If the rule doesn't exist, fail closed - result = False - - # If it is False, raise the exception if requested - if do_raise and not result: - if exc: - raise exc(*args, **kwargs) - - raise PolicyNotAuthorized(rule) - - return result - - -@six.add_metaclass(abc.ABCMeta) -class BaseCheck(object): - """Abstract base class for Check classes.""" - - @abc.abstractmethod - def __str__(self): - """String representation of the Check tree rooted at this node.""" - - pass - - @abc.abstractmethod - def __call__(self, target, cred, enforcer): - """Triggers if instance of the class is called. - - Performs the check. Returns False to reject the access or a - true value (not necessary True) to accept the access. 
- """ - - pass - - -class FalseCheck(BaseCheck): - """A policy check that always returns False (disallow).""" - - def __str__(self): - """Return a string representation of this check.""" - - return "!" - - def __call__(self, target, cred, enforcer): - """Check the policy.""" - - return False - - -class TrueCheck(BaseCheck): - """A policy check that always returns True (allow).""" - - def __str__(self): - """Return a string representation of this check.""" - - return "@" - - def __call__(self, target, cred, enforcer): - """Check the policy.""" - - return True - - -class Check(BaseCheck): - """A base class to allow for user-defined policy checks.""" - - def __init__(self, kind, match): - """Initiates Check instance. - - :param kind: The kind of the check, i.e., the field before the - ':'. - :param match: The match of the check, i.e., the field after - the ':'. - """ - - self.kind = kind - self.match = match - - def __str__(self): - """Return a string representation of this check.""" - - return "%s:%s" % (self.kind, self.match) - - -class NotCheck(BaseCheck): - """Implements the "not" logical operator. - - A policy check that inverts the result of another policy check. - """ - - def __init__(self, rule): - """Initialize the 'not' check. - - :param rule: The rule to negate. Must be a Check. - """ - - self.rule = rule - - def __str__(self): - """Return a string representation of this check.""" - - return "not %s" % self.rule - - def __call__(self, target, cred, enforcer): - """Check the policy. - - Returns the logical inverse of the wrapped check. - """ - - return not self.rule(target, cred, enforcer) - - -class AndCheck(BaseCheck): - """Implements the "and" logical operator. - - A policy check that requires that a list of other checks all return True. - """ - - def __init__(self, rules): - """Initialize the 'and' check. - - :param rules: A list of rules that will be tested. 
- """ - - self.rules = rules - - def __str__(self): - """Return a string representation of this check.""" - - return "(%s)" % ' and '.join(str(r) for r in self.rules) - - def __call__(self, target, cred, enforcer): - """Check the policy. - - Requires that all rules accept in order to return True. - """ - - for rule in self.rules: - if not rule(target, cred, enforcer): - return False - - return True - - def add_check(self, rule): - """Adds rule to be tested. - - Allows addition of another rule to the list of rules that will - be tested. Returns the AndCheck object for convenience. - """ - - self.rules.append(rule) - return self - - -class OrCheck(BaseCheck): - """Implements the "or" operator. - - A policy check that requires that at least one of a list of other - checks returns True. - """ - - def __init__(self, rules): - """Initialize the 'or' check. - - :param rules: A list of rules that will be tested. - """ - - self.rules = rules - - def __str__(self): - """Return a string representation of this check.""" - - return "(%s)" % ' or '.join(str(r) for r in self.rules) - - def __call__(self, target, cred, enforcer): - """Check the policy. - - Requires that at least one rule accept in order to return True. - """ - - for rule in self.rules: - if rule(target, cred, enforcer): - return True - return False - - def add_check(self, rule): - """Adds rule to be tested. - - Allows addition of another rule to the list of rules that will - be tested. Returns the OrCheck object for convenience. 
- """ - - self.rules.append(rule) - return self - - -def _parse_check(rule): - """Parse a single base check rule into an appropriate Check object.""" - - # Handle the special checks - if rule == '!': - return FalseCheck() - elif rule == '@': - return TrueCheck() - - try: - kind, match = rule.split(':', 1) - except Exception: - LOG.exception(_LE("Failed to understand rule %s") % rule) - # If the rule is invalid, we'll fail closed - return FalseCheck() - - # Find what implements the check - if kind in _checks: - return _checks[kind](kind, match) - elif None in _checks: - return _checks[None](kind, match) - else: - LOG.error(_LE("No handler for matches of kind %s") % kind) - return FalseCheck() - - -def _parse_list_rule(rule): - """Translates the old list-of-lists syntax into a tree of Check objects. - - Provided for backwards compatibility. - """ - - # Empty rule defaults to True - if not rule: - return TrueCheck() - - # Outer list is joined by "or"; inner list by "and" - or_list = [] - for inner_rule in rule: - # Elide empty inner lists - if not inner_rule: - continue - - # Handle bare strings - if isinstance(inner_rule, six.string_types): - inner_rule = [inner_rule] - - # Parse the inner rules into Check objects - and_list = [_parse_check(r) for r in inner_rule] - - # Append the appropriate check to the or_list - if len(and_list) == 1: - or_list.append(and_list[0]) - else: - or_list.append(AndCheck(and_list)) - - # If we have only one check, omit the "or" - if not or_list: - return FalseCheck() - elif len(or_list) == 1: - return or_list[0] - - return OrCheck(or_list) - - -# Used for tokenizing the policy language -_tokenize_re = re.compile(r'\s+') - - -def _parse_tokenize(rule): - """Tokenizer for the policy language. - - Most of the single-character tokens are specified in the - _tokenize_re; however, parentheses need to be handled specially, - because they can appear inside a check string. 
Thankfully, those - parentheses that appear inside a check string can never occur at - the very beginning or end ("%(variable)s" is the correct syntax). - """ - - for tok in _tokenize_re.split(rule): - # Skip empty tokens - if not tok or tok.isspace(): - continue - - # Handle leading parens on the token - clean = tok.lstrip('(') - for i in range(len(tok) - len(clean)): - yield '(', '(' - - # If it was only parentheses, continue - if not clean: - continue - else: - tok = clean - - # Handle trailing parens on the token - clean = tok.rstrip(')') - trail = len(tok) - len(clean) - - # Yield the cleaned token - lowered = clean.lower() - if lowered in ('and', 'or', 'not'): - # Special tokens - yield lowered, clean - elif clean: - # Not a special token, but not composed solely of ')' - if len(tok) >= 2 and ((tok[0], tok[-1]) in - [('"', '"'), ("'", "'")]): - # It's a quoted string - yield 'string', tok[1:-1] - else: - yield 'check', _parse_check(clean) - - # Yield the trailing parens - for i in range(trail): - yield ')', ')' - - -class ParseStateMeta(type): - """Metaclass for the ParseState class. - - Facilitates identifying reduction methods. - """ - - def __new__(mcs, name, bases, cls_dict): - """Create the class. - - Injects the 'reducers' list, a list of tuples matching token sequences - to the names of the corresponding reduction methods. - """ - - reducers = [] - - for key, value in cls_dict.items(): - if not hasattr(value, 'reducers'): - continue - for reduction in value.reducers: - reducers.append((reduction, key)) - - cls_dict['reducers'] = reducers - - return super(ParseStateMeta, mcs).__new__(mcs, name, bases, cls_dict) - - -def reducer(*tokens): - """Decorator for reduction methods. - - Arguments are a sequence of tokens, in order, which should trigger running - this reduction method. 
- """ - - def decorator(func): - # Make sure we have a list of reducer sequences - if not hasattr(func, 'reducers'): - func.reducers = [] - - # Add the tokens to the list of reducer sequences - func.reducers.append(list(tokens)) - - return func - - return decorator - - -@six.add_metaclass(ParseStateMeta) -class ParseState(object): - """Implement the core of parsing the policy language. - - Uses a greedy reduction algorithm to reduce a sequence of tokens into - a single terminal, the value of which will be the root of the Check tree. - - Note: error reporting is rather lacking. The best we can get with - this parser formulation is an overall "parse failed" error. - Fortunately, the policy language is simple enough that this - shouldn't be that big a problem. - """ - - def __init__(self): - """Initialize the ParseState.""" - - self.tokens = [] - self.values = [] - - def reduce(self): - """Perform a greedy reduction of the token stream. - - If a reducer method matches, it will be executed, then the - reduce() method will be called recursively to search for any more - possible reductions. - """ - - for reduction, methname in self.reducers: - if (len(self.tokens) >= len(reduction) and - self.tokens[-len(reduction):] == reduction): - # Get the reduction method - meth = getattr(self, methname) - - # Reduce the token stream - results = meth(*self.values[-len(reduction):]) - - # Update the tokens and values - self.tokens[-len(reduction):] = [r[0] for r in results] - self.values[-len(reduction):] = [r[1] for r in results] - - # Check for any more reductions - return self.reduce() - - def shift(self, tok, value): - """Adds one more token to the state. Calls reduce().""" - - self.tokens.append(tok) - self.values.append(value) - - # Do a greedy reduce... - self.reduce() - - @property - def result(self): - """Obtain the final result of the parse. - - Raises ValueError if the parse failed to reduce to a single result. 
- """ - - if len(self.values) != 1: - raise ValueError("Could not parse rule") - return self.values[0] - - @reducer('(', 'check', ')') - @reducer('(', 'and_expr', ')') - @reducer('(', 'or_expr', ')') - def _wrap_check(self, _p1, check, _p2): - """Turn parenthesized expressions into a 'check' token.""" - - return [('check', check)] - - @reducer('check', 'and', 'check') - def _make_and_expr(self, check1, _and, check2): - """Create an 'and_expr'. - - Join two checks by the 'and' operator. - """ - - return [('and_expr', AndCheck([check1, check2]))] - - @reducer('and_expr', 'and', 'check') - def _extend_and_expr(self, and_expr, _and, check): - """Extend an 'and_expr' by adding one more check.""" - - return [('and_expr', and_expr.add_check(check))] - - @reducer('check', 'or', 'check') - def _make_or_expr(self, check1, _or, check2): - """Create an 'or_expr'. - - Join two checks by the 'or' operator. - """ - - return [('or_expr', OrCheck([check1, check2]))] - - @reducer('or_expr', 'or', 'check') - def _extend_or_expr(self, or_expr, _or, check): - """Extend an 'or_expr' by adding one more check.""" - - return [('or_expr', or_expr.add_check(check))] - - @reducer('not', 'check') - def _make_not_expr(self, _not, check): - """Invert the result of another check.""" - - return [('check', NotCheck(check))] - - -def _parse_text_rule(rule): - """Parses policy to the tree. - - Translates a policy written in the policy language into a tree of - Check objects. 
- """ - - # Empty rule means always accept - if not rule: - return TrueCheck() - - # Parse the token stream - state = ParseState() - for tok, value in _parse_tokenize(rule): - state.shift(tok, value) - - try: - return state.result - except ValueError: - # Couldn't parse the rule - LOG.exception(_LE("Failed to understand rule %s") % rule) - - # Fail closed - return FalseCheck() - - -def parse_rule(rule): - """Parses a policy rule into a tree of Check objects.""" - - # If the rule is a string, it's in the policy language - if isinstance(rule, six.string_types): - return _parse_text_rule(rule) - return _parse_list_rule(rule) - - -def register(name, func=None): - """Register a function or Check class as a policy check. - - :param name: Gives the name of the check type, e.g., 'rule', - 'role', etc. If name is None, a default check type - will be registered. - :param func: If given, provides the function or class to register. - If not given, returns a function taking one argument - to specify the function or class to register, - allowing use as a decorator. - """ - - # Perform the actual decoration by registering the function or - # class. Returns the function or class for compliance with the - # decorator interface. 
- def decorator(func): - _checks[name] = func - return func - - # If the function or class is given, do the registration - if func: - return decorator(func) - - return decorator - - -@register("rule") -class RuleCheck(Check): - def __call__(self, target, creds, enforcer): - """Recursively checks credentials based on the defined rules.""" - - try: - return enforcer.rules[self.match](target, creds, enforcer) - except KeyError: - # We don't have any matching rule; fail closed - return False - - -@register("role") -class RoleCheck(Check): - def __call__(self, target, creds, enforcer): - """Check that there is a matching role in the cred dict.""" - - return self.match.lower() in [x.lower() for x in creds['roles']] - - -@register('http') -class HttpCheck(Check): - def __call__(self, target, creds, enforcer): - """Check http: rules by calling to a remote server. - - This example implementation simply verifies that the response - is exactly 'True'. - """ - - url = ('http:' + self.match) % target - - # Convert instances of object() in target temporarily to - # empty dict to avoid circular reference detection - # errors in jsonutils.dumps(). - temp_target = copy.deepcopy(target) - for key in target.keys(): - element = target.get(key) - if type(element) is object: - temp_target[key] = {} - - data = {'target': jsonutils.dumps(temp_target), - 'credentials': jsonutils.dumps(creds)} - post_data = urlparse.urlencode(data) - f = urlrequest.urlopen(url, post_data) - return f.read() == "True" - - -@register(None) -class GenericCheck(Check): - def __call__(self, target, creds, enforcer): - """Check an individual match. 
- - Matches look like: - - tenant:%(tenant_id)s - role:compute:admin - True:%(user.enabled)s - 'Member':%(role.name)s - """ - - try: - match = self.match % target - except KeyError: - # While doing GenericCheck if key not - # present in Target return false - return False - - try: - # Try to interpret self.kind as a literal - leftval = ast.literal_eval(self.kind) - except ValueError: - try: - kind_parts = self.kind.split('.') - leftval = creds - for kind_part in kind_parts: - leftval = leftval[kind_part] - except KeyError: - return False - return match == six.text_type(leftval) diff --git a/neutron/policy.py b/neutron/policy.py index 7c21559c6bf..5424dbe685e 100644 --- a/neutron/policy.py +++ b/neutron/policy.py @@ -21,7 +21,9 @@ import collections import logging as std_logging import re +from oslo_config import cfg from oslo_log import log as logging +from oslo_policy import policy from oslo_utils import excutils from oslo_utils import importutils import six @@ -30,7 +32,6 @@ from neutron.api.v2 import attributes from neutron.common import constants as const from neutron.common import exceptions from neutron.i18n import _LE, _LW -from neutron.openstack.common import policy LOG = logging.getLogger(__name__) @@ -47,19 +48,19 @@ def reset(): _ENFORCER = None -def init(): +def init(conf=cfg.CONF, policy_file=None): """Init an instance of the Enforcer class.""" global _ENFORCER if not _ENFORCER: - _ENFORCER = policy.Enforcer() + _ENFORCER = policy.Enforcer(conf, policy_file=policy_file) _ENFORCER.load_rules(True) -def refresh(): +def refresh(policy_file=None): """Reset policy and init a new instance of Enforcer.""" reset() - init() + init(policy_file=policy_file) def get_resource_and_action(action, pluralized=None): @@ -372,7 +373,7 @@ def enforce(context, action, target, plugin=None, pluralized=None): :param pluralized: pluralized case of resource e.g. 
firewall_policy -> pluralized = "firewall_policies" - :raises neutron.openstack.common.policy.PolicyNotAuthorized: + :raises oslo_policy.policy.PolicyNotAuthorized: if verification fails. """ # If we already know the context has admin rights do not perform an diff --git a/neutron/tests/unit/api/v2/test_base.py b/neutron/tests/unit/api/v2/test_base.py index ab05215e3f9..dfac39a8443 100644 --- a/neutron/tests/unit/api/v2/test_base.py +++ b/neutron/tests/unit/api/v2/test_base.py @@ -17,6 +17,7 @@ import os import mock from oslo_config import cfg +from oslo_policy import policy as oslo_policy import six from six import moves import six.moves.urllib.parse as urlparse @@ -33,7 +34,6 @@ from neutron.api.v2 import router from neutron.common import exceptions as n_exc from neutron import context from neutron import manager -from neutron.openstack.common import policy as common_policy from neutron.openstack.common import uuidutils from neutron import policy from neutron import quota @@ -1047,8 +1047,8 @@ class JSONV2TestCase(APIv2TestBase, testlib_api.WebTestCase): def test_get_keystone_strip_admin_only_attribute(self): tenant_id = _uuid() # Inject rule in policy engine - rules = {'get_network:name': common_policy.parse_rule( - "rule:admin_only")} + rules = oslo_policy.Rules.from_dict( + {'get_network:name': "rule:admin_only"}) policy.set_rules(rules, overwrite=False) res = self._test_get(tenant_id, tenant_id, 200) res = self.deserialize(res) diff --git a/neutron/tests/unit/test_policy.py b/neutron/tests/unit/test_policy.py index a20e531f743..56ab849b6c9 100644 --- a/neutron/tests/unit/test_policy.py +++ b/neutron/tests/unit/test_policy.py @@ -16,7 +16,7 @@ """Test of Policy Engine For Neutron""" import mock -from oslo_config import cfg +from oslo_policy import policy as oslo_policy from oslo_serialization import jsonutils from oslo_utils import importutils import six @@ -28,7 +28,6 @@ from neutron.common import constants as const from neutron.common import exceptions from 
neutron import context from neutron import manager -from neutron.openstack.common import policy as common_policy from neutron import policy from neutron.tests import base @@ -44,14 +43,13 @@ class PolicyFileTestCase(base.BaseTestCase): action = "example:test" with open(tmpfilename, "w") as policyfile: policyfile.write("""{"example:test": ""}""") - cfg.CONF.set_override('policy_file', tmpfilename) - policy.refresh() + policy.refresh(policy_file=tmpfilename) policy.enforce(self.context, action, self.target) with open(tmpfilename, "w") as policyfile: policyfile.write("""{"example:test": "!"}""") - policy.refresh() + policy.refresh(policy_file=tmpfilename) self.target = {'tenant_id': 'fake_tenant'} - self.assertRaises(common_policy.PolicyNotAuthorized, + self.assertRaises(oslo_policy.PolicyNotAuthorized, policy.enforce, self.context, action, @@ -75,19 +73,18 @@ class PolicyTestCase(base.BaseTestCase): } policy.refresh() # NOTE(vish): then overload underlying rules - policy.set_rules(dict((k, common_policy.parse_rule(v)) - for k, v in rules.items())) + policy.set_rules(oslo_policy.Rules.from_dict(rules)) self.context = context.Context('fake', 'fake', roles=['member']) self.target = {} def test_enforce_nonexistent_action_throws(self): action = "example:noexist" - self.assertRaises(common_policy.PolicyNotAuthorized, policy.enforce, + self.assertRaises(oslo_policy.PolicyNotAuthorized, policy.enforce, self.context, action, self.target) def test_enforce_bad_action_throws(self): action = "example:denied" - self.assertRaises(common_policy.PolicyNotAuthorized, policy.enforce, + self.assertRaises(oslo_policy.PolicyNotAuthorized, policy.enforce, self.context, action, self.target) def test_check_bad_action_noraise(self): @@ -124,7 +121,7 @@ class PolicyTestCase(base.BaseTestCase): with mock.patch.object(urlrequest, 'urlopen', new=fakeurlopen): action = "example:get_http" target = {} - self.assertRaises(common_policy.PolicyNotAuthorized, + 
self.assertRaises(oslo_policy.PolicyNotAuthorized, policy.enforce, self.context, action, target) @@ -133,12 +130,12 @@ class PolicyTestCase(base.BaseTestCase): target_not_mine = {'tenant_id': 'another'} action = "example:my_file" policy.enforce(self.context, action, target_mine) - self.assertRaises(common_policy.PolicyNotAuthorized, policy.enforce, + self.assertRaises(oslo_policy.PolicyNotAuthorized, policy.enforce, self.context, action, target_not_mine) def test_early_AND_enforcement(self): action = "example:early_and_fail" - self.assertRaises(common_policy.PolicyNotAuthorized, policy.enforce, + self.assertRaises(oslo_policy.PolicyNotAuthorized, policy.enforce, self.context, action, self.target) def test_early_OR_enforcement(self): @@ -166,13 +163,12 @@ class DefaultPolicyTestCase(base.BaseTestCase): } with open(tmpfilename, "w") as policyfile: jsonutils.dump(self.rules, policyfile) - cfg.CONF.set_override('policy_file', tmpfilename) - policy.refresh() + policy.refresh(policy_file=tmpfilename) self.context = context.Context('fake', 'fake') def test_policy_called(self): - self.assertRaises(common_policy.PolicyNotAuthorized, policy.enforce, + self.assertRaises(oslo_policy.PolicyNotAuthorized, policy.enforce, self.context, "example:exist", {}) def test_not_found_policy_calls_default(self): @@ -208,14 +204,33 @@ class NeutronPolicyTestCase(base.BaseTestCase): def fakepolicyinit(self, **kwargs): enf = policy._ENFORCER - enf.set_rules(common_policy.Rules(self.rules)) + enf.set_rules(oslo_policy.Rules(self.rules)) def setUp(self): super(NeutronPolicyTestCase, self).setUp() policy.refresh() # Add Fake resources to RESOURCE_ATTRIBUTE_MAP attributes.RESOURCE_ATTRIBUTE_MAP.update(FAKE_RESOURCES) - self.rules = dict((k, common_policy.parse_rule(v)) for k, v in { + self._set_rules() + + def remove_fake_resource(): + del attributes.RESOURCE_ATTRIBUTE_MAP["%ss" % FAKE_RESOURCE_NAME] + + self.patcher = mock.patch.object(neutron.policy, + 'init', + new=self.fakepolicyinit) + 
self.patcher.start() + self.addCleanup(remove_fake_resource) + self.context = context.Context('fake', 'fake', roles=['user']) + plugin_klass = importutils.import_class( + "neutron.db.db_base_plugin_v2.NeutronDbPluginV2") + self.manager_patcher = mock.patch('neutron.manager.NeutronManager') + fake_manager = self.manager_patcher.start() + fake_manager_instance = fake_manager.return_value + fake_manager_instance.plugin = plugin_klass() + + def _set_rules(self, **kwargs): + rules_dict = { "context_is_admin": "role:admin", "context_is_advsvc": "role:advsvc", "admin_or_network_owner": "rule:context_is_admin or " @@ -253,23 +268,9 @@ class NeutronPolicyTestCase(base.BaseTestCase): "insert_rule": "rule:admin_or_owner", "remove_rule": "rule:admin_or_owner", - }.items()) - - def remove_fake_resource(): - del attributes.RESOURCE_ATTRIBUTE_MAP["%ss" % FAKE_RESOURCE_NAME] - - self.patcher = mock.patch.object(neutron.policy, - 'init', - new=self.fakepolicyinit) - self.patcher.start() - self.addCleanup(remove_fake_resource) - self.context = context.Context('fake', 'fake', roles=['user']) - plugin_klass = importutils.import_class( - "neutron.db.db_base_plugin_v2.NeutronDbPluginV2") - self.manager_patcher = mock.patch('neutron.manager.NeutronManager') - fake_manager = self.manager_patcher.start() - fake_manager_instance = fake_manager.return_value - fake_manager_instance.plugin = plugin_klass() + } + rules_dict.update(**kwargs) + self.rules = oslo_policy.Rules.from_dict(rules_dict) def test_firewall_policy_insert_rule_with_admin_context(self): action = "insert_rule" @@ -319,22 +320,22 @@ class NeutronPolicyTestCase(base.BaseTestCase): def test_nonadmin_write_on_private_fails(self): self._test_nonadmin_action_on_attr('create', 'shared', False, - common_policy.PolicyNotAuthorized) + oslo_policy.PolicyNotAuthorized) def test_nonadmin_read_on_private_fails(self): self._test_nonadmin_action_on_attr('get', 'shared', False, - common_policy.PolicyNotAuthorized) + 
oslo_policy.PolicyNotAuthorized) def test_nonadmin_write_on_shared_fails(self): self._test_nonadmin_action_on_attr('create', 'shared', True, - common_policy.PolicyNotAuthorized) + oslo_policy.PolicyNotAuthorized) def test_advsvc_get_network_works(self): self._test_advsvc_action_on_attr('get', 'network', 'shared', False) def test_advsvc_create_network_fails(self): self._test_advsvc_action_on_attr('create', 'network', 'shared', False, - common_policy.PolicyNotAuthorized) + oslo_policy.PolicyNotAuthorized) def test_advsvc_create_port_works(self): self._test_advsvc_action_on_attr('create', 'port:mac', 'shared', False) @@ -352,7 +353,7 @@ class NeutronPolicyTestCase(base.BaseTestCase): def test_advsvc_create_subnet_fails(self): self._test_advsvc_action_on_attr('create', 'subnet', 'shared', False, - common_policy.PolicyNotAuthorized) + oslo_policy.PolicyNotAuthorized) def test_nonadmin_read_on_shared_succeeds(self): self._test_nonadmin_action_on_attr('get', 'shared', True) @@ -406,13 +407,13 @@ class NeutronPolicyTestCase(base.BaseTestCase): def test_reset_adminonly_attr_to_default_fails(self): kwargs = {const.ATTRIBUTES_TO_UPDATE: ['shared']} self._test_nonadmin_action_on_attr('update', 'shared', False, - common_policy.PolicyNotAuthorized, + oslo_policy.PolicyNotAuthorized, **kwargs) def test_enforce_adminonly_attribute_nonadminctx_returns_403(self): action = "create_network" target = {'shared': True, 'tenant_id': 'somebody_else'} - self.assertRaises(common_policy.PolicyNotAuthorized, policy.enforce, + self.assertRaises(oslo_policy.PolicyNotAuthorized, policy.enforce, self.context, action, target) def _test_build_subattribute_match_rule(self, validate_value): @@ -466,7 +467,7 @@ class NeutronPolicyTestCase(base.BaseTestCase): action = "create_" + FAKE_RESOURCE_NAME target = {'tenant_id': 'fake', 'attr': {'sub_attr_1': 'x', 'sub_attr_2': 'y'}} - self.assertRaises(common_policy.PolicyNotAuthorized, policy.enforce, + self.assertRaises(oslo_policy.PolicyNotAuthorized, 
policy.enforce, self.context, action, target, None) def test_enforce_regularuser_on_read(self): @@ -529,9 +530,9 @@ class NeutronPolicyTestCase(base.BaseTestCase): def fakegetnetwork(*args, **kwargs): return {'tenant_id': 'fake'} - del self.rules['admin_or_network_owner'] - self.rules['admin_or_network_owner'] = common_policy.parse_rule( - "role:admin or tenant_id:%(network_tenant_id)s") + self._set_rules( + admin_or_network_owner="role:admin or " + "tenant_id:%(network_tenant_id)s") action = "create_port:mac" with mock.patch.object(manager.NeutronManager.get_instance().plugin, 'get_network', new=fakegetnetwork): @@ -543,11 +544,11 @@ class NeutronPolicyTestCase(base.BaseTestCase): # Try and add a bad rule self.assertRaises( exceptions.PolicyInitError, - common_policy.parse_rule, - 'tenant_id:(wrong_stuff)') + oslo_policy.Rules.from_dict, + {'test_policy': 'tenant_id:(wrong_stuff)'}) def _test_enforce_tenant_id_raises(self, bad_rule): - self.rules['admin_or_owner'] = common_policy.parse_rule(bad_rule) + self._set_rules(admin_or_owner=bad_rule) # Trigger a policy with rule admin_or_owner action = "create_network" target = {'tenant_id': 'fake'} @@ -565,20 +566,18 @@ class NeutronPolicyTestCase(base.BaseTestCase): def test_process_rules(self): action = "create_" + FAKE_RESOURCE_NAME # Construct RuleChecks for an action, attribute and subattribute - match_rule = common_policy.RuleCheck('rule', action) - attr_rule = common_policy.RuleCheck('rule', '%s:%ss' % - (action, - FAKE_RESOURCE_NAME)) - sub_attr_rules = [common_policy.RuleCheck('rule', '%s:%s:%s' % - (action, 'attr', - 'sub_attr_1'))] + match_rule = oslo_policy.RuleCheck('rule', action) + attr_rule = oslo_policy.RuleCheck( + 'rule', '%s:%ss' % (action, FAKE_RESOURCE_NAME)) + sub_attr_rules = [oslo_policy.RuleCheck( + 'rule', '%s:%s:%s' % (action, 'attr', 'sub_attr_1'))] # Build an AndCheck from the given RuleChecks # Make the checks nested to better check the recursion - sub_attr_rules = 
common_policy.AndCheck(sub_attr_rules) - attr_rule = common_policy.AndCheck( + sub_attr_rules = oslo_policy.AndCheck(sub_attr_rules) + attr_rule = oslo_policy.AndCheck( [attr_rule, sub_attr_rules]) - match_rule = common_policy.AndCheck([match_rule, attr_rule]) + match_rule = oslo_policy.AndCheck([match_rule, attr_rule]) # Assert that the rules are correctly extracted from the match_rule rules = policy._process_rules_list([], match_rule) self.assertEqual(['create_fake_resource', @@ -588,6 +587,6 @@ class NeutronPolicyTestCase(base.BaseTestCase): @mock.patch.object(policy.LOG, 'isEnabledFor', return_value=True) @mock.patch.object(policy.LOG, 'debug') def test_log_rule_list(self, mock_debug, mock_is_e): - policy.log_rule_list(common_policy.RuleCheck('rule', 'create_')) + policy.log_rule_list(oslo_policy.RuleCheck('rule', 'create_')) self.assertTrue(mock_is_e.called) self.assertTrue(mock_debug.called) diff --git a/openstack-common.conf b/openstack-common.conf index 549005c12ec..c5421f062d7 100644 --- a/openstack-common.conf +++ b/openstack-common.conf @@ -6,7 +6,6 @@ module=fileutils module=install_venv_common module=loopingcall module=periodic_task -module=policy module=service module=systemd module=threadgroup diff --git a/requirements.txt b/requirements.txt index 101d0e31dd2..48a1b51fc33 100644 --- a/requirements.txt +++ b/requirements.txt @@ -30,6 +30,7 @@ oslo.i18n>=1.5.0 # Apache-2.0 oslo.log>=1.0.0 # Apache-2.0 oslo.messaging>=1.8.0 # Apache-2.0 oslo.middleware>=1.2.0 # Apache-2.0 +oslo.policy>=0.5.0 # Apache-2.0 oslo.rootwrap>=1.6.0 # Apache-2.0 oslo.serialization>=1.4.0 # Apache-2.0 oslo.utils>=1.4.0 # Apache-2.0 From 303f37f4e0c84f90e40b95731a828fc6ce8a0bbf Mon Sep 17 00:00:00 2001 From: Cyril Roelandt Date: Mon, 8 Jun 2015 16:09:49 +0000 Subject: [PATCH 158/292] Python 3: use next() instead of iterator.next() The latter only works in Python 2. Also define a __next__ method in the classes that define a next method. 
Change-Id: Iaa1a1e500facab50d8bcdffda39ccad3f2e4e9bb Blueprint: neutron-python3 --- neutron/agent/linux/ip_lib.py | 8 ++++---- neutron/db/model_base.py | 4 +++- neutron/ipam/subnet_alloc.py | 2 +- neutron/plugins/ml2/drivers/type_tunnel.py | 4 ++-- neutron/tests/tempest/common/glance_http.py | 4 +++- neutron/tests/unit/agent/l3/test_agent.py | 4 ++-- tox.ini | 4 ++++ 7 files changed, 19 insertions(+), 11 deletions(-) diff --git a/neutron/agent/linux/ip_lib.py b/neutron/agent/linux/ip_lib.py index ce5dec796d1..32fe1f9ac84 100644 --- a/neutron/agent/linux/ip_lib.py +++ b/neutron/agent/linux/ip_lib.py @@ -557,13 +557,13 @@ class IpRouteCommand(IpDeviceCommandBase): ).split('\n') for subnet_route_line in subnet_route_list_lines: i = iter(subnet_route_line.split()) - while(i.next() != 'dev'): + while(next(i) != 'dev'): pass - device = i.next() + device = next(i) try: - while(i.next() != 'src'): + while(next(i) != 'src'): pass - src = i.next() + src = next(i) except Exception: src = '' if device != interface_name: diff --git a/neutron/db/model_base.py b/neutron/db/model_base.py index b613447d4a8..e1abbd5533a 100644 --- a/neutron/db/model_base.py +++ b/neutron/db/model_base.py @@ -28,9 +28,11 @@ class NeutronBase(models.ModelBase): return self def next(self): - n = self._i.next().name + n = next(self._i).name return n, getattr(self, n) + __next__ = next + def __repr__(self): """sqlalchemy based automatic __repr__ method.""" items = ['%s=%r' % (col.name, getattr(self, col.name)) diff --git a/neutron/ipam/subnet_alloc.py b/neutron/ipam/subnet_alloc.py index 49b6eda2ab5..ff9b30c9a58 100644 --- a/neutron/ipam/subnet_alloc.py +++ b/neutron/ipam/subnet_alloc.py @@ -93,7 +93,7 @@ class SubnetAllocator(driver.Pool): prefix_pool = self._get_available_prefix_list() for prefix in prefix_pool: if request.prefixlen >= prefix.prefixlen: - subnet = prefix.subnet(request.prefixlen).next() + subnet = next(prefix.subnet(request.prefixlen)) gateway_ip = request.gateway_ip if not gateway_ip: 
gateway_ip = subnet.network + 1 diff --git a/neutron/plugins/ml2/drivers/type_tunnel.py b/neutron/plugins/ml2/drivers/type_tunnel.py index 12dce86f48f..14904b31d8a 100644 --- a/neutron/plugins/ml2/drivers/type_tunnel.py +++ b/neutron/plugins/ml2/drivers/type_tunnel.py @@ -40,7 +40,7 @@ class TunnelTypeDriver(helpers.SegmentTypeDriver): def __init__(self, model): super(TunnelTypeDriver, self).__init__(model) - self.segmentation_key = iter(self.primary_keys).next() + self.segmentation_key = next(iter(self.primary_keys)) @abc.abstractmethod def sync_allocations(self): @@ -203,7 +203,7 @@ class EndpointTunnelTypeDriver(TunnelTypeDriver): def __init__(self, segment_model, endpoint_model): super(EndpointTunnelTypeDriver, self).__init__(segment_model) self.endpoint_model = endpoint_model - self.segmentation_key = iter(self.primary_keys).next() + self.segmentation_key = next(iter(self.primary_keys)) def get_endpoint_by_host(self, host): LOG.debug("get_endpoint_by_host() called for host %s", host) diff --git a/neutron/tests/tempest/common/glance_http.py b/neutron/tests/tempest/common/glance_http.py index 6cdbadc3bab..0a6f985e7c6 100644 --- a/neutron/tests/tempest/common/glance_http.py +++ b/neutron/tests/tempest/common/glance_http.py @@ -367,7 +367,7 @@ class ResponseBodyIterator(object): def __iter__(self): while True: - yield self.next() + yield next(self) def next(self): chunk = self.resp.read(CHUNKSIZE) @@ -375,3 +375,5 @@ class ResponseBodyIterator(object): return chunk else: raise StopIteration() + + __next__ = next diff --git a/neutron/tests/unit/agent/l3/test_agent.py b/neutron/tests/unit/agent/l3/test_agent.py index 577eef30c75..aeec5c6f1c2 100644 --- a/neutron/tests/unit/agent/l3/test_agent.py +++ b/neutron/tests/unit/agent/l3/test_agent.py @@ -136,8 +136,8 @@ def router_append_subnet(router, count=1, ip_version=4, interfaces = copy.deepcopy(router.get(l3_constants.INTERFACE_KEY, [])) if interface_id: try: - interface = (i for i in interfaces - if i['id'] == 
interface_id).next() + interface = next(i for i in interfaces + if i['id'] == interface_id) except StopIteration: raise ValueError("interface_id not found") diff --git a/tox.ini b/tox.ini index 636a2a37739..d7cf102ed1e 100644 --- a/tox.ini +++ b/tox.ini @@ -113,6 +113,8 @@ commands = python -m testtools.run \ neutron.tests.unit.plugins.ml2.drivers.ext_test \ neutron.tests.unit.plugins.ml2.drivers.mech_sriov.test_mech_sriov_nic_switch \ neutron.tests.unit.plugins.ml2.drivers.mech_fake_agent \ + neutron.tests.unit.plugins.ml2.drivers.test_type_vxlan \ + neutron.tests.unit.plugins.ml2.drivers.test_type_gre \ neutron.tests.unit.plugins.ml2.drivers.arista.test_mechanism_arista \ neutron.tests.unit.plugins.ml2.drivers.test_type_local \ neutron.tests.unit.plugins.ml2.drivers.mechanism_logger \ @@ -151,6 +153,7 @@ commands = python -m testtools.run \ neutron.tests.unit.agent.l3.test_dvr_fip_ns \ neutron.tests.unit.agent.common.test_config \ neutron.tests.unit.agent.common.test_polling \ + neutron.tests.unit.agent.linux.test_ip_lib \ neutron.tests.unit.agent.linux.test_keepalived \ neutron.tests.unit.agent.linux.test_ipset_manager \ neutron.tests.unit.agent.linux.test_ebtables_manager \ @@ -176,6 +179,7 @@ commands = python -m testtools.run \ neutron.tests.unit.cmd.test_netns_cleanup \ neutron.tests.unit.ipam.drivers.neutrondb_ipam.test_db_api \ neutron.tests.unit.ipam.drivers.neutrondb_ipam.test_driver \ + neutron.tests.unit.ipam.test_subnet_alloc \ neutron.tests.unit.notifiers.test_nova \ neutron.tests.unit.notifiers.test_batch_notifier From 6886655b491aede40aa9f4a0bd4c6d402d5a7a78 Mon Sep 17 00:00:00 2001 From: Salvatore Orlando Date: Tue, 28 Apr 2015 04:59:35 -0700 Subject: [PATCH 159/292] Context: Remove logic for read_deleted and deprecate it The read_deleted parameter in the Context object is simply unused. 
This patch removes associated logic, and for what is worth, adds deprecation warnings against explicit usage of read_deleted when creating a context instance, generate an admin context, and elevating a context instance. Change-Id: Ic69d22dc229ebe8fac1f6be0c4860d19732505b1 Closes-Bug: #1449462 --- neutron/context.py | 36 ++++--------------- neutron/tests/functional/db/test_ipam.py | 1 - .../unit/_test_extension_portbindings.py | 15 +++----- 3 files changed, 12 insertions(+), 40 deletions(-) diff --git a/neutron/context.py b/neutron/context.py index ee6ca8ed7d4..3debe4ccd90 100644 --- a/neutron/context.py +++ b/neutron/context.py @@ -36,15 +36,12 @@ class ContextBase(oslo_context.RequestContext): """ - def __init__(self, user_id, tenant_id, is_admin=None, read_deleted="no", - roles=None, timestamp=None, request_id=None, tenant_name=None, + @removals.removed_kwarg('read_deleted') + def __init__(self, user_id, tenant_id, is_admin=None, roles=None, + timestamp=None, request_id=None, tenant_name=None, user_name=None, overwrite=True, auth_token=None, **kwargs): """Object initialization. - :param read_deleted: 'no' indicates deleted records are hidden, 'yes' - indicates deleted records are visible, 'only' indicates that - *only* deleted records are visible. - :param overwrite: Set to False to ensure that the greenthread local copy of the index is not overwritten. 
@@ -59,7 +56,6 @@ class ContextBase(oslo_context.RequestContext): self.user_name = user_name self.tenant_name = tenant_name - self.read_deleted = read_deleted if not timestamp: timestamp = datetime.datetime.utcnow() self.timestamp = timestamp @@ -89,28 +85,12 @@ class ContextBase(oslo_context.RequestContext): def user_id(self, user_id): self.user = user_id - def _get_read_deleted(self): - return self._read_deleted - - def _set_read_deleted(self, read_deleted): - if read_deleted not in ('no', 'yes', 'only'): - raise ValueError(_("read_deleted can only be one of 'no', " - "'yes' or 'only', not %r") % read_deleted) - self._read_deleted = read_deleted - - def _del_read_deleted(self): - del self._read_deleted - - read_deleted = property(_get_read_deleted, _set_read_deleted, - _del_read_deleted) - def to_dict(self): context = super(ContextBase, self).to_dict() context.update({ 'user_id': self.user_id, 'tenant_id': self.tenant_id, 'project_id': self.project_id, - 'read_deleted': self.read_deleted, 'roles': self.roles, 'timestamp': str(self.timestamp), 'tenant_name': self.tenant_name, @@ -123,6 +103,7 @@ class ContextBase(oslo_context.RequestContext): def from_dict(cls, values): return cls(**values) + @removals.removed_kwarg('read_deleted') def elevated(self, read_deleted=None): """Return a version of this context with admin flag set.""" context = copy.copy(self) @@ -131,9 +112,6 @@ class ContextBase(oslo_context.RequestContext): if 'admin' not in [x.lower() for x in context.roles]: context.roles = context.roles + ["admin"] - if read_deleted is not None: - context.read_deleted = read_deleted - return context @@ -145,17 +123,17 @@ class Context(ContextBase): return self._session +@removals.removed_kwarg('read_deleted') @removals.removed_kwarg('load_admin_roles') def get_admin_context(read_deleted="no", load_admin_roles=True): return Context(user_id=None, tenant_id=None, is_admin=True, - read_deleted=read_deleted, overwrite=False) +@removals.removed_kwarg('read_deleted') def 
get_admin_context_without_session(read_deleted="no"): return ContextBase(user_id=None, tenant_id=None, - is_admin=True, - read_deleted=read_deleted) + is_admin=True) diff --git a/neutron/tests/functional/db/test_ipam.py b/neutron/tests/functional/db/test_ipam.py index 3c3a9d163a4..c30b72b5579 100644 --- a/neutron/tests/functional/db/test_ipam.py +++ b/neutron/tests/functional/db/test_ipam.py @@ -36,7 +36,6 @@ def get_admin_test_context(db_url): ctx = context.Context(user_id=None, tenant_id=None, is_admin=True, - read_deleted="no", overwrite=False) facade = session.EngineFacade(db_url, mysql_sql_mode='STRICT_ALL_TABLES') ctx._session = facade.get_session(autocommit=False, expire_on_commit=True) diff --git a/neutron/tests/unit/_test_extension_portbindings.py b/neutron/tests/unit/_test_extension_portbindings.py index b3d82abcca7..42840d410f5 100644 --- a/neutron/tests/unit/_test_extension_portbindings.py +++ b/neutron/tests/unit/_test_extension_portbindings.py @@ -58,8 +58,7 @@ class PortBindingsTestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase): def _get_non_admin_context(self): return context.Context(user_id=None, tenant_id=self._tenant_id, - is_admin=False, - read_deleted="no") + is_admin=False) def test_port_vif_details(self): with self.port(name='name') as port: @@ -204,8 +203,7 @@ class PortBindingsHostTestCaseMixin(object): # By default user is admin - now test non admin user ctx = context.Context(user_id=None, tenant_id=self._tenant_id, - is_admin=False, - read_deleted="no") + is_admin=False) non_admin_port = self._show( 'ports', port_id, neutron_context=ctx)['port'] self._check_response_no_portbindings_host(non_admin_port) @@ -227,8 +225,7 @@ class PortBindingsHostTestCaseMixin(object): # By default user is admin - now test non admin user ctx = context.Context(user_id=None, tenant_id=self._tenant_id, - is_admin=False, - read_deleted="no") + is_admin=False) ports = self._list('ports', neutron_context=ctx)['ports'] self.assertEqual(2, len(ports)) for 
non_admin_port in ports: @@ -319,8 +316,7 @@ class PortBindingsVnicTestCaseMixin(object): # By default user is admin - now test non admin user ctx = context.Context(user_id=None, tenant_id=self._tenant_id, - is_admin=False, - read_deleted="no") + is_admin=False) non_admin_port = self._show( 'ports', port_id, neutron_context=ctx)['port'] self._check_response_portbindings_vnic_type(non_admin_port) @@ -342,8 +338,7 @@ class PortBindingsVnicTestCaseMixin(object): # By default user is admin - now test non admin user ctx = context.Context(user_id=None, tenant_id=self._tenant_id, - is_admin=False, - read_deleted="no") + is_admin=False) ports = self._list('ports', neutron_context=ctx)['ports'] self.assertEqual(2, len(ports)) for non_admin_port in ports: From f08e9f1f53efa97e07f21ca72a940fcbeb4570e5 Mon Sep 17 00:00:00 2001 From: Jeremy Stanley Date: Wed, 20 May 2015 01:03:59 +0000 Subject: [PATCH 160/292] Switch from MySQL-python to PyMySQL As discussed in the Liberty Design Summit "Moving apps to Python 3" cross-project workshop, the way forward in the near future is to switch to the pure-python PyMySQL library as a default. https://etherpad.openstack.org/p/liberty-cross-project-python3 Change-Id: I73e0fdb6eca70e7d029a40a2f6f17a7c0797a21d --- etc/neutron.conf | 2 +- etc/neutron/plugins/oneconvergence/nvsdplugin.ini | 2 +- neutron/db/migration/migrate_to_ml2.py | 2 +- neutron/plugins/brocade/README.md | 2 +- neutron/tests/fullstack/base.py | 9 +++++---- neutron/tests/functional/requirements.txt | 2 +- 6 files changed, 10 insertions(+), 9 deletions(-) diff --git a/etc/neutron.conf b/etc/neutron.conf index 5b58519d0ea..7a11b939fb5 100755 --- a/etc/neutron.conf +++ b/etc/neutron.conf @@ -708,7 +708,7 @@ admin_password = %SERVICE_PASSWORD% [database] # This line MUST be changed to actually run the plugin. 
# Example: -# connection = mysql://root:pass@127.0.0.1:3306/neutron +# connection = mysql+pymysql://root:pass@127.0.0.1:3306/neutron # Replace 127.0.0.1 above with the IP address of the database used by the # main neutron server. (Leave it as is if the database runs on this host.) # connection = sqlite:// diff --git a/etc/neutron/plugins/oneconvergence/nvsdplugin.ini b/etc/neutron/plugins/oneconvergence/nvsdplugin.ini index a1c05d971e3..2d8cc77a504 100644 --- a/etc/neutron/plugins/oneconvergence/nvsdplugin.ini +++ b/etc/neutron/plugins/oneconvergence/nvsdplugin.ini @@ -32,4 +32,4 @@ # root_helper = sudo /usr/local/bin/neutron-rootwrap /etc/neutron/rootwrap.conf [database] -# connection = mysql://root:@127.0.0.1/?charset=utf8 +# connection = mysql+pymysql://root:@127.0.0.1/?charset=utf8 diff --git a/neutron/db/migration/migrate_to_ml2.py b/neutron/db/migration/migrate_to_ml2.py index e6c5db332fd..e1b9579bcd5 100755 --- a/neutron/db/migration/migrate_to_ml2.py +++ b/neutron/db/migration/migrate_to_ml2.py @@ -32,7 +32,7 @@ Known Limitations: Example usage: python -m neutron.db.migration.migrate_to_ml2 openvswitch \ - mysql://login:pass@127.0.0.1/neutron + mysql+pymysql://login:pass@127.0.0.1/neutron Note that migration of tunneling state will only be attempted if the --tunnel-type parameter is provided. 
diff --git a/neutron/plugins/brocade/README.md b/neutron/plugins/brocade/README.md index 82b3ad89d83..1baa3124746 100644 --- a/neutron/plugins/brocade/README.md +++ b/neutron/plugins/brocade/README.md @@ -76,7 +76,7 @@ the configuration file specified in the brocade.ini files: ostype = NOS [database] - connection = mysql://root:pass@localhost/brocade_neutron?charset=utf8 + connection = mysql+pymysql://root:pass@localhost/brocade_neutron?charset=utf8 (please see list of more configuration parameters in the brocade.ini file) diff --git a/neutron/tests/fullstack/base.py b/neutron/tests/fullstack/base.py index 9fed9f3e621..c886bd5e790 100644 --- a/neutron/tests/fullstack/base.py +++ b/neutron/tests/fullstack/base.py @@ -52,10 +52,11 @@ class BaseFullStackTestCase(test_base.MySQLOpportunisticTestCase): we only support MySQL for now, but the groundwork for adding Postgres is already laid. """ - conn = "mysql://%(username)s:%(password)s@127.0.0.1/%(db_name)s" % { - 'username': test_base.DbFixture.USERNAME, - 'password': test_base.DbFixture.PASSWORD, - 'db_name': self.engine.url.database} + conn = ("mysql+pymysql://%(username)s:%(password)s" + "@127.0.0.1/%(db_name)s" % { + 'username': test_base.DbFixture.USERNAME, + 'password': test_base.DbFixture.PASSWORD, + 'db_name': self.engine.url.database}) self.original_conn = cfg.CONF.database.connection self.addCleanup(self._revert_connection_address) diff --git a/neutron/tests/functional/requirements.txt b/neutron/tests/functional/requirements.txt index 0c5f2215b44..2d664cb354a 100644 --- a/neutron/tests/functional/requirements.txt +++ b/neutron/tests/functional/requirements.txt @@ -5,4 +5,4 @@ # process, which may cause wedges in the gate later. 
psycopg2 -MySQL-python +PyMySQL>=0.6.2 # MIT License From e33d92c894df4664d01d040ba4305c7cb4ef6e27 Mon Sep 17 00:00:00 2001 From: Fawad Khaliq Date: Tue, 9 Jun 2015 22:18:18 -0700 Subject: [PATCH 161/292] Added networking-plumgrid in plugin requirements Closes-Bug: 1463665 Change-Id: I7152dedd83659ee51274be31ef305af9e82d695a --- neutron/plugins/plumgrid/requirements.txt | 1 + 1 file changed, 1 insertion(+) create mode 100644 neutron/plugins/plumgrid/requirements.txt diff --git a/neutron/plugins/plumgrid/requirements.txt b/neutron/plugins/plumgrid/requirements.txt new file mode 100644 index 00000000000..9d9d8a09cff --- /dev/null +++ b/neutron/plugins/plumgrid/requirements.txt @@ -0,0 +1 @@ +networking-plumgrid From 328b72cf8c5f514434de0b73c9137bde52b5eeea Mon Sep 17 00:00:00 2001 From: Kevin Benton Date: Wed, 10 Jun 2015 07:04:25 +0000 Subject: [PATCH 162/292] Revert "Defer segment lookup in NetworkContext object" This reverts commit e61865807c4c8ff959a7746fe3e17f1ae574c9d0. This patch likely violated the idea of a NetworkContext being a snapshot of the network at the time it was created. This needs a different approach. 
Change-Id: I20b132a0181d35b0517330fb7fbf293c3e979d0e --- neutron/plugins/ml2/driver_context.py | 7 +- .../unit/plugins/ml2/test_driver_context.py | 69 +++++++------------ 2 files changed, 25 insertions(+), 51 deletions(-) diff --git a/neutron/plugins/ml2/driver_context.py b/neutron/plugins/ml2/driver_context.py index 6e9b295b594..ef418fe16e2 100644 --- a/neutron/plugins/ml2/driver_context.py +++ b/neutron/plugins/ml2/driver_context.py @@ -42,8 +42,8 @@ class NetworkContext(MechanismDriverContext, api.NetworkContext): super(NetworkContext, self).__init__(plugin, plugin_context) self._network = network self._original_network = original_network - self._segments = None - self._session = plugin_context.session + self._segments = db.get_network_segments(plugin_context.session, + network['id']) @property def current(self): @@ -55,9 +55,6 @@ class NetworkContext(MechanismDriverContext, api.NetworkContext): @property def network_segments(self): - if not self._segments: - self._segments = db.get_network_segments(self._session, - self._network['id']) return self._segments diff --git a/neutron/tests/unit/plugins/ml2/test_driver_context.py b/neutron/tests/unit/plugins/ml2/test_driver_context.py index 8171071b6c2..e30349c9c6c 100644 --- a/neutron/tests/unit/plugins/ml2/test_driver_context.py +++ b/neutron/tests/unit/plugins/ml2/test_driver_context.py @@ -37,12 +37,13 @@ class TestPortContext(base.BaseTestCase): port = {'device_owner': constants.DEVICE_OWNER_DVR_INTERFACE} binding.host = 'foohost' - ctx = driver_context.PortContext(plugin, - plugin_context, - port, - network, - binding, - None) + with mock.patch.object(driver_context.db, 'get_network_segments'): + ctx = driver_context.PortContext(plugin, + plugin_context, + port, + network, + binding, + None) self.assertEqual('foohost', ctx.host) def test_host_super(self): @@ -55,12 +56,13 @@ class TestPortContext(base.BaseTestCase): portbindings.HOST_ID: 'host'} binding.host = 'foohost' - ctx = driver_context.PortContext(plugin, 
- plugin_context, - port, - network, - binding, - None) + with mock.patch.object(driver_context.db, 'get_network_segments'): + ctx = driver_context.PortContext(plugin, + plugin_context, + port, + network, + binding, + None) self.assertEqual('host', ctx.host) def test_status(self): @@ -72,12 +74,13 @@ class TestPortContext(base.BaseTestCase): port = {'device_owner': constants.DEVICE_OWNER_DVR_INTERFACE} binding.status = 'foostatus' - ctx = driver_context.PortContext(plugin, - plugin_context, - port, - network, - binding, - None) + with mock.patch.object(driver_context.db, 'get_network_segments'): + ctx = driver_context.PortContext(plugin, + plugin_context, + port, + network, + binding, + None) self.assertEqual('foostatus', ctx.status) def test_status_super(self): @@ -90,37 +93,11 @@ class TestPortContext(base.BaseTestCase): 'status': 'status'} binding.status = 'foostatus' - ctx = driver_context.PortContext(plugin, - plugin_context, - port, - network, - binding, - None) - self.assertEqual('status', ctx.status) - - def test_segments_lazy_lookup(self): - plugin = mock.Mock() - plugin_context = mock.Mock() - network = mock.MagicMock() - binding = mock.Mock() - - port = {'device_owner': 'compute', - 'status': 'status'} - binding.status = 'foostatus' - - with mock.patch.object(driver_context.db, - 'get_network_segments') as gs: + with mock.patch.object(driver_context.db, 'get_network_segments'): ctx = driver_context.PortContext(plugin, plugin_context, port, network, binding, None) - self.assertFalse(gs.called) - # accessing the network_segments property should trigger - # a lookup the first time - seg = ctx.network.network_segments - self.assertTrue(gs.called) - gs.reset_mock() - self.assertEqual(seg, ctx.network.network_segments) - self.assertFalse(gs.called) + self.assertEqual('status', ctx.status) From da42745c466c14e6dbe58cdbc830ae5d1c8bb114 Mon Sep 17 00:00:00 2001 From: Jakub Libosvar Date: Tue, 9 Jun 2015 16:08:50 +0000 Subject: [PATCH 163/292] Introduce functions 
using arping executable The arpinger is gonna be used in the next changeset introducing connection testers. Change-Id: I90ae32c2f52f1debfb11ae2a08b2828ee2be04cc --- neutron/tests/common/net_helpers.py | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/neutron/tests/common/net_helpers.py b/neutron/tests/common/net_helpers.py index 8884c66966c..cce352d0cbf 100644 --- a/neutron/tests/common/net_helpers.py +++ b/neutron/tests/common/net_helpers.py @@ -81,6 +81,31 @@ def assert_no_ping(src_namespace, dst_ip, timeout=1, count=1): {'ns': src_namespace, 'destination': dst_ip}) +def assert_arping(src_namespace, dst_ip, source=None, timeout=1, count=1): + """Send arp request using arping executable. + + NOTE: ARP protocol is used in IPv4 only. IPv6 uses Neighbour Discovery + Protocol instead. + """ + ns_ip_wrapper = ip_lib.IPWrapper(src_namespace) + arping_cmd = ['arping', '-c', count, '-w', timeout] + if source: + arping_cmd.extend(['-s', source]) + arping_cmd.append(dst_ip) + ns_ip_wrapper.netns.execute(arping_cmd) + + +def assert_no_arping(src_namespace, dst_ip, source=None, timeout=1, count=1): + try: + assert_arping(src_namespace, dst_ip, source, timeout, count) + except RuntimeError: + pass + else: + tools.fail("destination ip %(destination)s is replying to arp from " + "namespace %(ns)s, but it shouldn't" % + {'ns': src_namespace, 'destination': dst_ip}) + + class NamespaceFixture(fixtures.Fixture): """Create a namespace. From 1c124a309bc941c078b8bb622ea248a3ed3829e1 Mon Sep 17 00:00:00 2001 From: Ihar Hrachyshka Date: Tue, 9 Jun 2015 11:28:10 +0200 Subject: [PATCH 164/292] Switch to os-testr to control testr It's a nice wrapper spinned out recently from tempest-lib that should cover all our needs that we currently fulfill with pretty_tox.sh. 
Change-Id: I2268ed45ab628fe5dcab657d6287594847ab587c --- test-requirements.txt | 1 + tools/pretty_tox.sh | 6 ------ tox.ini | 4 ++-- 3 files changed, 3 insertions(+), 8 deletions(-) delete mode 100755 tools/pretty_tox.sh diff --git a/test-requirements.txt b/test-requirements.txt index be4bd087cbc..6a6436aa3fd 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -16,4 +16,5 @@ testtools>=0.9.36,!=1.2.0 testscenarios>=0.4 WebTest>=2.0 oslotest>=1.5.1 # Apache-2.0 +os-testr>=0.1.0 tempest-lib>=0.5.0 diff --git a/tools/pretty_tox.sh b/tools/pretty_tox.sh deleted file mode 100755 index 0fc360530d5..00000000000 --- a/tools/pretty_tox.sh +++ /dev/null @@ -1,6 +0,0 @@ -#! /bin/sh - -TESTRARGS=$1 - -exec 3>&1 -status=$(exec 4>&1 >&3; ( python setup.py testr --slowest --testr-args="--subunit $TESTRARGS"; echo $? >&4 ) | subunit-trace -f) && exit $status diff --git a/tox.ini b/tox.ini index bd28ed3e269..8f9cf95dc9d 100644 --- a/tox.ini +++ b/tox.ini @@ -12,8 +12,8 @@ deps = -r{toxinidir}/requirements.txt whitelist_externals = sh commands = dsvm-functional: {toxinidir}/tools/deploy_rootwrap.sh {toxinidir} {envdir}/etc {envdir}/bin - sh tools/pretty_tox.sh '{posargs}' -# there is also secret magic in pretty_tox.sh which lets you run in a fail only + ostestr --regex '{posargs}' +# there is also secret magic in ostestr which lets you run in a fail only # mode. To do this define the TRACE_FAILONLY environmental variable. [testenv:api] From eeacb95e65a749ce3a032246c36d10cad9df22b1 Mon Sep 17 00:00:00 2001 From: Ihar Hrachyshka Date: Wed, 10 Jun 2015 13:10:54 +0200 Subject: [PATCH 165/292] Actually allow to pass TRACE_FAILONLY to ostestr The comment below suggests to use TRACE_FAILONLY to fail quickly when running unit tests, while tox 2.0 does not allow to pass envvars from the cli caller unless they are explicitly mentioned in passenv= directive. 
Change-Id: I6861498e7609b0c21fad844009420ea9734e2352 --- tox.ini | 1 + 1 file changed, 1 insertion(+) diff --git a/tox.ini b/tox.ini index 8f9cf95dc9d..ab568012d70 100644 --- a/tox.ini +++ b/tox.ini @@ -5,6 +5,7 @@ skipsdist = True [testenv] setenv = VIRTUAL_ENV={envdir} +passenv = TRACE_FAILONLY usedevelop = True install_command = pip install -U {opts} {packages} deps = -r{toxinidir}/requirements.txt From 87fecfcc50f371d8dd593b3cd372da9db56f39c6 Mon Sep 17 00:00:00 2001 From: "Sean M. Collins" Date: Wed, 10 Jun 2015 10:29:33 -0400 Subject: [PATCH 166/292] Make Vlantransparent extension inherit from ExtensionDescriptor Change-Id: Ic615578a1fe1d401b53d0b44ff5275d9518b97fd --- neutron/extensions/vlantransparent.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/neutron/extensions/vlantransparent.py b/neutron/extensions/vlantransparent.py index 4c2d8f980d8..3965d476355 100644 --- a/neutron/extensions/vlantransparent.py +++ b/neutron/extensions/vlantransparent.py @@ -15,6 +15,7 @@ from oslo_config import cfg from oslo_log import log as logging +from neutron.api import extensions from neutron.api.v2 import attributes from neutron.common import exceptions as nexception from neutron.i18n import _LI @@ -45,7 +46,7 @@ def disable_extension_by_config(aliases): LOG.info(_LI('Disabled vlantransparent extension.')) -class Vlantransparent(object): +class Vlantransparent(extensions.ExtensionDescriptor): """Extension class supporting vlan transparent networks.""" @classmethod From ca63dfd0f39c7d691247c146b7529937c5804c9e Mon Sep 17 00:00:00 2001 From: Romil Gupta Date: Wed, 10 Jun 2015 09:43:56 -0700 Subject: [PATCH 167/292] Remove useless pass from methods in type_tunnel.py The pass is useless because there is a docstring in the methods. Generally considered as uncovered by coverage tool. 
Change-Id: Id1275c51e9adb865a3da9f0db007f3092b55b140 --- neutron/plugins/ml2/drivers/type_tunnel.py | 5 ----- 1 file changed, 5 deletions(-) diff --git a/neutron/plugins/ml2/drivers/type_tunnel.py b/neutron/plugins/ml2/drivers/type_tunnel.py index 12dce86f48f..02025cb65a3 100644 --- a/neutron/plugins/ml2/drivers/type_tunnel.py +++ b/neutron/plugins/ml2/drivers/type_tunnel.py @@ -53,7 +53,6 @@ class TunnelTypeDriver(helpers.SegmentTypeDriver): param ip: the IP address of the endpoint param host: the Host name of the endpoint """ - pass @abc.abstractmethod def get_endpoints(self): @@ -62,7 +61,6 @@ class TunnelTypeDriver(helpers.SegmentTypeDriver): :returns a list of dict [{ip_address:endpoint_ip, host:endpoint_host}, ..] """ - pass @abc.abstractmethod def get_endpoint_by_host(self, host): @@ -75,7 +73,6 @@ class TunnelTypeDriver(helpers.SegmentTypeDriver): else :returns None """ - pass @abc.abstractmethod def get_endpoint_by_ip(self, ip): @@ -88,7 +85,6 @@ class TunnelTypeDriver(helpers.SegmentTypeDriver): else :returns None """ - pass @abc.abstractmethod def delete_endpoint(self, ip): @@ -96,7 +92,6 @@ class TunnelTypeDriver(helpers.SegmentTypeDriver): param ip: the IP address of the endpoint """ - pass def _initialize(self, raw_tunnel_ranges): self.tunnel_ranges = [] From 27df3e9fb98407e94bdeb9df493a9a3a0be639ca Mon Sep 17 00:00:00 2001 From: Cedric Brandily Date: Mon, 1 Jun 2015 22:29:39 +0200 Subject: [PATCH 168/292] Ensure no "agent" functional tests are skipped in the gate Some "agent" functional tests[1] can be skipped if some requirements are not satisfied in order to allow developers to run functional tests on various environments. These tests should not be skipped in the gate. This change defines the decorator no_skip_on_missing_deps[2] to ensure no "agent" functional tests are skipped in the gate. 
More precisely no_skip_on_missing_deps transforms a skipTest into an error in: * dsvm-functional and dsvm-fullstack jobs, * functional and fullstack jobs when OS_FAIL_ON_MISSING_DEPS is evaluated as True. The change enlarges OS_FAIL_ON_MISSING_DEPS environment variable scope (ie: missing dependencies + system requirements). [1] in neutron.tests.functional [2] in neutron.tests.common.base Change-Id: Iacd4a5ef249fc1d7c75135ead9d0cf99d8a98a06 Closes-Bug: #1459844 --- neutron/tests/common/base.py | 27 +++++++++++++++++++ .../tests/functional/agent/test_ovs_flows.py | 9 +++++-- neutron/tests/functional/base.py | 7 +++-- 3 files changed, 37 insertions(+), 6 deletions(-) diff --git a/neutron/tests/common/base.py b/neutron/tests/common/base.py index 5d1fbb3f031..11499e8050e 100644 --- a/neutron/tests/common/base.py +++ b/neutron/tests/common/base.py @@ -11,8 +11,14 @@ # under the License. # +import functools +import unittest.case + +import testtools.testcase + from neutron.common import constants as n_const from neutron.tests import base +from neutron.tests import tools def create_resource(prefix, creation_func, *args, **kwargs): @@ -40,3 +46,24 @@ def create_resource(prefix, creation_func, *args, **kwargs): return creation_func(name, *args, **kwargs) except RuntimeError: pass + + +def no_skip_on_missing_deps(wrapped): + """Do not allow a method/test to skip on missing dependencies. + + This decorator raises an error if a skip is raised by wrapped method when + OS_FAIL_ON_MISSING_DEPS is evaluated to True. This decorator should be used + only for missing dependencies (including missing system requirements). 
+ """ + + @functools.wraps(wrapped) + def wrapper(*args, **kwargs): + try: + return wrapped(*args, **kwargs) + except (testtools.TestCase.skipException, unittest.case.SkipTest) as e: + if base.bool_from_env('OS_FAIL_ON_MISSING_DEPS'): + tools.fail( + '%s cannot be skipped because OS_FAIL_ON_MISSING_DEPS ' + 'is enabled, skip reason: %s' % (wrapped.__name__, e)) + raise + return wrapper diff --git a/neutron/tests/functional/agent/test_ovs_flows.py b/neutron/tests/functional/agent/test_ovs_flows.py index 90107d85552..0108577bb0b 100644 --- a/neutron/tests/functional/agent/test_ovs_flows.py +++ b/neutron/tests/functional/agent/test_ovs_flows.py @@ -23,6 +23,7 @@ from neutron.agent.linux import ip_lib from neutron.cmd.sanity import checks from neutron.plugins.openvswitch.agent import ovs_neutron_agent as ovsagt from neutron.plugins.openvswitch.common import constants +from neutron.tests.common import base as common_base from neutron.tests.common import net_helpers from neutron.tests.functional.agent import test_ovs_lib from neutron.tests.functional import base @@ -85,12 +86,11 @@ class _OVSAgentOFCtlTestBase(_OVSAgentTestBase): class _ARPSpoofTestCase(object): def setUp(self): - if not checks.arp_header_match_supported(): - self.skipTest("ARP header matching not supported") # NOTE(kevinbenton): it would be way cooler to use scapy for # these but scapy requires the python process to be running as # root to bind to the ports. 
super(_ARPSpoofTestCase, self).setUp() + self.skip_without_arp_support() self.src_addr = '192.168.0.1' self.dst_addr = '192.168.0.2' self.src_namespace = self.useFixture( @@ -104,6 +104,11 @@ class _ARPSpoofTestCase(object): # wait to add IPs until after anti-spoof rules to ensure ARP doesn't # happen before + @common_base.no_skip_on_missing_deps + def skip_without_arp_support(self): + if not checks.arp_header_match_supported(): + self.skipTest("ARP header matching not supported") + def test_arp_spoof_doesnt_block_normal_traffic(self): self._setup_arp_spoof_for_port(self.src_p.name, [self.src_addr]) self._setup_arp_spoof_for_port(self.dst_p.name, [self.dst_addr]) diff --git a/neutron/tests/functional/base.py b/neutron/tests/functional/base.py index 0907ea729bc..ea4997f6f09 100644 --- a/neutron/tests/functional/base.py +++ b/neutron/tests/functional/base.py @@ -20,6 +20,7 @@ from oslo_config import cfg from neutron.agent.common import config from neutron.agent.linux import utils from neutron.tests import base +from neutron.tests.common import base as common_base SUDO_CMD = 'sudo -n' @@ -51,9 +52,6 @@ class BaseSudoTestCase(base.BaseTestCase): if not base.bool_from_env('OS_SUDO_TESTING'): self.skipTest('Testing with sudo is not enabled') - self.fail_on_missing_deps = ( - base.bool_from_env('OS_FAIL_ON_MISSING_DEPS')) - config.register_root_helper(cfg.CONF) self.config(group='AGENT', root_helper=os.environ.get('OS_ROOTWRAP_CMD', SUDO_CMD)) @@ -61,10 +59,11 @@ class BaseSudoTestCase(base.BaseTestCase): root_helper_daemon=os.environ.get( 'OS_ROOTWRAP_DAEMON_CMD')) + @common_base.no_skip_on_missing_deps def check_command(self, cmd, error_text, skip_msg, run_as_root=False): try: utils.execute(cmd, run_as_root=run_as_root) except RuntimeError as e: - if error_text in str(e) and not self.fail_on_missing_deps: + if error_text in str(e): self.skipTest(skip_msg) raise From c34ce7c9845cc56f981e0ee8714d1f9345df5852 Mon Sep 17 00:00:00 2001 From: Saksham Varma Date: Tue, 7 Apr 
2015 18:12:02 -0700 Subject: [PATCH 169/292] Moving out the cisco n1kv section to stackforge Since most of the n1kv plugin code resides in stackforge/networking-cisco repo, it's best to move the n1kv section there Change-Id: Ic1388980dea0d27dfa5e84869f1f20cc9bff78e5 Closes-Bug: #1441400 --- etc/neutron/plugins/ml2/ml2_conf_cisco.ini | 39 ---------------------- 1 file changed, 39 deletions(-) diff --git a/etc/neutron/plugins/ml2/ml2_conf_cisco.ini b/etc/neutron/plugins/ml2/ml2_conf_cisco.ini index fa4b5b0d529..699b2ec3724 100644 --- a/etc/neutron/plugins/ml2/ml2_conf_cisco.ini +++ b/etc/neutron/plugins/ml2/ml2_conf_cisco.ini @@ -207,45 +207,6 @@ # cidr_exposed=10.10.40.2/16 # gateway_ip=10.10.40.1 - -[ml2_cisco_n1kv] - -# (StrOpt) Name of the policy profile to be associated with a port when no -# policy profile is specified during port creates. -# default_policy_profile = default-pp - -# (StrOpt) Name of the VLAN network profile to be associated with a network. -# default_vlan_network_profile = default-vlan-np - -# (StrOpt) Name of the VXLAN network profile to be associated with a network. -# default_vxlan_network_profile = default-vxlan-np - -# (IntOpt) Time in seconds for which the plugin polls the VSM for updates in -# policy profiles. -# poll_duration = 60 - -# (IntOpt) Timeout duration in seconds for the http request -# http_timeout = 15 - -# (BoolOpt) Specify whether tenants are restricted from accessing all the -# policy profiles. -# Default value: False, indicating all tenants can access all policy profiles. -# -# restrict_policy_profiles = False - -# Describe Cisco N1KV VSM connectivity -# In this section you can specify connectivity details in order for plugin -# to connect to N1KV Virtual Supervisor Module (VSM). -# -# n1kv_vsm_ips =,,.... 
-# username = -# password = -# -# An example would be: -# n1kv_vsm_ips = 1.1.1.1,1.1.1.2 -# username = user -# password = password - [ml2_cisco_ucsm] # Cisco UCS Manager IP address From fd85b3ead32cd988e93f1d33d219ffd52cd77a51 Mon Sep 17 00:00:00 2001 From: Cyril Roelandt Date: Wed, 10 Jun 2015 10:20:58 +0000 Subject: [PATCH 170/292] Python3: replace 'unicode' with 'six.text_type' In Python 3, 'unicode' does not exist; 'six.text_type' should be used instead. Change-Id: I71011b4beee9817a61278eb473804cfb798de74a Blueprint: neutron-python3 --- neutron/agent/metadata/agent.py | 7 +++++-- neutron/agent/metadata/namespace_proxy.py | 7 +++++-- neutron/api/extensions.py | 3 ++- neutron/common/exceptions.py | 6 ++++-- neutron/policy.py | 2 +- neutron/tests/unit/api/v2/test_base.py | 4 +++- neutron/wsgi.py | 2 +- 7 files changed, 21 insertions(+), 10 deletions(-) diff --git a/neutron/agent/metadata/agent.py b/neutron/agent/metadata/agent.py index e2cad9c9ace..769d8039bc0 100644 --- a/neutron/agent/metadata/agent.py +++ b/neutron/agent/metadata/agent.py @@ -20,6 +20,7 @@ from neutronclient.v2_0 import client from oslo_config import cfg from oslo_log import log as logging import oslo_messaging +import six import six.moves.urllib.parse as urlparse import webob @@ -116,7 +117,8 @@ class MetadataProxyHandler(object): LOG.exception(_LE("Unexpected error.")) msg = _('An unknown error has occurred. ' 'Please try your request again.') - return webob.exc.HTTPInternalServerError(explanation=unicode(msg)) + explanation = six.text_type(msg) + return webob.exc.HTTPInternalServerError(explanation=explanation) def _get_ports_from_server(self, router_id=None, ip_address=None, networks=None): @@ -257,7 +259,8 @@ class MetadataProxyHandler(object): 'Remote metadata server experienced an internal server error.' 
) LOG.warn(msg) - return webob.exc.HTTPInternalServerError(explanation=unicode(msg)) + explanation = six.text_type(msg) + return webob.exc.HTTPInternalServerError(explanation=explanation) else: raise Exception(_('Unexpected response code: %s') % resp.status) diff --git a/neutron/agent/metadata/namespace_proxy.py b/neutron/agent/metadata/namespace_proxy.py index e84a256de69..d68cb2493a5 100644 --- a/neutron/agent/metadata/namespace_proxy.py +++ b/neutron/agent/metadata/namespace_proxy.py @@ -15,6 +15,7 @@ import httplib2 from oslo_config import cfg from oslo_log import log as logging +import six import six.moves.urllib.parse as urlparse import webob @@ -56,7 +57,8 @@ class NetworkMetadataProxyHandler(object): LOG.exception(_LE("Unexpected error.")) msg = _('An unknown error has occurred. ' 'Please try your request again.') - return webob.exc.HTTPInternalServerError(explanation=unicode(msg)) + explanation = six.text_type(msg) + return webob.exc.HTTPInternalServerError(explanation=explanation) def _proxy_request(self, remote_address, method, path_info, query_string, body): @@ -103,7 +105,8 @@ class NetworkMetadataProxyHandler(object): 'Remote metadata server experienced an internal server error.' 
) LOG.debug(msg) - return webob.exc.HTTPInternalServerError(explanation=unicode(msg)) + explanation = six.text_type(msg) + return webob.exc.HTTPInternalServerError(explanation=explanation) else: raise Exception(_('Unexpected response code: %s') % resp.status) diff --git a/neutron/api/extensions.py b/neutron/api/extensions.py index fa275bfe02f..f6b4601ba21 100644 --- a/neutron/api/extensions.py +++ b/neutron/api/extensions.py @@ -506,7 +506,8 @@ class ExtensionManager(object): LOG.debug('Ext namespace: %s', extension.get_namespace()) LOG.debug('Ext updated: %s', extension.get_updated()) except AttributeError as ex: - LOG.exception(_LE("Exception loading extension: %s"), unicode(ex)) + LOG.exception(_LE("Exception loading extension: %s"), + six.text_type(ex)) return False return True diff --git a/neutron/common/exceptions.py b/neutron/common/exceptions.py index 5d29d2afe04..c6ec6ccca54 100644 --- a/neutron/common/exceptions.py +++ b/neutron/common/exceptions.py @@ -18,6 +18,7 @@ Neutron base exception handling. 
""" from oslo_utils import excutils +import six class NeutronException(Exception): @@ -40,8 +41,9 @@ class NeutronException(Exception): # at least get the core message out if something happened super(NeutronException, self).__init__(self.message) - def __unicode__(self): - return unicode(self.msg) + if six.PY2: + def __unicode__(self): + return unicode(self.msg) def use_fatal_exceptions(self): return False diff --git a/neutron/policy.py b/neutron/policy.py index a2d099f6761..63a0820b3f1 100644 --- a/neutron/policy.py +++ b/neutron/policy.py @@ -313,7 +313,7 @@ class OwnerCheck(policy.Check): f) match = self.match % target if self.kind in creds: - return match == unicode(creds[self.kind]) + return match == six.text_type(creds[self.kind]) return False diff --git a/neutron/tests/unit/api/v2/test_base.py b/neutron/tests/unit/api/v2/test_base.py index ab05215e3f9..ccb4f44b14f 100644 --- a/neutron/tests/unit/api/v2/test_base.py +++ b/neutron/tests/unit/api/v2/test_base.py @@ -929,7 +929,9 @@ class JSONV2TestCase(APIv2TestBase, testlib_api.WebTestCase): return_value.update(initial_input['port']) instance = self.plugin.return_value - instance.get_network.return_value = {'tenant_id': unicode(tenant_id)} + instance.get_network.return_value = { + 'tenant_id': six.text_type(tenant_id) + } instance.get_ports_count.return_value = 1 instance.create_port.return_value = return_value res = self.api.post(_get_path('ports', fmt=self.fmt), diff --git a/neutron/wsgi.py b/neutron/wsgi.py index 437e57b0984..7adba758e4b 100644 --- a/neutron/wsgi.py +++ b/neutron/wsgi.py @@ -412,7 +412,7 @@ class JSONDictSerializer(DictSerializer): def default(self, data): def sanitizer(obj): - return unicode(obj) + return six.text_type(obj) return jsonutils.dumps(data, default=sanitizer) From 9c8a19ba4032f98ecbffe53c4e731587550ded96 Mon Sep 17 00:00:00 2001 From: Cedric Brandily Date: Wed, 10 Jun 2015 22:08:45 +0200 Subject: [PATCH 171/292] Correct indentation in neutron.api.v2.attributes This change 
corrects subnetpool resource definition indentation in neutron.api.v2.attributes. Change-Id: I6738ff6b73bd0b943cec32f14ccb8946ba28d2e3 --- neutron/api/v2/attributes.py | 30 +++++++++++++++--------------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/neutron/api/v2/attributes.py b/neutron/api/v2/attributes.py index 8adecc1ed38..bfc056010fc 100644 --- a/neutron/api/v2/attributes.py +++ b/neutron/api/v2/attributes.py @@ -816,23 +816,23 @@ RESOURCE_ATTRIBUTE_MAP = { 'allow_put': False, 'is_visible': True}, 'default_prefixlen': {'allow_post': True, - 'allow_put': True, - 'validate': {'type:non_negative': None}, - 'convert_to': convert_to_int, - 'default': ATTR_NOT_SPECIFIED, - 'is_visible': True}, + 'allow_put': True, + 'validate': {'type:non_negative': None}, + 'convert_to': convert_to_int, + 'default': ATTR_NOT_SPECIFIED, + 'is_visible': True}, 'min_prefixlen': {'allow_post': True, - 'allow_put': True, - 'default': ATTR_NOT_SPECIFIED, - 'validate': {'type:non_negative': None}, - 'convert_to': convert_to_int, - 'is_visible': True}, + 'allow_put': True, + 'default': ATTR_NOT_SPECIFIED, + 'validate': {'type:non_negative': None}, + 'convert_to': convert_to_int, + 'is_visible': True}, 'max_prefixlen': {'allow_post': True, - 'allow_put': True, - 'default': ATTR_NOT_SPECIFIED, - 'validate': {'type:non_negative': None}, - 'convert_to': convert_to_int, - 'is_visible': True}, + 'allow_put': True, + 'default': ATTR_NOT_SPECIFIED, + 'validate': {'type:non_negative': None}, + 'convert_to': convert_to_int, + 'is_visible': True}, SHARED: {'allow_post': True, 'allow_put': False, 'default': False, From 7c331be77fb6a835f1fb79c674d8d6c39c7eb357 Mon Sep 17 00:00:00 2001 From: armando-migliaccio Date: Wed, 10 Jun 2015 16:53:25 -0700 Subject: [PATCH 172/292] Document existence of br-tun and br-int in the OVS agent Question about the use of the two bridges has come up in the past multiple times, so let's fill the gap in the developer documentation. 
A user-facing documentation patch will have to follow up, if we want to be very thorough. Change-Id: I6dac0f9bdaf7b3b7bff8745d4103ccc71df61a0a --- doc/source/devref/openvswitch_agent.rst | 35 +++++++++++++++++++++++++ 1 file changed, 35 insertions(+) diff --git a/doc/source/devref/openvswitch_agent.rst b/doc/source/devref/openvswitch_agent.rst index ae8660af3c5..6f3045a030b 100644 --- a/doc/source/devref/openvswitch_agent.rst +++ b/doc/source/devref/openvswitch_agent.rst @@ -36,6 +36,41 @@ More information can be found in `The VXLAN wiki page. `_ +Bridge Management +----------------- + +In order to make the agent capable of handling more than one tunneling +technology, to decouple the requirements of segmentation technology +from tenant isolation, and to preserve backward compatibility for OVS +agents working without tunneling, the agent relies on a tunneling bridge, +or br-tun, and the well known integration bridge, or br-int. + +All VM VIFs are plugged into the integration bridge. VM VIFs on a given +virtual network share a common "local" VLAN (i.e. not propagated +externally). The VLAN id of this local VLAN is mapped to the physical +networking details realizing that virtual network. + +For virtual networks realized as VXLAN/GRE tunnels, a Logical Switch +(LS) identifier is used to differentiate tenant traffic on inter-HV +tunnels. A mesh of tunnels is created to other Hypervisors in the +cloud. These tunnels originate and terminate on the tunneling bridge +of each hypervisor, leaving br-int unaffected. Port patching is done +to connect local VLANs on the integration bridge to inter-hypervisor +tunnels on the tunnel bridge. 
+ +For each virtual network realized as a VLAN or flat network, a veth +or a pair of patch ports is used to connect the local VLAN on +the integration bridge with the physical network bridge, with flow +rules adding, modifying, or stripping VLAN tags as necessary, thus +preserving backward compatibility with the way the OVS agent used +to work prior to the tunneling capability (for more details, please +look at https://review.openstack.org/#/c/4367). + +Bear in mind, that this design decision may be overhauled in the +future to support existing VLAN-tagged traffic (coming from NFV VMs +for instance) and/or to deal with potential QinQ support natively +available in the Open vSwitch. + Further Reading --------------- From 1c29fab7cb3e586be72dd7910e2022b45c809c5f Mon Sep 17 00:00:00 2001 From: Brian Haley Date: Thu, 4 Jun 2015 23:54:31 -0400 Subject: [PATCH 173/292] Change ensure_dir to not check directory exists first I224be69168ede8a496a5f7d59b04b722f4de7192 added an EEXIST check, so no need to check if the directory is already there, just try and create it. Change-Id: Iba51fc8263bf59326489319d0dd3f69af00a8eeb --- neutron/agent/linux/utils.py | 14 ++++++-------- neutron/tests/unit/agent/linux/test_dhcp.py | 2 -- neutron/tests/unit/agent/linux/test_utils.py | 8 ++++++-- 3 files changed, 12 insertions(+), 12 deletions(-) diff --git a/neutron/agent/linux/utils.py b/neutron/agent/linux/utils.py index dc22a0e069b..f57a1c919e7 100644 --- a/neutron/agent/linux/utils.py +++ b/neutron/agent/linux/utils.py @@ -191,14 +191,12 @@ def find_child_pids(pid): def ensure_dir(dir_path): """Ensure a directory with 755 permissions mode.""" - if not os.path.isdir(dir_path): - try: - os.makedirs(dir_path, 0o755) - except OSError as e: - # Make sure that the error was that the directory was created - # by a different (concurrent) worker. If not, raise the error. 
- if e.errno != errno.EEXIST: - raise + try: + os.makedirs(dir_path, 0o755) + except OSError as e: + # If the directory already existed, don't raise the error. + if e.errno != errno.EEXIST: + raise def _get_conf_base(cfg_root, uuid, ensure_conf_dir): diff --git a/neutron/tests/unit/agent/linux/test_dhcp.py b/neutron/tests/unit/agent/linux/test_dhcp.py index 483680326a4..0e5014bd6e1 100644 --- a/neutron/tests/unit/agent/linux/test_dhcp.py +++ b/neutron/tests/unit/agent/linux/test_dhcp.py @@ -661,8 +661,6 @@ class TestBase(base.BaseTestCase): self.execute = self.execute_p.start() self.makedirs = mock.patch('os.makedirs').start() - self.isdir = mock.patch('os.path.isdir').start() - self.isdir.return_value = False self.rmtree = mock.patch('shutil.rmtree').start() self.external_process = mock.patch( diff --git a/neutron/tests/unit/agent/linux/test_utils.py b/neutron/tests/unit/agent/linux/test_utils.py index aa510f96de7..9958d0422f8 100644 --- a/neutron/tests/unit/agent/linux/test_utils.py +++ b/neutron/tests/unit/agent/linux/test_utils.py @@ -283,13 +283,17 @@ class TestBaseOSUtils(base.BaseTestCase): getgrgid.assert_called_once_with(self.EGID) @mock.patch('os.makedirs') - @mock.patch('os.path.exists', return_value=False) - def test_ensure_dir_no_fail_if_exists(self, path_exists, makedirs): + def test_ensure_dir_no_fail_if_exists(self, makedirs): error = OSError() error.errno = errno.EEXIST makedirs.side_effect = error utils.ensure_dir("/etc/create/concurrently") + @mock.patch('os.makedirs') + def test_ensure_dir_calls_makedirs(self, makedirs): + utils.ensure_dir("/etc/create/directory") + makedirs.assert_called_once_with("/etc/create/directory", 0o755) + class TestUnixDomainHttpConnection(base.BaseTestCase): def test_connect(self): From 7b51521e31f896d0095510b52644b728aaadca5a Mon Sep 17 00:00:00 2001 From: Kevin Benton Date: Wed, 10 Jun 2015 21:45:41 -0700 Subject: [PATCH 174/292] power grab The current core reviewers hierarchy didn't have a place for the parts of 
ML2 that weren't related to agent communication. For now we can put all of ML2 under the built-in control-plane until we decide it needs to be put somewhere else. Change-Id: Ic4924e0041c4cbb955d8fac0f96ec56406d6466e --- doc/source/policies/core-reviewers.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/policies/core-reviewers.rst b/doc/source/policies/core-reviewers.rst index 09a1c451a6d..0c690aa3ae9 100644 --- a/doc/source/policies/core-reviewers.rst +++ b/doc/source/policies/core-reviewers.rst @@ -83,7 +83,7 @@ The following are the current Neutron Lieutenants. Some notes on the above: * "Built-In Control Plane" means the L2 agents, DHCP agents, SGs, metadata - agents and the portion of ML2 which communicates with the agents. + agents and ML2. * The client includes commands installed server side. * L3 includes the L3 agent, DVR, and IPAM. * Services includes FWaaS, LBaaS, and VPNaaS. From cd56a657a19a5a756d191c614becfd3e386b3c80 Mon Sep 17 00:00:00 2001 From: OpenStack Proposal Bot Date: Thu, 11 Jun 2015 06:03:07 +0000 Subject: [PATCH 175/292] Imported Translations from Transifex For more information about this automatic import see: https://wiki.openstack.org/wiki/Translations/Infrastructure Change-Id: If91f3ac94562cc5130dd5ea5ac5d71aec64b74e3 --- .../locale/es/LC_MESSAGES/neutron-log-info.po | 7 +- neutron/locale/neutron-log-error.pot | 267 +++++----- neutron/locale/neutron-log-info.pot | 235 ++++----- neutron/locale/neutron-log-warning.pot | 119 ++--- neutron/locale/neutron.pot | 487 ++++++++---------- .../pt_BR/LC_MESSAGES/neutron-log-info.po | 11 +- .../zh_CN/LC_MESSAGES/neutron-log-info.po | 9 +- 7 files changed, 538 insertions(+), 597 deletions(-) diff --git a/neutron/locale/es/LC_MESSAGES/neutron-log-info.po b/neutron/locale/es/LC_MESSAGES/neutron-log-info.po index 862493ba88c..db80f74801c 100644 --- a/neutron/locale/es/LC_MESSAGES/neutron-log-info.po +++ b/neutron/locale/es/LC_MESSAGES/neutron-log-info.po @@ -7,8 +7,8 @@ 
msgid "" msgstr "" "Project-Id-Version: Neutron\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2015-05-29 06:16+0000\n" -"PO-Revision-Date: 2015-05-28 20:54+0000\n" +"POT-Creation-Date: 2015-06-11 06:02+0000\n" +"PO-Revision-Date: 2015-06-10 23:52+0000\n" "Last-Translator: openstackjenkins \n" "Language-Team: Spanish (http://www.transifex.com/projects/p/neutron/language/" "es/)\n" @@ -265,6 +265,9 @@ msgstr "" "Se ha encontrado un error en validación para CIDR: %(new_cidr)s; se solapa " "con la subred %(subnet_id)s (CIDR: %(cidr)s)" +msgid "Wait called after thread killed. Cleaning up." +msgstr "Esperar llamado después de cortar la línea. Limpiando." + #, python-format msgid "Waiting on %d children to exit" msgstr "En espera de %d hijos para salir" diff --git a/neutron/locale/neutron-log-error.pot b/neutron/locale/neutron-log-error.pot index a5b1b8ea5a9..1214d575b1c 100644 --- a/neutron/locale/neutron-log-error.pot +++ b/neutron/locale/neutron-log-error.pot @@ -6,9 +6,9 @@ #, fuzzy msgid "" msgstr "" -"Project-Id-Version: neutron 2015.2.0.dev533\n" +"Project-Id-Version: neutron 2015.2.0.dev464\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2015-06-02 06:15+0000\n" +"POT-Creation-Date: 2015-06-11 06:02+0000\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME \n" "Language-Team: LANGUAGE \n" @@ -17,83 +17,76 @@ msgstr "" "Content-Transfer-Encoding: 8bit\n" "Generated-By: Babel 1.3\n" -#: neutron/manager.py:133 +#: neutron/manager.py:134 msgid "Error, plugin is not set" msgstr "" -#: neutron/manager.py:144 -#, python-format -msgid "Error loading plugin by name, %s" -msgstr "" - #: neutron/manager.py:145 #, python-format +msgid "Error loading plugin by name, %s" +msgstr "" + +#: neutron/manager.py:146 +#, python-format msgid "Error loading plugin by class, %s" msgstr "" -#: neutron/policy.py:123 -#, python-format -msgid "" -"Backward compatibility unavailable for deprecated policy %s. 
The policy " -"will not be enforced" -msgstr "" - -#: neutron/policy.py:311 +#: neutron/policy.py:266 #, python-format msgid "Policy check error while calling %s!" msgstr "" -#: neutron/service.py:107 neutron/service.py:165 +#: neutron/service.py:106 neutron/service.py:167 msgid "Unrecoverable error: please check log for details." msgstr "" -#: neutron/service.py:148 +#: neutron/service.py:146 #, python-format msgid "'rpc_workers = %d' ignored because start_rpc_listeners is not implemented." msgstr "" -#: neutron/service.py:172 +#: neutron/service.py:174 msgid "No known API applications configured." msgstr "" -#: neutron/service.py:279 +#: neutron/service.py:281 msgid "Exception occurs when timer stops" msgstr "" -#: neutron/service.py:288 +#: neutron/service.py:290 msgid "Exception occurs when waiting for timer" msgstr "" -#: neutron/wsgi.py:152 +#: neutron/wsgi.py:159 #, python-format msgid "Unable to listen on %(host)s:%(port)s" msgstr "" -#: neutron/wsgi.py:788 +#: neutron/wsgi.py:799 #, python-format msgid "InvalidContentType: %s" msgstr "" -#: neutron/wsgi.py:792 +#: neutron/wsgi.py:803 #, python-format msgid "MalformedRequestBody: %s" msgstr "" -#: neutron/wsgi.py:801 +#: neutron/wsgi.py:812 msgid "Internal error" msgstr "" -#: neutron/agent/common/ovs_lib.py:218 neutron/agent/common/ovs_lib.py:313 +#: neutron/agent/common/ovs_lib.py:219 neutron/agent/common/ovs_lib.py:314 #, python-format msgid "Unable to execute %(cmd)s. Exception: %(exception)s" msgstr "" -#: neutron/agent/common/ovs_lib.py:239 +#: neutron/agent/common/ovs_lib.py:240 #, python-format msgid "Timed out retrieving ofport on port %(pname)s. Exception: %(exception)s" msgstr "" -#: neutron/agent/common/ovs_lib.py:532 +#: neutron/agent/common/ovs_lib.py:533 #, python-format msgid "OVS flows could not be applied on bridge %s" msgstr "" @@ -117,61 +110,61 @@ msgstr "" msgid "Network %s info call failed." 
msgstr "" -#: neutron/agent/dhcp/agent.py:582 neutron/agent/l3/agent.py:616 +#: neutron/agent/dhcp/agent.py:573 neutron/agent/l3/agent.py:627 #: neutron/agent/metadata/agent.py:311 #: neutron/plugins/hyperv/agent/l2_agent.py:94 -#: neutron/plugins/ibm/agent/sdnve_neutron_agent.py:108 -#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:787 -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:295 +#: neutron/plugins/ibm/agent/sdnve_neutron_agent.py:109 +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:812 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:300 #: neutron/plugins/sriovnicagent/sriov_nic_agent.py:129 #: neutron/services/metering/agents/metering_agent.py:283 msgid "Failed reporting state!" msgstr "" -#: neutron/agent/l3/agent.py:172 neutron/tests/unit/agent/l3/test_agent.py:2150 +#: neutron/agent/l3/agent.py:173 neutron/tests/unit/agent/l3/test_agent.py:2193 #, python-format msgid "Error importing interface driver '%s'" msgstr "" -#: neutron/agent/l3/agent.py:234 neutron/agent/linux/dhcp.py:875 +#: neutron/agent/l3/agent.py:235 neutron/agent/linux/dhcp.py:879 msgid "An interface driver must be specified" msgstr "" -#: neutron/agent/l3/agent.py:239 +#: neutron/agent/l3/agent.py:240 msgid "Router id is required if not using namespaces." msgstr "" -#: neutron/agent/l3/agent.py:246 +#: neutron/agent/l3/agent.py:247 #, python-format msgid "%s used in config as ipv6_gateway is not a valid IPv6 link-local address." 
msgstr "" -#: neutron/agent/l3/agent.py:328 +#: neutron/agent/l3/agent.py:329 #, python-format msgid "Error while deleting router %s" msgstr "" -#: neutron/agent/l3/agent.py:392 +#: neutron/agent/l3/agent.py:398 #, python-format msgid "The external network bridge '%s' does not exist" msgstr "" -#: neutron/agent/l3/agent.py:446 +#: neutron/agent/l3/agent.py:452 #, python-format msgid "Failed to fetch router information for '%s'" msgstr "" -#: neutron/agent/l3/agent.py:469 +#: neutron/agent/l3/agent.py:475 #, python-format msgid "Removing incompatible router '%s'" msgstr "" -#: neutron/agent/l3/agent.py:473 +#: neutron/agent/l3/agent.py:479 #, python-format msgid "Failed to process compatible router '%s'" msgstr "" -#: neutron/agent/l3/agent.py:525 +#: neutron/agent/l3/agent.py:531 msgid "Failed synchronizing routers due to RPC error" msgstr "" @@ -248,7 +241,7 @@ msgstr "" msgid "Pidfile %s already exist. Daemon already running?" msgstr "" -#: neutron/agent/linux/dhcp.py:881 +#: neutron/agent/linux/dhcp.py:885 #, python-format msgid "Error importing interface driver '%(driver)s': %(inner)s" msgstr "" @@ -276,36 +269,36 @@ msgid "" "identified by uuid %(uuid)s" msgstr "" -#: neutron/agent/linux/interface.py:158 -#, python-format -msgid "Failed deleting ingress connection state of floatingip %s" -msgstr "" - -#: neutron/agent/linux/interface.py:167 -#, python-format -msgid "Failed deleting egress connection state of floatingip %s" -msgstr "" - -#: neutron/agent/linux/interface.py:294 neutron/agent/linux/interface.py:331 -#: neutron/agent/linux/interface.py:389 neutron/agent/linux/interface.py:425 +#: neutron/agent/linux/interface.py:262 neutron/agent/linux/interface.py:299 +#: neutron/agent/linux/interface.py:357 neutron/agent/linux/interface.py:393 #, python-format msgid "Failed unplugging interface '%s'" msgstr "" -#: neutron/agent/linux/ip_lib.py:407 +#: neutron/agent/linux/ip_lib.py:237 +#, python-format +msgid "Failed deleting ingress connection state of 
floatingip %s" +msgstr "" + +#: neutron/agent/linux/ip_lib.py:246 +#, python-format +msgid "Failed deleting egress connection state of floatingip %s" +msgstr "" + +#: neutron/agent/linux/ip_lib.py:442 msgid "Address not present on interface" msgstr "" -#: neutron/agent/linux/ip_lib.py:412 +#: neutron/agent/linux/ip_lib.py:447 msgid "Duplicate adddress detected" msgstr "" -#: neutron/agent/linux/ip_lib.py:413 +#: neutron/agent/linux/ip_lib.py:448 #, python-format msgid "Exceeded %s second limit waiting for address to leave the tentative state." msgstr "" -#: neutron/agent/linux/ip_lib.py:718 +#: neutron/agent/linux/ip_lib.py:753 #, python-format msgid "Failed sending gratuitous ARP to %(addr)s on %(iface)s in namespace %(ns)s" msgstr "" @@ -322,11 +315,11 @@ msgstr "" msgid "Unable to parse route \"%s\"" msgstr "" -#: neutron/agent/linux/iptables_manager.py:403 +#: neutron/agent/linux/iptables_manager.py:404 msgid "Failure applying iptables rules" msgstr "" -#: neutron/agent/linux/iptables_manager.py:481 +#: neutron/agent/linux/iptables_manager.py:482 #, python-format msgid "" "IPTablesManager.apply failed to apply the following set of iptables " @@ -339,7 +332,7 @@ msgstr "" msgid "Error received from ovsdb monitor: %s" msgstr "" -#: neutron/agent/linux/utils.py:220 +#: neutron/agent/linux/utils.py:227 #, python-format msgid "Unable to convert value in %s" msgstr "" @@ -354,12 +347,12 @@ msgstr "" msgid "OVSDB Error: %s" msgstr "" -#: neutron/agent/ovsdb/impl_vsctl.py:68 +#: neutron/agent/ovsdb/impl_vsctl.py:67 #, python-format msgid "Unable to execute %(cmd)s." msgstr "" -#: neutron/agent/ovsdb/impl_vsctl.py:127 +#: neutron/agent/ovsdb/impl_vsctl.py:126 #, python-format msgid "Could not parse: %s" msgstr "" @@ -419,13 +412,13 @@ msgid "" "message %s" msgstr "" -#: neutron/api/rpc/handlers/l3_rpc.py:73 +#: neutron/api/rpc/handlers/l3_rpc.py:74 msgid "" "No plugin for L3 routing registered! Will reply to l3 agent with empty " "router dictionary." 
msgstr "" -#: neutron/api/v2/base.py:374 +#: neutron/api/v2/base.py:375 #, python-format msgid "Unable to undo add for %(resource)s %(id)s" msgstr "" @@ -540,19 +533,23 @@ msgstr "" msgid "Failed to schedule network %s" msgstr "" -#: neutron/db/agentschedulers_db.py:299 +#: neutron/db/agentschedulers_db.py:301 #, python-format msgid "" "Unexpected exception occurred while removing network %(net)s from agent " "%(agent)s" msgstr "" -#: neutron/db/db_base_plugin_v2.py:953 neutron/plugins/ml2/plugin.py:570 +#: neutron/db/agentschedulers_db.py:312 +msgid "Exception encountered during network rescheduling" +msgstr "" + +#: neutron/db/db_base_plugin_v2.py:359 neutron/plugins/ml2/plugin.py:566 #, python-format msgid "An exception occurred while creating the %(resource)s:%(item)s" msgstr "" -#: neutron/db/db_base_plugin_v2.py:1821 +#: neutron/db/db_base_plugin_v2.py:1141 #, python-format msgid "Unable to generate mac address after %s attempts" msgstr "" @@ -567,16 +564,16 @@ msgstr "" msgid "Could not retrieve gateway port for subnet %s" msgstr "" -#: neutron/db/l3_agentschedulers_db.py:117 +#: neutron/db/l3_agentschedulers_db.py:118 #, python-format msgid "Failed to reschedule router %s" msgstr "" -#: neutron/db/l3_agentschedulers_db.py:122 +#: neutron/db/l3_agentschedulers_db.py:123 msgid "Exception encountered during router rescheduling." msgstr "" -#: neutron/db/l3_db.py:541 +#: neutron/db/l3_db.py:542 msgid "Cannot have multiple IPv4 subnets on router port" msgstr "" @@ -718,210 +715,210 @@ msgstr "" msgid "Delete floatingip failed in SDN-VE: %s" msgstr "" -#: neutron/plugins/ibm/agent/sdnve_neutron_agent.py:195 +#: neutron/plugins/ibm/agent/sdnve_neutron_agent.py:196 #, python-format msgid "" "Interface %(interface)s for physical network %(physical_network)s does " "not exist. Agent terminated!" 
msgstr "" -#: neutron/plugins/ibm/agent/sdnve_neutron_agent.py:255 -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1658 +#: neutron/plugins/ibm/agent/sdnve_neutron_agent.py:256 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1688 #, python-format msgid "%s Agent terminated!" msgstr "" -#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:184 +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:185 #, python-format msgid "Failed creating vxlan interface for %(segmentation_id)s" msgstr "" -#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:339 +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:340 #, python-format msgid "Unable to add %(interface)s to %(bridge_name)s! Exception: %(e)s" msgstr "" -#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:352 +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:353 #, python-format msgid "Unable to add vxlan interface for network %s" msgstr "" -#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:359 +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:360 #, python-format msgid "No mapping for physical network %s" msgstr "" -#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:368 +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:369 #, python-format msgid "Unknown network_type %(network_type)s for network %(network_id)s." msgstr "" -#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:461 +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:462 #, python-format msgid "Cannot delete bridge %s, does not exist" msgstr "" -#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:540 +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:541 msgid "No valid Segmentation ID to perform UCAST test." 
msgstr "" -#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:797 +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:822 msgid "Unable to obtain MAC address for unique ID. Agent terminated!" msgstr "" -#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:994 +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:1019 #: neutron/plugins/sriovnicagent/sriov_nic_agent.py:270 #, python-format msgid "Error in agent loop. Devices info: %s" msgstr "" -#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:1017 +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:1047 #: neutron/plugins/ml2/drivers/mlnx/agent/eswitch_neutron_agent.py:40 #, python-format msgid "Parsing physical_interface_mappings failed: %s. Agent terminated!" msgstr "" -#: neutron/plugins/ml2/db.py:241 neutron/plugins/ml2/db.py:325 +#: neutron/plugins/ml2/db.py:242 neutron/plugins/ml2/db.py:326 #: neutron/plugins/ml2/plugin.py:1341 #, python-format msgid "Multiple ports have port_id starting with %s" msgstr "" -#: neutron/plugins/ml2/managers.py:59 +#: neutron/plugins/ml2/managers.py:60 #, python-format msgid "" "Type driver '%(new_driver)s' ignored because type driver '%(old_driver)s'" " is already registered for type '%(type)s'" msgstr "" -#: neutron/plugins/ml2/managers.py:75 +#: neutron/plugins/ml2/managers.py:76 #, python-format msgid "No type driver for tenant network_type: %s. Service terminated!" msgstr "" -#: neutron/plugins/ml2/managers.py:82 +#: neutron/plugins/ml2/managers.py:83 #, python-format msgid "No type driver for external network_type: %s. Service terminated!" 
msgstr "" -#: neutron/plugins/ml2/managers.py:151 +#: neutron/plugins/ml2/managers.py:152 #, python-format msgid "Network %s has no segments" msgstr "" -#: neutron/plugins/ml2/managers.py:250 neutron/plugins/ml2/managers.py:277 +#: neutron/plugins/ml2/managers.py:251 neutron/plugins/ml2/managers.py:278 #, python-format msgid "Failed to release segment '%s' because network type is not supported." msgstr "" -#: neutron/plugins/ml2/managers.py:352 +#: neutron/plugins/ml2/managers.py:353 #, python-format msgid "Mechanism driver '%(name)s' failed in %(method)s" msgstr "" -#: neutron/plugins/ml2/managers.py:638 neutron/plugins/ml2/managers.py:700 +#: neutron/plugins/ml2/managers.py:639 neutron/plugins/ml2/managers.py:701 #, python-format msgid "Failed to bind port %(port)s on host %(host)s" msgstr "" -#: neutron/plugins/ml2/managers.py:653 +#: neutron/plugins/ml2/managers.py:654 #, python-format msgid "" "Exceeded maximum binding levels attempting to bind port %(port)s on host " "%(host)s" msgstr "" -#: neutron/plugins/ml2/managers.py:696 +#: neutron/plugins/ml2/managers.py:697 #, python-format msgid "Mechanism driver %s failed in bind_port" msgstr "" -#: neutron/plugins/ml2/managers.py:767 +#: neutron/plugins/ml2/managers.py:768 #, python-format msgid "Extension driver '%(name)s' failed in %(method)s" msgstr "" -#: neutron/plugins/ml2/plugin.py:288 +#: neutron/plugins/ml2/plugin.py:284 #, python-format msgid "Failed to commit binding results for %(port)s after %(max)s tries" msgstr "" -#: neutron/plugins/ml2/plugin.py:450 +#: neutron/plugins/ml2/plugin.py:446 #, python-format msgid "Serialized vif_details DB value '%(value)s' for port %(port)s is invalid" msgstr "" -#: neutron/plugins/ml2/plugin.py:461 +#: neutron/plugins/ml2/plugin.py:457 #, python-format msgid "Serialized profile DB value '%(value)s' for port %(port)s is invalid" msgstr "" -#: neutron/plugins/ml2/plugin.py:547 +#: neutron/plugins/ml2/plugin.py:543 #, python-format msgid "Could not find %s to delete." 
msgstr "" -#: neutron/plugins/ml2/plugin.py:550 +#: neutron/plugins/ml2/plugin.py:546 #, python-format msgid "Could not delete %(res)s %(id)s." msgstr "" -#: neutron/plugins/ml2/plugin.py:583 +#: neutron/plugins/ml2/plugin.py:579 #, python-format msgid "" "mechanism_manager.create_%(res)s_postcommit failed for %(res)s: " "'%(failed_id)s'. Deleting %(res)ss %(resource_ids)s" msgstr "" -#: neutron/plugins/ml2/plugin.py:629 +#: neutron/plugins/ml2/plugin.py:625 #, python-format msgid "mechanism_manager.create_network_postcommit failed, deleting network '%s'" msgstr "" -#: neutron/plugins/ml2/plugin.py:699 +#: neutron/plugins/ml2/plugin.py:695 #, python-format msgid "Exception auto-deleting port %s" msgstr "" -#: neutron/plugins/ml2/plugin.py:712 +#: neutron/plugins/ml2/plugin.py:708 #, python-format msgid "Exception auto-deleting subnet %s" msgstr "" -#: neutron/plugins/ml2/plugin.py:794 +#: neutron/plugins/ml2/plugin.py:790 msgid "mechanism_manager.delete_network_postcommit failed" msgstr "" -#: neutron/plugins/ml2/plugin.py:815 +#: neutron/plugins/ml2/plugin.py:811 #, python-format msgid "mechanism_manager.create_subnet_postcommit failed, deleting subnet '%s'" msgstr "" -#: neutron/plugins/ml2/plugin.py:938 +#: neutron/plugins/ml2/plugin.py:934 #, python-format msgid "Exception deleting fixed_ip from port %s" msgstr "" -#: neutron/plugins/ml2/plugin.py:947 +#: neutron/plugins/ml2/plugin.py:943 msgid "mechanism_manager.delete_subnet_postcommit failed" msgstr "" -#: neutron/plugins/ml2/plugin.py:1012 +#: neutron/plugins/ml2/plugin.py:1008 #, python-format msgid "mechanism_manager.create_port_postcommit failed, deleting port '%s'" msgstr "" -#: neutron/plugins/ml2/plugin.py:1024 +#: neutron/plugins/ml2/plugin.py:1020 #, python-format msgid "_bind_port_if_needed failed, deleting port '%s'" msgstr "" -#: neutron/plugins/ml2/plugin.py:1055 +#: neutron/plugins/ml2/plugin.py:1051 #, python-format msgid "_bind_port_if_needed failed. 
Deleting all ports from create bulk '%s'" msgstr "" -#: neutron/plugins/ml2/plugin.py:1200 +#: neutron/plugins/ml2/plugin.py:1198 #, python-format msgid "No Host supplied to bind DVR Port %s" msgstr "" @@ -936,11 +933,11 @@ msgstr "" msgid "Binding info for DVR port %s not found" msgstr "" -#: neutron/plugins/ml2/drivers/type_gre.py:81 +#: neutron/plugins/ml2/drivers/type_gre.py:82 msgid "Failed to parse tunnel_id_ranges. Service terminated!" msgstr "" -#: neutron/plugins/ml2/drivers/type_gre.py:92 +#: neutron/plugins/ml2/drivers/type_gre.py:93 #, python-format msgid "Skipping unreasonable gre ID range %(tun_min)s:%(tun_max)s" msgstr "" @@ -1098,110 +1095,110 @@ msgid "" "a different subnet %(orig_subnet)s" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:384 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:404 msgid "No tunnel_type specified, cannot create tunnels" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:387 -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:410 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:407 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:430 #, python-format msgid "tunnel_type %s not supported by agent" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:403 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:423 msgid "No tunnel_ip specified, cannot delete tunnels" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:407 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:427 msgid "No tunnel_type specified, cannot delete tunnels" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:553 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:573 #, python-format msgid "No local VLAN available for net-id=%s" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:584 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:604 #, python-format msgid "" "Cannot provision 
%(network_type)s network for net-id=%(net_uuid)s - " "tunneling disabled" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:592 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:612 #, python-format msgid "" "Cannot provision flat network for net-id=%(net_uuid)s - no bridge for " "physical_network %(physical_network)s" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:602 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:622 #, python-format msgid "" "Cannot provision VLAN network for net-id=%(net_uuid)s - no bridge for " "physical_network %(physical_network)s" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:611 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:631 #, python-format msgid "" "Cannot provision unknown network type %(network_type)s for net-" "id=%(net_uuid)s" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:671 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:691 #, python-format msgid "" "Cannot reclaim unknown network type %(network_type)s for net-" "id=%(net_uuid)s" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:868 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:889 msgid "" "Failed to create OVS patch port. Cannot have tunneling enabled on this " "agent, since this version of OVS does not support tunnels or patch ports." " Agent terminated!" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:927 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:948 #, python-format msgid "" "Bridge %(bridge)s for physical network %(physical_network)s does not " "exist. Agent terminated!" 
msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1121 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1142 #, python-format msgid "Failed to set-up %(type)s tunnel port to %(ip)s" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1310 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1331 #, python-format msgid "" "process_network_ports - iteration:%d - failure while retrieving port " "details from server" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1346 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1367 #, python-format msgid "" "process_ancillary_network_ports - iteration:%d - failure while retrieving" " port details from server" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1488 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1509 msgid "Error while synchronizing tunnels" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1563 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1585 msgid "Error while processing VIF ports" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1652 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1682 msgid "Agent failed to create agent config map" msgstr "" -#: neutron/plugins/sriovnicagent/eswitch_manager.py:48 +#: neutron/plugins/sriovnicagent/eswitch_manager.py:49 #, python-format msgid "Failed to get devices for %s" msgstr "" diff --git a/neutron/locale/neutron-log-info.pot b/neutron/locale/neutron-log-info.pot index 056383ecbc4..215c34c4342 100644 --- a/neutron/locale/neutron-log-info.pot +++ b/neutron/locale/neutron-log-info.pot @@ -6,9 +6,9 @@ #, fuzzy msgid "" msgstr "" -"Project-Id-Version: neutron 2015.2.0.dev485\n" +"Project-Id-Version: neutron 2015.2.0.dev464\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2015-05-29 06:16+0000\n" +"POT-Creation-Date: 2015-06-11 06:02+0000\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" 
"Last-Translator: FULL NAME \n" "Language-Team: LANGUAGE \n" @@ -17,28 +17,21 @@ msgstr "" "Content-Transfer-Encoding: 8bit\n" "Generated-By: Babel 1.3\n" -#: neutron/manager.py:115 +#: neutron/manager.py:116 #, python-format msgid "Loading core plugin: %s" msgstr "" -#: neutron/manager.py:155 +#: neutron/manager.py:164 #, python-format msgid "Service %s is supported by the core plugin" msgstr "" -#: neutron/manager.py:173 +#: neutron/manager.py:182 #, python-format msgid "Loading Plugin: %s" msgstr "" -#: neutron/policy.py:114 -#, python-format -msgid "" -"Inserting policy:%(new_policy)s in place of deprecated " -"policy:%(old_policy)s" -msgstr "" - #: neutron/quota.py:221 msgid "" "ConfDriver is used as quota_driver because the loaded plugin does not " @@ -50,27 +43,27 @@ msgstr "" msgid "Loaded quota_driver: %s." msgstr "" -#: neutron/service.py:179 +#: neutron/service.py:181 #, python-format msgid "Neutron service started, listening on %(host)s:%(port)s" msgstr "" -#: neutron/wsgi.py:781 +#: neutron/wsgi.py:792 #, python-format msgid "%(method)s %(url)s" msgstr "" -#: neutron/wsgi.py:798 +#: neutron/wsgi.py:809 #, python-format msgid "HTTP exception thrown: %s" msgstr "" -#: neutron/wsgi.py:814 +#: neutron/wsgi.py:825 #, python-format msgid "%(url)s returned with HTTP %(status)d" msgstr "" -#: neutron/wsgi.py:817 +#: neutron/wsgi.py:828 #, python-format msgid "%(url)s returned a fault: %(exception)s" msgstr "" @@ -122,12 +115,12 @@ msgstr "" msgid "No ports here to refresh firewall" msgstr "" -#: neutron/agent/common/ovs_lib.py:421 +#: neutron/agent/common/ovs_lib.py:423 #, python-format msgid "Port %(port_id)s not present in bridge %(br_name)s" msgstr "" -#: neutron/agent/dhcp/agent.py:93 neutron/agent/dhcp/agent.py:594 +#: neutron/agent/dhcp/agent.py:93 neutron/agent/dhcp/agent.py:585 msgid "DHCP agent started" msgstr "" @@ -139,13 +132,13 @@ msgstr "" msgid "Synchronizing state complete" msgstr "" -#: neutron/agent/dhcp/agent.py:591 
neutron/agent/l3/agent.py:619 +#: neutron/agent/dhcp/agent.py:582 neutron/agent/l3/agent.py:641 #: neutron/services/metering/agents/metering_agent.py:286 #, python-format msgid "agent_updated by server side %s!" msgstr "" -#: neutron/agent/l3/agent.py:550 +#: neutron/agent/l3/agent.py:563 neutron/agent/l3/agent.py:631 msgid "L3 agent started" msgstr "" @@ -166,24 +159,24 @@ msgstr "" msgid "Process runs with uid/gid: %(uid)s/%(gid)s" msgstr "" -#: neutron/agent/linux/dhcp.py:745 +#: neutron/agent/linux/dhcp.py:749 #, python-format msgid "" "Cannot apply dhcp option %(opt)s because it's ip_version %(version)d is " "not in port's address IP versions" msgstr "" -#: neutron/agent/linux/interface.py:196 +#: neutron/agent/linux/interface.py:164 #, python-format msgid "Device %s already exists" msgstr "" -#: neutron/agent/linux/iptables_firewall.py:114 +#: neutron/agent/linux/iptables_firewall.py:115 #, python-format msgid "Attempted to update port filter which is not filtered %s" msgstr "" -#: neutron/agent/linux/iptables_firewall.py:125 +#: neutron/agent/linux/iptables_firewall.py:126 #, python-format msgid "Attempted to remove port filter which is not filtered %r" msgstr "" @@ -197,7 +190,7 @@ msgstr "" msgid "Loaded extension: %s" msgstr "" -#: neutron/api/v2/base.py:93 +#: neutron/api/v2/base.py:94 msgid "Allow sorting is enabled because native pagination requires native sorting" msgstr "" @@ -221,23 +214,23 @@ msgid "OVS cleanup completed successfully" msgstr "" #: neutron/cmd/eventlet/plugins/hyperv_neutron_agent.py:43 -#: neutron/plugins/ibm/agent/sdnve_neutron_agent.py:261 -#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:1025 -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1570 +#: neutron/plugins/ibm/agent/sdnve_neutron_agent.py:262 +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:1057 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1594 #: neutron/plugins/sriovnicagent/sriov_nic_agent.py:345 msgid "Agent 
initialized successfully, now running... " msgstr "" -#: neutron/common/config.py:204 +#: neutron/common/config.py:207 msgid "Logging enabled!" msgstr "" -#: neutron/common/config.py:205 +#: neutron/common/config.py:208 #, python-format msgid "%(prog)s version %(version)s" msgstr "" -#: neutron/common/config.py:224 +#: neutron/common/config.py:235 #, python-format msgid "Config paste file: %s" msgstr "" @@ -269,51 +262,51 @@ msgstr "" msgid "Adding network %(net)s to agent %(agent)s on host %(host)s" msgstr "" -#: neutron/db/db_base_plugin_v2.py:642 +#: neutron/db/db_base_plugin_v2.py:201 #, python-format msgid "" "Validation for CIDR: %(new_cidr)s failed - overlaps with subnet " "%(subnet_id)s (CIDR: %(cidr)s)" msgstr "" -#: neutron/db/db_base_plugin_v2.py:679 -#, python-format -msgid "Found invalid IP address in pool: %(start)s - %(end)s:" -msgstr "" - -#: neutron/db/db_base_plugin_v2.py:686 -msgid "Specified IP addresses do not match the subnet IP version" -msgstr "" - -#: neutron/db/db_base_plugin_v2.py:690 -#, python-format -msgid "Start IP (%(start)s) is greater than end IP (%(end)s)" -msgstr "" - -#: neutron/db/db_base_plugin_v2.py:695 -#, python-format -msgid "Found pool larger than subnet CIDR:%(start)s - %(end)s" -msgstr "" - -#: neutron/db/db_base_plugin_v2.py:719 -#, python-format -msgid "Found overlapping ranges: %(l_range)s and %(r_range)s" -msgstr "" - -#: neutron/db/db_base_plugin_v2.py:1639 neutron/plugins/ml2/plugin.py:895 +#: neutron/db/db_base_plugin_v2.py:959 neutron/plugins/ml2/plugin.py:891 #, python-format msgid "" "Found port (%(port_id)s, %(ip)s) having IP allocation on subnet " "%(subnet)s, cannot delete" msgstr "" -#: neutron/db/l3_agentschedulers_db.py:78 +#: neutron/db/ipam_backend_mixin.py:103 +#, python-format +msgid "Found invalid IP address in pool: %(start)s - %(end)s:" +msgstr "" + +#: neutron/db/ipam_backend_mixin.py:110 +msgid "Specified IP addresses do not match the subnet IP version" +msgstr "" + +#: 
neutron/db/ipam_backend_mixin.py:114 +#, python-format +msgid "Start IP (%(start)s) is greater than end IP (%(end)s)" +msgstr "" + +#: neutron/db/ipam_backend_mixin.py:119 +#, python-format +msgid "Found pool larger than subnet CIDR:%(start)s - %(end)s" +msgstr "" + +#: neutron/db/ipam_backend_mixin.py:143 +#, python-format +msgid "Found overlapping ranges: %(l_range)s and %(r_range)s" +msgstr "" + +#: neutron/db/l3_agentschedulers_db.py:79 msgid "" "Skipping period L3 agent status check because automatic router " "rescheduling is disabled." msgstr "" -#: neutron/db/l3_db.py:1160 +#: neutron/db/l3_db.py:1161 #, python-format msgid "Skipping port %s as no IP is configure on it" msgstr "" @@ -536,204 +529,208 @@ msgstr "" msgid "Set the controller to a new controller: %s" msgstr "" -#: neutron/plugins/ibm/agent/sdnve_neutron_agent.py:189 +#: neutron/plugins/ibm/agent/sdnve_neutron_agent.py:190 #, python-format msgid "Mapping physical network %(physical_network)s to interface %(interface)s" msgstr "" -#: neutron/plugins/ibm/agent/sdnve_neutron_agent.py:220 +#: neutron/plugins/ibm/agent/sdnve_neutron_agent.py:221 #, python-format msgid "Loop iteration exceeded interval (%(polling_interval)s vs. %(elapsed)s)!" msgstr "" -#: neutron/plugins/ibm/agent/sdnve_neutron_agent.py:234 +#: neutron/plugins/ibm/agent/sdnve_neutron_agent.py:235 #, python-format msgid "Controller IPs: %s" msgstr "" -#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:801 +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:796 +msgid "Stopping linuxbridge agent." 
+msgstr "" + +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:826 #: neutron/plugins/oneconvergence/agent/nvsd_neutron_agent.py:89 #: neutron/plugins/sriovnicagent/sriov_nic_agent.py:99 #, python-format msgid "RPC agent_id: %s" msgstr "" -#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:871 -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1187 +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:896 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1210 #: neutron/plugins/sriovnicagent/sriov_nic_agent.py:209 #, python-format msgid "Port %(device)s updated. Details: %(details)s" msgstr "" -#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:904 +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:929 #, python-format msgid "Device %s not defined on plugin" msgstr "" -#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:911 -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1234 -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1251 +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:936 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1257 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1274 #, python-format msgid "Attachment %s removed" msgstr "" -#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:923 -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1263 +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:948 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1286 #: neutron/plugins/sriovnicagent/sriov_nic_agent.py:235 #, python-format msgid "Port %s updated." msgstr "" -#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:976 +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:1001 msgid "LinuxBridge Agent RPC Daemon Started!" 
msgstr "" -#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:986 -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1454 +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:1011 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1477 #: neutron/plugins/sriovnicagent/sriov_nic_agent.py:251 msgid "Agent out of sync with plugin!" msgstr "" -#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:1020 +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:1050 #: neutron/plugins/ml2/drivers/mlnx/agent/eswitch_neutron_agent.py:43 #, python-format msgid "Interface mappings: %s" msgstr "" -#: neutron/plugins/ml2/db.py:59 +#: neutron/plugins/ml2/db.py:60 #, python-format msgid "Added segment %(id)s of type %(network_type)s for network %(network_id)s" msgstr "" -#: neutron/plugins/ml2/managers.py:45 +#: neutron/plugins/ml2/managers.py:46 #, python-format msgid "Configured type driver names: %s" msgstr "" -#: neutron/plugins/ml2/managers.py:50 +#: neutron/plugins/ml2/managers.py:51 #, python-format msgid "Loaded type driver names: %s" msgstr "" -#: neutron/plugins/ml2/managers.py:67 +#: neutron/plugins/ml2/managers.py:68 #, python-format msgid "Registered types: %s" msgstr "" -#: neutron/plugins/ml2/managers.py:78 +#: neutron/plugins/ml2/managers.py:79 #, python-format msgid "Tenant network_types: %s" msgstr "" -#: neutron/plugins/ml2/managers.py:168 +#: neutron/plugins/ml2/managers.py:169 #, python-format msgid "Initializing driver for type '%s'" msgstr "" -#: neutron/plugins/ml2/managers.py:293 +#: neutron/plugins/ml2/managers.py:294 #, python-format msgid "Configured mechanism driver names: %s" msgstr "" -#: neutron/plugins/ml2/managers.py:299 +#: neutron/plugins/ml2/managers.py:300 #, python-format msgid "Loaded mechanism driver names: %s" msgstr "" -#: neutron/plugins/ml2/managers.py:311 +#: neutron/plugins/ml2/managers.py:312 #, python-format msgid "Registered mechanism drivers: %s" msgstr "" -#: 
neutron/plugins/ml2/managers.py:316 +#: neutron/plugins/ml2/managers.py:317 #, python-format msgid "Initializing mechanism driver '%s'" msgstr "" -#: neutron/plugins/ml2/managers.py:725 +#: neutron/plugins/ml2/managers.py:726 #, python-format msgid "Configured extension driver names: %s" msgstr "" -#: neutron/plugins/ml2/managers.py:731 +#: neutron/plugins/ml2/managers.py:732 #, python-format msgid "Loaded extension driver names: %s" msgstr "" -#: neutron/plugins/ml2/managers.py:742 +#: neutron/plugins/ml2/managers.py:743 #, python-format msgid "Registered extension drivers: %s" msgstr "" -#: neutron/plugins/ml2/managers.py:748 +#: neutron/plugins/ml2/managers.py:749 #, python-format msgid "Initializing extension driver '%s'" msgstr "" -#: neutron/plugins/ml2/managers.py:756 +#: neutron/plugins/ml2/managers.py:757 #, python-format msgid "Got %(alias)s extension from driver '%(drv)s'" msgstr "" -#: neutron/plugins/ml2/managers.py:805 +#: neutron/plugins/ml2/managers.py:806 #, python-format msgid "Extended network dict for driver '%(drv)s'" msgstr "" -#: neutron/plugins/ml2/managers.py:812 +#: neutron/plugins/ml2/managers.py:813 #, python-format msgid "Extended subnet dict for driver '%(drv)s'" msgstr "" -#: neutron/plugins/ml2/managers.py:819 +#: neutron/plugins/ml2/managers.py:820 #, python-format msgid "Extended port dict for driver '%(drv)s'" msgstr "" -#: neutron/plugins/ml2/plugin.py:143 +#: neutron/plugins/ml2/plugin.py:139 msgid "Modular L2 Plugin initialization complete" msgstr "" -#: neutron/plugins/ml2/plugin.py:294 +#: neutron/plugins/ml2/plugin.py:290 #, python-format msgid "Attempt %(count)s to bind port %(port)s" msgstr "" -#: neutron/plugins/ml2/plugin.py:696 +#: neutron/plugins/ml2/plugin.py:692 #, python-format msgid "Port %s was deleted concurrently" msgstr "" -#: neutron/plugins/ml2/plugin.py:708 +#: neutron/plugins/ml2/plugin.py:704 #, python-format msgid "Subnet %s was deleted concurrently" msgstr "" -#: neutron/plugins/ml2/plugin.py:1366 +#: 
neutron/plugins/ml2/plugin.py:1367 #, python-format msgid "" "Binding info for port %s was not found, it might have been deleted " "already." msgstr "" -#: neutron/plugins/ml2/drivers/type_flat.py:72 +#: neutron/plugins/ml2/drivers/type_flat.py:73 msgid "Arbitrary flat physical_network names allowed" msgstr "" -#: neutron/plugins/ml2/drivers/type_flat.py:78 +#: neutron/plugins/ml2/drivers/type_flat.py:79 #, python-format msgid "Allowable flat physical_network names: %s" msgstr "" -#: neutron/plugins/ml2/drivers/type_flat.py:85 +#: neutron/plugins/ml2/drivers/type_flat.py:86 msgid "ML2 FlatTypeDriver initialization complete" msgstr "" -#: neutron/plugins/ml2/drivers/type_local.py:37 +#: neutron/plugins/ml2/drivers/type_local.py:38 msgid "ML2 LocalTypeDriver initialization complete" msgstr "" -#: neutron/plugins/ml2/drivers/type_tunnel.py:116 +#: neutron/plugins/ml2/drivers/type_tunnel.py:113 #, python-format msgid "%(type)s ID ranges: %(range)s" msgstr "" @@ -816,64 +813,72 @@ msgstr "" msgid "L2 Agent operating in DVR Mode with MAC %s" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:560 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:582 #, python-format msgid "Assigning %(vlan_id)s as local vlan for net-id=%(net_uuid)s" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:624 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:646 #, python-format msgid "Reclaiming vlan = %(vlan_id)s from net-id = %(net_uuid)s" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:743 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:765 #, python-format msgid "Configuration for device %s completed." 
msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:750 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:772 #, python-format msgid "" "Skipping ARP spoofing rules for port '%s' because it has port security " "disabled" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:778 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:800 #, python-format msgid "port_unbound(): net_uuid %s not in local_vlan_map" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:843 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:866 #, python-format msgid "Adding %s to list of bridges." msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:919 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:942 #, python-format msgid "Mapping physical network %(physical_network)s to bridge %(bridge)s" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1067 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1090 #, python-format msgid "Port '%(port_name)s' has lost its vlan tag '%(vlan_tag)d'!" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1181 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1204 #, python-format msgid "" "Port %s was not found on the integration bridge and will therefore not be" " processed" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1222 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1245 #, python-format msgid "Ancillary Port %s added" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1482 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1505 msgid "Agent tunnel out of sync with plugin!" msgstr "" +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1604 +msgid "Agent caught SIGTERM, quitting daemon loop." +msgstr "" + +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1610 +msgid "Agent caught SIGHUP, resetting." 
+msgstr "" + #: neutron/plugins/sriovnicagent/sriov_nic_agent.py:191 #, python-format msgid "No device with MAC %s defined on agent." diff --git a/neutron/locale/neutron-log-warning.pot b/neutron/locale/neutron-log-warning.pot index 0d10ecfd440..6fc2e6acec3 100644 --- a/neutron/locale/neutron-log-warning.pot +++ b/neutron/locale/neutron-log-warning.pot @@ -6,9 +6,9 @@ #, fuzzy msgid "" msgstr "" -"Project-Id-Version: neutron 2015.2.0.dev422\n" +"Project-Id-Version: neutron 2015.2.0.dev464\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2015-05-25 06:15+0000\n" +"POT-Creation-Date: 2015-06-11 06:02+0000\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME \n" "Language-Team: LANGUAGE \n" @@ -17,14 +17,7 @@ msgstr "" "Content-Transfer-Encoding: 8bit\n" "Generated-By: Babel 1.3\n" -#: neutron/policy.py:102 -#, python-format -msgid "" -"Found deprecated policy rule:%s. Please consider upgrading your policy " -"configuration file" -msgstr "" - -#: neutron/policy.py:160 +#: neutron/policy.py:115 #, python-format msgid "Unable to find data type descriptor for attribute %s" msgstr "" @@ -66,17 +59,17 @@ msgid "" "falling back to old security_group_rules_for_devices which scales worse." 
msgstr "" -#: neutron/agent/common/ovs_lib.py:361 +#: neutron/agent/common/ovs_lib.py:368 #, python-format msgid "Found not yet ready openvswitch port: %s" msgstr "" -#: neutron/agent/common/ovs_lib.py:364 +#: neutron/agent/common/ovs_lib.py:371 #, python-format msgid "Found failed openvswitch port: %s" msgstr "" -#: neutron/agent/common/ovs_lib.py:410 +#: neutron/agent/common/ovs_lib.py:417 #, python-format msgid "ofport: %(ofport)s for VIF: %(vif)s is not a positive integer" msgstr "" @@ -108,7 +101,7 @@ msgid "" "port %(port_id)s, for router %(router_id)s will be considered" msgstr "" -#: neutron/agent/dhcp/agent.py:576 neutron/agent/l3/agent.py:609 +#: neutron/agent/dhcp/agent.py:567 neutron/agent/l3/agent.py:622 #: neutron/agent/metadata/agent.py:306 #: neutron/services/metering/agents/metering_agent.py:278 msgid "" @@ -116,7 +109,7 @@ msgid "" " will be disabled." msgstr "" -#: neutron/agent/l3/agent.py:192 +#: neutron/agent/l3/agent.py:193 #, python-format msgid "" "l3-agent cannot check service plugins enabled at the neutron server when " @@ -125,29 +118,29 @@ msgid "" "warning. Detail message: %s" msgstr "" -#: neutron/agent/l3/agent.py:204 +#: neutron/agent/l3/agent.py:205 #, python-format msgid "" "l3-agent cannot check service plugins enabled on the neutron server. " "Retrying. Detail message: %s" msgstr "" -#: neutron/agent/l3/agent.py:334 +#: neutron/agent/l3/agent.py:337 #, python-format msgid "Info for router %s was not found. 
Skipping router removal" msgstr "" -#: neutron/agent/l3/router_info.py:206 +#: neutron/agent/l3/router_info.py:208 #, python-format msgid "Unable to configure IP address for floating IP: %s" msgstr "" -#: neutron/agent/linux/dhcp.py:227 +#: neutron/agent/linux/dhcp.py:228 #, python-format msgid "Failed trying to delete interface: %s" msgstr "" -#: neutron/agent/linux/dhcp.py:235 +#: neutron/agent/linux/dhcp.py:236 #, python-format msgid "Failed trying to delete namespace: %s" msgstr "" @@ -158,14 +151,14 @@ msgid "Attempted to remove chain %s which does not exist" msgstr "" #: neutron/agent/linux/ebtables_manager.py:237 -#: neutron/agent/linux/iptables_manager.py:247 +#: neutron/agent/linux/iptables_manager.py:249 #, python-format msgid "" "Tried to remove rule that was not there: %(chain)r %(rule)r %(wrap)r " "%(top)r" msgstr "" -#: neutron/agent/linux/iptables_manager.py:696 +#: neutron/agent/linux/iptables_manager.py:702 #, python-format msgid "Attempted to get traffic counters of chain %s which does not exist" msgstr "" @@ -180,7 +173,7 @@ msgid "" "usually occurs when shared secrets do not match." msgstr "" -#: neutron/api/api_common.py:102 +#: neutron/api/api_common.py:103 #, python-format msgid "" "Invalid value for pagination_max_limit: %s. It should be an integer " @@ -222,20 +215,19 @@ msgid "" "inactive agents." msgstr "" -#: neutron/api/rpc/handlers/dhcp_rpc.py:94 +#: neutron/api/rpc/handlers/dhcp_rpc.py:100 #, python-format msgid "" "Action %(action)s for network %(net_id)s could not complete successfully:" " %(reason)s" msgstr "" -#: neutron/api/rpc/handlers/dhcp_rpc.py:146 -#: neutron/api/rpc/handlers/dhcp_rpc.py:210 +#: neutron/api/rpc/handlers/dhcp_rpc.py:152 #, python-format msgid "Network %s could not be found, it might have been deleted concurrently." msgstr "" -#: neutron/api/rpc/handlers/dhcp_rpc.py:279 +#: neutron/api/rpc/handlers/dhcp_rpc.py:203 #, python-format msgid "Updating lease expiration is now deprecated. Issued from host %s." 
msgstr "" @@ -271,14 +263,14 @@ msgid "" "in case there was a clock adjustment." msgstr "" -#: neutron/db/agentschedulers_db.py:274 +#: neutron/db/agentschedulers_db.py:275 #, python-format msgid "" "Removing network %(network)s from agent %(agent)s because the agent did " "not report to the server in the last %(dead_time)s seconds." msgstr "" -#: neutron/db/l3_agentschedulers_db.py:104 +#: neutron/db/l3_agentschedulers_db.py:105 #, python-format msgid "" "Rescheduling router %(router)s from agent %(agent)s because the agent did" @@ -289,7 +281,7 @@ msgstr "" msgid "No active L3 agents found for SNAT" msgstr "" -#: neutron/db/securitygroups_rpc_base.py:372 +#: neutron/db/securitygroups_rpc_base.py:383 #, python-format msgid "No valid gateway port on subnet %s is found for IPv6 RA" msgstr "" @@ -363,36 +355,36 @@ msgstr "" msgid "Ignoring admin_state_up=False for router=%r. Overriding with True" msgstr "" -#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:84 +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:85 msgid "VXLAN is enabled, a valid local_ip must be provided" msgstr "" -#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:98 +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:99 msgid "Invalid Network ID, will lead to incorrect bridge name" msgstr "" -#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:105 +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:106 msgid "Invalid VLAN ID, will lead to incorrect subinterface name" msgstr "" -#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:112 +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:113 msgid "Invalid Interface ID, will lead to incorrect tap device name" msgstr "" -#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:121 +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:122 #, python-format msgid "Invalid Segmentation ID: %s, will lead to incorrect 
vxlan device name" msgstr "" -#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:526 -#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:562 +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:527 +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:563 #, python-format msgid "" "Option \"%(option)s\" must be supported by command \"%(command)s\" to " "enable %(mode)s mode" msgstr "" -#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:556 +#: neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py:557 msgid "" "VXLAN muticast group must be provided in vxlan_group option to enable " "VXLAN MCAST mode" @@ -403,23 +395,23 @@ msgstr "" msgid "Could not expand segment %s" msgstr "" -#: neutron/plugins/ml2/plugin.py:532 +#: neutron/plugins/ml2/plugin.py:527 #, python-format msgid "" "In _notify_port_updated(), no bound segment for port %(port_id)s on " "network %(network_id)s" msgstr "" -#: neutron/plugins/ml2/plugin.py:783 +#: neutron/plugins/ml2/plugin.py:778 msgid "A concurrent port creation has occurred" msgstr "" -#: neutron/plugins/ml2/plugin.py:1391 +#: neutron/plugins/ml2/plugin.py:1396 #, python-format msgid "Port %(port)s updated up by agent not found" msgstr "" -#: neutron/plugins/ml2/plugin.py:1423 +#: neutron/plugins/ml2/plugin.py:1428 #, python-format msgid "Port %s not found during update" msgstr "" @@ -442,35 +434,30 @@ msgstr "" msgid "Attempting to bind with dead agent: %s" msgstr "" -#: neutron/plugins/ml2/drivers/type_flat.py:134 +#: neutron/plugins/ml2/drivers/type_flat.py:135 #, python-format msgid "No flat network found on physical network %s" msgstr "" -#: neutron/plugins/ml2/drivers/type_gre.py:106 +#: neutron/plugins/ml2/drivers/type_gre.py:107 msgid "Gre allocations were already created." 
msgstr "" -#: neutron/plugins/ml2/drivers/type_gre.py:160 -#, python-format -msgid "Gre endpoint with ip %s already exists" -msgstr "" - -#: neutron/plugins/ml2/drivers/type_tunnel.py:182 +#: neutron/plugins/ml2/drivers/type_tunnel.py:179 #, python-format msgid "%(type)s tunnel %(id)s not found" msgstr "" +#: neutron/plugins/ml2/drivers/type_tunnel.py:236 +#, python-format +msgid "Endpoint with ip %s already exists" +msgstr "" + #: neutron/plugins/ml2/drivers/type_vlan.py:257 #, python-format msgid "No vlan_id %(vlan_id)s found on physical network %(physical_network)s" msgstr "" -#: neutron/plugins/ml2/drivers/type_vxlan.py:168 -#, python-format -msgid "Vxlan endpoint with ip %s already exists" -msgstr "" - #: neutron/plugins/ml2/drivers/cisco/apic/apic_sync.py:67 #, python-format msgid "Create network postcommit failed for network %s" @@ -529,57 +516,57 @@ msgid "" " %s" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_dvr_neutron_agent.py:189 +#: neutron/plugins/openvswitch/agent/ovs_dvr_neutron_agent.py:190 #, python-format msgid "" "L2 agent could not get DVR MAC address from server. Retrying. Detailed " "message: %s" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:516 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:525 #, python-format msgid "Action %s not supported" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1010 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:920 #, python-format msgid "" "Creating an interface named %(name)s exceeds the %(limit)d character " "limitation. It was shortened to %(new_name)s to fit." 
msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1211 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1120 #, python-format msgid "VIF port: %s has no ofport configured, and might not be able to transmit" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1323 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1228 #, python-format msgid "Device %s not defined on plugin" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1483 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1388 #, python-format msgid "Invalid remote IP: %s" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1526 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1431 msgid "OVS is restarted. OVSNeutronAgent will reset bridges and recover ports." msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1530 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1434 msgid "" "OVS is dead. OVSNeutronAgent will keep running and checking OVS status " "periodically." 
msgstr "" -#: neutron/plugins/sriovnicagent/eswitch_manager.py:147 -#: neutron/plugins/sriovnicagent/eswitch_manager.py:160 +#: neutron/plugins/sriovnicagent/eswitch_manager.py:148 +#: neutron/plugins/sriovnicagent/eswitch_manager.py:161 #, python-format msgid "Cannot find vf index for pci slot %s" msgstr "" -#: neutron/plugins/sriovnicagent/eswitch_manager.py:283 +#: neutron/plugins/sriovnicagent/eswitch_manager.py:284 #, python-format msgid "device pci mismatch: %(device_mac)s - %(pci_slot)s" msgstr "" diff --git a/neutron/locale/neutron.pot b/neutron/locale/neutron.pot index abd550bbcf3..07a84488698 100644 --- a/neutron/locale/neutron.pot +++ b/neutron/locale/neutron.pot @@ -6,9 +6,9 @@ #, fuzzy msgid "" msgstr "" -"Project-Id-Version: neutron 2015.2.0.dev533\n" +"Project-Id-Version: neutron 2015.2.0.dev464\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2015-06-02 06:15+0000\n" +"POT-Creation-Date: 2015-06-11 06:02+0000\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME \n" "Language-Team: LANGUAGE \n" @@ -17,42 +17,42 @@ msgstr "" "Content-Transfer-Encoding: 8bit\n" "Generated-By: Babel 1.3\n" -#: neutron/context.py:102 +#: neutron/context.py:97 #, python-format msgid "read_deleted can only be one of 'no', 'yes' or 'only', not %r" msgstr "" -#: neutron/manager.py:74 +#: neutron/manager.py:75 #, python-format msgid "dhcp_agents_per_network must be >= 1. '%s' is invalid." msgstr "" -#: neutron/manager.py:86 +#: neutron/manager.py:87 msgid "Neutron core_plugin not configured!" msgstr "" -#: neutron/manager.py:134 neutron/manager.py:146 +#: neutron/manager.py:135 neutron/manager.py:147 msgid "Plugin not found." msgstr "" -#: neutron/manager.py:189 +#: neutron/manager.py:190 #, python-format msgid "Multiple plugins for service %s were configured" msgstr "" -#: neutron/policy.py:246 +#: neutron/policy.py:201 #, python-format msgid "" "Unable to identify a target field from:%s. 
Match should be in the form " "%%()s" msgstr "" -#: neutron/policy.py:276 +#: neutron/policy.py:231 #, python-format msgid "Unable to find resource name in %s" msgstr "" -#: neutron/policy.py:285 +#: neutron/policy.py:240 #, python-format msgid "" "Unable to verify match:%(match)s as the parent resource: %(res)s was not " @@ -91,59 +91,59 @@ msgstr "" msgid "Access to this resource was denied." msgstr "" -#: neutron/service.py:42 +#: neutron/service.py:41 msgid "Seconds between running periodic tasks" msgstr "" -#: neutron/service.py:45 +#: neutron/service.py:44 msgid "Number of separate API worker processes for service" msgstr "" -#: neutron/service.py:48 +#: neutron/service.py:47 msgid "Number of RPC worker processes for service" msgstr "" -#: neutron/service.py:51 +#: neutron/service.py:50 msgid "" "Range of seconds to randomly delay when starting the periodic task " "scheduler to reduce stampeding. (Disable by setting to 0)" msgstr "" -#: neutron/wsgi.py:49 +#: neutron/wsgi.py:51 msgid "Number of backlog requests to configure the socket with" msgstr "" -#: neutron/wsgi.py:53 +#: neutron/wsgi.py:55 msgid "" "Sets the value of TCP_KEEPIDLE in seconds for each server socket. Not " "supported on OS X." 
msgstr "" -#: neutron/wsgi.py:57 +#: neutron/wsgi.py:59 msgid "Number of seconds to keep retrying to listen" msgstr "" -#: neutron/wsgi.py:60 +#: neutron/wsgi.py:62 msgid "Max header line to accommodate large tokens" msgstr "" -#: neutron/wsgi.py:63 +#: neutron/wsgi.py:65 msgid "Enable SSL on the API server" msgstr "" -#: neutron/wsgi.py:65 +#: neutron/wsgi.py:67 msgid "CA certificate file to use to verify connecting clients" msgstr "" -#: neutron/wsgi.py:68 +#: neutron/wsgi.py:70 msgid "Certificate file to use when starting the server securely" msgstr "" -#: neutron/wsgi.py:71 +#: neutron/wsgi.py:73 msgid "Private key file to use when starting the server securely" msgstr "" -#: neutron/wsgi.py:75 +#: neutron/wsgi.py:77 msgid "" "Determines if connections are allowed to be held open by clients after a " "request is fulfilled. A value of False will ensure that the socket " @@ -151,62 +151,62 @@ msgid "" " client." msgstr "" -#: neutron/wsgi.py:81 +#: neutron/wsgi.py:83 msgid "" "Timeout for client connections socket operations. If an incoming " "connection is idle for this number of seconds it will be closed. A value " "of '0' means wait forever." 
msgstr "" -#: neutron/wsgi.py:169 +#: neutron/wsgi.py:176 #, python-format msgid "Could not bind to %(host)s:%(port)s after trying for %(time)d seconds" msgstr "" -#: neutron/wsgi.py:189 +#: neutron/wsgi.py:196 #, python-format msgid "Unable to find ssl_cert_file : %s" msgstr "" -#: neutron/wsgi.py:195 +#: neutron/wsgi.py:202 #, python-format msgid "Unable to find ssl_key_file : %s" msgstr "" -#: neutron/wsgi.py:200 +#: neutron/wsgi.py:207 #, python-format msgid "Unable to find ssl_ca_file : %s" msgstr "" -#: neutron/wsgi.py:484 +#: neutron/wsgi.py:495 msgid "Cannot understand JSON" msgstr "" -#: neutron/wsgi.py:650 +#: neutron/wsgi.py:661 msgid "You must implement __call__" msgstr "" -#: neutron/wsgi.py:738 neutron/api/v2/base.py:195 neutron/api/v2/base.py:343 -#: neutron/api/v2/base.py:486 neutron/api/v2/base.py:546 +#: neutron/wsgi.py:749 neutron/api/v2/base.py:196 neutron/api/v2/base.py:344 +#: neutron/api/v2/base.py:487 neutron/api/v2/base.py:547 #: neutron/extensions/l3agentscheduler.py:51 #: neutron/extensions/l3agentscheduler.py:94 msgid "The resource could not be found." msgstr "" -#: neutron/wsgi.py:787 +#: neutron/wsgi.py:798 msgid "Unsupported Content-Type" msgstr "" -#: neutron/wsgi.py:791 +#: neutron/wsgi.py:802 msgid "Malformed request body" msgstr "" -#: neutron/wsgi.py:928 +#: neutron/wsgi.py:939 #, python-format msgid "The requested content type %s is invalid." 
msgstr "" -#: neutron/wsgi.py:981 +#: neutron/wsgi.py:992 msgid "Could not deserialize data" msgstr "" @@ -275,20 +275,20 @@ msgstr "" msgid "Timeout in seconds for ovs-vsctl commands" msgstr "" -#: neutron/agent/common/ovs_lib.py:440 +#: neutron/agent/common/ovs_lib.py:441 #, python-format msgid "Unable to determine mac address for %s" msgstr "" -#: neutron/agent/common/ovs_lib.py:548 +#: neutron/agent/common/ovs_lib.py:549 msgid "Cannot match priority on flow deletion or modification" msgstr "" -#: neutron/agent/common/ovs_lib.py:553 +#: neutron/agent/common/ovs_lib.py:554 msgid "Must specify one or more actions on flow addition or modification" msgstr "" -#: neutron/agent/dhcp/agent.py:589 +#: neutron/agent/dhcp/agent.py:580 #, python-format msgid "Agent updated: %(payload)s" msgstr "" @@ -343,7 +343,7 @@ msgstr "" msgid "Use broadcast in DHCP replies" msgstr "" -#: neutron/agent/l3/agent.py:278 +#: neutron/agent/l3/agent.py:279 msgid "" "The 'gateway_external_network_id' option must be configured for this " "agent as Neutron has more than one external network." @@ -537,7 +537,7 @@ msgid "Location of temporary ebtables table files." msgstr "" #: neutron/agent/linux/ebtables_manager.py:210 -#: neutron/agent/linux/iptables_manager.py:211 +#: neutron/agent/linux/iptables_manager.py:212 #, python-format msgid "Unknown chain: %r" msgstr "" @@ -586,7 +586,7 @@ msgid "Authentication URL" msgstr "" #: neutron/agent/linux/interface.py:60 neutron/agent/metadata/config.py:63 -#: neutron/common/config.py:49 neutron/plugins/metaplugin/common/config.py:72 +#: neutron/common/config.py:50 neutron/plugins/metaplugin/common/config.py:72 msgid "The type of authentication to use" msgstr "" @@ -641,7 +641,7 @@ msgstr "" msgid "Location to store IPv6 RA config files" msgstr "" -#: neutron/agent/linux/utils.py:119 +#: neutron/agent/linux/utils.py:120 msgid "" "\n" "Command: {cmd}\n" @@ -796,7 +796,7 @@ msgid "" " log file." 
msgstr "" -#: neutron/agent/ovsdb/api.py:30 +#: neutron/agent/ovsdb/api.py:32 msgid "The interface for interacting with the OVSDB" msgstr "" @@ -833,33 +833,33 @@ msgid "" "Stderr: %(stderr)s" msgstr "" -#: neutron/api/api_common.py:116 +#: neutron/api/api_common.py:117 #, python-format msgid "Limit must be an integer 0 or greater and not '%d'" msgstr "" -#: neutron/api/api_common.py:133 +#: neutron/api/api_common.py:134 msgid "The number of sort_keys and sort_dirs must be same" msgstr "" -#: neutron/api/api_common.py:138 +#: neutron/api/api_common.py:139 #, python-format msgid "%s is invalid attribute for sort_keys" msgstr "" -#: neutron/api/api_common.py:142 +#: neutron/api/api_common.py:143 #, python-format msgid "" "%(invalid_dirs)s is invalid value for sort_dirs, valid value is '%(asc)s'" " and '%(desc)s'" msgstr "" -#: neutron/api/api_common.py:316 neutron/api/v2/base.py:617 +#: neutron/api/api_common.py:317 neutron/api/v2/base.py:618 #, python-format msgid "Unable to find '%s' in request body" msgstr "" -#: neutron/api/api_common.py:323 +#: neutron/api/api_common.py:324 #, python-format msgid "Failed to parse request. 
Parameter '%s' not specified" msgstr "" @@ -877,7 +877,7 @@ msgstr "" msgid "Unknown API version specified" msgstr "" -#: neutron/api/rpc/handlers/dhcp_rpc.py:77 +#: neutron/api/rpc/handlers/dhcp_rpc.py:83 msgid "Unrecognized action" msgstr "" @@ -1055,68 +1055,68 @@ msgstr "" msgid "'%s' is not of the form =[value]" msgstr "" -#: neutron/api/v2/base.py:90 +#: neutron/api/v2/base.py:91 msgid "Native pagination depend on native sorting" msgstr "" -#: neutron/api/v2/base.py:507 +#: neutron/api/v2/base.py:508 #, python-format msgid "Invalid format: %s" msgstr "" -#: neutron/api/v2/base.py:569 +#: neutron/api/v2/base.py:570 msgid "" "Specifying 'tenant_id' other than authenticated tenant in request " "requires admin privileges" msgstr "" -#: neutron/api/v2/base.py:577 +#: neutron/api/v2/base.py:578 msgid "Running without keystone AuthN requires that tenant_id is specified" msgstr "" -#: neutron/api/v2/base.py:595 +#: neutron/api/v2/base.py:596 msgid "Resource body required" msgstr "" -#: neutron/api/v2/base.py:601 +#: neutron/api/v2/base.py:602 msgid "Bulk operation not supported" msgstr "" -#: neutron/api/v2/base.py:604 +#: neutron/api/v2/base.py:605 msgid "Resources required" msgstr "" -#: neutron/api/v2/base.py:614 +#: neutron/api/v2/base.py:615 msgid "Body contains invalid data" msgstr "" -#: neutron/api/v2/base.py:628 +#: neutron/api/v2/base.py:629 #, python-format msgid "Failed to parse request. Required attribute '%s' not specified" msgstr "" -#: neutron/api/v2/base.py:635 +#: neutron/api/v2/base.py:636 #, python-format msgid "Attribute '%s' not allowed in POST" msgstr "" -#: neutron/api/v2/base.py:640 +#: neutron/api/v2/base.py:641 #, python-format msgid "Cannot update read-only attribute %s" msgstr "" -#: neutron/api/v2/base.py:658 +#: neutron/api/v2/base.py:659 #, python-format msgid "Invalid input for %(attr)s. Reason: %(reason)s." 
msgstr "" -#: neutron/api/v2/base.py:667 neutron/extensions/allowedaddresspairs.py:75 +#: neutron/api/v2/base.py:668 neutron/extensions/allowedaddresspairs.py:75 #: neutron/extensions/multiprovidernet.py:45 #, python-format msgid "Unrecognized attribute(s) '%s'" msgstr "" -#: neutron/api/v2/base.py:686 +#: neutron/api/v2/base.py:687 #, python-format msgid "Tenant %(tenant_id)s not allowed to create %(resource)s on this network" msgstr "" @@ -1185,191 +1185,191 @@ msgstr "" msgid "Check ebtables installation" msgstr "" -#: neutron/common/config.py:41 +#: neutron/common/config.py:42 msgid "The host IP to bind to" msgstr "" -#: neutron/common/config.py:43 +#: neutron/common/config.py:44 msgid "The port to bind to" msgstr "" -#: neutron/common/config.py:45 +#: neutron/common/config.py:46 msgid "The API paste config file to use" msgstr "" -#: neutron/common/config.py:47 +#: neutron/common/config.py:48 msgid "The path for API extensions" msgstr "" -#: neutron/common/config.py:51 +#: neutron/common/config.py:52 msgid "The core plugin Neutron will use" msgstr "" -#: neutron/common/config.py:53 neutron/db/migration/cli.py:40 +#: neutron/common/config.py:54 neutron/db/migration/cli.py:40 msgid "The service plugins Neutron will use" msgstr "" -#: neutron/common/config.py:55 +#: neutron/common/config.py:56 msgid "The base MAC address Neutron will use for VIFs" msgstr "" -#: neutron/common/config.py:57 +#: neutron/common/config.py:58 msgid "How many times Neutron will retry MAC generation" msgstr "" -#: neutron/common/config.py:59 +#: neutron/common/config.py:60 msgid "Allow the usage of the bulk API" msgstr "" -#: neutron/common/config.py:61 +#: neutron/common/config.py:62 msgid "Allow the usage of the pagination" msgstr "" -#: neutron/common/config.py:63 +#: neutron/common/config.py:64 msgid "Allow the usage of the sorting" msgstr "" -#: neutron/common/config.py:65 +#: neutron/common/config.py:66 msgid "" "The maximum number of items returned in a single response, value was " 
"'infinite' or negative integer means no limit" msgstr "" -#: neutron/common/config.py:69 +#: neutron/common/config.py:70 msgid "Maximum number of DNS nameservers" msgstr "" -#: neutron/common/config.py:71 +#: neutron/common/config.py:72 msgid "Maximum number of host routes per subnet" msgstr "" -#: neutron/common/config.py:73 +#: neutron/common/config.py:74 msgid "Maximum number of fixed ips per port" msgstr "" -#: neutron/common/config.py:75 +#: neutron/common/config.py:76 msgid "Default IPv4 subnet-pool to be used for automatic subnet CIDR allocation" msgstr "" -#: neutron/common/config.py:78 +#: neutron/common/config.py:79 msgid "Default IPv6 subnet-pool to be used for automatic subnet CIDR allocation" msgstr "" -#: neutron/common/config.py:82 +#: neutron/common/config.py:83 msgid "" "DHCP lease duration (in seconds). Use -1 to tell dnsmasq to use infinite " "lease times." msgstr "" -#: neutron/common/config.py:85 +#: neutron/common/config.py:86 msgid "Allow sending resource operation notification to DHCP agent" msgstr "" -#: neutron/common/config.py:88 +#: neutron/common/config.py:89 msgid "Allow overlapping IP support in Neutron" msgstr "" -#: neutron/common/config.py:90 +#: neutron/common/config.py:91 msgid "" "Hostname to be used by the neutron server, agents and services running on" " this machine. All the agents and services running on this machine must " "use the same host value." msgstr "" -#: neutron/common/config.py:95 +#: neutron/common/config.py:96 msgid "" "Ensure that configured gateway is on subnet. For IPv6, validate only if " "gateway is not a link local address. Deprecated, to be removed during the" " K release, at which point the check will be mandatory." 
msgstr "" -#: neutron/common/config.py:101 +#: neutron/common/config.py:102 msgid "Send notification to nova when port status changes" msgstr "" -#: neutron/common/config.py:103 +#: neutron/common/config.py:104 msgid "" "Send notification to nova when port data (fixed_ips/floatingip) changes " "so nova can update its cache." msgstr "" -#: neutron/common/config.py:107 +#: neutron/common/config.py:108 msgid "" "URL for connection to nova. Deprecated in favour of an auth plugin in " "[nova]." msgstr "" -#: neutron/common/config.py:110 +#: neutron/common/config.py:111 msgid "" "Username for connecting to nova in admin context. Deprecated in favour of" " an auth plugin in [nova]." msgstr "" -#: neutron/common/config.py:113 +#: neutron/common/config.py:114 msgid "" "Password for connection to nova in admin context. Deprecated in favour of" " an auth plugin in [nova]." msgstr "" -#: neutron/common/config.py:117 +#: neutron/common/config.py:118 msgid "" "The uuid of the admin nova tenant. Deprecated in favour of an auth plugin" " in [nova]." msgstr "" -#: neutron/common/config.py:120 +#: neutron/common/config.py:121 msgid "" "The name of the admin nova tenant. Deprecated in favour of an auth plugin" " in [nova]." msgstr "" -#: neutron/common/config.py:124 +#: neutron/common/config.py:125 msgid "" "Authorization URL for connecting to nova in admin context. Deprecated in " "favour of an auth plugin in [nova]." msgstr "" -#: neutron/common/config.py:128 +#: neutron/common/config.py:129 msgid "" "Number of seconds between sending events to nova if there are any events " "to send." msgstr "" -#: neutron/common/config.py:131 +#: neutron/common/config.py:132 msgid "" "If True, effort is made to advertise MTU settings to VMs via network " "methods (DHCP and RA MTU options) when the network's preferred MTU is " "known." msgstr "" -#: neutron/common/config.py:135 +#: neutron/common/config.py:136 msgid "IPAM driver to use." 
msgstr "" -#: neutron/common/config.py:137 +#: neutron/common/config.py:138 msgid "" "If True, then allow plugins that support it to create VLAN transparent " "networks." msgstr "" -#: neutron/common/config.py:144 +#: neutron/common/config.py:145 msgid "" "Where to store Neutron state files. This directory must be writable by " "the agent." msgstr "" -#: neutron/common/config.py:176 +#: neutron/common/config.py:177 msgid "" "Name of nova region to use. Useful if keystone manages more than one " "region." msgstr "" -#: neutron/common/config.py:198 +#: neutron/common/config.py:199 #, python-format msgid "Base MAC: %s" msgstr "" -#: neutron/common/config.py:231 +#: neutron/common/config.py:240 #, python-format msgid "Unable to load %(app_name)s from configuration file %(config_path)s." msgstr "" @@ -1831,34 +1831,34 @@ msgstr "" msgid "Bad prefix type for generate IPv6 address by EUI-64: %s" msgstr "" -#: neutron/common/utils.py:202 +#: neutron/common/utils.py:203 #: neutron/plugins/sriovnicagent/common/config.py:36 #, python-format msgid "Invalid mapping: '%s'" msgstr "" -#: neutron/common/utils.py:205 +#: neutron/common/utils.py:206 #: neutron/plugins/sriovnicagent/common/config.py:39 #, python-format msgid "Missing key in mapping: '%s'" msgstr "" -#: neutron/common/utils.py:208 +#: neutron/common/utils.py:209 #, python-format msgid "Missing value in mapping: '%s'" msgstr "" -#: neutron/common/utils.py:210 +#: neutron/common/utils.py:211 #, python-format msgid "Key %(key)s in mapping: '%(mapping)s' not unique" msgstr "" -#: neutron/common/utils.py:213 +#: neutron/common/utils.py:214 #, python-format msgid "Value %(value)s in mapping: '%(mapping)s' not unique" msgstr "" -#: neutron/common/utils.py:407 +#: neutron/common/utils.py:408 msgid "Illegal IP version number" msgstr "" @@ -1912,57 +1912,40 @@ msgid "" "such agents is available if this option is True." 
msgstr "" -#: neutron/db/common_db_mixin.py:123 +#: neutron/db/common_db_mixin.py:138 msgid "Cannot create resource for another tenant" msgstr "" -#: neutron/db/db_base_plugin_v2.py:393 -msgid "IP allocation requires subnet_id or ip_address" -msgstr "" - -#: neutron/db/db_base_plugin_v2.py:410 -#, python-format -msgid "" -"Failed to create port on network %(network_id)s, because fixed_ips " -"included invalid subnet %(subnet_id)s" -msgstr "" - -#: neutron/db/db_base_plugin_v2.py:436 -#, python-format -msgid "" -"IPv6 address %(address)s can not be directly assigned to a port on subnet" -" %(id)s since the subnet is configured for automatic addresses" -msgstr "" - -#: neutron/db/db_base_plugin_v2.py:455 neutron/db/db_base_plugin_v2.py:503 +#: neutron/db/db_base_plugin_v2.py:134 +#: neutron/db/ipam_non_pluggable_backend.py:263 #: neutron/plugins/opencontrail/contrail_plugin.py:390 msgid "Exceeded maximim amount of fixed ips per port" msgstr "" -#: neutron/db/db_base_plugin_v2.py:627 +#: neutron/db/db_base_plugin_v2.py:186 msgid "0 is not allowed as CIDR prefix length" msgstr "" -#: neutron/db/db_base_plugin_v2.py:637 +#: neutron/db/db_base_plugin_v2.py:196 #, python-format msgid "" "Requested subnet with cidr: %(cidr)s for network: %(network_id)s overlaps" " with another subnet" msgstr "" -#: neutron/db/db_base_plugin_v2.py:732 neutron/db/db_base_plugin_v2.py:736 +#: neutron/db/db_base_plugin_v2.py:224 neutron/db/db_base_plugin_v2.py:228 #, python-format msgid "Invalid route: %s" msgstr "" -#: neutron/db/db_base_plugin_v2.py:814 +#: neutron/db/db_base_plugin_v2.py:306 #, python-format msgid "" "Invalid CIDR %s for IPv6 address mode. OpenStack uses the EUI-64 address " "format, which requires the prefix to be /64." 
msgstr "" -#: neutron/db/db_base_plugin_v2.py:822 +#: neutron/db/db_base_plugin_v2.py:314 #, python-format msgid "" "ipv6_ra_mode set to '%(ra_mode)s' with ipv6_address_mode set to " @@ -1970,77 +1953,77 @@ msgid "" "the same value" msgstr "" -#: neutron/db/db_base_plugin_v2.py:830 +#: neutron/db/db_base_plugin_v2.py:322 msgid "" "ipv6_ra_mode or ipv6_address_mode cannot be set when enable_dhcp is set " "to False." msgstr "" -#: neutron/db/db_base_plugin_v2.py:836 +#: neutron/db/db_base_plugin_v2.py:328 msgid "Cannot disable enable_dhcp with ipv6 attributes set" msgstr "" -#: neutron/db/db_base_plugin_v2.py:1052 +#: neutron/db/db_base_plugin_v2.py:458 #, python-format msgid "%(name)s '%(addr)s' does not match the ip_version '%(ip_version)s'" msgstr "" -#: neutron/db/db_base_plugin_v2.py:1079 +#: neutron/db/db_base_plugin_v2.py:485 msgid "Subnet has a prefix length that is incompatible with DHCP service enabled." msgstr "" -#: neutron/db/db_base_plugin_v2.py:1100 +#: neutron/db/db_base_plugin_v2.py:506 msgid "Gateway is not valid on subnet" msgstr "" -#: neutron/db/db_base_plugin_v2.py:1120 neutron/db/db_base_plugin_v2.py:1134 +#: neutron/db/db_base_plugin_v2.py:526 neutron/db/db_base_plugin_v2.py:540 #: neutron/plugins/opencontrail/contrail_plugin.py:313 msgid "new subnet" msgstr "" -#: neutron/db/db_base_plugin_v2.py:1127 +#: neutron/db/db_base_plugin_v2.py:533 #, python-format msgid "Error parsing dns address %s" msgstr "" -#: neutron/db/db_base_plugin_v2.py:1143 +#: neutron/db/db_base_plugin_v2.py:549 msgid "ipv6_ra_mode is not valid when ip_version is 4" msgstr "" -#: neutron/db/db_base_plugin_v2.py:1147 +#: neutron/db/db_base_plugin_v2.py:553 msgid "ipv6_address_mode is not valid when ip_version is 4" msgstr "" -#: neutron/db/db_base_plugin_v2.py:1296 +#: neutron/db/db_base_plugin_v2.py:668 msgid "allocation_pools allowed only for specific subnet requests." 
msgstr "" -#: neutron/db/db_base_plugin_v2.py:1307 +#: neutron/db/db_base_plugin_v2.py:679 #, python-format msgid "Cannot allocate IPv%(req_ver)s subnet from IPv%(pool_ver)s subnet pool" msgstr "" -#: neutron/db/db_base_plugin_v2.py:1385 +#: neutron/db/db_base_plugin_v2.py:757 msgid "ip_version must be specified in the absence of cidr and subnetpool_id" msgstr "" -#: neutron/db/db_base_plugin_v2.py:1402 +#: neutron/db/db_base_plugin_v2.py:774 msgid "cidr and prefixlen must not be supplied together" msgstr "" -#: neutron/db/db_base_plugin_v2.py:1413 +#: neutron/db/db_base_plugin_v2.py:785 msgid "A cidr must be specified in the absence of a subnet pool" msgstr "" -#: neutron/db/db_base_plugin_v2.py:1717 +#: neutron/db/db_base_plugin_v2.py:1037 msgid "Existing prefixes must be a subset of the new prefixes" msgstr "" -#: neutron/db/db_base_plugin_v2.py:1784 +#: neutron/db/db_base_plugin_v2.py:1104 msgid "Subnet pool has existing allocations" msgstr "" -#: neutron/db/db_base_plugin_v2.py:1791 +#: neutron/db/db_base_plugin_v2.py:1111 msgid "mac address update" msgstr "" @@ -2060,56 +2043,74 @@ msgstr "" msgid "the nexthop is used by router" msgstr "" -#: neutron/db/l3_agentschedulers_db.py:44 +#: neutron/db/ipam_non_pluggable_backend.py:201 +msgid "IP allocation requires subnet_id or ip_address" +msgstr "" + +#: neutron/db/ipam_non_pluggable_backend.py:218 +#, python-format +msgid "" +"Failed to create port on network %(network_id)s, because fixed_ips " +"included invalid subnet %(subnet_id)s" +msgstr "" + +#: neutron/db/ipam_non_pluggable_backend.py:244 +#, python-format +msgid "" +"IPv6 address %(address)s can not be directly assigned to a port on subnet" +" %(id)s since the subnet is configured for automatic addresses" +msgstr "" + +#: neutron/db/l3_agentschedulers_db.py:45 msgid "Driver to use for scheduling router to a default L3 agent" msgstr "" -#: neutron/db/l3_agentschedulers_db.py:47 +#: neutron/db/l3_agentschedulers_db.py:48 msgid "Allow auto scheduling of 
routers to L3 agent." msgstr "" -#: neutron/db/l3_agentschedulers_db.py:49 +#: neutron/db/l3_agentschedulers_db.py:50 msgid "" "Automatically reschedule routers from offline L3 agents to online L3 " "agents." msgstr "" -#: neutron/db/l3_db.py:271 +#: neutron/db/l3_db.py:272 #, python-format msgid "No eligible l3 agent associated with external network %s found" msgstr "" -#: neutron/db/l3_db.py:309 +#: neutron/db/l3_db.py:310 #, python-format msgid "Network %s is not an external network" msgstr "" -#: neutron/db/l3_db.py:319 +#: neutron/db/l3_db.py:320 #, python-format msgid "External IP %s is the same as the gateway IP" msgstr "" -#: neutron/db/l3_db.py:469 +#: neutron/db/l3_db.py:470 #, python-format msgid "Router already has a port on subnet %s" msgstr "" -#: neutron/db/l3_db.py:483 +#: neutron/db/l3_db.py:484 #, python-format msgid "" "Cidr %(subnet_cidr)s of subnet %(subnet_id)s overlaps with cidr %(cidr)s " "of subnet %(sub_id)s" msgstr "" -#: neutron/db/l3_db.py:499 neutron/plugins/opencontrail/contrail_plugin.py:501 +#: neutron/db/l3_db.py:500 neutron/plugins/opencontrail/contrail_plugin.py:501 msgid "Either subnet_id or port_id must be specified" msgstr "" -#: neutron/db/l3_db.py:503 neutron/plugins/opencontrail/contrail_plugin.py:511 +#: neutron/db/l3_db.py:504 neutron/plugins/opencontrail/contrail_plugin.py:511 msgid "Cannot specify both subnet-id and port-id" msgstr "" -#: neutron/db/l3_db.py:520 +#: neutron/db/l3_db.py:521 #, python-format msgid "" "Cannot have multiple router ports with the same network id if both " @@ -2117,63 +2118,63 @@ msgid "" "id %(nid)s" msgstr "" -#: neutron/db/l3_db.py:562 +#: neutron/db/l3_db.py:563 msgid "Subnet for router interface must have a gateway IP" msgstr "" -#: neutron/db/l3_db.py:566 +#: neutron/db/l3_db.py:567 #, python-format msgid "" "IPv6 subnet %s configured to receive RAs from an external router cannot " "be added to Neutron Router." 
msgstr "" -#: neutron/db/l3_db.py:778 +#: neutron/db/l3_db.py:779 #, python-format msgid "Cannot add floating IP to port on subnet %s which has no gateway_ip" msgstr "" -#: neutron/db/l3_db.py:819 +#: neutron/db/l3_db.py:820 #, python-format msgid "" "Port %(port_id)s is associated with a different tenant than Floating IP " "%(floatingip_id)s and therefore cannot be bound." msgstr "" -#: neutron/db/l3_db.py:823 +#: neutron/db/l3_db.py:824 #, python-format msgid "" "Cannot create floating IP and bind it to Port %s, since that port is " "owned by a different tenant." msgstr "" -#: neutron/db/l3_db.py:835 +#: neutron/db/l3_db.py:836 #, python-format msgid "Port %(id)s does not have fixed ip %(address)s" msgstr "" -#: neutron/db/l3_db.py:842 +#: neutron/db/l3_db.py:843 #, python-format msgid "Cannot add floating IP to port %s that has no fixed IP addresses" msgstr "" -#: neutron/db/l3_db.py:846 +#: neutron/db/l3_db.py:847 #, python-format msgid "" "Port %s has multiple fixed IPs. Must provide a specific IP when " "assigning a floating IP" msgstr "" -#: neutron/db/l3_db.py:875 +#: neutron/db/l3_db.py:876 msgid "fixed_ip_address cannot be specified without a port_id" msgstr "" -#: neutron/db/l3_db.py:915 +#: neutron/db/l3_db.py:916 #, python-format msgid "Network %s is not a valid external network" msgstr "" -#: neutron/db/l3_db.py:1059 +#: neutron/db/l3_db.py:1060 #, python-format msgid "has device owner %s" msgstr "" @@ -2360,7 +2361,7 @@ msgid "" "implemented" msgstr "" -#: neutron/db/migration/alembic_migrations/versions/14be42f3d0a5_default_sec_group_table.py:44 +#: neutron/db/migration/alembic_migrations/versions/14be42f3d0a5_default_sec_group_table.py:45 #, python-format msgid "" "Some tenants have more than one security group named 'default': " @@ -2966,40 +2967,6 @@ msgstr "" msgid "Rules must be an instance of dict or Rules, got %s instead" msgstr "" -#: neutron/openstack/common/versionutils.py:108 -#, python-format -msgid "" -"%(what)s is deprecated as of 
%(as_of)s in favor of %(in_favor_of)s and " -"may be removed in %(remove_in)s." -msgstr "" - -#: neutron/openstack/common/versionutils.py:112 -#, python-format -msgid "" -"%(what)s is deprecated as of %(as_of)s and may be removed in " -"%(remove_in)s. It will not be superseded." -msgstr "" - -#: neutron/openstack/common/versionutils.py:116 -#, python-format -msgid "%(what)s is deprecated as of %(as_of)s in favor of %(in_favor_of)s." -msgstr "" - -#: neutron/openstack/common/versionutils.py:119 -#, python-format -msgid "%(what)s is deprecated as of %(as_of)s. It will not be superseded." -msgstr "" - -#: neutron/openstack/common/versionutils.py:241 -#, python-format -msgid "Deprecated: %s" -msgstr "" - -#: neutron/openstack/common/versionutils.py:259 -#, python-format -msgid "Fatal call to deprecated config: %(msg)s" -msgstr "" - #: neutron/plugins/brocade/NeutronPlugin.py:62 #: neutron/plugins/ml2/drivers/brocade/mechanism_brocade.py:22 #: neutron/services/l3_router/brocade/l3_router_plugin.py:23 @@ -3964,6 +3931,13 @@ msgstr "" msgid "Enable server RPC compatibility with old agents" msgstr "" +#: neutron/plugins/linuxbridge/common/config.py:66 +#: neutron/plugins/openvswitch/common/config.py:96 +msgid "" +"Set new timeout in seconds for new rpc calls after agent receives " +"SIGTERM. If value is set to 0, rpc timeout won't be changed" +msgstr "" + #: neutron/plugins/metaplugin/common/config.py:23 msgid "" "Comma separated list of flavor:neutron_plugin for plugins to load. " @@ -4072,16 +4046,16 @@ msgid "" "configured in type_drivers config option." 
msgstr "" -#: neutron/plugins/ml2/managers.py:98 +#: neutron/plugins/ml2/managers.py:99 msgid "network_type required" msgstr "" -#: neutron/plugins/ml2/managers.py:205 neutron/plugins/ml2/managers.py:214 +#: neutron/plugins/ml2/managers.py:206 neutron/plugins/ml2/managers.py:215 #, python-format msgid "network_type value '%s' not supported" msgstr "" -#: neutron/plugins/ml2/plugin.py:232 +#: neutron/plugins/ml2/plugin.py:228 msgid "binding:profile value too large" msgstr "" @@ -4090,26 +4064,26 @@ msgstr "" msgid "%(method)s failed." msgstr "" -#: neutron/plugins/ml2/drivers/type_flat.py:33 +#: neutron/plugins/ml2/drivers/type_flat.py:34 msgid "" "List of physical_network names with which flat networks can be created. " "Use * to allow flat networks with arbitrary physical_network names." msgstr "" -#: neutron/plugins/ml2/drivers/type_flat.py:75 +#: neutron/plugins/ml2/drivers/type_flat.py:76 msgid "physical network name is empty" msgstr "" -#: neutron/plugins/ml2/drivers/type_flat.py:93 +#: neutron/plugins/ml2/drivers/type_flat.py:94 msgid "physical_network required for flat provider network" msgstr "" -#: neutron/plugins/ml2/drivers/type_flat.py:96 +#: neutron/plugins/ml2/drivers/type_flat.py:97 #, python-format msgid "physical_network '%s' unknown for flat provider network" msgstr "" -#: neutron/plugins/ml2/drivers/type_flat.py:103 +#: neutron/plugins/ml2/drivers/type_flat.py:104 #, python-format msgid "%s prohibited for flat provider network" msgstr "" @@ -4120,35 +4094,35 @@ msgid "" "GRE tunnel IDs that are available for tenant network allocation" msgstr "" -#: neutron/plugins/ml2/drivers/type_local.py:51 +#: neutron/plugins/ml2/drivers/type_local.py:52 #, python-format msgid "%s prohibited for local provider network" msgstr "" -#: neutron/plugins/ml2/drivers/type_tunnel.py:125 +#: neutron/plugins/ml2/drivers/type_tunnel.py:122 #, python-format msgid "provider:physical_network specified for %s network" msgstr "" -#: 
neutron/plugins/ml2/drivers/type_tunnel.py:132 +#: neutron/plugins/ml2/drivers/type_tunnel.py:129 #, python-format msgid "%(key)s prohibited for %(tunnel)s provider network" msgstr "" -#: neutron/plugins/ml2/drivers/type_tunnel.py:213 +#: neutron/plugins/ml2/drivers/type_tunnel.py:254 msgid "Tunnel IP value needed by the ML2 plugin" msgstr "" -#: neutron/plugins/ml2/drivers/type_tunnel.py:218 +#: neutron/plugins/ml2/drivers/type_tunnel.py:259 msgid "Network type value needed by the ML2 plugin" msgstr "" -#: neutron/plugins/ml2/drivers/type_tunnel.py:245 +#: neutron/plugins/ml2/drivers/type_tunnel.py:286 #, python-format msgid "Tunnel IP %(ip)s in use with host %(host)s" msgstr "" -#: neutron/plugins/ml2/drivers/type_tunnel.py:264 +#: neutron/plugins/ml2/drivers/type_tunnel.py:305 #, python-format msgid "Network type value '%s' not supported" msgstr "" @@ -4180,13 +4154,13 @@ msgstr "" msgid "%s prohibited for VLAN provider network" msgstr "" -#: neutron/plugins/ml2/drivers/type_vxlan.py:35 +#: neutron/plugins/ml2/drivers/type_vxlan.py:34 msgid "" "Comma-separated list of : tuples enumerating ranges of " "VXLAN VNI IDs that are available for tenant network allocation" msgstr "" -#: neutron/plugins/ml2/drivers/type_vxlan.py:39 +#: neutron/plugins/ml2/drivers/type_vxlan.py:38 msgid "Multicast group for VXLAN. If unset, disables VXLAN multicast mode." msgstr "" @@ -4383,25 +4357,6 @@ msgstr "" msgid "Add new policy profile attribute to port resource." msgstr "" -#: neutron/plugins/ml2/drivers/cisco/ncs/driver.py:29 -msgid "HTTP URL of Tail-f NCS REST interface." 
-msgstr "" - -#: neutron/plugins/ml2/drivers/cisco/ncs/driver.py:31 -#: neutron/plugins/ml2/drivers/opendaylight/driver.py:32 -msgid "HTTP username for authentication" -msgstr "" - -#: neutron/plugins/ml2/drivers/cisco/ncs/driver.py:33 -#: neutron/plugins/ml2/drivers/opendaylight/driver.py:34 -msgid "HTTP password for authentication" -msgstr "" - -#: neutron/plugins/ml2/drivers/cisco/ncs/driver.py:35 -#: neutron/plugins/ml2/drivers/opendaylight/driver.py:36 -msgid "HTTP timeout in seconds." -msgstr "" - #: neutron/plugins/ml2/drivers/freescale/config.py:28 msgid "CRD service Username." msgstr "" @@ -4499,6 +4454,18 @@ msgstr "" msgid "HTTP URL of OpenDaylight REST interface." msgstr "" +#: neutron/plugins/ml2/drivers/opendaylight/driver.py:32 +msgid "HTTP username for authentication" +msgstr "" + +#: neutron/plugins/ml2/drivers/opendaylight/driver.py:34 +msgid "HTTP password for authentication" +msgstr "" + +#: neutron/plugins/ml2/drivers/opendaylight/driver.py:36 +msgid "HTTP timeout in seconds." +msgstr "" + #: neutron/plugins/ml2/drivers/opendaylight/driver.py:38 msgid "Tomcat session timeout in minutes." msgstr "" @@ -4688,30 +4655,30 @@ msgstr "" msgid "Unable to connect to NVSD controller. Exiting after %(retries)s attempts" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:60 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:63 #, python-format msgid "" "Unable to retrieve port details for devices: %(devices)s because of " "error: %(error)s" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1594 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1624 msgid "" "DVR deployments for VXLAN/GRE underlays require L2-pop to be enabled, in " "both the Agent and Server side." msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1608 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1638 #, python-format msgid "Parsing bridge_mappings failed: %s." 
msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1630 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1660 #, python-format msgid "Invalid tunnel type specified: %s" msgstr "" -#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1633 +#: neutron/plugins/openvswitch/agent/ovs_neutron_agent.py:1663 msgid "Tunneling cannot be enabled without a valid local_ip." msgstr "" @@ -4798,17 +4765,11 @@ msgstr "" msgid "Make the l2 agent run in DVR mode." msgstr "" -#: neutron/plugins/openvswitch/common/config.py:96 -msgid "" -"Set new timeout in seconds for new rpc calls after agent receives " -"SIGTERM. If value is set to 0, rpc timeout won't be changed" -msgstr "" - -#: neutron/plugins/sriovnicagent/eswitch_manager.py:50 +#: neutron/plugins/sriovnicagent/eswitch_manager.py:51 msgid "Device not found" msgstr "" -#: neutron/plugins/sriovnicagent/eswitch_manager.py:64 +#: neutron/plugins/sriovnicagent/eswitch_manager.py:65 msgid "Device has no virtual functions" msgstr "" @@ -5105,7 +5066,7 @@ msgstr "" msgid "An interface driver must be specified" msgstr "" -#: neutron/tests/base.py:108 +#: neutron/tests/base.py:109 #, python-format msgid "Unknown attribute '%s'." msgstr "" @@ -5144,12 +5105,12 @@ msgid "" "operation." msgstr "" -#: neutron/tests/unit/plugins/ml2/test_plugin.py:423 +#: neutron/tests/unit/plugins/ml2/test_plugin.py:443 #, python-format msgid "Deleting port %s" msgstr "" -#: neutron/tests/unit/plugins/ml2/test_plugin.py:424 +#: neutron/tests/unit/plugins/ml2/test_plugin.py:444 #, python-format msgid "The port '%s' was deleted" msgstr "" @@ -5183,8 +5144,8 @@ msgstr "" msgid "Adds test attributes to core resources." 
msgstr "" -#: neutron/tests/unit/plugins/openvswitch/agent/test_ovs_neutron_agent.py:881 -#: neutron/tests/unit/plugins/openvswitch/agent/test_ovs_neutron_agent.py:898 +#: neutron/tests/unit/plugins/openvswitch/agent/test_ovs_neutron_agent.py:893 +#: neutron/tests/unit/plugins/openvswitch/agent/test_ovs_neutron_agent.py:910 #, python-format msgid "Failed to set-up %(type)s tunnel port to %(ip)s" msgstr "" diff --git a/neutron/locale/pt_BR/LC_MESSAGES/neutron-log-info.po b/neutron/locale/pt_BR/LC_MESSAGES/neutron-log-info.po index 81e426efea8..c9334cb8360 100644 --- a/neutron/locale/pt_BR/LC_MESSAGES/neutron-log-info.po +++ b/neutron/locale/pt_BR/LC_MESSAGES/neutron-log-info.po @@ -8,8 +8,8 @@ msgid "" msgstr "" "Project-Id-Version: Neutron\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2015-05-29 06:16+0000\n" -"PO-Revision-Date: 2015-05-28 20:54+0000\n" +"POT-Creation-Date: 2015-06-11 06:02+0000\n" +"PO-Revision-Date: 2015-06-10 23:52+0000\n" "Last-Translator: openstackjenkins \n" "Language-Team: Portuguese (Brazil) (http://www.transifex.com/projects/p/" "neutron/language/pt_BR/)\n" @@ -185,13 +185,6 @@ msgstr "Inicializando o Extension Manager." 
msgid "Initializing mechanism driver '%s'" msgstr "Inicializando driver de mecanismo '%s'" -#, python-format -msgid "" -"Inserting policy:%(new_policy)s in place of deprecated policy:%(old_policy)s" -msgstr "" -"Inserindo política: %(new_policy)s no lugar de política deprecada: " -"%(old_policy)s" - #, python-format msgid "Interface mappings: %s" msgstr "Mapeamentos da interface: %s" diff --git a/neutron/locale/zh_CN/LC_MESSAGES/neutron-log-info.po b/neutron/locale/zh_CN/LC_MESSAGES/neutron-log-info.po index 9d896ed214e..f85f32f3d16 100644 --- a/neutron/locale/zh_CN/LC_MESSAGES/neutron-log-info.po +++ b/neutron/locale/zh_CN/LC_MESSAGES/neutron-log-info.po @@ -8,8 +8,8 @@ msgid "" msgstr "" "Project-Id-Version: Neutron\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2015-05-29 06:16+0000\n" -"PO-Revision-Date: 2015-05-28 20:54+0000\n" +"POT-Creation-Date: 2015-06-11 06:02+0000\n" +"PO-Revision-Date: 2015-06-10 23:52+0000\n" "Last-Translator: openstackjenkins \n" "Language-Team: Chinese (China) (http://www.transifex.com/projects/p/neutron/" "language/zh_CN/)\n" @@ -209,11 +209,6 @@ msgstr "初始化扩展驱动 '%s'" msgid "Initializing extension manager." msgstr "正在初始化扩展管理员。" -#, python-format -msgid "" -"Inserting policy:%(new_policy)s in place of deprecated policy:%(old_policy)s" -msgstr "在被废弃的策略:%(old_policy)s位置上插入策略:%(new_policy)s " - #, python-format msgid "Interface mappings: %s" msgstr "接口映射:%s" From 1552f311532fdbd03a79ecfc1fae488b072c5a14 Mon Sep 17 00:00:00 2001 From: Ann Kamyshnikova Date: Tue, 9 Jun 2015 11:30:06 +0300 Subject: [PATCH 176/292] Fix Enum usage in 589f9237ca0e_cisco_n1kv_ml2_driver_tables PostgreSQL is more sensitive with types than MySQL, it creates a separate type when a Enum is created. In migration 589f9237ca0e type profile_type is trying to be created, but the type with such name was already created in havana_initial migration. 
The solution for this is not to create type in 589f9237ca0e migration when dialect is PostgreSQL and use already created. Closes-bug: #1463301 Change-Id: I66e74d50cc70673de8635616076779cc20cde113 --- .../589f9237ca0e_cisco_n1kv_ml2_driver_tables.py | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/neutron/db/migration/alembic_migrations/versions/589f9237ca0e_cisco_n1kv_ml2_driver_tables.py b/neutron/db/migration/alembic_migrations/versions/589f9237ca0e_cisco_n1kv_ml2_driver_tables.py index 6c091ce5d27..c1f4422bdda 100644 --- a/neutron/db/migration/alembic_migrations/versions/589f9237ca0e_cisco_n1kv_ml2_driver_tables.py +++ b/neutron/db/migration/alembic_migrations/versions/589f9237ca0e_cisco_n1kv_ml2_driver_tables.py @@ -31,7 +31,6 @@ import sqlalchemy as sa network_profile_type = sa.Enum('vlan', 'vxlan', name='network_profile_type') -profile_type = sa.Enum('network', 'policy', name='profile_type') def upgrade(): @@ -103,7 +102,15 @@ def upgrade(): ondelete='CASCADE'), sa.PrimaryKeyConstraint('physical_network', 'vlan_id') ) - + # Bugfix for 1463301: PostgreSQL creates type when Enum assigned to column, + # but type profile_type was already created in cisco_init_opts, so it needs + # to be reused. MySQL do not create type for Enums. + if op.get_context().dialect.name == 'postgresql': + profile_type = sa.dialects.postgresql.ENUM('network', 'policy', + name='profile_type', + create_type=False) + else: + profile_type = sa.Enum('network', 'policy', name='profile_type') op.create_table( 'cisco_ml2_n1kv_profile_bindings', sa.Column('profile_type', profile_type, nullable=True), From 5ff082bcfe12647036e5b033bfc2bac514acdb42 Mon Sep 17 00:00:00 2001 From: Dane LeBlanc Date: Tue, 24 Feb 2015 15:47:01 -0500 Subject: [PATCH 177/292] Stop sending gratuitous arp when ip version is 6 This fix prevents calls to the arping utility for IPv6 addresses, thereby eliminating errors reported by arping for IPv6 addresses. 
The assumption is that NDP, DAD, and RAs are sufficient for address resolution and duplicate address detection for IPv6, and that unsolicited Neighbor Advertisements (NAs) are not required for OpenStack services. If this turns out not to be the case for some service/feature, then a separate bug should be filed to add support for unsolicited NAs for that service. Change-Id: I14f869b7d488d7e691f7316eafcab3064e12cda6 Closes-Bug: 1357068 --- neutron/agent/l3/dvr_fip_ns.py | 8 ++--- neutron/agent/l3/dvr_router.py | 8 ++--- neutron/agent/l3/legacy_router.py | 8 ++--- neutron/agent/l3/router_info.py | 16 +++++----- neutron/agent/linux/ip_lib.py | 16 ++++++++-- neutron/tests/unit/agent/l3/test_agent.py | 31 ++++++++++--------- .../tests/unit/agent/l3/test_dvr_fip_ns.py | 13 ++++---- .../tests/unit/agent/l3/test_dvr_router.py | 4 +-- .../tests/unit/agent/l3/test_legacy_router.py | 13 ++++---- neutron/tests/unit/agent/linux/test_ip_lib.py | 28 +++++++++++------ 10 files changed, 83 insertions(+), 62 deletions(-) diff --git a/neutron/agent/l3/dvr_fip_ns.py b/neutron/agent/l3/dvr_fip_ns.py index 9b7eee99a88..90e24d129d9 100644 --- a/neutron/agent/l3/dvr_fip_ns.py +++ b/neutron/agent/l3/dvr_fip_ns.py @@ -107,10 +107,10 @@ class FipNamespace(namespaces.Namespace): clean_connections=True) for fixed_ip in ex_gw_port['fixed_ips']: - ip_lib.send_gratuitous_arp(ns_name, - interface_name, - fixed_ip['ip_address'], - self.agent_conf.send_arp_for_ha) + ip_lib.send_ip_addr_adv_notif(ns_name, + interface_name, + fixed_ip['ip_address'], + self.agent_conf) for subnet in ex_gw_port['subnets']: gw_ip = subnet.get('gateway_ip') diff --git a/neutron/agent/l3/dvr_router.py b/neutron/agent/l3/dvr_router.py index 8c1313acc9f..df3d465e4d6 100755 --- a/neutron/agent/l3/dvr_router.py +++ b/neutron/agent/l3/dvr_router.py @@ -100,10 +100,10 @@ class DvrRouter(router.RouterInfo): interface_name = ( self.fip_ns.get_ext_device_name( self.fip_ns.agent_gateway_port['id'])) - 
ip_lib.send_gratuitous_arp(fip_ns_name, - interface_name, - floating_ip, - self.agent_conf.send_arp_for_ha) + ip_lib.send_ip_addr_adv_notif(fip_ns_name, + interface_name, + floating_ip, + self.agent_conf) # update internal structures self.dist_fip_count = self.dist_fip_count + 1 diff --git a/neutron/agent/l3/legacy_router.py b/neutron/agent/l3/legacy_router.py index 9c7c5bdc7e9..2b8ccdbaa96 100644 --- a/neutron/agent/l3/legacy_router.py +++ b/neutron/agent/l3/legacy_router.py @@ -24,8 +24,8 @@ class LegacyRouter(router.RouterInfo): # As GARP is processed in a distinct thread the call below # won't raise an exception to be handled. - ip_lib.send_gratuitous_arp(self.ns_name, - interface_name, - fip['floating_ip_address'], - self.agent_conf.send_arp_for_ha) + ip_lib.send_ip_addr_adv_notif(self.ns_name, + interface_name, + fip['floating_ip_address'], + self.agent_conf) return l3_constants.FLOATINGIP_STATUS_ACTIVE diff --git a/neutron/agent/l3/router_info.py b/neutron/agent/l3/router_info.py index 0dfbc13ef58..f698a94d61c 100644 --- a/neutron/agent/l3/router_info.py +++ b/neutron/agent/l3/router_info.py @@ -293,10 +293,10 @@ class RouterInfo(object): ip_cidrs = common_utils.fixed_ip_cidrs(fixed_ips) self.driver.init_l3(interface_name, ip_cidrs, namespace=ns_name) for fixed_ip in fixed_ips: - ip_lib.send_gratuitous_arp(ns_name, - interface_name, - fixed_ip['ip_address'], - self.agent_conf.send_arp_for_ha) + ip_lib.send_ip_addr_adv_notif(ns_name, + interface_name, + fixed_ip['ip_address'], + self.agent_conf) def internal_network_added(self, port): network_id = port['network_id'] @@ -465,10 +465,10 @@ class RouterInfo(object): enable_ra_on_gw=enable_ra_on_gw, clean_connections=True) for fixed_ip in ex_gw_port['fixed_ips']: - ip_lib.send_gratuitous_arp(ns_name, - interface_name, - fixed_ip['ip_address'], - self.agent_conf.send_arp_for_ha) + ip_lib.send_ip_addr_adv_notif(ns_name, + interface_name, + fixed_ip['ip_address'], + self.agent_conf) def is_v6_gateway_set(self, 
gateway_ips): """Check to see if list of gateway_ips has an IPv6 gateway. diff --git a/neutron/agent/linux/ip_lib.py b/neutron/agent/linux/ip_lib.py index 32fe1f9ac84..f04152cf538 100644 --- a/neutron/agent/linux/ip_lib.py +++ b/neutron/agent/linux/ip_lib.py @@ -757,13 +757,23 @@ def _arping(ns_name, iface_name, address, count): 'ns': ns_name}) -def send_gratuitous_arp(ns_name, iface_name, address, count): - """Send a gratuitous arp using given namespace, interface, and address.""" +def send_ip_addr_adv_notif(ns_name, iface_name, address, config): + """Send advance notification of an IP address assignment. + + If the address is in the IPv4 family, send gratuitous ARP. + + If the address is in the IPv6 family, no advance notification is + necessary, since the Neighbor Discovery Protocol (NDP), Duplicate + Address Discovery (DAD), and (for stateless addresses) router + advertisements (RAs) are sufficient for address resolution and + duplicate address detection. + """ + count = config.send_arp_for_ha def arping(): _arping(ns_name, iface_name, address, count) - if count > 0: + if count > 0 and netaddr.IPAddress(address).version == 4: eventlet.spawn_n(arping) diff --git a/neutron/tests/unit/agent/l3/test_agent.py b/neutron/tests/unit/agent/l3/test_agent.py index aeec5c6f1c2..143c659dd12 100644 --- a/neutron/tests/unit/agent/l3/test_agent.py +++ b/neutron/tests/unit/agent/l3/test_agent.py @@ -323,9 +323,9 @@ class BasicRouterOperationsFramework(base.BaseTestCase): self.process_monitor = mock.patch( 'neutron.agent.linux.external_process.ProcessMonitor').start() - self.send_arp_p = mock.patch( - 'neutron.agent.linux.ip_lib.send_gratuitous_arp') - self.send_arp = self.send_arp_p.start() + self.send_adv_notif_p = mock.patch( + 'neutron.agent.linux.ip_lib.send_ip_addr_adv_notif') + self.send_adv_notif = self.send_adv_notif_p.start() self.dvr_cls_p = mock.patch('neutron.agent.linux.interface.NullDriver') driver_cls = self.dvr_cls_p.start() @@ -510,8 +510,9 @@ class 
TestBasicRouterOperations(BasicRouterOperationsFramework): ri.internal_network_added(port) self.assertEqual(self.mock_driver.plug.call_count, 1) self.assertEqual(self.mock_driver.init_l3.call_count, 1) - self.send_arp.assert_called_once_with(ri.ns_name, interface_name, - '99.0.1.9', mock.ANY) + self.send_adv_notif.assert_called_once_with(ri.ns_name, + interface_name, + '99.0.1.9', mock.ANY) elif action == 'remove': self.device_exists.return_value = True ri.internal_network_removed(port) @@ -622,7 +623,7 @@ class TestBasicRouterOperations(BasicRouterOperationsFramework): self.assertEqual(self.mock_driver.plug.call_count, 1) self.assertEqual(self.mock_driver.init_l3.call_count, 1) if no_subnet and not dual_stack: - self.assertEqual(self.send_arp.call_count, 0) + self.assertEqual(self.send_adv_notif.call_count, 0) ip_cidrs = [] gateway_ips = [] if no_sub_gw: @@ -640,7 +641,7 @@ class TestBasicRouterOperations(BasicRouterOperationsFramework): exp_arp_calls += [mock.call(ri.ns_name, interface_name, '2001:192:168:100::2', mock.ANY)] - self.send_arp.assert_has_calls(exp_arp_calls) + self.send_adv_notif.assert_has_calls(exp_arp_calls) ip_cidrs = ['20.0.0.30/24'] gateway_ips = ['20.0.0.1'] if dual_stack: @@ -811,7 +812,7 @@ class TestBasicRouterOperations(BasicRouterOperationsFramework): ri.use_ipv6 = True exp_arp_calls += [mock.call(ri.ns_name, interface_name, '2001:192:168:100::2', mock.ANY)] - self.send_arp.assert_has_calls(exp_arp_calls) + self.send_adv_notif.assert_has_calls(exp_arp_calls) ip_cidrs = ['20.0.0.30/24'] gateway_ips = ['20.0.0.1'] if dual_stack: @@ -1148,7 +1149,7 @@ class TestBasicRouterOperations(BasicRouterOperationsFramework): del router[l3_constants.INTERFACE_KEY] del router['gw_port'] ri.process(agent) - self.assertEqual(self.send_arp.call_count, 1) + self.assertEqual(self.send_adv_notif.call_count, 1) distributed = ri.router.get('distributed', False) self.assertEqual(ri.process_floating_ip_addresses.called, distributed) @@ -1385,7 +1386,7 @@ class 
TestBasicRouterOperations(BasicRouterOperationsFramework): self.assertEqual(len(mangle_rules_delta), 1) self._verify_snat_mangle_rules(nat_rules_delta, mangle_rules_delta, router) - self.assertEqual(self.send_arp.call_count, 1) + self.assertEqual(self.send_adv_notif.call_count, 1) def test_process_router_snat_enabled(self): agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) @@ -1412,7 +1413,7 @@ class TestBasicRouterOperations(BasicRouterOperationsFramework): self.assertEqual(len(mangle_rules_delta), 1) self._verify_snat_mangle_rules(nat_rules_delta, mangle_rules_delta, router) - self.assertEqual(self.send_arp.call_count, 1) + self.assertEqual(self.send_adv_notif.call_count, 1) def test_process_router_interface_added(self): agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) @@ -1426,8 +1427,8 @@ class TestBasicRouterOperations(BasicRouterOperationsFramework): # Reassign the router object to RouterInfo ri.router = router ri.process(agent) - # send_arp is called both times process is called - self.assertEqual(self.send_arp.call_count, 2) + # send_ip_addr_adv_notif is called both times process is called + self.assertEqual(self.send_adv_notif.call_count, 2) def _test_process_ipv6_only_or_dual_stack_gw(self, dual_stack=False): agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) @@ -1617,8 +1618,8 @@ class TestBasicRouterOperations(BasicRouterOperationsFramework): # Reassign the router object to RouterInfo ri.router = router ri.process(agent) - # send_arp is called both times process is called - self.assertEqual(self.send_arp.call_count, 2) + # send_ip_addr_adv_notif is called both times process is called + self.assertEqual(self.send_adv_notif.call_count, 2) def test_process_router_ipv6_interface_removed(self): agent = l3_agent.L3NATAgent(HOSTNAME, self.conf) diff --git a/neutron/tests/unit/agent/l3/test_dvr_fip_ns.py b/neutron/tests/unit/agent/l3/test_dvr_fip_ns.py index 47b8c45e2d0..b6ee852ad83 100644 --- a/neutron/tests/unit/agent/l3/test_dvr_fip_ns.py +++ 
b/neutron/tests/unit/agent/l3/test_dvr_fip_ns.py @@ -67,9 +67,10 @@ class TestDvrFipNs(base.BaseTestCase): @mock.patch.object(ip_lib, 'IPWrapper') @mock.patch.object(ip_lib, 'IPDevice') - @mock.patch.object(ip_lib, 'send_gratuitous_arp') + @mock.patch.object(ip_lib, 'send_ip_addr_adv_notif') @mock.patch.object(ip_lib, 'device_exists') - def test_gateway_added(self, device_exists, send_arp, IPDevice, IPWrapper): + def test_gateway_added(self, device_exists, send_adv_notif, + IPDevice, IPWrapper): subnet_id = _uuid() agent_gw_port = {'fixed_ips': [{'ip_address': '20.0.0.30', 'prefixlen': 24, @@ -86,10 +87,10 @@ class TestDvrFipNs(base.BaseTestCase): mock.sentinel.interface_name) self.assertEqual(self.driver.plug.call_count, 1) self.assertEqual(self.driver.init_l3.call_count, 1) - send_arp.assert_called_once_with(self.fip_ns.get_name(), - mock.sentinel.interface_name, - '20.0.0.30', - mock.ANY) + send_adv_notif.assert_called_once_with(self.fip_ns.get_name(), + mock.sentinel.interface_name, + '20.0.0.30', + mock.ANY) @mock.patch.object(ip_lib, 'IPWrapper') def test_destroy(self, IPWrapper): diff --git a/neutron/tests/unit/agent/l3/test_dvr_router.py b/neutron/tests/unit/agent/l3/test_dvr_router.py index fbbf08c43a6..8b68478c1be 100644 --- a/neutron/tests/unit/agent/l3/test_dvr_router.py +++ b/neutron/tests/unit/agent/l3/test_dvr_router.py @@ -56,10 +56,10 @@ class TestDvrRouterOperations(base.BaseTestCase): self.assertEqual([{'host': mock.sentinel.myhost}], fips) - @mock.patch.object(ip_lib, 'send_gratuitous_arp') + @mock.patch.object(ip_lib, 'send_ip_addr_adv_notif') @mock.patch.object(ip_lib, 'IPDevice') @mock.patch.object(ip_lib, 'IPRule') - def test_floating_ip_added_dist(self, mIPRule, mIPDevice, mock_arp): + def test_floating_ip_added_dist(self, mIPRule, mIPDevice, mock_adv_notif): router = mock.MagicMock() ri = self._create_router(router) ext_net_id = _uuid() diff --git a/neutron/tests/unit/agent/l3/test_legacy_router.py 
b/neutron/tests/unit/agent/l3/test_legacy_router.py index 2bf4f303515..b34b3cc540a 100644 --- a/neutron/tests/unit/agent/l3/test_legacy_router.py +++ b/neutron/tests/unit/agent/l3/test_legacy_router.py @@ -49,28 +49,27 @@ class TestBasicRouterOperations(BasicRouterTestCaseFramework): device.delete_addr_and_conntrack_state.assert_called_once_with(cidr) -@mock.patch.object(ip_lib, 'send_gratuitous_arp') +@mock.patch.object(ip_lib, 'send_ip_addr_adv_notif') class TestAddFloatingIpWithMockGarp(BasicRouterTestCaseFramework): - def test_add_floating_ip(self, send_gratuitous_arp): + def test_add_floating_ip(self, send_ip_addr_adv_notif): ri = self._create_router() ri._add_fip_addr_to_device = mock.Mock(return_value=True) - self.agent_conf.send_arp_for_ha = mock.sentinel.arp_count ip = '15.1.2.3' result = ri.add_floating_ip({'floating_ip_address': ip}, mock.sentinel.interface_name, mock.sentinel.device) - ip_lib.send_gratuitous_arp.assert_called_once_with( + ip_lib.send_ip_addr_adv_notif.assert_called_once_with( ri.ns_name, mock.sentinel.interface_name, ip, - mock.sentinel.arp_count) + self.agent_conf) self.assertEqual(l3_constants.FLOATINGIP_STATUS_ACTIVE, result) - def test_add_floating_ip_error(self, send_gratuitous_arp): + def test_add_floating_ip_error(self, send_ip_addr_adv_notif): ri = self._create_router() ri._add_fip_addr_to_device = mock.Mock(return_value=False) result = ri.add_floating_ip({'floating_ip_address': '15.1.2.3'}, mock.sentinel.interface_name, mock.sentinel.device) - self.assertFalse(ip_lib.send_gratuitous_arp.called) + self.assertFalse(ip_lib.send_ip_addr_adv_notif.called) self.assertEqual(l3_constants.FLOATINGIP_STATUS_ERROR, result) diff --git a/neutron/tests/unit/agent/linux/test_ip_lib.py b/neutron/tests/unit/agent/linux/test_ip_lib.py index 01ddf39997b..51ac34cfe95 100644 --- a/neutron/tests/unit/agent/linux/test_ip_lib.py +++ b/neutron/tests/unit/agent/linux/test_ip_lib.py @@ -1013,13 +1013,18 @@ class TestIpNeighCommand(TestIPCmdBase): class 
TestArpPing(TestIPCmdBase): - def _test_arping(self, function, address, spawn_n, mIPWrapper): + @mock.patch.object(ip_lib, 'IPWrapper') + @mock.patch('eventlet.spawn_n') + def test_send_ipv4_addr_adv_notif(self, spawn_n, mIPWrapper): spawn_n.side_effect = lambda f: f() ARPING_COUNT = 3 - function(mock.sentinel.ns_name, - mock.sentinel.iface_name, - address, - ARPING_COUNT) + address = '20.0.0.1' + config = mock.Mock() + config.send_arp_for_ha = ARPING_COUNT + ip_lib.send_ip_addr_adv_notif(mock.sentinel.ns_name, + mock.sentinel.iface_name, + address, + config) self.assertTrue(spawn_n.called) mIPWrapper.assert_called_once_with(namespace=mock.sentinel.ns_name) @@ -1035,11 +1040,16 @@ class TestArpPing(TestIPCmdBase): ip_wrapper.netns.execute.assert_any_call(arping_cmd, check_exit_code=True) - @mock.patch.object(ip_lib, 'IPWrapper') @mock.patch('eventlet.spawn_n') - def test_send_gratuitous_arp(self, spawn_n, mIPWrapper): - self._test_arping( - ip_lib.send_gratuitous_arp, '20.0.0.1', spawn_n, mIPWrapper) + def test_no_ipv6_addr_notif(self, spawn_n): + ipv6_addr = 'fd00::1' + config = mock.Mock() + config.send_arp_for_ha = 3 + ip_lib.send_ip_addr_adv_notif(mock.sentinel.ns_name, + mock.sentinel.iface_name, + ipv6_addr, + config) + self.assertFalse(spawn_n.called) class TestAddNamespaceToCmd(base.BaseTestCase): From 7e0222409dab6223579efea34ba0d3ccf93e11d3 Mon Sep 17 00:00:00 2001 From: Pavel Bondar Date: Thu, 11 Jun 2015 17:23:41 +0300 Subject: [PATCH 178/292] Refactor _update_subnet_allocation_pools Moved _update_subnet_allocation_pools to ipam_backend_mixin.py. Call _rebuild_availability_ranges with self to make it overridable on upper level (from non-pluggable backend). 
Partially-Implements: blueprint neutron-ipam Change-Id: If7b1e720f88a2f0177b6772a015ae216f19ee22d --- neutron/db/db_base_plugin_v2.py | 15 --------------- neutron/db/ipam_backend_mixin.py | 24 ++++++++++++++++++++++++ 2 files changed, 24 insertions(+), 15 deletions(-) diff --git a/neutron/db/db_base_plugin_v2.py b/neutron/db/db_base_plugin_v2.py index 9147fd88996..8dcd27554a4 100644 --- a/neutron/db/db_base_plugin_v2.py +++ b/neutron/db/db_base_plugin_v2.py @@ -781,21 +781,6 @@ class NeutronDbPluginV2(ipam_non_pluggable_backend.IpamNonPluggableBackend, LOG.debug("Port %s was deleted while updating it with an " "IPv6 auto-address. Ignoring.", port['id']) - def _update_subnet_allocation_pools(self, context, id, s): - context.session.query(models_v2.IPAllocationPool).filter_by( - subnet_id=id).delete() - new_pools = [models_v2.IPAllocationPool( - first_ip=p['start'], last_ip=p['end'], - subnet_id=id) for p in s['allocation_pools']] - context.session.add_all(new_pools) - NeutronDbPluginV2._rebuild_availability_ranges(context, [s]) - #Gather new pools for result: - result_pools = [{'start': pool['start'], - 'end': pool['end']} - for pool in s['allocation_pools']] - del s['allocation_pools'] - return result_pools - def update_subnet(self, context, id, subnet): """Update the subnet with new info. 
diff --git a/neutron/db/ipam_backend_mixin.py b/neutron/db/ipam_backend_mixin.py index e38b49549be..74853bd619e 100644 --- a/neutron/db/ipam_backend_mixin.py +++ b/neutron/db/ipam_backend_mixin.py @@ -37,6 +37,12 @@ class IpamBackendMixin(db_base_plugin_common.DbBasePluginCommon): # Tracks changes in ip allocation for port using namedtuple Changes = collections.namedtuple('Changes', 'add original remove') + @staticmethod + def _rebuild_availability_ranges(context, subnets): + """Should be redefined for non-ipam backend only + """ + pass + def _update_db_port(self, context, db_port, new_port, network_id, new_mac): # Remove all attributes in new_port which are not in the port DB model # and then update the port @@ -99,6 +105,24 @@ class IpamBackendMixin(db_base_plugin_common.DbBasePluginCommon): del s["dns_nameservers"] return new_dns + def _update_subnet_allocation_pools(self, context, id, s): + context.session.query(models_v2.IPAllocationPool).filter_by( + subnet_id=id).delete() + new_pools = [models_v2.IPAllocationPool(first_ip=p['start'], + last_ip=p['end'], + subnet_id=id) + for p in s['allocation_pools']] + context.session.add_all(new_pools) + # Call static method with self to redefine in child + # (non-pluggable backend) + self._rebuild_availability_ranges(context, [s]) + # Gather new pools for result: + result_pools = [{'start': pool['start'], + 'end': pool['end']} + for pool in s['allocation_pools']] + del s['allocation_pools'] + return result_pools + def _validate_allocation_pools(self, ip_pools, subnet_cidr): """Validate IP allocation pools. From 89a83bf199e7ea75c04f3205ff77987feed13184 Mon Sep 17 00:00:00 2001 From: rossella Date: Thu, 11 Jun 2015 10:43:36 +0200 Subject: [PATCH 179/292] OVSNeutronAgent pass the config as parameter Instead of using the global cfg.CONF, pass the config as parameter. This is very useful to test the agent without having to override the global config. 
Change-Id: I45534d79e044da9f2be4d596a58310fb28b7bf22 --- .../openvswitch/agent/ovs_neutron_agent.py | 54 ++++++++++--------- 1 file changed, 30 insertions(+), 24 deletions(-) diff --git a/neutron/plugins/openvswitch/agent/ovs_neutron_agent.py b/neutron/plugins/openvswitch/agent/ovs_neutron_agent.py index 36e8851fdf5..8e5a5c18ced 100644 --- a/neutron/plugins/openvswitch/agent/ovs_neutron_agent.py +++ b/neutron/plugins/openvswitch/agent/ovs_neutron_agent.py @@ -134,7 +134,8 @@ class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin, arp_responder=False, prevent_arp_spoofing=True, use_veth_interconnection=False, - quitting_rpc_timeout=None): + quitting_rpc_timeout=None, + conf=None): '''Constructor. :param bridge_classes: a dict for bridge classes. @@ -163,6 +164,7 @@ class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin, interconnect the integration bridge to physical bridges. :param quitting_rpc_timeout: timeout in seconds for rpc calls after SIGTERM is received + :param conf: an instance of ConfigOpts ''' super(OVSNeutronAgent, self).__init__() self.br_int_cls = bridge_classes['br_int'] @@ -180,9 +182,11 @@ class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin, self.enable_distributed_routing = enable_distributed_routing self.arp_responder_enabled = arp_responder and self.l2_pop self.prevent_arp_spoofing = prevent_arp_spoofing + self.conf = conf or cfg.CONF + self.agent_state = { 'binary': 'neutron-openvswitch-agent', - 'host': cfg.CONF.host, + 'host': self.conf.host, 'topic': q_const.L2_AGENT_TOPIC, 'configurations': {'bridge_mappings': bridge_mappings, 'tunnel_types': self.tunnel_types, @@ -226,8 +230,8 @@ class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin, self.ovsdb_monitor_respawn_interval = ovsdb_monitor_respawn_interval self.local_ip = local_ip self.tunnel_count = 0 - self.vxlan_udp_port = cfg.CONF.AGENT.vxlan_udp_port - self.dont_fragment = cfg.CONF.AGENT.dont_fragment + self.vxlan_udp_port = 
self.conf.AGENT.vxlan_udp_port + self.dont_fragment = self.conf.AGENT.dont_fragment self.tun_br = None self.patch_int_ofport = constants.OFPORT_INVALID self.patch_tun_ofport = constants.OFPORT_INVALID @@ -247,11 +251,11 @@ class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin, self.phys_ofports, self.patch_int_ofport, self.patch_tun_ofport, - cfg.CONF.host, + self.conf.host, self.enable_tunneling, self.enable_distributed_routing) - report_interval = cfg.CONF.AGENT.report_interval + report_interval = self.conf.AGENT.report_interval if report_interval: heartbeat = loopingcall.FixedIntervalLoopingCall( self._report_state) @@ -317,7 +321,7 @@ class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin, local_vlan) def setup_rpc(self): - self.agent_id = 'ovs-agent-%s' % cfg.CONF.host + self.agent_id = 'ovs-agent-%s' % self.conf.host self.topic = topics.AGENT self.plugin_rpc = OVSPluginApi(topics.PLUGIN) self.sg_plugin_rpc = sg_rpc.SecurityGroupServerRpcApi(topics.PLUGIN) @@ -338,7 +342,7 @@ class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin, [topics.DVR, topics.UPDATE]] if self.l2_pop: consumers.append([topics.L2POPULATION, - topics.UPDATE, cfg.CONF.host]) + topics.UPDATE, self.conf.host]) self.connection = agent_rpc.create_consumers(self.endpoints, self.topic, consumers, @@ -757,11 +761,11 @@ class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin, if port_detail.get('admin_state_up'): LOG.debug("Setting status for %s to UP", device) self.plugin_rpc.update_device_up( - self.context, device, self.agent_id, cfg.CONF.host) + self.context, device, self.agent_id, self.conf.host) else: LOG.debug("Setting status for %s to DOWN", device) self.plugin_rpc.update_device_down( - self.context, device, self.agent_id, cfg.CONF.host) + self.context, device, self.agent_id, self.conf.host) LOG.info(_LI("Configuration for device %s completed."), device) @staticmethod @@ -835,9 +839,9 @@ class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin, # 
which does nothing if bridge already exists. self.int_br.create() self.int_br.set_secure_mode() - self.int_br.setup_controllers(cfg.CONF) + self.int_br.setup_controllers(self.conf) - self.int_br.delete_port(cfg.CONF.OVS.int_peer_patch_port) + self.int_br.delete_port(self.conf.OVS.int_peer_patch_port) self.int_br.setup_default_table() @@ -879,11 +883,13 @@ class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin, self.tun_br = self.br_tun_cls(tun_br_name) self.tun_br.reset_bridge(secure_mode=True) - self.tun_br.setup_controllers(cfg.CONF) + self.tun_br.setup_controllers(self.conf) self.patch_tun_ofport = self.int_br.add_patch_port( - cfg.CONF.OVS.int_peer_patch_port, cfg.CONF.OVS.tun_peer_patch_port) + self.conf.OVS.int_peer_patch_port, + self.conf.OVS.tun_peer_patch_port) self.patch_int_ofport = self.tun_br.add_patch_port( - cfg.CONF.OVS.tun_peer_patch_port, cfg.CONF.OVS.int_peer_patch_port) + self.conf.OVS.tun_peer_patch_port, + self.conf.OVS.int_peer_patch_port) if ovs_lib.INVALID_OFPORT in (self.patch_tun_ofport, self.patch_int_ofport): LOG.error(_LE("Failed to create OVS patch port. 
Cannot have " @@ -952,7 +958,7 @@ class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin, 'bridge': bridge}) sys.exit(1) br = self.br_phys_cls(bridge) - br.setup_controllers(cfg.CONF) + br.setup_controllers(self.conf) br.setup_default_table() self.phys_brs[physical_network] = br @@ -1192,7 +1198,7 @@ class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin, self.context, devices, self.agent_id, - cfg.CONF.host) + self.conf.host) except Exception as e: raise DeviceListRetrievalError(devices=devices, error=e) for details in devices_details_list: @@ -1236,7 +1242,7 @@ class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin, self.context, devices, self.agent_id, - cfg.CONF.host) + self.conf.host) except Exception as e: raise DeviceListRetrievalError(devices=devices, error=e) @@ -1248,7 +1254,7 @@ class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin, self.plugin_rpc.update_device_up(self.context, device, self.agent_id, - cfg.CONF.host) + self.conf.host) def treat_devices_removed(self, devices): resync = False @@ -1259,7 +1265,7 @@ class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin, self.plugin_rpc.update_device_down(self.context, device, self.agent_id, - cfg.CONF.host) + self.conf.host) except Exception as e: LOG.debug("port_removed failed for %(device)s: %(e)s", {'device': device, 'e': e}) @@ -1276,7 +1282,7 @@ class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin, details = self.plugin_rpc.update_device_down(self.context, device, self.agent_id, - cfg.CONF.host) + self.conf.host) except Exception as e: LOG.debug("port_removed failed for %(device)s: %(e)s", {'device': device, 'e': e}) @@ -1394,7 +1400,7 @@ class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin, details = self.plugin_rpc.tunnel_sync(self.context, self.local_ip, tunnel_type, - cfg.CONF.host) + self.conf.host) if not self.l2_pop: tunnels = details['tunnels'] for tunnel in tunnels: @@ -1608,10 +1614,10 @@ class 
OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin, def _handle_sighup(self, signum, frame): LOG.info(_LI("Agent caught SIGHUP, resetting.")) - cfg.CONF.reload_config_files() + self.conf.reload_config_files() config.setup_logging() LOG.debug('Full set of CONF:') - cfg.CONF.log_opt_values(LOG, std_logging.DEBUG) + self.conf.log_opt_values(LOG, std_logging.DEBUG) def set_rpc_timeout(self, timeout): for rpc_api in (self.plugin_rpc, self.sg_plugin_rpc, From ba2c44ef000221f8a18274a9569838d8c26014c0 Mon Sep 17 00:00:00 2001 From: armando-migliaccio Date: Fri, 12 Jun 2015 08:58:05 -0700 Subject: [PATCH 180/292] Revert "Revert "Set default of api_workers to number of CPUs"" This reverts commit 12a564cf03e612dda36df26df8d28dfc75f1af6e. We should re-enable this feature on a controlled basis so that we can flush out any outstanding issue we may have. Related-bug: #1432189 Change-Id: I2cfd93fdb032b461022b729347390ff8636ccdeb --- etc/neutron.conf | 8 +++--- neutron/service.py | 15 +++++++++--- neutron/tests/functional/test_service.py | 31 ++++++++++++++++++++++++ 3 files changed, 48 insertions(+), 6 deletions(-) create mode 100644 neutron/tests/functional/test_service.py diff --git a/etc/neutron.conf b/etc/neutron.conf index 7a11b939fb5..f5a6da62767 100755 --- a/etc/neutron.conf +++ b/etc/neutron.conf @@ -287,10 +287,12 @@ # ========== end of items for VLAN trunking networks ========== # =========== WSGI parameters related to the API server ============== -# Number of separate worker processes to spawn. The default, 0, runs the +# Number of separate worker processes to spawn. A value of 0 runs the # worker thread in the current process. Greater than 0 launches that number of -# child processes as workers. The parent process manages them. -# api_workers = 0 +# child processes as workers. The parent process manages them. If not +# specified, the default value is equal to the number of CPUs available to +# achieve best performance. 
+# api_workers = # Number of separate RPC worker processes to spawn. The default, 0, runs the # worker thread in the current process. Greater than 0 launches that number of diff --git a/neutron/service.py b/neutron/service.py index e27dd5cdc2f..76b0fd90d2e 100644 --- a/neutron/service.py +++ b/neutron/service.py @@ -18,6 +18,7 @@ import logging as std_logging import os import random +from oslo_concurrency import processutils from oslo_config import cfg from oslo_log import log as logging from oslo_messaging import server as rpc_server @@ -40,8 +41,9 @@ service_opts = [ default=40, help=_('Seconds between running periodic tasks')), cfg.IntOpt('api_workers', - default=0, - help=_('Number of separate API worker processes for service')), + help=_('Number of separate API worker processes for service. ' + 'If not specified, the default is equal to the number ' + 'of CPUs available for best performance.')), cfg.IntOpt('rpc_workers', default=0, help=_('Number of RPC worker processes for service')), @@ -168,6 +170,13 @@ def serve_rpc(): 'details.')) +def _get_api_workers(): + workers = cfg.CONF.api_workers + if workers is None: + workers = processutils.get_worker_count() + return workers + + def _run_wsgi(app_name): app = config.load_paste_app(app_name) if not app: @@ -175,7 +184,7 @@ def _run_wsgi(app_name): return server = wsgi.Server("Neutron") server.start(app, cfg.CONF.bind_port, cfg.CONF.bind_host, - workers=cfg.CONF.api_workers) + workers=_get_api_workers()) # Dump all option values here after all options are parsed cfg.CONF.log_opt_values(LOG, std_logging.DEBUG) LOG.info(_LI("Neutron service started, listening on %(host)s:%(port)s"), diff --git a/neutron/tests/functional/test_service.py b/neutron/tests/functional/test_service.py new file mode 100644 index 00000000000..d3bed3ee048 --- /dev/null +++ b/neutron/tests/functional/test_service.py @@ -0,0 +1,31 @@ +# Copyright 2014 Red Hat, Inc. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo_concurrency import processutils +from oslo_config import cfg + +from neutron import service +from neutron.tests import base + + +class TestService(base.BaseTestCase): + + def test_api_workers_default(self): + self.assertEqual(processutils.get_worker_count(), + service._get_api_workers()) + + def test_api_workers_from_config(self): + cfg.CONF.set_override('api_workers', 1234) + self.assertEqual(1234, + service._get_api_workers()) From 901e6ae6fb05d65ccfc4a6602de4160c3a34031e Mon Sep 17 00:00:00 2001 From: Akihiro Motoki Date: Sat, 13 Jun 2015 02:14:17 +0900 Subject: [PATCH 181/292] Remove meaningless no_delete from L3 test no_delete parameter was removed in Kilo and it no longer has any effect. 
Change-Id: Idf0f3ac24b3978392222efbf465cc9e6cfd5d346 --- neutron/tests/unit/extensions/test_l3.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/neutron/tests/unit/extensions/test_l3.py b/neutron/tests/unit/extensions/test_l3.py index fd6fff85bb4..52503d7d8de 100644 --- a/neutron/tests/unit/extensions/test_l3.py +++ b/neutron/tests/unit/extensions/test_l3.py @@ -2213,8 +2213,8 @@ class L3NatTestCaseBase(L3NatTestCaseMixin): self._set_net_external(network_ex_id2) r2i_fixed_ips = [{'ip_address': '12.0.0.2'}] - with self.router(no_delete=True) as r1,\ - self.router(no_delete=True) as r2,\ + with self.router() as r1,\ + self.router() as r2,\ self.port(subnet=ins1, fixed_ips=r2i_fixed_ips) as r2i_port: self._add_external_gateway_to_router( From a5bf502fab57453a1aedd3a53ce89eaf464e1cd9 Mon Sep 17 00:00:00 2001 From: Cedric Brandily Date: Fri, 12 Jun 2015 21:11:02 +0200 Subject: [PATCH 182/292] Remove duplicate tunnel id check in sync_allocations Currently, gre/vxlan sync_allocations and _parse_tunnel_ranges both check tunnel id values. This change removes the check in gre/vxlan sync_allocations as they duplicate the _parse_tunnel_ranges check and are less precise. 
Change-Id: I5827468aeaec5d6c79d469132b129aeb7da171e2 --- neutron/plugins/ml2/drivers/type_gre.py | 7 +------ neutron/plugins/ml2/drivers/type_vxlan.py | 7 +------ 2 files changed, 2 insertions(+), 12 deletions(-) diff --git a/neutron/plugins/ml2/drivers/type_gre.py b/neutron/plugins/ml2/drivers/type_gre.py index 18d7040f79a..5db7074c73c 100644 --- a/neutron/plugins/ml2/drivers/type_gre.py +++ b/neutron/plugins/ml2/drivers/type_gre.py @@ -89,12 +89,7 @@ class GreTypeDriver(type_tunnel.EndpointTunnelTypeDriver): gre_ids = set() for gre_id_range in self.tunnel_ranges: tun_min, tun_max = gre_id_range - if tun_max + 1 - tun_min > 1000000: - LOG.error(_LE("Skipping unreasonable gre ID range " - "%(tun_min)s:%(tun_max)s"), - {'tun_min': tun_min, 'tun_max': tun_max}) - else: - gre_ids |= set(moves.range(tun_min, tun_max + 1)) + gre_ids |= set(moves.range(tun_min, tun_max + 1)) session = db_api.get_session() try: diff --git a/neutron/plugins/ml2/drivers/type_vxlan.py b/neutron/plugins/ml2/drivers/type_vxlan.py index b8cdb003c33..52e5f7eaee7 100644 --- a/neutron/plugins/ml2/drivers/type_vxlan.py +++ b/neutron/plugins/ml2/drivers/type_vxlan.py @@ -91,12 +91,7 @@ class VxlanTypeDriver(type_tunnel.EndpointTunnelTypeDriver): # determine current configured allocatable vnis vxlan_vnis = set() for tun_min, tun_max in self.tunnel_ranges: - if tun_max + 1 - tun_min > p_const.MAX_VXLAN_VNI: - LOG.error(_LE("Skipping unreasonable VXLAN VNI range " - "%(tun_min)s:%(tun_max)s"), - {'tun_min': tun_min, 'tun_max': tun_max}) - else: - vxlan_vnis |= set(moves.range(tun_min, tun_max + 1)) + vxlan_vnis |= set(moves.range(tun_min, tun_max + 1)) session = db_api.get_session() with session.begin(subtransactions=True): From 9952abaab182f3ec701aad2397d6f3fcc0bacc7f Mon Sep 17 00:00:00 2001 From: Pavel Bondar Date: Mon, 8 Jun 2015 14:15:30 +0300 Subject: [PATCH 183/292] Decompose db_base_plugin_v2.py with changes This commit is a preparation step for using pluggable IPAM. 1. 
Moved get_subnets functionality to db_base_plugin_common to make it accessible by ipam backends. 2. Reworked update_subnet routine: - moved db part into update_db_subnet; Partially-Implements: blueprint neutron-ipam Change-Id: Idb8f54d9fccaad1137222d156590c37d86aa576b --- neutron/db/db_base_plugin_common.py | 12 ++++++ neutron/db/db_base_plugin_v2.py | 37 ++----------------- neutron/db/ipam_backend_mixin.py | 26 +++++++++++-- neutron/db/ipam_non_pluggable_backend.py | 4 +- .../tests/unit/db/test_db_base_plugin_v2.py | 2 +- 5 files changed, 42 insertions(+), 39 deletions(-) diff --git a/neutron/db/db_base_plugin_common.py b/neutron/db/db_base_plugin_common.py index d6a136c1db3..c69c543e9f8 100644 --- a/neutron/db/db_base_plugin_common.py +++ b/neutron/db/db_base_plugin_common.py @@ -203,6 +203,18 @@ class DbBasePluginCommon(common_db_mixin.CommonDbMixin): # a lot of stress on the db. Consider adding a cache layer return context.session.query(models_v2.Subnet).all() + def _get_subnets(self, context, filters=None, fields=None, + sorts=None, limit=None, marker=None, + page_reverse=False): + marker_obj = self._get_marker_obj(context, 'subnet', limit, marker) + return self._get_collection(context, models_v2.Subnet, + self._make_subnet_dict, + filters=filters, fields=fields, + sorts=sorts, + limit=limit, + marker_obj=marker_obj, + page_reverse=page_reverse) + def _make_network_dict(self, network, fields=None, process_extensions=True): res = {'id': network['id'], diff --git a/neutron/db/db_base_plugin_v2.py b/neutron/db/db_base_plugin_v2.py index 8dcd27554a4..0ff7e653d29 100644 --- a/neutron/db/db_base_plugin_v2.py +++ b/neutron/db/db_base_plugin_v2.py @@ -788,9 +788,6 @@ class NeutronDbPluginV2(ipam_non_pluggable_backend.IpamNonPluggableBackend, dns lease or we support gratuitous DHCP offers """ s = subnet['subnet'] - changed_host_routes = False - changed_dns = False - changed_allocation_pools = False db_subnet = self._get_subnet(context, id) # Fill 'ip_version' and 
'allocation_pools' fields with the current # value since _validate_subnet() expects subnet spec has 'ip_version' @@ -806,30 +803,10 @@ class NeutronDbPluginV2(ipam_non_pluggable_backend.IpamNonPluggableBackend, self._validate_gw_out_of_pools(s["gateway_ip"], allocation_pools) with context.session.begin(subtransactions=True): - if "dns_nameservers" in s: - changed_dns = True - new_dns = self._update_subnet_dns_nameservers(context, id, s) - - if "host_routes" in s: - changed_host_routes = True - new_routes = self._update_subnet_host_routes(context, id, s) - - if "allocation_pools" in s: - self._validate_allocation_pools(s['allocation_pools'], - s['cidr']) - changed_allocation_pools = True - new_pools = self._update_subnet_allocation_pools(context, - id, s) - subnet = self._get_subnet(context, id) - subnet.update(s) + subnet, changes = self._update_db_subnet(context, id, s) result = self._make_subnet_dict(subnet) # Keep up with fields that changed - if changed_dns: - result['dns_nameservers'] = new_dns - if changed_host_routes: - result['host_routes'] = new_routes - if changed_allocation_pools: - result['allocation_pools'] = new_pools + result.update(changes) return result def _subnet_check_ip_allocations(self, context, subnet_id): @@ -912,14 +889,8 @@ class NeutronDbPluginV2(ipam_non_pluggable_backend.IpamNonPluggableBackend, def get_subnets(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): - marker_obj = self._get_marker_obj(context, 'subnet', limit, marker) - return self._get_collection(context, models_v2.Subnet, - self._make_subnet_dict, - filters=filters, fields=fields, - sorts=sorts, - limit=limit, - marker_obj=marker_obj, - page_reverse=page_reverse) + return self._get_subnets(context, filters, fields, sorts, limit, + marker, page_reverse) def get_subnets_count(self, context, filters=None): return self._get_collection_count(context, models_v2.Subnet, diff --git a/neutron/db/ipam_backend_mixin.py 
b/neutron/db/ipam_backend_mixin.py index 74853bd619e..f7b231d12cd 100644 --- a/neutron/db/ipam_backend_mixin.py +++ b/neutron/db/ipam_backend_mixin.py @@ -105,12 +105,12 @@ class IpamBackendMixin(db_base_plugin_common.DbBasePluginCommon): del s["dns_nameservers"] return new_dns - def _update_subnet_allocation_pools(self, context, id, s): + def _update_subnet_allocation_pools(self, context, subnet_id, s): context.session.query(models_v2.IPAllocationPool).filter_by( - subnet_id=id).delete() + subnet_id=subnet_id).delete() new_pools = [models_v2.IPAllocationPool(first_ip=p['start'], last_ip=p['end'], - subnet_id=id) + subnet_id=subnet_id) for p in s['allocation_pools']] context.session.add_all(new_pools) # Call static method with self to redefine in child @@ -123,6 +123,26 @@ class IpamBackendMixin(db_base_plugin_common.DbBasePluginCommon): del s['allocation_pools'] return result_pools + def _update_db_subnet(self, context, subnet_id, s): + changes = {} + if "dns_nameservers" in s: + changes['dns_nameservers'] = ( + self._update_subnet_dns_nameservers(context, subnet_id, s)) + + if "host_routes" in s: + changes['host_routes'] = self._update_subnet_host_routes( + context, subnet_id, s) + + if "allocation_pools" in s: + self._validate_allocation_pools(s['allocation_pools'], + s['cidr']) + changes['allocation_pools'] = ( + self._update_subnet_allocation_pools(context, subnet_id, s)) + + subnet = self._get_subnet(context, subnet_id) + subnet.update(s) + return subnet, changes + def _validate_allocation_pools(self, ip_pools, subnet_cidr): """Validate IP allocation pools. 
diff --git a/neutron/db/ipam_non_pluggable_backend.py b/neutron/db/ipam_non_pluggable_backend.py index bb929975a6b..c1fb4bc9631 100644 --- a/neutron/db/ipam_non_pluggable_backend.py +++ b/neutron/db/ipam_non_pluggable_backend.py @@ -221,7 +221,7 @@ class IpamNonPluggableBackend(ipam_backend_mixin.IpamBackendMixin): raise n_exc.InvalidInput(error_message=msg) filter = {'network_id': [network_id]} - subnets = self.get_subnets(context, filters=filter) + subnets = self._get_subnets(context, filters=filter) for subnet in subnets: if ipam_utils.check_subnet_ip(subnet['cidr'], fixed['ip_address']): @@ -352,7 +352,7 @@ class IpamNonPluggableBackend(ipam_backend_mixin.IpamBackendMixin): ips = [] v6_stateless = [] net_id_filter = {'network_id': [p['network_id']]} - subnets = self.get_subnets(context, filters=net_id_filter) + subnets = self._get_subnets(context, filters=net_id_filter) is_router_port = ( p['device_owner'] in constants.ROUTER_INTERFACE_OWNERS or p['device_owner'] == constants.DEVICE_OWNER_ROUTER_SNAT) diff --git a/neutron/tests/unit/db/test_db_base_plugin_v2.py b/neutron/tests/unit/db/test_db_base_plugin_v2.py index a73753ce182..ba99aa62487 100644 --- a/neutron/tests/unit/db/test_db_base_plugin_v2.py +++ b/neutron/tests/unit/db/test_db_base_plugin_v2.py @@ -5422,7 +5422,7 @@ class TestNeutronDbPluginV2(base.BaseTestCase): def _test__allocate_ips_for_port(self, subnets, port, expected): plugin = db_base_plugin_v2.NeutronDbPluginV2() with mock.patch.object(db_base_plugin_v2.NeutronDbPluginV2, - 'get_subnets') as get_subnets: + '_get_subnets') as get_subnets: with mock.patch.object(db_base_plugin_v2.NeutronDbPluginV2, '_check_unique_ip') as check_unique: context = mock.Mock() From 1710f7c72f2c509d1009ee36ba4f66b298967fe9 Mon Sep 17 00:00:00 2001 From: Kobi Samoray Date: Thu, 4 Jun 2015 15:49:13 +0300 Subject: [PATCH 184/292] VMWare NSXv: Add distributed URL locking to ini NSXv plugin supports distributed locking using tooz library. 
This patch adds the required parameter to the ini file. DocImpact Depends-On: Icbcec938c1c5ae7a528350f2f283388b81fa66b7 Change-Id: I8a7c36d044c4be29b0dfa3fbb8e9379723cebd61 --- etc/neutron/plugins/vmware/nsx.ini | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/etc/neutron/plugins/vmware/nsx.ini b/etc/neutron/plugins/vmware/nsx.ini index 64c08f31cd5..2eef2254eca 100644 --- a/etc/neutron/plugins/vmware/nsx.ini +++ b/etc/neutron/plugins/vmware/nsx.ini @@ -150,6 +150,12 @@ # (Optional) Password to configure for Edge appliance login # edge_appliance_password = +# (Optional) URL for distributed locking coordination resource for lock manager +# This value is passed as a parameter to tooz coordinator. +# By default, value is None and oslo_concurrency is used for single-node +# lock management. +# locking_coordinator_url = + [nsx] # Maximum number of ports for each bridged logical switch # The recommended value for this parameter varies with NSX version From bb846c89ee120662eabdd4b0136fac82de076777 Mon Sep 17 00:00:00 2001 From: Russell Bryant Date: Fri, 12 Jun 2015 21:26:37 -0400 Subject: [PATCH 185/292] Reflect project moves from stackforge to openstack. Several git repos were just moved from stackforge to openstack. Reflect the move in various places where the URL was in docs and comments. In passing, also change URLs to git.openstack.org instead of github, as that is the official home of all of these repos. 
Change-Id: I6c79a192d6604cef01e88d5b305fcc2b0f9c6b30 Co-Authored-By: Kyle Mestery Signed-off-by: Russell Bryant Signed-off-by: Kyle Mestery --- doc/source/devref/contribute.rst | 20 ++++---- doc/source/devref/security_group_api.rst | 14 +++--- doc/source/devref/sub_projects.rst | 50 +++++++++---------- .../plugins/ml2/drivers/opendaylight/README | 2 +- neutron/plugins/vmware/README | 4 +- .../plugins/vmware/extensions/networkgw.py | 2 +- 6 files changed, 46 insertions(+), 46 deletions(-) diff --git a/doc/source/devref/contribute.rst b/doc/source/devref/contribute.rst index 8f90e4d757d..b24366685f6 100644 --- a/doc/source/devref/contribute.rst +++ b/doc/source/devref/contribute.rst @@ -129,7 +129,7 @@ The testing process will be as follow: the vendor to choose what CI system they see fit to run them. There is no need or requirement to use OpenStack CI resources if they do not want to. Having said that, it may be useful to provide coverage for the shim layer in - the form of basic validation as done in `ODL `_ and `LBaaS A10 driver `_. + the form of basic validation as done in `ODL `_ and `LBaaS A10 driver `_. * 3rd Party CI will continue to validate vendor integration with Neutron via functional testing. 3rd Party CI is a communication mechanism. This objective @@ -227,7 +227,7 @@ library, and it leads to the greatest level of flexibility when dealing with Dev dev/test deployments. Having said that, most Neutron plugins developed in the past likely already have -integration with DevStack in the form of `neutron_plugins `_. +integration with DevStack in the form of `neutron_plugins `_. If the plugin is being decomposed in vendor integration plus vendor library, it would be necessary to adjust the instructions provided in the neutron_plugin file to pull the vendor library code as a new dependency. For instance, the instructions below: @@ -247,7 +247,7 @@ vendor library code as a new dependency. 
For instance, the instructions below: could be placed in 'neutron_plugin_configure_service', ahead of the service configuration. An alternative could be under the `third_party section -`_, +`_, if available. This solution can be similarly exploited for both monolithic plugins or ML2 mechanism drivers. The configuration of the plugin or driver itself can be done by leveraging the extensibility mechanisms provided by `local.conf `_. In fact, since the .ini file for the vendor plugin or driver lives @@ -270,9 +270,9 @@ is strongly encouraged to revise the existing DevStack integration, in order to in an extras.d hooks based approach. One final consideration is worth making for 3rd party CI setups: if `Devstack Gate -`_ is used, it does provide hook +`_ is used, it does provide hook functions that can be executed at specific times of the devstack-gate-wrap script run. -For example, the `Neutron Functional job `_ uses them. For more details see `devstack-vm-gate-wrap.sh `_. +For example, the `Neutron Functional job `_ uses them. For more details see `devstack-vm-gate-wrap.sh `_. Documentation Strategies ------------------------ @@ -291,12 +291,12 @@ The list of steps below are somewhat the tl;dr; version of what you can find on http://docs.openstack.org/infra/manual/creators.html. They are meant to be the bare minimum you have to complete in order to get you off the ground. -* Create a public repository: this can be a personal github.com repo or any - publicly available git repo, e.g. https://github.com/john-doe/foo.git. This +* Create a public repository: this can be a personal git.openstack.org repo or any + publicly available git repo, e.g. ``https://github.com/john-doe/foo.git``. This would be a temporary buffer to be used to feed the StackForge one. * Initialize the repository: if you are starting afresh, you may *optionally* want to use cookiecutter to get a skeleton project. You can learn how to use - cookiecutter on https://github.com/openstack-dev/cookiecutter. 
+ cookiecutter on https://git.openstack.org/cgit/openstack-dev/cookiecutter. If you want to build the repository from an existing Neutron module, you may want to skip this step now, build the history first (next step), and come back here to initialize the remainder of the repository with other files being @@ -312,7 +312,7 @@ be the bare minimum you have to complete in order to get you off the ground. :: - git clone https://github.com/openstack/neutron.git + git clone https://git.openstack.org/openstack/neutron.git cd neutron ./tools/split.sh # Sit and wait for a while, or grab a cup of your favorite drink @@ -357,7 +357,7 @@ be the bare minimum you have to complete in order to get you off the ground. jobs that validate your patches when posted to Gerrit. For instance, one thing you would need to do is to define an entry point for your plugin or driver in your own setup.cfg similarly as to how it is done - `here `_. + `here `_. * Define an entry point for your plugin or driver in setup.cfg * Create 3rd Party CI account: if you do not already have one, follow instructions for diff --git a/doc/source/devref/security_group_api.rst b/doc/source/devref/security_group_api.rst index c1e87d3d0e2..750c744f362 100644 --- a/doc/source/devref/security_group_api.rst +++ b/doc/source/devref/security_group_api.rst @@ -10,7 +10,7 @@ API Extension The API extension is the 'front' end portion of the code, which handles defining a `REST-ful API`_, which is used by tenants. -.. _`REST-ful API`: https://github.com/openstack/neutron/blob/master/neutron/extensions/securitygroup.py +.. _`REST-ful API`: https://git.openstack.org/cgit/openstack/neutron/tree/neutron/extensions/securitygroup.py Database API @@ -18,7 +18,7 @@ Database API The Security Group API extension adds a number of `methods to the database layer`_ of Neutron -.. _`methods to the database layer`: https://github.com/openstack/neutron/blob/master/neutron/db/securitygroups_db.py +.. 
_`methods to the database layer`: https://git.openstack.org/cgit/openstack/neutron/tree/neutron/db/securitygroups_db.py Agent RPC --------- @@ -27,12 +27,12 @@ This portion of the code handles processing requests from tenants, after they ha running on the compute nodes, and modifying the IPTables rules on each hypervisor. -* `Plugin RPC classes `_ +* `Plugin RPC classes `_ - * `SecurityGroupServerRpcMixin `_ - defines the RPC API that the plugin uses to communicate with the agents running on the compute nodes + * `SecurityGroupServerRpcMixin `_ - defines the RPC API that the plugin uses to communicate with the agents running on the compute nodes * SecurityGroupServerRpcMixin - Defines the API methods used to fetch data from the database, in order to return responses to agents via the RPC API -* `Agent RPC classes `_ +* `Agent RPC classes `_ * The SecurityGroupServerRpcApi defines the API methods that can be called by agents, back to the plugin that runs on the Neutron controller * The SecurityGroupAgentRpcCallbackMixin defines methods that a plugin uses to call back to an agent after performing an action called by an agent. @@ -43,8 +43,8 @@ IPTables Driver * ``prepare_port_filter`` takes a ``port`` argument, which is a ``dictionary`` object that contains information about the port - including the ``security_group_rules`` -* ``prepare_port_filter`` `appends the port to an internal dictionary `_, ``filtered_ports`` which is used to track the internal state. +* ``prepare_port_filter`` `appends the port to an internal dictionary `_, ``filtered_ports`` which is used to track the internal state. * Each security group has a `chain `_ in Iptables. 
-* The ``IptablesFirewallDriver`` has a method to `convert security group rules into iptables statements `_ +* The ``IptablesFirewallDriver`` has a method to `convert security group rules into iptables statements `_ diff --git a/doc/source/devref/sub_projects.rst b/doc/source/devref/sub_projects.rst index 3a32d7a2614..768bc23f8e1 100644 --- a/doc/source/devref/sub_projects.rst +++ b/doc/source/devref/sub_projects.rst @@ -150,7 +150,7 @@ Functionality legend Arista ------ -* Git: https://github.com/stackforge/networking-arista +* Git: https://git.openstack.org/cgit/stackforge/networking-arista * Launchpad: https://launchpad.net/networking-arista * Pypi: https://pypi.python.org/pypi/networking-arista @@ -159,7 +159,7 @@ Arista BaGPipe ------- -* Git: https://github.com/stackforge/networking-bagpipe-l2 +* Git: https://git.openstack.org/cgit/stackforge/networking-bagpipe-l2 * Launchpad: https://launchpad.net/bagpipe-l2 * Pypi: https://pypi.python.org/pypi/bagpipe-l2 @@ -168,14 +168,14 @@ BaGPipe BGPVPN ------- -* Git: https://github.com/stackforge/networking-bgpvpn +* Git: https://git.openstack.org/cgit/openstack/networking-bgpvpn .. _networking-bigswitch: Big Switch Networks ------------------- -* Git: https://git.openstack.org/stackforge/networking-bigswitch +* Git: https://git.openstack.org/cgit/stackforge/networking-bigswitch * Pypi: https://pypi.python.org/pypi/bsnstacklib .. 
_networking-brocade: @@ -183,7 +183,7 @@ Big Switch Networks Brocade ------- -* Git: https://github.com/stackforge/networking-brocade +* Git: https://git.openstack.org/cgit/stackforge/networking-brocade * Launchpad: https://launchpad.net/networking-brocade * PyPI: https://pypi.python.org/pypi/networking-brocade @@ -192,7 +192,7 @@ Brocade Cisco ----- -* Git: https://github.com/stackforge/networking-cisco +* Git: https://git.openstack.org/cgit/stackforge/networking-cisco * Launchpad: https://launchpad.net/networking-cisco * PyPI: https://pypi.python.org/pypi/networking-cisco @@ -201,7 +201,7 @@ Cisco DragonFlow ---------- -* Git: https://github.com/stackforge/dragonflow +* Git: https://git.openstack.org/cgit/openstack/dragonflow * Launchpad: https://launchpad.net/dragonflow * PyPi: https://pypi.python.org/pypi/DragonFlow @@ -210,7 +210,7 @@ DragonFlow Edge VPN -------- -* Git: https://git.openstack.org/stackforge/networking-edge-vpn +* Git: https://git.openstack.org/cgit/stackforge/networking-edge-vpn * Launchpad: https://launchpad.net/edge-vpn .. _networking-hyperv: @@ -218,7 +218,7 @@ Edge VPN Hyper-V ------- -* Git: https://github.com/stackforge/networking-hyperv +* Git: https://git.openstack.org/cgit/stackforge/networking-hyperv * Launchpad: https://launchpad.net/networking-hyperv * PyPi: https://pypi.python.org/pypi/networking-hyperv @@ -227,7 +227,7 @@ Hyper-V Group Based Policy ------------------ -* Git: https://github.com/stackforge/group-based-policy +* Git: https://git.openstack.org/cgit/stackforge/group-based-policy * Launchpad: https://launchpad.net/group-based-policy * PyPi: https://pypi.python.org/pypi/group-based-policy @@ -236,7 +236,7 @@ Group Based Policy IBM SDNVE --------- -* Git: https://github.com/stackforge/networking-ibm +* Git: https://git.openstack.org/cgit/stackforge/networking-ibm * Launchpad: https://launchpad.net/networking-ibm .. 
_networking-l2gw: @@ -244,7 +244,7 @@ IBM SDNVE L2 Gateway ---------- -* Git: https://github.com/stackforge/networking-l2gw +* Git: https://git.openstack.org/cgit/openstack/networking-l2gw * Launchpad: https://launchpad.net/networking-l2gw .. _networking-metaplugin: @@ -259,7 +259,7 @@ Metaplugin MidoNet ------- -* Git: https://github.com/stackforge/networking-midonet +* Git: https://git.openstack.org/cgit/openstack/networking-midonet * Launchpad: https://launchpad.net/networking-midonet * PyPI: https://pypi.python.org/pypi/networking-midonet @@ -268,7 +268,7 @@ MidoNet Mellanox -------- -* Git: https://github.com/stackforge/networking-mlnx +* Git: https://git.openstack.org/cgit/stackforge/networking-mlnx * Launchpad: https://launchpad.net/networking-mlnx .. _networking-nec: @@ -276,7 +276,7 @@ Mellanox NEC --- -* Git: https://github.com/stackforge/networking-nec +* Git: https://git.openstack.org/cgit/stackforge/networking-nec * Launchpad: https://launchpad.net/networking-nec * PyPI: https://pypi.python.org/pypi/networking-nec @@ -292,7 +292,7 @@ Nuage OpenDayLight ------------ -* Git: https://github.com/stackforge/networking-odl +* Git: https://git.openstack.org/cgit/openstack/networking-odl * Launchpad: https://launchpad.net/networking-odl .. 
_networking-ofagent: @@ -300,7 +300,7 @@ OpenDayLight OpenFlow Agent (ofagent) ------------------------ -* Git: https://github.com/stackforge/networking-ofagent +* Git: https://git.openstack.org/cgit/openstack/networking-ofagent * Launchpad: https://launchpad.net/networking-ofagent * PyPI: https://pypi.python.org/pypi/networking-ofagent @@ -309,7 +309,7 @@ OpenFlow Agent (ofagent) Open Virtual Network -------------------- -* Git: https://github.com/stackforge/networking-ovn +* Git: https://git.openstack.org/cgit/openstack/networking-ovn * Launchpad: https://launchpad.net/networking-ovn * PyPI: https://pypi.python.org/pypi/networking-ovn @@ -318,7 +318,7 @@ Open Virtual Network Open DPDK --------- -* Git: https://github.com/stackforge/networking-ovs-dpdk +* Git: https://git.openstack.org/cgit/stackforge/networking-ovs-dpdk * Launchpad: https://launchpad.net/networking-ovs-dpdk .. _networking-plumgrid: @@ -326,7 +326,7 @@ Open DPDK PLUMgrid -------- -* Git: https://github.com/stackforge/networking-plumgrid +* Git: https://git.openstack.org/cgit/stackforge/networking-plumgrid * Launchpad: https://launchpad.net/networking-plumgrid * PyPI: https://pypi.python.org/pypi/networking-plumgrid @@ -335,7 +335,7 @@ PLUMgrid PowerVM ------- -* Git: https://github.com/stackforge/neutron-powervm +* Git: https://git.openstack.org/cgit/stackforge/neutron-powervm * Launchpad: https://launchpad.net/neutron-powervm * PyPI: https://pypi.python.org/pypi/neutron-powervm @@ -344,7 +344,7 @@ PowerVM PortForwarding -------------- -* Git: https://github.com/stackforge/networking-portforwarding +* Git: https://git.openstack.org/cgit/stackforge/networking-portforwarding * Launchpad: https://launchpad.net/networking-portforwarding .. _networking-vsphere: @@ -352,7 +352,7 @@ PortForwarding vSphere ------- -* Git: https://github.com/stackforge/networking-vsphere +* Git: https://git.openstack.org/cgit/stackforge/networking-vsphere * Launchpad: https://launchpad.net/networking-vsphere .. 
_vmware-nsx: @@ -360,7 +360,7 @@ vSphere VMware NSX ---------- -* Git: https://github.com/stackforge/vmware-nsx +* Git: https://git.openstack.org/cgit/openstack/vmware-nsx * Launchpad: https://launchpad.net/vmware-nsx * PyPI: https://pypi.python.org/pypi/vmware-nsx @@ -369,5 +369,5 @@ VMware NSX Octavia ------- -* Git: https://github.com/stackforge/octavia +* Git: https://git.openstack.org/cgit/openstack/octavia * Launchpad: https://launchpad.net/octavia diff --git a/neutron/plugins/ml2/drivers/opendaylight/README b/neutron/plugins/ml2/drivers/opendaylight/README index 1022db435c4..37e82d8d57d 100644 --- a/neutron/plugins/ml2/drivers/opendaylight/README +++ b/neutron/plugins/ml2/drivers/opendaylight/README @@ -9,7 +9,7 @@ contains more information on the capabilities OpenDaylight provides: The networking-odl project provides a thin layer sitting between this driver and OpenDaylight. The code can be downloaded from: - https://git.openstack.org/cgit/stackforge/networking-odl + https://git.openstack.org/cgit/openstack/networking-odl Theory of operation =================== diff --git a/neutron/plugins/vmware/README b/neutron/plugins/vmware/README index c636ee42cef..fac935450a2 100644 --- a/neutron/plugins/vmware/README +++ b/neutron/plugins/vmware/README @@ -8,7 +8,7 @@ Neutron plugins for VMware NSX family products https://wiki.openstack.org/wiki/Neutron/VMware_NSX_plugins * Full plugin code available at: - * http://git.openstack.org/cgit/stackforge/vmware-nsx - * https://github.com/stackforge/vmware-nsx + * http://git.openstack.org/cgit/openstack/vmware-nsx + * https://github.com/openstack/vmware-nsx * Pypi location: https://pypi.python.org/pypi/vmware-nsx diff --git a/neutron/plugins/vmware/extensions/networkgw.py b/neutron/plugins/vmware/extensions/networkgw.py index 1e9bc2ace6f..a9c5c2a86d3 100644 --- a/neutron/plugins/vmware/extensions/networkgw.py +++ b/neutron/plugins/vmware/extensions/networkgw.py @@ -31,7 +31,7 @@ IFACE_NAME_ATTR = 'interface_name' # 
TODO(salv-orlando): This type definition is duplicated into -# stackforge/vmware-nsx. This temporary duplication should be removed once the +# openstack/vmware-nsx. This temporary duplication should be removed once the # plugin decomposition is finished. # Allowed network types for the NSX Plugin class NetworkTypes(object): From 1318437a0caf38e695a819848832a955fef7d909 Mon Sep 17 00:00:00 2001 From: Eugene Nikanorov Date: Fri, 5 Jun 2015 01:46:22 +0400 Subject: [PATCH 186/292] Skip rescheduling networks if no DHCP agents available This eliminates the problem of unscheduled networks in case of communication failure between agents and servers which can occur if messaging queue service fails. Change-Id: Ied4fa301fc3d475bee25c47f3a01c2381ae9a01e Closes-Bug: #1461714 --- neutron/db/agentschedulers_db.py | 11 ++++++++++- .../unit/scheduler/test_dhcp_agent_scheduler.py | 14 ++++++++++++-- 2 files changed, 22 insertions(+), 3 deletions(-) diff --git a/neutron/db/agentschedulers_db.py b/neutron/db/agentschedulers_db.py index 61eff9b07cb..b9d9c11dbe5 100644 --- a/neutron/db/agentschedulers_db.py +++ b/neutron/db/agentschedulers_db.py @@ -271,7 +271,16 @@ class DhcpAgentSchedulerDbMixin(dhcpagentscheduler dhcp_notifier = self.agent_notifiers.get(constants.AGENT_TYPE_DHCP) try: - for binding in self._filter_bindings(context, down_bindings): + dead_bindings = [b for b in + self._filter_bindings(context, down_bindings)] + dead_agents = set([b.dhcp_agent_id for b in dead_bindings]) + agents = self.get_agents_db( + context, {'agent_type': [constants.AGENT_TYPE_DHCP]}) + if len(agents) == len(dead_agents): + LOG.warn(_LW("No DHCP agents available, " + "skipping rescheduling")) + return + for binding in dead_bindings: LOG.warn(_LW("Removing network %(network)s from agent " "%(agent)s because the agent did not report " "to the server in the last %(dead_time)s " diff --git a/neutron/tests/unit/scheduler/test_dhcp_agent_scheduler.py 
b/neutron/tests/unit/scheduler/test_dhcp_agent_scheduler.py index 5ee1adb16cd..260a5b01a8d 100644 --- a/neutron/tests/unit/scheduler/test_dhcp_agent_scheduler.py +++ b/neutron/tests/unit/scheduler/test_dhcp_agent_scheduler.py @@ -21,6 +21,7 @@ import testscenarios from neutron.common import constants from neutron import context from neutron.db import agentschedulers_db as sched_db +from neutron.db import common_db_mixin from neutron.db import models_v2 from neutron.extensions import dhcpagentscheduler from neutron.scheduler import dhcp_agent_scheduler @@ -177,7 +178,8 @@ class TestAutoScheduleNetworks(TestDhcpSchedulerBaseTestCase): class TestNetworksFailover(TestDhcpSchedulerBaseTestCase, - sched_db.DhcpAgentSchedulerDbMixin): + sched_db.DhcpAgentSchedulerDbMixin, + common_db_mixin.CommonDbMixin): def test_reschedule_network_from_down_agent(self): agents = self._create_and_set_agents_down(['host-a', 'host-b'], 1) self._test_schedule_bind_network([agents[0]], self.network_id) @@ -201,7 +203,7 @@ class TestNetworksFailover(TestDhcpSchedulerBaseTestCase, mock.ANY, self.network_id, agents[1].host) def _test_failed_rescheduling(self, rn_side_effect=None): - agents = self._create_and_set_agents_down(['host-a'], 1) + agents = self._create_and_set_agents_down(['host-a', 'host-b'], 1) self._test_schedule_bind_network([agents[0]], self.network_id) with mock.patch.object(self, 'remove_network_from_dhcp_agent', @@ -257,6 +259,14 @@ class TestNetworksFailover(TestDhcpSchedulerBaseTestCase, # just make sure that no exception is raised self.remove_networks_from_down_agents() + def test_reschedule_doesnt_occur_if_no_agents(self): + agents = self._create_and_set_agents_down(['host-a'], 1) + self._test_schedule_bind_network([agents[0]], self.network_id) + with mock.patch.object( + self, 'remove_network_from_dhcp_agent') as rn: + self.remove_networks_from_down_agents() + self.assertFalse(rn.called) + class DHCPAgentWeightSchedulerTestCase(TestDhcpSchedulerBaseTestCase): """Unit test 
scenarios for WeightScheduler.schedule.""" From 278a5fce29504c43d669feed210f7b3627616e22 Mon Sep 17 00:00:00 2001 From: Cedric Brandily Date: Wed, 10 Jun 2015 22:35:11 +0200 Subject: [PATCH 187/292] Use PyMySQL in MySQL related functional/fullstack tests mysql-python driver has been replaced by PyMySQL driver[1] in neutron code but MySQL related functional/fullstack tests try to use mysql-python driver because of MySQLOpportunisticTestCase[2] and tests are skipped because mysql-python driver is no more available. This change provides a backend implementation for mysql+pymysql, a base base testcase MySQLTestCase[2] using mysql+pymysql implementation (currently oslo.db provides none of them but will in the future) and replaces MySQLOpportunisticTestCase with MySQLTestCase. [1] I73e0fdb6eca70e7d029a40a2f6f17a7c0797a21d [2] neutron.tests.common.base Closes-Bug: #1463980 Change-Id: Ic5c1d12ab75443e1cc290a7447eeb4b452b4a9dd --- neutron/tests/common/base.py | 45 +++++++++++++++++++ neutron/tests/fullstack/base.py | 11 ++--- neutron/tests/functional/db/test_ipam.py | 3 +- .../tests/functional/db/test_migrations.py | 3 +- 4 files changed, 55 insertions(+), 7 deletions(-) diff --git a/neutron/tests/common/base.py b/neutron/tests/common/base.py index 11499e8050e..d1541a63901 100644 --- a/neutron/tests/common/base.py +++ b/neutron/tests/common/base.py @@ -14,6 +14,8 @@ import functools import unittest.case +from oslo_db.sqlalchemy import provision +from oslo_db.sqlalchemy import test_base import testtools.testcase from neutron.common import constants as n_const @@ -67,3 +69,46 @@ def no_skip_on_missing_deps(wrapped): 'is enabled, skip reason: %s' % (wrapped.__name__, e)) raise return wrapper + + +# NOTE(cbrandily): Define mysql+pymysql backend implementation +@provision.BackendImpl.impl.dispatch_for("mysql+pymysql") +class PyMySQLBackendImpl(provision.BackendImpl): + + default_engine_kwargs = {'mysql_sql_mode': 'TRADITIONAL'} + + def create_opportunistic_driver_url(self): + 
return "mysql+pymysql://openstack_citest:openstack_citest@localhost/" + + def create_named_database(self, engine, ident, conditional=False): + with engine.connect() as conn: + if not conditional or not self.database_exists(conn, ident): + conn.execute("CREATE DATABASE %s" % ident) + + def drop_named_database(self, engine, ident, conditional=False): + with engine.connect() as conn: + if not conditional or self.database_exists(conn, ident): + conn.execute("DROP DATABASE %s" % ident) + + def database_exists(self, engine, ident): + return bool(engine.scalar("SHOW DATABASES LIKE '%s'" % ident)) + + +impl = provision.BackendImpl.impl("mysql+pymysql") +url = impl.create_opportunistic_driver_url() +# NOTE(cbrandily): Declare mysql+pymysql backend implementation +provision.Backend("mysql+pymysql", url) + + +# NOTE(cbrandily): Define mysql+pymysql db fixture +class PyMySQLFixture(test_base.DbFixture): + DRIVER = 'mysql+pymysql' + + +# NOTE(cbrandily): Define mysql+pymysql base testcase +class MySQLTestCase(test_base.DbTestCase): + """Base test class for MySQL tests. + + Enforce the supported driver, which is PyMySQL. + """ + FIXTURE = PyMySQLFixture diff --git a/neutron/tests/fullstack/base.py b/neutron/tests/fullstack/base.py index c886bd5e790..87eb1880224 100644 --- a/neutron/tests/fullstack/base.py +++ b/neutron/tests/fullstack/base.py @@ -17,9 +17,10 @@ from oslo_db.sqlalchemy import test_base from neutron.db.migration.models import head # noqa from neutron.db import model_base +from neutron.tests.common import base -class BaseFullStackTestCase(test_base.MySQLOpportunisticTestCase): +class BaseFullStackTestCase(base.MySQLTestCase): """Base test class for full-stack tests.""" def __init__(self, environment, *args, **kwargs): @@ -42,10 +43,10 @@ class BaseFullStackTestCase(test_base.MySQLOpportunisticTestCase): def create_db_tables(self): """Populate the new database. 
- MySQLOpportunisticTestCase creates a new database for each test, but - these need to be populated with the appropriate tables. Before we can - do that, we must change the 'connection' option which the Neutron code - knows to look at. + MySQLTestCase creates a new database for each test, but these need to + be populated with the appropriate tables. Before we can do that, we + must change the 'connection' option which the Neutron code knows to + look at. Currently, the username and password options are hard-coded by oslo.db and neutron/tests/functional/contrib/gate_hook.sh. Also, diff --git a/neutron/tests/functional/db/test_ipam.py b/neutron/tests/functional/db/test_ipam.py index 3c3a9d163a4..c8cb98212a7 100644 --- a/neutron/tests/functional/db/test_ipam.py +++ b/neutron/tests/functional/db/test_ipam.py @@ -26,6 +26,7 @@ from neutron.db import db_base_plugin_v2 as base_plugin from neutron.db import model_base from neutron.db import models_v2 from neutron.tests import base +from neutron.tests.common import base as common_base def get_admin_test_context(db_url): @@ -205,7 +206,7 @@ class IpamTestCase(object): ip_avail_ranges_expected) -class TestIpamMySql(test_base.MySQLOpportunisticTestCase, base.BaseTestCase, +class TestIpamMySql(common_base.MySQLTestCase, base.BaseTestCase, IpamTestCase): def setUp(self): diff --git a/neutron/tests/functional/db/test_migrations.py b/neutron/tests/functional/db/test_migrations.py index a05bbb215e8..a7a9b386890 100644 --- a/neutron/tests/functional/db/test_migrations.py +++ b/neutron/tests/functional/db/test_migrations.py @@ -30,6 +30,7 @@ import sqlalchemy from neutron.db.migration.alembic_migrations import external from neutron.db.migration import cli as migration from neutron.db.migration.models import head as head_models +from neutron.tests.common import base LOG = logging.getLogger(__name__) @@ -208,7 +209,7 @@ class _TestModelsMigrations(test_migrations.ModelsMigrationsSync): class 
TestModelsMigrationsMysql(_TestModelsMigrations, - test_base.MySQLOpportunisticTestCase): + base.MySQLTestCase): pass From b370c69b75fe38cd285512f9516ce428e8a806dd Mon Sep 17 00:00:00 2001 From: Cedric Brandily Date: Tue, 9 Jun 2015 16:00:58 +0000 Subject: [PATCH 188/292] Ensure no "db" related functional/fullstack tests are skipped in the gate Currently neutron uses MySQLTestCase[1] and PostgreSQLOpportunisticTestCase[2] for functional and fullstack tests using a real MySQL/PostgreSQL database. These classes skip tests when the db is unavailable (db not installed/configured, missing packages, etc.) which is fine when tests are runned by developers but not when runned by the gate jobs. This change updates MySQLTestCase[1] and defines PostgreSQLTestCase[1] as PostgreSQL oslo.db test class wrapper: when the db is unavailable, these classes ensure tests will: * fail in the gate (dsvm-functional, dsvm-fullstack jobs), * be skipped by default otherwise (functional, fullstack jobs). [1] neutron.tests.common.base [2] oslo_db.sqlalchemy.test_base Closes-Bug: #1404093 Change-Id: I77b12e728ce9a7b0222c3df081842635f6375a3e --- neutron/tests/common/base.py | 12 ++++++++++++ neutron/tests/functional/db/test_ipam.py | 3 +-- neutron/tests/functional/db/test_migrations.py | 2 +- 3 files changed, 14 insertions(+), 3 deletions(-) diff --git a/neutron/tests/common/base.py b/neutron/tests/common/base.py index d1541a63901..8c89f9be76e 100644 --- a/neutron/tests/common/base.py +++ b/neutron/tests/common/base.py @@ -110,5 +110,17 @@ class MySQLTestCase(test_base.DbTestCase): """Base test class for MySQL tests. Enforce the supported driver, which is PyMySQL. + If the MySQL db is unavailable then this test is skipped, unless + OS_FAIL_ON_MISSING_DEPS is enabled. """ FIXTURE = PyMySQLFixture + SKIP_ON_UNAVAILABLE_DB = not base.bool_from_env('OS_FAIL_ON_MISSING_DEPS') + + +class PostgreSQLTestCase(test_base.PostgreSQLOpportunisticTestCase): + """Base test class for PostgreSQL tests. 
+ + If the PostgreSQL db is unavailable then this test is skipped, unless + OS_FAIL_ON_MISSING_DEPS is enabled. + """ + SKIP_ON_UNAVAILABLE_DB = not base.bool_from_env('OS_FAIL_ON_MISSING_DEPS') diff --git a/neutron/tests/functional/db/test_ipam.py b/neutron/tests/functional/db/test_ipam.py index c8cb98212a7..02dcd20781f 100644 --- a/neutron/tests/functional/db/test_ipam.py +++ b/neutron/tests/functional/db/test_ipam.py @@ -15,7 +15,6 @@ from oslo_config import cfg from oslo_db.sqlalchemy import session -from oslo_db.sqlalchemy import test_base import testtools from neutron.api.v2 import attributes @@ -214,7 +213,7 @@ class TestIpamMySql(common_base.MySQLTestCase, base.BaseTestCase, self.configure_test() -class TestIpamPsql(test_base.PostgreSQLOpportunisticTestCase, +class TestIpamPsql(common_base.PostgreSQLTestCase, base.BaseTestCase, IpamTestCase): def setUp(self): diff --git a/neutron/tests/functional/db/test_migrations.py b/neutron/tests/functional/db/test_migrations.py index a7a9b386890..ad3fd859534 100644 --- a/neutron/tests/functional/db/test_migrations.py +++ b/neutron/tests/functional/db/test_migrations.py @@ -214,7 +214,7 @@ class TestModelsMigrationsMysql(_TestModelsMigrations, class TestModelsMigrationsPsql(_TestModelsMigrations, - test_base.PostgreSQLOpportunisticTestCase): + base.PostgreSQLTestCase): pass From ad1c7a35dec614a26de0a426950fa005df5f489d Mon Sep 17 00:00:00 2001 From: "Sean M. Collins" Date: Wed, 10 Jun 2015 10:23:44 -0400 Subject: [PATCH 189/292] Remove get_namespace from API extensions Based on the conversation on the ML. 
http://lists.openstack.org/pipermail/openstack-dev/2015-June/066219.html APIImpact DocImpact Closes-Bug: #1464023 Depends-On: 6f900fc429bf24cb31e0d2f149aa732055fd5956 Change-Id: I3c406910991c33cf959c5345d76153eabe3ace2d --- neutron/api/extensions.py | 21 ------------------- neutron/extensions/agent.py | 4 ---- neutron/extensions/allowedaddresspairs.py | 4 ---- neutron/extensions/dhcpagentscheduler.py | 4 ---- neutron/extensions/dvr.py | 5 ----- neutron/extensions/external_net.py | 8 ------- neutron/extensions/extra_dhcp_opt.py | 4 ---- neutron/extensions/extraroute.py | 4 ---- neutron/extensions/flavor.py | 4 ---- neutron/extensions/l3.py | 4 ---- neutron/extensions/l3_ext_gw_mode.py | 4 ---- neutron/extensions/l3_ext_ha_mode.py | 4 ---- neutron/extensions/l3agentscheduler.py | 4 ---- neutron/extensions/metering.py | 4 ---- neutron/extensions/multiprovidernet.py | 4 ---- neutron/extensions/netmtu.py | 4 ---- neutron/extensions/portbindings.py | 4 ---- neutron/extensions/portsecurity.py | 4 ---- neutron/extensions/providernet.py | 4 ---- neutron/extensions/quotasv2.py | 4 ---- neutron/extensions/routerservicetype.py | 4 ---- neutron/extensions/securitygroup.py | 5 ----- neutron/extensions/servicetype.py | 4 ---- neutron/extensions/subnetallocation.py | 5 ----- neutron/extensions/vlantransparent.py | 4 ---- .../plugins/cisco/extensions/credential.py | 5 ----- neutron/plugins/cisco/extensions/n1kv.py | 4 ---- .../cisco/extensions/network_profile.py | 4 ---- .../cisco/extensions/policy_profile.py | 4 ---- neutron/plugins/cisco/extensions/qos.py | 5 ----- .../ml2/drivers/cisco/n1kv/extensions/n1kv.py | 4 ---- .../plugins/nec/extensions/packetfilter.py | 4 ---- .../plugins/nec/extensions/router_provider.py | 4 ---- .../extensions/advancedserviceproviders.py | 6 ------ neutron/plugins/vmware/extensions/lsn.py | 4 ---- .../plugins/vmware/extensions/maclearning.py | 4 ---- .../plugins/vmware/extensions/networkgw.py | 4 ---- neutron/plugins/vmware/extensions/nvp_qos.py | 4 
---- neutron/plugins/vmware/extensions/qos.py | 4 ---- .../plugins/vmware/extensions/routertype.py | 4 ---- .../plugins/vmware/extensions/vnicindex.py | 4 ---- neutron/tests/api/test_extensions.py | 1 - neutron/tests/unit/api/test_extensions.py | 6 +----- neutron/tests/unit/dummy_plugin.py | 4 ---- neutron/tests/unit/extension_stubs.py | 3 --- .../unit/extensions/extendedattribute.py | 4 ---- .../unit/extensions/extensionattribute.py | 4 ---- neutron/tests/unit/extensions/foxinsocks.py | 3 --- neutron/tests/unit/extensions/v2attributes.py | 3 --- .../plugins/ml2/extensions/fake_extension.py | 5 ----- 50 files changed, 1 insertion(+), 224 deletions(-) diff --git a/neutron/api/extensions.py b/neutron/api/extensions.py index fa275bfe02f..396dc817cb7 100644 --- a/neutron/api/extensions.py +++ b/neutron/api/extensions.py @@ -91,13 +91,6 @@ class ExtensionDescriptor(object): """ raise NotImplementedError() - def get_namespace(self): - """The XML namespace for the extension. - - e.g. 'http://www.fox.in.socks/api/ext/pie/v1.0' - """ - raise NotImplementedError() - def get_updated(self): """The timestamp when the extension was last updated. @@ -175,18 +168,6 @@ class ExtensionDescriptor(object): if extended_attrs: attrs.update(extended_attrs) - def get_alias_namespace_compatibility_map(self): - """Returns mappings between extension aliases and XML namespaces. - - The mappings are XML namespaces that should, for backward compatibility - reasons, be added to the XML serialization of extended attributes. - This allows an established extended attribute to be provided by - another extension than the original one while keeping its old alias - in the name. - :return: A dictionary of extension_aliases and namespace strings. 
- """ - return {} - class ActionExtensionController(wsgi.Controller): @@ -235,7 +216,6 @@ class ExtensionController(wsgi.Controller): ext_data['name'] = ext.get_name() ext_data['alias'] = ext.get_alias() ext_data['description'] = ext.get_description() - ext_data['namespace'] = ext.get_namespace() ext_data['updated'] = ext.get_updated() ext_data['links'] = [] # TODO(dprince): implement extension links return ext_data @@ -503,7 +483,6 @@ class ExtensionManager(object): LOG.debug('Ext name: %s', extension.get_name()) LOG.debug('Ext alias: %s', extension.get_alias()) LOG.debug('Ext description: %s', extension.get_description()) - LOG.debug('Ext namespace: %s', extension.get_namespace()) LOG.debug('Ext updated: %s', extension.get_updated()) except AttributeError as ex: LOG.exception(_LE("Exception loading extension: %s"), unicode(ex)) diff --git a/neutron/extensions/agent.py b/neutron/extensions/agent.py index 4dee27c6ed7..2f67a3a7b57 100644 --- a/neutron/extensions/agent.py +++ b/neutron/extensions/agent.py @@ -86,10 +86,6 @@ class Agent(object): def get_description(cls): return "The agent management extension." 
- @classmethod - def get_namespace(cls): - return "http://docs.openstack.org/ext/agent/api/v2.0" - @classmethod def get_updated(cls): return "2013-02-03T10:00:00-00:00" diff --git a/neutron/extensions/allowedaddresspairs.py b/neutron/extensions/allowedaddresspairs.py index 2e618eef249..807548a61cf 100644 --- a/neutron/extensions/allowedaddresspairs.py +++ b/neutron/extensions/allowedaddresspairs.py @@ -117,10 +117,6 @@ class Allowedaddresspairs(object): def get_description(cls): return "Provides allowed address pairs" - @classmethod - def get_namespace(cls): - return "http://docs.openstack.org/ext/allowedaddresspairs/api/v2.0" - @classmethod def get_updated(cls): return "2013-07-23T10:00:00-00:00" diff --git a/neutron/extensions/dhcpagentscheduler.py b/neutron/extensions/dhcpagentscheduler.py index 42817cd1fb1..ebd8ff55d70 100644 --- a/neutron/extensions/dhcpagentscheduler.py +++ b/neutron/extensions/dhcpagentscheduler.py @@ -91,10 +91,6 @@ class Dhcpagentscheduler(extensions.ExtensionDescriptor): def get_description(cls): return "Schedule networks among dhcp agents" - @classmethod - def get_namespace(cls): - return "http://docs.openstack.org/ext/dhcp_agent_scheduler/api/v1.0" - @classmethod def get_updated(cls): return "2013-02-07T10:00:00-00:00" diff --git a/neutron/extensions/dvr.py b/neutron/extensions/dvr.py index bb5720e9dad..811c92f649f 100644 --- a/neutron/extensions/dvr.py +++ b/neutron/extensions/dvr.py @@ -57,11 +57,6 @@ class Dvr(object): def get_description(cls): return "Enables configuration of Distributed Virtual Routers." 
- @classmethod - def get_namespace(cls): - return ("http://docs.openstack.org/ext/" - "%s/api/v1.0" % constants.L3_DISTRIBUTED_EXT_ALIAS) - @classmethod def get_updated(cls): return "2014-06-1T10:00:00-00:00" diff --git a/neutron/extensions/external_net.py b/neutron/extensions/external_net.py index 2a985f9d3ca..54b481092f6 100644 --- a/neutron/extensions/external_net.py +++ b/neutron/extensions/external_net.py @@ -16,7 +16,6 @@ from neutron.api import extensions from neutron.api.v2 import attributes as attr from neutron.common import exceptions as nexception -from neutron.extensions import l3 class ExternalNetworkInUse(nexception.InUse): @@ -50,10 +49,6 @@ class External_net(extensions.ExtensionDescriptor): def get_description(cls): return _("Adds external network attribute to network resource.") - @classmethod - def get_namespace(cls): - return "http://docs.openstack.org/ext/neutron/external_net/api/v1.0" - @classmethod def get_updated(cls): return "2013-01-14T10:00:00-00:00" @@ -63,6 +58,3 @@ class External_net(extensions.ExtensionDescriptor): return EXTENDED_ATTRIBUTES_2_0 else: return {} - - def get_alias_namespace_compatibility_map(self): - return {l3.L3.get_alias(): l3.L3.get_namespace()} diff --git a/neutron/extensions/extra_dhcp_opt.py b/neutron/extensions/extra_dhcp_opt.py index 0de062c7cd5..8d3063b22dd 100644 --- a/neutron/extensions/extra_dhcp_opt.py +++ b/neutron/extensions/extra_dhcp_opt.py @@ -84,10 +84,6 @@ class Extra_dhcp_opt(extensions.ExtensionDescriptor): "be specified (e.g. 
tftp-server, server-ip-address, " "bootfile-name)") - @classmethod - def get_namespace(cls): - return "http://docs.openstack.org/ext/neutron/extra_dhcp_opt/api/v1.0" - @classmethod def get_updated(cls): return "2013-03-17T12:00:00-00:00" diff --git a/neutron/extensions/extraroute.py b/neutron/extensions/extraroute.py index 2e6916c601a..feb3b4a9722 100644 --- a/neutron/extensions/extraroute.py +++ b/neutron/extensions/extraroute.py @@ -58,10 +58,6 @@ class Extraroute(object): def get_description(cls): return "Extra routes configuration for L3 router" - @classmethod - def get_namespace(cls): - return "http://docs.openstack.org/ext/neutron/extraroutes/api/v1.0" - @classmethod def get_updated(cls): return "2013-02-01T10:00:00-00:00" diff --git a/neutron/extensions/flavor.py b/neutron/extensions/flavor.py index 8679259bd31..9cafb13ef0a 100644 --- a/neutron/extensions/flavor.py +++ b/neutron/extensions/flavor.py @@ -53,10 +53,6 @@ class Flavor(extensions.ExtensionDescriptor): def get_description(cls): return "Flavor" - @classmethod - def get_namespace(cls): - return "http://docs.openstack.org/ext/flavor/api/v1.0" - @classmethod def get_updated(cls): return "2012-07-20T10:00:00-00:00" diff --git a/neutron/extensions/l3.py b/neutron/extensions/l3.py index b38d34907fa..bcbe15a62e4 100644 --- a/neutron/extensions/l3.py +++ b/neutron/extensions/l3.py @@ -175,10 +175,6 @@ class L3(extensions.ExtensionDescriptor): " between L2 Neutron networks and access to external" " networks via a NAT gateway.") - @classmethod - def get_namespace(cls): - return "http://docs.openstack.org/ext/neutron/router/api/v1.0" - @classmethod def get_updated(cls): return "2012-07-20T10:00:00-00:00" diff --git a/neutron/extensions/l3_ext_gw_mode.py b/neutron/extensions/l3_ext_gw_mode.py index ae0ab1d54b9..a9726a18c29 100644 --- a/neutron/extensions/l3_ext_gw_mode.py +++ b/neutron/extensions/l3_ext_gw_mode.py @@ -54,10 +54,6 @@ class L3_ext_gw_mode(extensions.ExtensionDescriptor): return ("Extension of 
the router abstraction for specifying whether " "SNAT should occur on the external gateway") - @classmethod - def get_namespace(cls): - return "http://docs.openstack.org/ext/neutron/ext-gw-mode/api/v1.0" - @classmethod def get_updated(cls): return "2013-03-28T10:00:00-00:00" diff --git a/neutron/extensions/l3_ext_ha_mode.py b/neutron/extensions/l3_ext_ha_mode.py index 006cd54d493..a2a3b859dbc 100644 --- a/neutron/extensions/l3_ext_ha_mode.py +++ b/neutron/extensions/l3_ext_ha_mode.py @@ -82,10 +82,6 @@ class L3_ext_ha_mode(extensions.ExtensionDescriptor): def get_description(cls): return "Add HA capability to routers." - @classmethod - def get_namespace(cls): - return "" - @classmethod def get_updated(cls): return "2014-04-26T00:00:00-00:00" diff --git a/neutron/extensions/l3agentscheduler.py b/neutron/extensions/l3agentscheduler.py index c786680a437..50a5e34c99b 100644 --- a/neutron/extensions/l3agentscheduler.py +++ b/neutron/extensions/l3agentscheduler.py @@ -120,10 +120,6 @@ class L3agentscheduler(extensions.ExtensionDescriptor): def get_description(cls): return "Schedule routers among l3 agents" - @classmethod - def get_namespace(cls): - return "http://docs.openstack.org/ext/l3_agent_scheduler/api/v1.0" - @classmethod def get_updated(cls): return "2013-02-07T10:00:00-00:00" diff --git a/neutron/extensions/metering.py b/neutron/extensions/metering.py index 8485fc40d85..177cf379df6 100644 --- a/neutron/extensions/metering.py +++ b/neutron/extensions/metering.py @@ -97,10 +97,6 @@ class Metering(extensions.ExtensionDescriptor): def get_description(cls): return "Neutron Metering extension." 
- @classmethod - def get_namespace(cls): - return "http://wiki.openstack.org/wiki/Neutron/Metering/Bandwidth#API" - @classmethod def get_updated(cls): return "2013-06-12T10:00:00-00:00" diff --git a/neutron/extensions/multiprovidernet.py b/neutron/extensions/multiprovidernet.py index 4fba23fbde0..18b769702df 100644 --- a/neutron/extensions/multiprovidernet.py +++ b/neutron/extensions/multiprovidernet.py @@ -107,10 +107,6 @@ class Multiprovidernet(extensions.ExtensionDescriptor): return ("Expose mapping of virtual networks to multiple physical " "networks") - @classmethod - def get_namespace(cls): - return "http://docs.openstack.org/ext/multi-provider/api/v1.0" - @classmethod def get_updated(cls): return "2013-06-27T10:00:00-00:00" diff --git a/neutron/extensions/netmtu.py b/neutron/extensions/netmtu.py index 55245fdf9a6..b4332511367 100644 --- a/neutron/extensions/netmtu.py +++ b/neutron/extensions/netmtu.py @@ -39,10 +39,6 @@ class Netmtu(extensions.ExtensionDescriptor): def get_description(cls): return "Provides MTU attribute for a network resource." 
- @classmethod - def get_namespace(cls): - return "http://docs.openstack.org/ext/net_mtu/api/v1.0" - @classmethod def get_updated(cls): return "2015-03-25T10:00:00-00:00" diff --git a/neutron/extensions/portbindings.py b/neutron/extensions/portbindings.py index 63fca88ec09..3c50a4f2f8b 100644 --- a/neutron/extensions/portbindings.py +++ b/neutron/extensions/portbindings.py @@ -124,10 +124,6 @@ class Portbindings(extensions.ExtensionDescriptor): def get_description(cls): return "Expose port bindings of a virtual port to external application" - @classmethod - def get_namespace(cls): - return "http://docs.openstack.org/ext/binding/api/v1.0" - @classmethod def get_updated(cls): return "2014-02-03T10:00:00-00:00" diff --git a/neutron/extensions/portsecurity.py b/neutron/extensions/portsecurity.py index 68663f9e16c..573e04fc011 100644 --- a/neutron/extensions/portsecurity.py +++ b/neutron/extensions/portsecurity.py @@ -63,10 +63,6 @@ class Portsecurity(object): def get_description(cls): return "Provides port security" - @classmethod - def get_namespace(cls): - return "http://docs.openstack.org/ext/portsecurity/api/v1.0" - @classmethod def get_updated(cls): return "2012-07-23T10:00:00-00:00" diff --git a/neutron/extensions/providernet.py b/neutron/extensions/providernet.py index 6a60ca9a22d..1cc8865f233 100644 --- a/neutron/extensions/providernet.py +++ b/neutron/extensions/providernet.py @@ -85,10 +85,6 @@ class Providernet(extensions.ExtensionDescriptor): def get_description(cls): return "Expose mapping of virtual networks to physical networks" - @classmethod - def get_namespace(cls): - return "http://docs.openstack.org/ext/provider/api/v1.0" - @classmethod def get_updated(cls): return "2012-09-07T10:00:00-00:00" diff --git a/neutron/extensions/quotasv2.py b/neutron/extensions/quotasv2.py index bddad4fe5b4..b5f8d020d37 100644 --- a/neutron/extensions/quotasv2.py +++ b/neutron/extensions/quotasv2.py @@ -125,10 +125,6 @@ class Quotasv2(extensions.ExtensionDescriptor): 
description += ' per tenant' return description - @classmethod - def get_namespace(cls): - return "http://docs.openstack.org/network/ext/quotas-sets/api/v2.0" - @classmethod def get_updated(cls): return "2012-07-29T10:00:00-00:00" diff --git a/neutron/extensions/routerservicetype.py b/neutron/extensions/routerservicetype.py index cce9cf85d33..d81197a5f94 100644 --- a/neutron/extensions/routerservicetype.py +++ b/neutron/extensions/routerservicetype.py @@ -38,10 +38,6 @@ class Routerservicetype(object): def get_description(cls): return "Provides router service type" - @classmethod - def get_namespace(cls): - return "" - @classmethod def get_updated(cls): return "2013-01-29T00:00:00-00:00" diff --git a/neutron/extensions/securitygroup.py b/neutron/extensions/securitygroup.py index a772be5692f..fd3508a8b23 100644 --- a/neutron/extensions/securitygroup.py +++ b/neutron/extensions/securitygroup.py @@ -291,11 +291,6 @@ class Securitygroup(extensions.ExtensionDescriptor): def get_description(cls): return "The security groups extension." 
- @classmethod - def get_namespace(cls): - # todo - return "http://docs.openstack.org/ext/securitygroups/api/v2.0" - @classmethod def get_updated(cls): return "2012-10-05T10:00:00-00:00" diff --git a/neutron/extensions/servicetype.py b/neutron/extensions/servicetype.py index a972f6f45e9..3a257478014 100644 --- a/neutron/extensions/servicetype.py +++ b/neutron/extensions/servicetype.py @@ -58,10 +58,6 @@ class Servicetype(extensions.ExtensionDescriptor): return _("API for retrieving service providers for " "Neutron advanced services") - @classmethod - def get_namespace(cls): - return "http://docs.openstack.org/ext/neutron/service-type/api/v1.0" - @classmethod def get_updated(cls): return "2013-01-20T00:00:00-00:00" diff --git a/neutron/extensions/subnetallocation.py b/neutron/extensions/subnetallocation.py index fd8035c10a7..3a9712aa472 100644 --- a/neutron/extensions/subnetallocation.py +++ b/neutron/extensions/subnetallocation.py @@ -32,11 +32,6 @@ class Subnetallocation(extensions.ExtensionDescriptor): def get_description(cls): return "Enables allocation of subnets from a subnet pool" - @classmethod - def get_namespace(cls): - return ("http://docs.openstack.org/ext/" - "%s/api/v1.0" % constants.SUBNET_ALLOCATION_EXT_ALIAS) - @classmethod def get_updated(cls): return "2015-03-30T10:00:00-00:00" diff --git a/neutron/extensions/vlantransparent.py b/neutron/extensions/vlantransparent.py index 4c2d8f980d8..0e83fb51175 100644 --- a/neutron/extensions/vlantransparent.py +++ b/neutron/extensions/vlantransparent.py @@ -60,10 +60,6 @@ class Vlantransparent(object): def get_description(cls): return "Provides Vlan Transparent Networks" - @classmethod - def get_namespace(cls): - return "http://docs.openstack.org/ext/vlantransparent/api/v1.0" - @classmethod def get_updated(cls): return "2015-03-23T09:00:00-00:00" diff --git a/neutron/plugins/cisco/extensions/credential.py b/neutron/plugins/cisco/extensions/credential.py index 01749357bd3..f4d1f7f8f4f 100644 --- 
a/neutron/plugins/cisco/extensions/credential.py +++ b/neutron/plugins/cisco/extensions/credential.py @@ -55,11 +55,6 @@ class Credential(extensions.ExtensionDescriptor): """Returns Extended Resource Description.""" return "Credential include username and password" - @classmethod - def get_namespace(cls): - """Returns Extended Resource Namespace.""" - return "http://docs.ciscocloud.com/api/ext/credential/v2.0" - @classmethod def get_updated(cls): """Returns Extended Resource Update Time.""" diff --git a/neutron/plugins/cisco/extensions/n1kv.py b/neutron/plugins/cisco/extensions/n1kv.py index 59552ac3c8d..e754e0a2504 100644 --- a/neutron/plugins/cisco/extensions/n1kv.py +++ b/neutron/plugins/cisco/extensions/n1kv.py @@ -84,10 +84,6 @@ class N1kv(extensions.ExtensionDescriptor): def get_description(cls): return "Expose network profile" - @classmethod - def get_namespace(cls): - return "http://docs.openstack.org/ext/n1kv/api/v2.0" - @classmethod def get_updated(cls): return "2012-11-15T10:00:00-00:00" diff --git a/neutron/plugins/cisco/extensions/network_profile.py b/neutron/plugins/cisco/extensions/network_profile.py index b2752a06c2c..6a16cc2f058 100644 --- a/neutron/plugins/cisco/extensions/network_profile.py +++ b/neutron/plugins/cisco/extensions/network_profile.py @@ -75,10 +75,6 @@ class Network_profile(extensions.ExtensionDescriptor): def get_description(cls): return ("Profile includes the type of profile for N1kv") - @classmethod - def get_namespace(cls): - return "http://docs.openstack.org/ext/n1kv/network-profile/api/v2.0" - @classmethod def get_updated(cls): return "2012-07-20T10:00:00-00:00" diff --git a/neutron/plugins/cisco/extensions/policy_profile.py b/neutron/plugins/cisco/extensions/policy_profile.py index 9c028e76b6b..04515c20c3a 100644 --- a/neutron/plugins/cisco/extensions/policy_profile.py +++ b/neutron/plugins/cisco/extensions/policy_profile.py @@ -54,10 +54,6 @@ class Policy_profile(extensions.ExtensionDescriptor): def get_description(cls): 
return "Profile includes the type of profile for N1kv" - @classmethod - def get_namespace(cls): - return "http://docs.openstack.org/ext/n1kv/policy-profile/api/v2.0" - @classmethod def get_updated(cls): return "2012-07-20T10:00:00-00:00" diff --git a/neutron/plugins/cisco/extensions/qos.py b/neutron/plugins/cisco/extensions/qos.py index db642f1d4b7..b9428f23dad 100644 --- a/neutron/plugins/cisco/extensions/qos.py +++ b/neutron/plugins/cisco/extensions/qos.py @@ -42,11 +42,6 @@ class Qos(extensions.ExtensionDescriptor): """Returns Ext Resource Description.""" return "qos includes qos_name and qos_desc" - @classmethod - def get_namespace(cls): - """Returns Ext Resource Namespace.""" - return "http://docs.ciscocloud.com/api/ext/qos/v1.0" - @classmethod def get_updated(cls): """Returns Ext Resource update.""" diff --git a/neutron/plugins/ml2/drivers/cisco/n1kv/extensions/n1kv.py b/neutron/plugins/ml2/drivers/cisco/n1kv/extensions/n1kv.py index 7cd4f8cbd83..726779c9df2 100644 --- a/neutron/plugins/ml2/drivers/cisco/n1kv/extensions/n1kv.py +++ b/neutron/plugins/ml2/drivers/cisco/n1kv/extensions/n1kv.py @@ -42,10 +42,6 @@ class N1kv(extensions.ExtensionDescriptor): def get_description(cls): return _("Add new policy profile attribute to port resource.") - @classmethod - def get_namespace(cls): - return "http://docs.openstack.org/ext/neutron/n1kv/api/v2.0" - @classmethod def get_updated(cls): return "2014-11-23T13:33:25-00:00" diff --git a/neutron/plugins/nec/extensions/packetfilter.py b/neutron/plugins/nec/extensions/packetfilter.py index 3711995ea34..7c9971f8a96 100644 --- a/neutron/plugins/nec/extensions/packetfilter.py +++ b/neutron/plugins/nec/extensions/packetfilter.py @@ -174,10 +174,6 @@ class Packetfilter(extensions.ExtensionDescriptor): def get_description(cls): return "PacketFilters on OFC" - @classmethod - def get_namespace(cls): - return "http://www.nec.co.jp/api/ext/packet_filter/v2.0" - @classmethod def get_updated(cls): return "2013-07-16T00:00:00+09:00" 
diff --git a/neutron/plugins/nec/extensions/router_provider.py b/neutron/plugins/nec/extensions/router_provider.py index 5098a47fda0..e16e4ab9f7c 100644 --- a/neutron/plugins/nec/extensions/router_provider.py +++ b/neutron/plugins/nec/extensions/router_provider.py @@ -44,10 +44,6 @@ class Router_provider(object): def get_description(cls): return "Router Provider Support" - @classmethod - def get_namespace(cls): - return "http://docs.openstack.org/ext/router_provider/api/v1.0" - @classmethod def get_updated(cls): return "2013-08-20T10:00:00-00:00" diff --git a/neutron/plugins/vmware/extensions/advancedserviceproviders.py b/neutron/plugins/vmware/extensions/advancedserviceproviders.py index ba620363964..dc088eff292 100644 --- a/neutron/plugins/vmware/extensions/advancedserviceproviders.py +++ b/neutron/plugins/vmware/extensions/advancedserviceproviders.py @@ -39,12 +39,6 @@ class Advancedserviceproviders(object): def get_description(cls): return "Id of the advanced service providers attached to the subnet" - @classmethod - def get_namespace(cls): - return( - "http://docs.openstack.org/ext/neutron/" - "advanced_service_providers/api/v1.0") - @classmethod def get_updated(cls): return "2014-12-11T12:00:00-00:00" diff --git a/neutron/plugins/vmware/extensions/lsn.py b/neutron/plugins/vmware/extensions/lsn.py index 4a7d3ca3da5..a5b6a94ccf9 100644 --- a/neutron/plugins/vmware/extensions/lsn.py +++ b/neutron/plugins/vmware/extensions/lsn.py @@ -52,10 +52,6 @@ class Lsn(object): def get_description(cls): return "Enables configuration of NSX Logical Services Node." 
- @classmethod - def get_namespace(cls): - return "http://docs.openstack.org/ext/%s/api/v2.0" % EXT_ALIAS - @classmethod def get_updated(cls): return "2013-10-05T10:00:00-00:00" diff --git a/neutron/plugins/vmware/extensions/maclearning.py b/neutron/plugins/vmware/extensions/maclearning.py index 21c66915085..f7b30f7cdd3 100644 --- a/neutron/plugins/vmware/extensions/maclearning.py +++ b/neutron/plugins/vmware/extensions/maclearning.py @@ -41,10 +41,6 @@ class Maclearning(object): def get_description(cls): return "Provides MAC learning capabilities." - @classmethod - def get_namespace(cls): - return "http://docs.openstack.org/ext/maclearning/api/v1.0" - @classmethod def get_updated(cls): return "2013-05-1T10:00:00-00:00" diff --git a/neutron/plugins/vmware/extensions/networkgw.py b/neutron/plugins/vmware/extensions/networkgw.py index 1e9bc2ace6f..2fc18c0169e 100644 --- a/neutron/plugins/vmware/extensions/networkgw.py +++ b/neutron/plugins/vmware/extensions/networkgw.py @@ -173,10 +173,6 @@ class Networkgw(object): def get_description(cls): return "Connects Neutron networks with external networks at layer 2." - @classmethod - def get_namespace(cls): - return "http://docs.openstack.org/ext/network-gateway/api/v1.0" - @classmethod def get_updated(cls): return "2014-01-01T00:00:00-00:00" diff --git a/neutron/plugins/vmware/extensions/nvp_qos.py b/neutron/plugins/vmware/extensions/nvp_qos.py index 470f267b5a2..14d30ce9eae 100644 --- a/neutron/plugins/vmware/extensions/nvp_qos.py +++ b/neutron/plugins/vmware/extensions/nvp_qos.py @@ -34,7 +34,3 @@ class Nvp_qos(qos.Qos): @classmethod def get_description(cls): return "NVP QoS extension (deprecated)." 
- - @classmethod - def get_namespace(cls): - return "http://docs.openstack.org/ext/nvp-qos/api/v2.0" diff --git a/neutron/plugins/vmware/extensions/qos.py b/neutron/plugins/vmware/extensions/qos.py index 30e6f8c7d02..b2aeeb467c7 100644 --- a/neutron/plugins/vmware/extensions/qos.py +++ b/neutron/plugins/vmware/extensions/qos.py @@ -176,10 +176,6 @@ class Qos(object): def get_description(cls): return "NSX QoS extension." - @classmethod - def get_namespace(cls): - return "http://docs.openstack.org/ext/qos-queue/api/v2.0" - @classmethod def get_updated(cls): return "2014-01-01T00:00:00-00:00" diff --git a/neutron/plugins/vmware/extensions/routertype.py b/neutron/plugins/vmware/extensions/routertype.py index ba7c831a23d..0705f02eb20 100644 --- a/neutron/plugins/vmware/extensions/routertype.py +++ b/neutron/plugins/vmware/extensions/routertype.py @@ -41,10 +41,6 @@ class Routertype(object): def get_description(cls): return "Enables configuration of NSXv router type." - @classmethod - def get_namespace(cls): - return "http://docs.openstack.org/ext/router-type/api/v1.0" - @classmethod def get_updated(cls): return "2015-1-12T10:00:00-00:00" diff --git a/neutron/plugins/vmware/extensions/vnicindex.py b/neutron/plugins/vmware/extensions/vnicindex.py index 9b0e6d05ce6..27888fb4442 100644 --- a/neutron/plugins/vmware/extensions/vnicindex.py +++ b/neutron/plugins/vmware/extensions/vnicindex.py @@ -41,10 +41,6 @@ class Vnicindex(object): def get_description(cls): return ("Enable a port to be associated with a VNIC index") - @classmethod - def get_namespace(cls): - return "http://docs.openstack.org/ext/neutron/vnic_index/api/v1.0" - @classmethod def get_updated(cls): return "2014-09-15T12:00:00-00:00" diff --git a/neutron/tests/api/test_extensions.py b/neutron/tests/api/test_extensions.py index 56be8189f7e..1e3d824c3bd 100644 --- a/neutron/tests/api/test_extensions.py +++ b/neutron/tests/api/test_extensions.py @@ -61,7 +61,6 @@ class ExtensionsTestJSON(base.BaseNetworkTest): 
self.assertIn('updated', ext_details.keys()) self.assertIn('name', ext_details.keys()) self.assertIn('description', ext_details.keys()) - self.assertIn('namespace', ext_details.keys()) self.assertIn('links', ext_details.keys()) self.assertIn('alias', ext_details.keys()) self.assertEqual(ext_details['name'], ext_name) diff --git a/neutron/tests/unit/api/test_extensions.py b/neutron/tests/unit/api/test_extensions.py index 9a32e865f94..38a99ea280a 100644 --- a/neutron/tests/unit/api/test_extensions.py +++ b/neutron/tests/unit/api/test_extensions.py @@ -474,7 +474,7 @@ class ExtensionManagerTest(base.BaseTestCase): """Invalid extension. This Extension doesn't implement extension methods : - get_name, get_description, get_namespace and get_updated + get_name, get_description and get_updated """ def get_alias(self): return "invalid_extension" @@ -621,16 +621,12 @@ class ExtensionControllerTest(testlib_api.WebTestCase): foxnsox = res_body["extensions"][0] self.assertEqual(foxnsox["alias"], "FOXNSOX") - self.assertEqual(foxnsox["namespace"], - "http://www.fox.in.socks/api/ext/pie/v1.0") def test_extension_can_be_accessed_by_alias(self): response = self.test_app.get("/extensions/FOXNSOX." 
+ self.fmt) foxnsox_extension = self.deserialize(response) foxnsox_extension = foxnsox_extension['extension'] self.assertEqual(foxnsox_extension["alias"], "FOXNSOX") - self.assertEqual(foxnsox_extension["namespace"], - "http://www.fox.in.socks/api/ext/pie/v1.0") def test_show_returns_not_found_for_non_existent_extension(self): response = self.test_app.get("/extensions/non_existent" + self.fmt, diff --git a/neutron/tests/unit/dummy_plugin.py b/neutron/tests/unit/dummy_plugin.py index 0e7fcd98f4e..09d5ff658c0 100644 --- a/neutron/tests/unit/dummy_plugin.py +++ b/neutron/tests/unit/dummy_plugin.py @@ -63,10 +63,6 @@ class Dummy(object): def get_description(cls): return "Dummy stuff" - @classmethod - def get_namespace(cls): - return "http://docs.openstack.org/ext/neutron/dummy/api/v1.0" - @classmethod def get_updated(cls): return "2012-11-20T10:00:00-00:00" diff --git a/neutron/tests/unit/extension_stubs.py b/neutron/tests/unit/extension_stubs.py index e8e23063f01..f98d2149186 100644 --- a/neutron/tests/unit/extension_stubs.py +++ b/neutron/tests/unit/extension_stubs.py @@ -33,9 +33,6 @@ class StubExtension(object): def get_description(self): return "" - def get_namespace(self): - return "" - def get_updated(self): return "" diff --git a/neutron/tests/unit/extensions/extendedattribute.py b/neutron/tests/unit/extensions/extendedattribute.py index 6754bffa0e4..2f2f2de1594 100644 --- a/neutron/tests/unit/extensions/extendedattribute.py +++ b/neutron/tests/unit/extensions/extendedattribute.py @@ -39,10 +39,6 @@ class Extendedattribute(extensions.ExtensionDescriptor): def get_description(cls): return "Provides extended_attr attribute to router" - @classmethod - def get_namespace(cls): - return "" - @classmethod def get_updated(cls): return "2013-02-05T00:00:00-00:00" diff --git a/neutron/tests/unit/extensions/extensionattribute.py b/neutron/tests/unit/extensions/extensionattribute.py index 22814fc80b5..f289c8b0625 100644 --- 
a/neutron/tests/unit/extensions/extensionattribute.py +++ b/neutron/tests/unit/extensions/extensionattribute.py @@ -52,10 +52,6 @@ class Extensionattribute(extensions.ExtensionDescriptor): def get_description(cls): return "Extension Test Resource" - @classmethod - def get_namespace(cls): - return "" - @classmethod def get_updated(cls): return "2013-02-05T10:00:00-00:00" diff --git a/neutron/tests/unit/extensions/foxinsocks.py b/neutron/tests/unit/extensions/foxinsocks.py index 4c8fc387c50..39d2bd829bb 100644 --- a/neutron/tests/unit/extensions/foxinsocks.py +++ b/neutron/tests/unit/extensions/foxinsocks.py @@ -51,9 +51,6 @@ class Foxinsocks(object): def get_description(self): return "The Fox In Socks Extension" - def get_namespace(self): - return "http://www.fox.in.socks/api/ext/pie/v1.0" - def get_updated(self): return "2011-01-22T13:25:27-06:00" diff --git a/neutron/tests/unit/extensions/v2attributes.py b/neutron/tests/unit/extensions/v2attributes.py index ab40f260af9..6259eb35659 100644 --- a/neutron/tests/unit/extensions/v2attributes.py +++ b/neutron/tests/unit/extensions/v2attributes.py @@ -35,9 +35,6 @@ class V2attributes(object): def get_description(self): return "Demonstrates extended attributes on V2 core resources" - def get_namespace(self): - return "http://docs.openstack.org/ext/examples/v2attributes/api/v1.0" - def get_updated(self): return "2012-07-18T10:00:00-00:00" diff --git a/neutron/tests/unit/plugins/ml2/extensions/fake_extension.py b/neutron/tests/unit/plugins/ml2/extensions/fake_extension.py index fdc94c12890..985e4ebc834 100644 --- a/neutron/tests/unit/plugins/ml2/extensions/fake_extension.py +++ b/neutron/tests/unit/plugins/ml2/extensions/fake_extension.py @@ -53,11 +53,6 @@ class Fake_extension(extensions.ExtensionDescriptor): def get_description(cls): return _("Adds test attributes to core resources.") - @classmethod - def get_namespace(cls): - return ("http://docs.openstack.org/ext/neutron/ml2/test/" - "fake_extension/api/v1.0") - 
@classmethod def get_updated(cls): return "2014-07-16T10:00:00-00:00" From 62faedddf2b0315484a04bd9092e96a3a828e46a Mon Sep 17 00:00:00 2001 From: Ihar Hrachyshka Date: Mon, 15 Jun 2015 16:15:11 +0200 Subject: [PATCH 190/292] Put output of docs job into doc/build/html This is the path where jenkins/scripts/run-docs.sh that is used by gate to generate project documentation expects to see the output. Change-Id: Id276fa59edb33f7789ab06055300b4dc2385472a --- tox.ini | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tox.ini b/tox.ini index 4203cf775e4..85914b40651 100644 --- a/tox.ini +++ b/tox.ini @@ -86,7 +86,7 @@ commands = commands = {posargs} [testenv:docs] -commands = sphinx-build -W -b html doc/source doc/build +commands = sphinx-build -W -b html doc/source doc/build/html [testenv:py34] commands = python -m testtools.run \ From a8619e9bd1247e8ec494c456aee9ee7163231f62 Mon Sep 17 00:00:00 2001 From: Cyril Roelandt Date: Mon, 15 Jun 2015 15:07:28 +0000 Subject: [PATCH 191/292] Python 3: use dict.values instead of dict.itervalues This works with both Python 2 and 3, and should not have any performance impact. 
Change-Id: I2a14945c60de513b91c6f022ff5dcc503ce2a8ad Blueprint: neutron-python3 --- neutron/api/extensions.py | 6 +++--- neutron/common/utils.py | 2 +- .../plugins/linuxbridge/agent/linuxbridge_neutron_agent.py | 2 +- neutron/plugins/ml2/drivers/type_vlan.py | 2 +- neutron/plugins/sriovnicagent/eswitch_manager.py | 2 +- neutron/plugins/sriovnicagent/sriov_nic_agent.py | 2 +- neutron/tests/tempest/common/isolated_creds.py | 2 +- 7 files changed, 9 insertions(+), 9 deletions(-) diff --git a/neutron/api/extensions.py b/neutron/api/extensions.py index f6b4601ba21..905d8ce81a9 100644 --- a/neutron/api/extensions.py +++ b/neutron/api/extensions.py @@ -411,7 +411,7 @@ class ExtensionManager(object): resources = [] resources.append(ResourceExtension('extensions', ExtensionController(self))) - for ext in self.extensions.itervalues(): + for ext in self.extensions.values(): try: resources.extend(ext.get_resources()) except AttributeError: @@ -423,7 +423,7 @@ class ExtensionManager(object): def get_actions(self): """Returns a list of ActionExtension objects.""" actions = [] - for ext in self.extensions.itervalues(): + for ext in self.extensions.values(): try: actions.extend(ext.get_actions()) except AttributeError: @@ -435,7 +435,7 @@ class ExtensionManager(object): def get_request_extensions(self): """Returns a list of RequestExtension objects.""" request_exts = [] - for ext in self.extensions.itervalues(): + for ext in self.extensions.values(): try: request_exts.extend(ext.get_request_extensions()) except AttributeError: diff --git a/neutron/common/utils.py b/neutron/common/utils.py index 53308908b30..f4b286015a4 100644 --- a/neutron/common/utils.py +++ b/neutron/common/utils.py @@ -210,7 +210,7 @@ def parse_mappings(mapping_list, unique_values=True): if key in mappings: raise ValueError(_("Key %(key)s in mapping: '%(mapping)s' not " "unique") % {'key': key, 'mapping': mapping}) - if unique_values and value in mappings.itervalues(): + if unique_values and value in 
mappings.values(): raise ValueError(_("Value %(value)s in mapping: '%(mapping)s' " "not unique") % {'value': value, 'mapping': mapping}) diff --git a/neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py b/neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py index 2cf116619e2..aba3dee0bc0 100644 --- a/neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py +++ b/neutron/plugins/linuxbridge/agent/linuxbridge_neutron_agent.py @@ -436,7 +436,7 @@ class LinuxBridgeManager(object): self.delete_vxlan(interface) continue - for physical_interface in self.interface_mappings.itervalues(): + for physical_interface in self.interface_mappings.values(): if (interface.startswith(physical_interface)): ips, gateway = self.get_interface_details(bridge_name) if ips: diff --git a/neutron/plugins/ml2/drivers/type_vlan.py b/neutron/plugins/ml2/drivers/type_vlan.py index 451e80187ae..a8280a46053 100644 --- a/neutron/plugins/ml2/drivers/type_vlan.py +++ b/neutron/plugins/ml2/drivers/type_vlan.py @@ -146,7 +146,7 @@ class VlanTypeDriver(helpers.SegmentTypeDriver): # remove from table unallocated vlans for any unconfigured # physical networks - for allocs in allocations.itervalues(): + for allocs in allocations.values(): for alloc in allocs: if not alloc.allocated: LOG.debug("Removing vlan %(vlan_id)s on physical " diff --git a/neutron/plugins/sriovnicagent/eswitch_manager.py b/neutron/plugins/sriovnicagent/eswitch_manager.py index 760a0e52ca4..5d80576a7c0 100644 --- a/neutron/plugins/sriovnicagent/eswitch_manager.py +++ b/neutron/plugins/sriovnicagent/eswitch_manager.py @@ -130,7 +130,7 @@ class EmbSwitch(object): """ vf_list = [] assigned_macs = [] - for vf_index in self.pci_slot_map.itervalues(): + for vf_index in self.pci_slot_map.values(): if not PciOsWrapper.is_assigned_vf(self.dev_name, vf_index): continue vf_list.append(vf_index) diff --git a/neutron/plugins/sriovnicagent/sriov_nic_agent.py b/neutron/plugins/sriovnicagent/sriov_nic_agent.py index 
7bb92c063f9..9e3a928f5e2 100644 --- a/neutron/plugins/sriovnicagent/sriov_nic_agent.py +++ b/neutron/plugins/sriovnicagent/sriov_nic_agent.py @@ -308,7 +308,7 @@ class SriovNicAgentConfigParser(object): Validate that network_device in excluded_device exists in device mappings """ - dev_net_set = set(self.device_mappings.itervalues()) + dev_net_set = set(self.device_mappings.values()) for dev_name in self.exclude_devices.iterkeys(): if dev_name not in dev_net_set: raise ValueError(_("Device name %(dev_name)s is missing from " diff --git a/neutron/tests/tempest/common/isolated_creds.py b/neutron/tests/tempest/common/isolated_creds.py index b4de93b6271..5da24a92264 100644 --- a/neutron/tests/tempest/common/isolated_creds.py +++ b/neutron/tests/tempest/common/isolated_creds.py @@ -369,7 +369,7 @@ class IsolatedCreds(cred_provider.CredentialProvider): if not self.isolated_creds: return self._clear_isolated_net_resources() - for creds in self.isolated_creds.itervalues(): + for creds in self.isolated_creds.values(): try: self._delete_user(creds.user_id) except lib_exc.NotFound: From a89f99c6b700b1c6f918fe359c7271ac25ed4bc4 Mon Sep 17 00:00:00 2001 From: Pavel Bondar Date: Wed, 10 Jun 2015 14:56:58 +0300 Subject: [PATCH 192/292] Refactor _update_ips_for_port This commit is a preparation step for using pluggable IPAM. _update_ips_for_port was refactored and split into two methods: - _get_changed_ips_for_port This method contains calculations common for pluggable and non-pluggable IPAM implementation, was moved to ipam_backend_mixin. - _update_ips_for_port This method is specific for non-pluggable IPAM implementation, so it was moved to ipam_non_pluggable_backend_common. Other changes: - _update_ips_for_port now returns namedtuple with added, removed, original ips (previously added and original ips were returned). 
List of removed ips is required by pluggable IPAM implementation to apply rollback-on-failure logic; - removed unused port_id argument from _update_ips_for_port argument list; Partially-Implements: blueprint neutron-ipam Change-Id: Id50b6227c8c2d94c35473aece080a6f106a5dfd8 --- neutron/db/db_base_plugin_v2.py | 54 +------------ neutron/db/ipam_backend_mixin.py | 42 +++++++++- neutron/db/ipam_non_pluggable_backend.py | 23 ++++++ .../tests/unit/db/test_ipam_backend_mixin.py | 79 +++++++++++++++++++ 4 files changed, 147 insertions(+), 51 deletions(-) create mode 100644 neutron/tests/unit/db/test_ipam_backend_mixin.py diff --git a/neutron/db/db_base_plugin_v2.py b/neutron/db/db_base_plugin_v2.py index 0395d2b3f2f..ca6de5cff76 100644 --- a/neutron/db/db_base_plugin_v2.py +++ b/neutron/db/db_base_plugin_v2.py @@ -122,52 +122,6 @@ class NeutronDbPluginV2(ipam_non_pluggable_backend.IpamNonPluggableBackend, return True return False - def _update_ips_for_port(self, context, network_id, port_id, original_ips, - new_ips, mac_address, device_owner): - """Add or remove IPs from the port.""" - ips = [] - # These ips are still on the port and haven't been removed - prev_ips = [] - - # the new_ips contain all of the fixed_ips that are to be updated - if len(new_ips) > cfg.CONF.max_fixed_ips_per_port: - msg = _('Exceeded maximim amount of fixed ips per port') - raise n_exc.InvalidInput(error_message=msg) - - # Remove all of the intersecting elements - for original_ip in original_ips[:]: - for new_ip in new_ips[:]: - if ('ip_address' in new_ip and - original_ip['ip_address'] == new_ip['ip_address']): - original_ips.remove(original_ip) - new_ips.remove(new_ip) - prev_ips.append(original_ip) - break - else: - # For ports that are not router ports, retain any automatic - # (non-optional, e.g. IPv6 SLAAC) addresses. 
- if device_owner not in constants.ROUTER_INTERFACE_OWNERS: - subnet = self._get_subnet(context, - original_ip['subnet_id']) - if (ipv6_utils.is_auto_address_subnet(subnet)): - original_ips.remove(original_ip) - prev_ips.append(original_ip) - - # Check if the IP's to add are OK - to_add = self._test_fixed_ips_for_port(context, network_id, new_ips, - device_owner) - for ip in original_ips: - LOG.debug("Port update. Hold %s", ip) - NeutronDbPluginV2._delete_ip_allocation(context, - network_id, - ip['subnet_id'], - ip['ip_address']) - - if to_add: - LOG.debug("Port update. Adding %s", to_add) - ips = self._allocate_fixed_ips(context, to_add, mac_address) - return ips, prev_ips - def _validate_subnet_cidr(self, context, network, new_subnet_cidr): """Validate the CIDR for a subnet. @@ -1211,13 +1165,13 @@ class NeutronDbPluginV2(ipam_non_pluggable_backend.IpamNonPluggableBackend, if 'fixed_ips' in p: changed_ips = True original = self._make_port_dict(port, process_extensions=False) - added_ips, prev_ips = self._update_ips_for_port( - context, network_id, id, + changes = self._update_ips_for_port( + context, network_id, original["fixed_ips"], p['fixed_ips'], original['mac_address'], port['device_owner']) # Update ips if necessary - for ip in added_ips: + for ip in changes.add: NeutronDbPluginV2._store_ip_allocation( context, ip['ip_address'], network_id, ip['subnet_id'], port.id) @@ -1232,7 +1186,7 @@ class NeutronDbPluginV2(ipam_non_pluggable_backend.IpamNonPluggableBackend, result = self._make_port_dict(port) # Keep up with fields that changed if changed_ips: - result['fixed_ips'] = prev_ips + added_ips + result['fixed_ips'] = changes.original + changes.add return result def delete_port(self, context, id): diff --git a/neutron/db/ipam_backend_mixin.py b/neutron/db/ipam_backend_mixin.py index 2330f1afb4d..63306a20722 100644 --- a/neutron/db/ipam_backend_mixin.py +++ b/neutron/db/ipam_backend_mixin.py @@ -13,11 +13,15 @@ # License for the specific language governing 
permissions and limitations # under the License. -import netaddr +import collections +import netaddr +from oslo_config import cfg from oslo_log import log as logging +from neutron.common import constants from neutron.common import exceptions as n_exc +from neutron.common import ipv6_utils from neutron.db import db_base_plugin_common from neutron.db import models_v2 from neutron.i18n import _LI @@ -29,6 +33,9 @@ class IpamBackendMixin(db_base_plugin_common.DbBasePluginCommon): """Contains IPAM specific code which is common for both backends. """ + # Tracks changes in ip allocation for port using namedtuple + Changes = collections.namedtuple('Changes', 'add original remove') + def _update_subnet_host_routes(self, context, id, s): def _combine(ht): @@ -157,3 +164,36 @@ class IpamBackendMixin(db_base_plugin_common.DbBasePluginCommon): raise n_exc.GatewayConflictWithAllocationPools( pool=pool_range, ip_address=gateway_ip) + + def _get_changed_ips_for_port(self, context, original_ips, + new_ips, device_owner): + """Calculate changes in IPs for the port.""" + # the new_ips contain all of the fixed_ips that are to be updated + if len(new_ips) > cfg.CONF.max_fixed_ips_per_port: + msg = _('Exceeded maximum amount of fixed ips per port') + raise n_exc.InvalidInput(error_message=msg) + + # These ips are still on the port and haven't been removed + prev_ips = [] + + # Remove all of the intersecting elements + for original_ip in original_ips[:]: + for new_ip in new_ips[:]: + if ('ip_address' in new_ip and + original_ip['ip_address'] == new_ip['ip_address']): + original_ips.remove(original_ip) + new_ips.remove(new_ip) + prev_ips.append(original_ip) + break + else: + # For ports that are not router ports, retain any automatic + # (non-optional, e.g. IPv6 SLAAC) addresses. 
+ if device_owner not in constants.ROUTER_INTERFACE_OWNERS: + subnet = self._get_subnet(context, + original_ip['subnet_id']) + if (ipv6_utils.is_auto_address_subnet(subnet)): + original_ips.remove(original_ip) + prev_ips.append(original_ip) + return self.Changes(add=new_ips, + original=prev_ips, + remove=original_ips) diff --git a/neutron/db/ipam_non_pluggable_backend.py b/neutron/db/ipam_non_pluggable_backend.py index ee143667f3a..a6c57a37219 100644 --- a/neutron/db/ipam_non_pluggable_backend.py +++ b/neutron/db/ipam_non_pluggable_backend.py @@ -299,6 +299,29 @@ class IpamNonPluggableBackend(ipam_backend_mixin.IpamBackendMixin): 'subnet_id': result['subnet_id']}) return ips + def _update_ips_for_port(self, context, network_id, original_ips, + new_ips, mac_address, device_owner): + """Add or remove IPs from the port.""" + added = [] + changes = self._get_changed_ips_for_port(context, original_ips, + new_ips, device_owner) + # Check if the IP's to add are OK + to_add = self._test_fixed_ips_for_port(context, network_id, + changes.add, device_owner) + for ip in changes.remove: + LOG.debug("Port update. Hold %s", ip) + IpamNonPluggableBackend._delete_ip_allocation(context, + network_id, + ip['subnet_id'], + ip['ip_address']) + + if to_add: + LOG.debug("Port update. Adding %s", to_add) + added = self._allocate_fixed_ips(context, to_add, mac_address) + return self.Changes(add=added, + original=changes.original, + remove=changes.remove) + def _allocate_ips_for_port(self, context, port): """Allocate IP addresses for the port. diff --git a/neutron/tests/unit/db/test_ipam_backend_mixin.py b/neutron/tests/unit/db/test_ipam_backend_mixin.py new file mode 100644 index 00000000000..3488759dbd5 --- /dev/null +++ b/neutron/tests/unit/db/test_ipam_backend_mixin.py @@ -0,0 +1,79 @@ +# Copyright (c) 2015 Infoblox Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import mock + +from neutron.common import constants +from neutron.db import ipam_backend_mixin +from neutron.tests import base + + +class TestIpamBackendMixin(base.BaseTestCase): + + def setUp(self): + super(TestIpamBackendMixin, self).setUp() + self.mixin = ipam_backend_mixin.IpamBackendMixin() + self.ctx = mock.Mock() + self.default_new_ips = (('id-1', '192.168.1.1'), + ('id-2', '192.168.1.2')) + self.default_original_ips = (('id-1', '192.168.1.1'), + ('id-5', '172.20.16.5')) + + def _prepare_ips(self, ips): + return [{'ip_address': ip[1], + 'subnet_id': ip[0]} for ip in ips] + + def _test_get_changed_ips_for_port(self, expected_change, original_ips, + new_ips, owner): + change = self.mixin._get_changed_ips_for_port(self.ctx, + original_ips, + new_ips, + owner) + self.assertEqual(expected_change, change) + + def test__get_changed_ips_for_port(self): + owner_router = constants.DEVICE_OWNER_ROUTER_INTF + new_ips = self._prepare_ips(self.default_new_ips) + original_ips = self._prepare_ips(self.default_original_ips) + + # generate changes before calling _get_changed_ips_for_port + # because new_ips and original_ips are affected during call + expected_change = self.mixin.Changes(add=[new_ips[1]], + original=[original_ips[0]], + remove=[original_ips[1]]) + self._test_get_changed_ips_for_port(expected_change, original_ips, + new_ips, owner_router) + + def test__get_changed_ips_for_port_autoaddress(self): + owner_not_router = constants.DEVICE_OWNER_DHCP + new_ips = self._prepare_ips(self.default_new_ips) + + original = (('id-1', '192.168.1.1'), + ('id-5', 
'2000:1234:5678::12FF:FE34:5678')) + original_ips = self._prepare_ips(original) + + # mock to test auto address part + slaac_subnet = {'ipv6_address_mode': constants.IPV6_SLAAC, + 'ipv6_ra_mode': constants.IPV6_SLAAC} + self.mixin._get_subnet = mock.Mock(return_value=slaac_subnet) + + # make a copy of original_ips + # since it is changed by _get_changed_ips_for_port + expected_change = self.mixin.Changes(add=[new_ips[1]], + original=original_ips[:], + remove=[]) + + self._test_get_changed_ips_for_port(expected_change, original_ips, + new_ips, owner_not_router) From f88f3dc8d6f7240d6c0d9d5006345b3a797ae067 Mon Sep 17 00:00:00 2001 From: Pavel Bondar Date: Wed, 10 Jun 2015 16:18:40 +0300 Subject: [PATCH 193/292] Refactor update_port in db_base_plugin_v2 This commit is a preparation step for using pluggable IPAM. - moved validations into _validate_port_for_update; - updating ip addresses for port is backend specific, so moved into _update_port_with_ips in ipam_non_pluggable_backend; - writing port changes to db is common for both backends, so moved into _update_db_port in ipam_backend_mixin; - updated to use namedtuple to track add/original/remove ips; - added _make_fixed_ip_dict to exclude keys other than ip_address and subnet_id; Partially-Implements: blueprint neutron-ipam Change-Id: I1110e88f372b1d0cc7ec72049ba69a6d548da867 --- neutron/db/db_base_plugin_common.py | 6 +++ neutron/db/db_base_plugin_v2.py | 68 +++++++++--------------- neutron/db/ipam_backend_mixin.py | 11 ++++ neutron/db/ipam_non_pluggable_backend.py | 19 +++++++ 4 files changed, 61 insertions(+), 43 deletions(-) diff --git a/neutron/db/db_base_plugin_common.py b/neutron/db/db_base_plugin_common.py index f0a75ed1a86..d6a136c1db3 100644 --- a/neutron/db/db_base_plugin_common.py +++ b/neutron/db/db_base_plugin_common.py @@ -245,6 +245,12 @@ class DbBasePluginCommon(common_db_mixin.CommonDbMixin): args['ipv6_address_mode'] = subnet['ipv6_address_mode'] return args + def _make_fixed_ip_dict(self, ips): + 
# Excludes from dict all keys except subnet_id and ip_address + return [{'subnet_id': ip["subnet_id"], + 'ip_address': ip["ip_address"]} + for ip in ips] + def _gateway_ip_str(self, subnet, cidr_net): if subnet.get('gateway_ip') is attributes.ATTR_NOT_SPECIFIED: return str(cidr_net.network + 1) diff --git a/neutron/db/db_base_plugin_v2.py b/neutron/db/db_base_plugin_v2.py index ca6de5cff76..9147fd88996 100644 --- a/neutron/db/db_base_plugin_v2.py +++ b/neutron/db/db_base_plugin_v2.py @@ -1139,54 +1139,36 @@ class NeutronDbPluginV2(ipam_non_pluggable_backend.IpamNonPluggableBackend, return self._make_port_dict(db_port, process_extensions=False) - def update_port(self, context, id, port): - p = port['port'] + def _validate_port_for_update(self, context, db_port, new_port, new_mac): + changed_owner = 'device_owner' in new_port + current_owner = (new_port.get('device_owner') or + db_port['device_owner']) + changed_device_id = new_port.get('device_id') != db_port['device_id'] + current_device_id = new_port.get('device_id') or db_port['device_id'] + + if current_owner and changed_device_id or changed_owner: + self._enforce_device_owner_not_router_intf_or_device_id( + context, current_owner, current_device_id, + db_port['tenant_id']) + + if new_mac and new_mac != db_port['mac_address']: + self._check_mac_addr_update(context, db_port, + new_mac, current_owner) + + def update_port(self, context, id, port): + new_port = port['port'] - changed_ips = False with context.session.begin(subtransactions=True): port = self._get_port(context, id) - changed_owner = 'device_owner' in p - current_owner = p.get('device_owner') or port['device_owner'] - changed_device_id = p.get('device_id') != port['device_id'] - current_device_id = p.get('device_id') or port['device_id'] - - if current_owner and changed_device_id or changed_owner: - self._enforce_device_owner_not_router_intf_or_device_id( - context, current_owner, current_device_id, - port['tenant_id']) - - new_mac = 
p.get('mac_address') - if new_mac and new_mac != port['mac_address']: - self._check_mac_addr_update( - context, port, new_mac, current_owner) - - # Check if the IPs need to be updated - network_id = port['network_id'] - if 'fixed_ips' in p: - changed_ips = True - original = self._make_port_dict(port, process_extensions=False) - changes = self._update_ips_for_port( - context, network_id, - original["fixed_ips"], p['fixed_ips'], - original['mac_address'], port['device_owner']) - - # Update ips if necessary - for ip in changes.add: - NeutronDbPluginV2._store_ip_allocation( - context, ip['ip_address'], network_id, - ip['subnet_id'], port.id) - # Remove all attributes in p which are not in the port DB model - # and then update the port - try: - port.update(self._filter_non_model_columns(p, models_v2.Port)) - context.session.flush() - except db_exc.DBDuplicateEntry: - raise n_exc.MacAddressInUse(net_id=network_id, mac=new_mac) - + new_mac = new_port.get('mac_address') + self._validate_port_for_update(context, port, new_port, new_mac) + changes = self._update_port_with_ips(context, port, + new_port, new_mac) result = self._make_port_dict(port) # Keep up with fields that changed - if changed_ips: - result['fixed_ips'] = changes.original + changes.add + if changes.original or changes.add or changes.remove: + result['fixed_ips'] = self._make_fixed_ip_dict( + changes.original + changes.add) return result def delete_port(self, context, id): diff --git a/neutron/db/ipam_backend_mixin.py b/neutron/db/ipam_backend_mixin.py index 63306a20722..e38b49549be 100644 --- a/neutron/db/ipam_backend_mixin.py +++ b/neutron/db/ipam_backend_mixin.py @@ -17,6 +17,7 @@ import collections import netaddr from oslo_config import cfg +from oslo_db import exception as db_exc from oslo_log import log as logging from neutron.common import constants @@ -36,6 +37,16 @@ class IpamBackendMixin(db_base_plugin_common.DbBasePluginCommon): # Tracks changes in ip allocation for port using namedtuple Changes = 
collections.namedtuple('Changes', 'add original remove') + def _update_db_port(self, context, db_port, new_port, network_id, new_mac): + # Remove all attributes in new_port which are not in the port DB model + # and then update the port + try: + db_port.update(self._filter_non_model_columns(new_port, + models_v2.Port)) + context.session.flush() + except db_exc.DBDuplicateEntry: + raise n_exc.MacAddressInUse(net_id=network_id, mac=new_mac) + def _update_subnet_host_routes(self, context, id, s): def _combine(ht): diff --git a/neutron/db/ipam_non_pluggable_backend.py b/neutron/db/ipam_non_pluggable_backend.py index a6c57a37219..bb929975a6b 100644 --- a/neutron/db/ipam_non_pluggable_backend.py +++ b/neutron/db/ipam_non_pluggable_backend.py @@ -182,6 +182,25 @@ class IpamNonPluggableBackend(ipam_backend_mixin.IpamBackendMixin): return True return False + def _update_port_with_ips(self, context, db_port, new_port, new_mac): + changes = self.Changes(add=[], original=[], remove=[]) + # Check if the IPs need to be updated + network_id = db_port['network_id'] + if 'fixed_ips' in new_port: + original = self._make_port_dict(db_port, process_extensions=False) + changes = self._update_ips_for_port( + context, network_id, + original["fixed_ips"], new_port['fixed_ips'], + original['mac_address'], db_port['device_owner']) + + # Update ips if necessary + for ip in changes.add: + IpamNonPluggableBackend._store_ip_allocation( + context, ip['ip_address'], network_id, + ip['subnet_id'], db_port.id) + self._update_db_port(context, db_port, new_port, network_id, new_mac) + return changes + def _test_fixed_ips_for_port(self, context, network_id, fixed_ips, device_owner): """Test fixed IPs for port. From 34aa030847ed24ad6ca7759459cbe9a6d0f43db3 Mon Sep 17 00:00:00 2001 From: Henry Gessau Date: Mon, 15 Jun 2015 14:47:21 -0400 Subject: [PATCH 194/292] Remove fossilized remains Clean up ancient stuff that hasn't been touched in over 3 years. 
Change-Id: I67fcd85027fb6614cafe8d92ddbf8c24aed58a4f --- doc/source/docbkx/docbkx-example/README | 14 - doc/source/docbkx/docbkx-example/pom.xml | 38 - .../docbkx-example/src/docbkx/example.xml | 318 --- .../src/docbkx/figures/example.sdx | 79 - .../src/docbkx/figures/example.svg | 523 ----- doc/source/docbkx/quantum-api-1.0/common.ent | 112 - .../quantum-api-1.0/figures/Arrow_east.svg | 70 - .../figures/Check_mark_23x20_02.svg | 60 - .../docbkx/quantum-api-1.0/js/shjs/sh_java.js | 337 --- .../quantum-api-1.0/js/shjs/sh_javascript.js | 347 --- .../docbkx/quantum-api-1.0/js/shjs/sh_main.js | 538 ----- .../docbkx/quantum-api-1.0/js/shjs/sh_xml.js | 115 - .../js/trc/schema/controller.js | 184 -- .../js/trc/schema/layoutManager.js | 137 -- .../js/trc/schema/sampleManager.js | 342 --- .../docbkx/quantum-api-1.0/js/trc/util.js | 564 ----- .../quantum-api-1.0/quantum-api-guide.xml | 2011 ----------------- .../samples/att-get-res-none.json | 3 - .../samples/att-get-res-none.xml | 1 - .../quantum-api-1.0/samples/att-get-res.json | 6 - .../quantum-api-1.0/samples/att-get-res.xml | 1 - .../quantum-api-1.0/samples/att-put-req.json | 6 - .../quantum-api-1.0/samples/att-put-req.xml | 2 - .../quantum-api-1.0/samples/extensions.json | 19 - .../quantum-api-1.0/samples/extensions.xml | 21 - .../docbkx/quantum-api-1.0/samples/fault.json | 7 - .../docbkx/quantum-api-1.0/samples/fault.xml | 8 - .../samples/network-get-detail-res.json | 22 - .../samples/network-get-detail-res.xml | 14 - .../samples/network-get-res.json | 7 - .../samples/network-get-res.xml | 3 - .../samples/network-post-req.json | 6 - .../samples/network-post-req.xml | 2 - .../samples/network-post-res.json | 6 - .../samples/network-post-res.xml | 2 - .../samples/networks-get-detail-res.json | 13 - .../samples/networks-get-detail-res.xml | 8 - .../samples/networks-get-res.json | 11 - .../samples/networks-get-res.xml | 4 - .../samples/networks-post-req.json | 5 - .../samples/networks-post-req.xml | 2 - 
.../samples/networks-post-res.json | 5 - .../samples/networks-post-res.xml | 2 - .../quantum-api-1.0/samples/notfound.json | 7 - .../quantum-api-1.0/samples/notfound.xml | 7 - .../samples/notimplemented.json | 7 - .../samples/notimplemented.xml | 5 - .../samples/port-get-detail-res.json | 12 - .../samples/port-get-detail-res.xml | 6 - .../quantum-api-1.0/samples/port-get-res.json | 8 - .../quantum-api-1.0/samples/port-get-res.xml | 3 - .../samples/port-post-req.json | 6 - .../quantum-api-1.0/samples/port-post-req.xml | 2 - .../samples/port-post-res.json | 6 - .../quantum-api-1.0/samples/port-post-res.xml | 2 - .../samples/ports-get-detail-res.json | 12 - .../samples/ports-get-detail-res.xml | 8 - .../samples/ports-get-res.json | 11 - .../quantum-api-1.0/samples/ports-get-res.xml | 6 - .../quantum-api-1.0/samples/private.json | 9 - .../quantum-api-1.0/samples/private.xml | 6 - .../quantum-api-1.0/samples/public.json | 11 - .../docbkx/quantum-api-1.0/samples/public.xml | 8 - .../quantum-api-1.0/samples/versions-atom.xml | 22 - .../quantum-api-1.0/samples/versions.json | 24 - .../quantum-api-1.0/samples/versions.xml | 12 - .../docbkx/quantum-api-1.0/style/schema.css | 82 - .../quantum-api-1.0/style/shjs/sh_acid.css | 151 -- .../style/shjs/sh_darkblue.css | 151 -- .../quantum-api-1.0/style/shjs/sh_emacs.css | 139 -- .../quantum-api-1.0/style/shjs/sh_night.css | 151 -- .../quantum-api-1.0/style/shjs/sh_pablo.css | 151 -- .../quantum-api-1.0/style/shjs/sh_print.css | 145 -- .../quantum-api-1.0/style/shjs/sh_style.css | 66 - .../style/shjs/sh_whitengrey.css | 139 -- .../docbkx/quantum-api-1.0/xsd/.htaccess | 6 - .../docbkx/quantum-api-1.0/xsd/actions.xsd | 439 ---- .../quantum-api-1.0/xsd/affinity-id.xjb | 11 - .../quantum-api-1.0/xsd/affinity-id.xsd | 39 - .../docbkx/quantum-api-1.0/xsd/api-common.xjb | 11 - .../docbkx/quantum-api-1.0/xsd/api-common.xsd | 66 - doc/source/docbkx/quantum-api-1.0/xsd/api.xjb | 21 - doc/source/docbkx/quantum-api-1.0/xsd/api.xsd | 103 - 
.../docbkx/quantum-api-1.0/xsd/atom.xjb | 11 - .../docbkx/quantum-api-1.0/xsd/atom/atom.xsd | 105 - .../docbkx/quantum-api-1.0/xsd/atom/xml.xsd | 294 --- .../docbkx/quantum-api-1.0/xsd/backup.xsd | 378 ---- .../docbkx/quantum-api-1.0/xsd/common.xsd | 156 -- .../quantum-api-1.0/xsd/ext/rax-dme/api.xsd | 38 - .../xsd/ext/rax-dme/rax-dme.xsd | 25 - .../docbkx/quantum-api-1.0/xsd/extensions.xsd | 203 -- .../docbkx/quantum-api-1.0/xsd/faults.xsd | 532 ----- .../docbkx/quantum-api-1.0/xsd/flavor.xsd | 244 -- .../docbkx/quantum-api-1.0/xsd/image.xsd | 443 ---- .../docbkx/quantum-api-1.0/xsd/ipgroup.xsd | 245 -- .../docbkx/quantum-api-1.0/xsd/limits.xsd | 315 --- .../docbkx/quantum-api-1.0/xsd/metadata.xsd | 89 - .../docbkx/quantum-api-1.0/xsd/server.xsd | 1013 --------- .../docbkx/quantum-api-1.0/xsd/shareip.xjb | 11 - .../docbkx/quantum-api-1.0/xsd/shareip.xsd | 83 - .../docbkx/quantum-api-1.0/xsd/txt.htaccess | 4 - .../docbkx/quantum-api-1.0/xsd/version.xsd | 355 --- .../docbkx/quantum-api-1.0/xslt/schema.xsl | 1342 ----------- 103 files changed, 13932 deletions(-) delete mode 100644 doc/source/docbkx/docbkx-example/README delete mode 100644 doc/source/docbkx/docbkx-example/pom.xml delete mode 100644 doc/source/docbkx/docbkx-example/src/docbkx/example.xml delete mode 100644 doc/source/docbkx/docbkx-example/src/docbkx/figures/example.sdx delete mode 100644 doc/source/docbkx/docbkx-example/src/docbkx/figures/example.svg delete mode 100644 doc/source/docbkx/quantum-api-1.0/common.ent delete mode 100644 doc/source/docbkx/quantum-api-1.0/figures/Arrow_east.svg delete mode 100644 doc/source/docbkx/quantum-api-1.0/figures/Check_mark_23x20_02.svg delete mode 100644 doc/source/docbkx/quantum-api-1.0/js/shjs/sh_java.js delete mode 100644 doc/source/docbkx/quantum-api-1.0/js/shjs/sh_javascript.js delete mode 100644 doc/source/docbkx/quantum-api-1.0/js/shjs/sh_main.js delete mode 100644 doc/source/docbkx/quantum-api-1.0/js/shjs/sh_xml.js delete mode 100644 
doc/source/docbkx/quantum-api-1.0/js/trc/schema/controller.js delete mode 100644 doc/source/docbkx/quantum-api-1.0/js/trc/schema/layoutManager.js delete mode 100644 doc/source/docbkx/quantum-api-1.0/js/trc/schema/sampleManager.js delete mode 100644 doc/source/docbkx/quantum-api-1.0/js/trc/util.js delete mode 100644 doc/source/docbkx/quantum-api-1.0/quantum-api-guide.xml delete mode 100644 doc/source/docbkx/quantum-api-1.0/samples/att-get-res-none.json delete mode 100644 doc/source/docbkx/quantum-api-1.0/samples/att-get-res-none.xml delete mode 100644 doc/source/docbkx/quantum-api-1.0/samples/att-get-res.json delete mode 100644 doc/source/docbkx/quantum-api-1.0/samples/att-get-res.xml delete mode 100644 doc/source/docbkx/quantum-api-1.0/samples/att-put-req.json delete mode 100644 doc/source/docbkx/quantum-api-1.0/samples/att-put-req.xml delete mode 100644 doc/source/docbkx/quantum-api-1.0/samples/extensions.json delete mode 100644 doc/source/docbkx/quantum-api-1.0/samples/extensions.xml delete mode 100644 doc/source/docbkx/quantum-api-1.0/samples/fault.json delete mode 100644 doc/source/docbkx/quantum-api-1.0/samples/fault.xml delete mode 100644 doc/source/docbkx/quantum-api-1.0/samples/network-get-detail-res.json delete mode 100644 doc/source/docbkx/quantum-api-1.0/samples/network-get-detail-res.xml delete mode 100644 doc/source/docbkx/quantum-api-1.0/samples/network-get-res.json delete mode 100644 doc/source/docbkx/quantum-api-1.0/samples/network-get-res.xml delete mode 100644 doc/source/docbkx/quantum-api-1.0/samples/network-post-req.json delete mode 100644 doc/source/docbkx/quantum-api-1.0/samples/network-post-req.xml delete mode 100644 doc/source/docbkx/quantum-api-1.0/samples/network-post-res.json delete mode 100644 doc/source/docbkx/quantum-api-1.0/samples/network-post-res.xml delete mode 100644 doc/source/docbkx/quantum-api-1.0/samples/networks-get-detail-res.json delete mode 100644 doc/source/docbkx/quantum-api-1.0/samples/networks-get-detail-res.xml delete 
mode 100644 doc/source/docbkx/quantum-api-1.0/samples/networks-get-res.json delete mode 100644 doc/source/docbkx/quantum-api-1.0/samples/networks-get-res.xml delete mode 100644 doc/source/docbkx/quantum-api-1.0/samples/networks-post-req.json delete mode 100644 doc/source/docbkx/quantum-api-1.0/samples/networks-post-req.xml delete mode 100644 doc/source/docbkx/quantum-api-1.0/samples/networks-post-res.json delete mode 100644 doc/source/docbkx/quantum-api-1.0/samples/networks-post-res.xml delete mode 100644 doc/source/docbkx/quantum-api-1.0/samples/notfound.json delete mode 100644 doc/source/docbkx/quantum-api-1.0/samples/notfound.xml delete mode 100644 doc/source/docbkx/quantum-api-1.0/samples/notimplemented.json delete mode 100644 doc/source/docbkx/quantum-api-1.0/samples/notimplemented.xml delete mode 100644 doc/source/docbkx/quantum-api-1.0/samples/port-get-detail-res.json delete mode 100644 doc/source/docbkx/quantum-api-1.0/samples/port-get-detail-res.xml delete mode 100644 doc/source/docbkx/quantum-api-1.0/samples/port-get-res.json delete mode 100644 doc/source/docbkx/quantum-api-1.0/samples/port-get-res.xml delete mode 100644 doc/source/docbkx/quantum-api-1.0/samples/port-post-req.json delete mode 100644 doc/source/docbkx/quantum-api-1.0/samples/port-post-req.xml delete mode 100644 doc/source/docbkx/quantum-api-1.0/samples/port-post-res.json delete mode 100644 doc/source/docbkx/quantum-api-1.0/samples/port-post-res.xml delete mode 100644 doc/source/docbkx/quantum-api-1.0/samples/ports-get-detail-res.json delete mode 100644 doc/source/docbkx/quantum-api-1.0/samples/ports-get-detail-res.xml delete mode 100644 doc/source/docbkx/quantum-api-1.0/samples/ports-get-res.json delete mode 100644 doc/source/docbkx/quantum-api-1.0/samples/ports-get-res.xml delete mode 100644 doc/source/docbkx/quantum-api-1.0/samples/private.json delete mode 100644 doc/source/docbkx/quantum-api-1.0/samples/private.xml delete mode 100644 doc/source/docbkx/quantum-api-1.0/samples/public.json 
delete mode 100644 doc/source/docbkx/quantum-api-1.0/samples/public.xml delete mode 100644 doc/source/docbkx/quantum-api-1.0/samples/versions-atom.xml delete mode 100644 doc/source/docbkx/quantum-api-1.0/samples/versions.json delete mode 100644 doc/source/docbkx/quantum-api-1.0/samples/versions.xml delete mode 100644 doc/source/docbkx/quantum-api-1.0/style/schema.css delete mode 100644 doc/source/docbkx/quantum-api-1.0/style/shjs/sh_acid.css delete mode 100644 doc/source/docbkx/quantum-api-1.0/style/shjs/sh_darkblue.css delete mode 100644 doc/source/docbkx/quantum-api-1.0/style/shjs/sh_emacs.css delete mode 100644 doc/source/docbkx/quantum-api-1.0/style/shjs/sh_night.css delete mode 100644 doc/source/docbkx/quantum-api-1.0/style/shjs/sh_pablo.css delete mode 100644 doc/source/docbkx/quantum-api-1.0/style/shjs/sh_print.css delete mode 100644 doc/source/docbkx/quantum-api-1.0/style/shjs/sh_style.css delete mode 100644 doc/source/docbkx/quantum-api-1.0/style/shjs/sh_whitengrey.css delete mode 100644 doc/source/docbkx/quantum-api-1.0/xsd/.htaccess delete mode 100644 doc/source/docbkx/quantum-api-1.0/xsd/actions.xsd delete mode 100644 doc/source/docbkx/quantum-api-1.0/xsd/affinity-id.xjb delete mode 100644 doc/source/docbkx/quantum-api-1.0/xsd/affinity-id.xsd delete mode 100644 doc/source/docbkx/quantum-api-1.0/xsd/api-common.xjb delete mode 100644 doc/source/docbkx/quantum-api-1.0/xsd/api-common.xsd delete mode 100644 doc/source/docbkx/quantum-api-1.0/xsd/api.xjb delete mode 100644 doc/source/docbkx/quantum-api-1.0/xsd/api.xsd delete mode 100644 doc/source/docbkx/quantum-api-1.0/xsd/atom.xjb delete mode 100644 doc/source/docbkx/quantum-api-1.0/xsd/atom/atom.xsd delete mode 100644 doc/source/docbkx/quantum-api-1.0/xsd/atom/xml.xsd delete mode 100644 doc/source/docbkx/quantum-api-1.0/xsd/backup.xsd delete mode 100644 doc/source/docbkx/quantum-api-1.0/xsd/common.xsd delete mode 100644 doc/source/docbkx/quantum-api-1.0/xsd/ext/rax-dme/api.xsd delete mode 100644 
doc/source/docbkx/quantum-api-1.0/xsd/ext/rax-dme/rax-dme.xsd delete mode 100644 doc/source/docbkx/quantum-api-1.0/xsd/extensions.xsd delete mode 100644 doc/source/docbkx/quantum-api-1.0/xsd/faults.xsd delete mode 100644 doc/source/docbkx/quantum-api-1.0/xsd/flavor.xsd delete mode 100644 doc/source/docbkx/quantum-api-1.0/xsd/image.xsd delete mode 100644 doc/source/docbkx/quantum-api-1.0/xsd/ipgroup.xsd delete mode 100644 doc/source/docbkx/quantum-api-1.0/xsd/limits.xsd delete mode 100644 doc/source/docbkx/quantum-api-1.0/xsd/metadata.xsd delete mode 100644 doc/source/docbkx/quantum-api-1.0/xsd/server.xsd delete mode 100644 doc/source/docbkx/quantum-api-1.0/xsd/shareip.xjb delete mode 100644 doc/source/docbkx/quantum-api-1.0/xsd/shareip.xsd delete mode 100644 doc/source/docbkx/quantum-api-1.0/xsd/txt.htaccess delete mode 100644 doc/source/docbkx/quantum-api-1.0/xsd/version.xsd delete mode 100644 doc/source/docbkx/quantum-api-1.0/xslt/schema.xsl diff --git a/doc/source/docbkx/docbkx-example/README b/doc/source/docbkx/docbkx-example/README deleted file mode 100644 index e1545671b31..00000000000 --- a/doc/source/docbkx/docbkx-example/README +++ /dev/null @@ -1,14 +0,0 @@ -README - -This docbkx-example folder is provided for those who want to use the maven mojo supplied with the project to build their own documents to PDF and HTML (webhelp) format. It's intended to be a template and model. - -You can edit the src/docbkx/example.xml file using vi, emacs, or another DocBook editor. At Rackspace we use Oxygen. Both Oxygen and XML Mind offer free licenses to those working on open source project documentation. - -To build the output, install Apache Maven (https://maven.apache.org/) and then run: - -mvn clean generate-sources - -in the directory containing the pom.xml file. - -Feel free to ask questions of the openstack-docs team at https://launchpad.net/~openstack-doc. 
- diff --git a/doc/source/docbkx/docbkx-example/pom.xml b/doc/source/docbkx/docbkx-example/pom.xml deleted file mode 100644 index f281971a5d5..00000000000 --- a/doc/source/docbkx/docbkx-example/pom.xml +++ /dev/null @@ -1,38 +0,0 @@ - - 4.0.0 - my-groupid - my-guide - 1.0.0-SNAPSHOT - jar - OpenStack stand alone documentation examples - - - - - com.agilejava.docbkx - docbkx-maven-plugin - - - - generate-pdf - generate-webhelp - - generate-sources - - - - true - 100 - - - - - - - - - - - - - diff --git a/doc/source/docbkx/docbkx-example/src/docbkx/example.xml b/doc/source/docbkx/docbkx-example/src/docbkx/example.xml deleted file mode 100644 index 96f1c64c10c..00000000000 --- a/doc/source/docbkx/docbkx-example/src/docbkx/example.xml +++ /dev/null @@ -1,318 +0,0 @@ - - Maven Example Documentation - - - - - - - - Badges! We don't need any stinking badges! - - - - 2011 - Timothy D. Witham - - Example v0.1 - Product Name Doesn't Exist - it's an example!™ - 2011-01-01 - - - Copyright details are filled in by the template. Change the value of the role - attribute on the legalnotice element to change the license. - - - - This document is intended for individuals who whish to produce documentation using Maven and having - the same "feel" as the documentation that is produced by the mainline OpenStack projects. - - - - this is a placeholder for the front cover - - - this is a placeholder for the back cover - - - - Overview - Welcome to the getting started with Maven documentation. Congratulations you have - successfully downloaded and built the example. - - For more details on the Product Name service, please refer to http://www.rackspacecloud.com/cloud_hosting_products/product name - - We welcome feedback, comments, and bug reports at support@rackspacecloud.com. -
- Intended Audience - This guide is intended to individuals who want to develop standalone documentation - to use within an OpenStack deployment. Using this tool chain will give you the look and - feel of the mainline OpenStack documentation. - -
-
- Document Change History - This version of the Maven Getting Started Guide replaces and obsoletes all previous versions. The - most recent changes are described in the table below: - - - - Revision Date - Summary of Changes - - - - - July. 14, 2011 - - - - Initial document creation. - - - - - - -
-
- Additional Resources - - - - - Openstack - Cloud Software - - - - - - - Docbook Main Web Site - - - - - - - Docbook Quick Reference - - - - -
-
- - Concepts - - Need to put something here. - - - - How do I? - -
- Notes and including images - So I want an note and an image in this section ... - - This is an example of a note. - - - Here's a sample figure in svg and png formats: -
- Sample Image - - - - - - - - -
-
-
- Multiple Related Documents - - What you need to do in order to have multiple documents fit within the - build structure. - -
-
- Using multiple files for a document - - What you need to do in order to have a single document that is made up of multiple - files. - -
-
- Who, What, Where, When and Why of pom.xml - - You will of noticed the pom.xml file at the root directory. - This file is used to set the project parameters for the documentation. Including - what type of documentation to produce and any post processing that needs to happen. - If you want to know more about - - pom.xml - need a link - - then follow the link. - - For the pom.xmlfile that was included in this distribution we will - parse the individual lines and explaine the meaning. - - - - -
- <project> - - What is all of this stuff and why is it important? - -
-
- <modelVersion> - - What goes in here and why? - -
-
- <groupId> - - What goes in here and why? - -
-
- <artifactId> - - What goes in here and why? - -
-
- <version> - - What goes in here and why? - -
-
- <packaging> - - What goes in here and why? - -
-
- <name> - - Name of your document. - -
-
- <build> - - Make some documents. - -
- <plugin(s)> - - What does this do and why? - -
- <groupId> - - What goes in here and why? - -
-
- <artifactId> - - What goes in here and why? - -
-
- <execution(s)> - - What goes in here and why? - -
- <goal(s)> - - Different types of goals and why you use them. - -
-
- <phase> - - What does this section do? What phases can you specify. - -
-
-
- <configuration> - - What does this section do? - -
- <xincludeSupported> - - What does this do and why? - -
-
- <chunkSectionDepth> - - What does this do and why? - -
-
- <postprocess> - - What does this section do? What are possible pieces? - -
- <copy> - - What does this section do? What are possible pieces? - -
- <fileset> - - What does this section do? What are possible pieces? - -
- <include> - - What does this section do? What are possible pieces? - -
-
-
-
-
-
-
-
-
- Who, What, Where, When and Why of build.xml - - You will of noticed the build.xml file at the root directory. - This file is used to set the project parameters for the documentation. Including - what type of documentation to produce and any post processing that needs to happen. - If you want to know more about - - pom.xml - need a link - - then follow the link. - -
-
- - Troubleshooting - Sometimes things go wrong... - -
diff --git a/doc/source/docbkx/docbkx-example/src/docbkx/figures/example.sdx b/doc/source/docbkx/docbkx-example/src/docbkx/figures/example.sdx deleted file mode 100644 index 3f2d863669f..00000000000 --- a/doc/source/docbkx/docbkx-example/src/docbkx/figures/example.sdx +++ /dev/null @@ -1,79 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/doc/source/docbkx/docbkx-example/src/docbkx/figures/example.svg b/doc/source/docbkx/docbkx-example/src/docbkx/figures/example.svg deleted file mode 100644 index 58b98232d40..00000000000 --- a/doc/source/docbkx/docbkx-example/src/docbkx/figures/example.svg +++ /dev/null @@ -1,523 +0,0 @@ - - - - -Creator: Quick Sequence Diagram Editor Producer: org.freehep.graphicsio.svg.SVGGraphics2D Revision: 12753 Source: Date: Monday, May 2, 2011 2:44:33 PM CDT - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/doc/source/docbkx/quantum-api-1.0/common.ent b/doc/source/docbkx/quantum-api-1.0/common.ent deleted 
file mode 100644 index 67e512af571..00000000000 --- a/doc/source/docbkx/quantum-api-1.0/common.ent +++ /dev/null @@ -1,112 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - '> - - - - - - '> - - - - - - '> - - - - - - '> - - - - - - '> - - - - - - - - -