Browse Source

Merge remote-tracking branch 'origin/master' into merge-branch

Change-Id: I42fb51c428c5ddf39501b9dc1792add62206f4d6
changes/03/207903/1
Kyle Mestery 7 years ago
parent
commit
9badcd249d
  1. 2
      doc/source/devref/callbacks.rst
  2. 9
      doc/source/devref/client_command_extensions.rst
  3. 13
      doc/source/devref/contribute.rst
  4. 146
      doc/source/devref/db_layer.rst
  5. 74
      doc/source/devref/dns_order.rst
  6. 19
      doc/source/devref/fullstack_testing.rst
  7. 2
      doc/source/devref/index.rst
  8. 1
      doc/source/devref/l2_agents.rst
  9. 33
      doc/source/devref/neutron_api.rst
  10. 27
      doc/source/devref/sriov_nic_agent.rst
  11. 33
      doc/source/devref/sub_projects.rst
  12. 2
      doc/source/policies/bugs.rst
  13. 9
      doc/source/policies/core-reviewers.rst
  14. 2
      etc/metadata_agent.ini
  15. 2
      etc/neutron.conf
  16. 31
      etc/neutron/plugins/metaplugin/metaplugin.ini
  17. 70
      etc/neutron/plugins/ml2/ml2_conf_cisco.ini
  18. 17
      etc/neutron/rootwrap.d/cisco-apic.filters
  19. 13
      etc/policy.json
  20. 7
      neutron/agent/common/ovs_lib.py
  21. 23
      neutron/agent/common/utils.py
  22. 3
      neutron/agent/dhcp/agent.py
  23. 4
      neutron/agent/firewall.py
  24. 12
      neutron/agent/l3/agent.py
  25. 6
      neutron/agent/l3/config.py
  26. 38
      neutron/agent/l3/dvr_edge_router.py
  27. 69
      neutron/agent/l3/dvr_local_router.py
  28. 42
      neutron/agent/l3/dvr_router_base.py
  29. 3
      neutron/agent/l3/ha.py
  30. 11
      neutron/agent/l3/ha_router.py
  31. 14
      neutron/agent/l3/namespace_manager.py
  32. 81
      neutron/agent/l3/router_info.py
  33. 5
      neutron/agent/linux/async_process.py
  34. 6
      neutron/agent/linux/bridge_lib.py
  35. 59
      neutron/agent/linux/daemon.py
  36. 39
      neutron/agent/linux/dhcp.py
  37. 5
      neutron/agent/linux/external_process.py
  38. 85
      neutron/agent/linux/interface.py
  39. 56
      neutron/agent/linux/ip_lib.py
  40. 56
      neutron/agent/linux/iptables_firewall.py
  41. 31
      neutron/agent/linux/keepalived.py
  42. 21
      neutron/agent/linux/utils.py
  43. 32
      neutron/agent/metadata/agent.py
  44. 6
      neutron/agent/metadata/config.py
  45. 7
      neutron/agent/metadata/driver.py
  46. 11
      neutron/agent/ovsdb/api.py
  47. 6
      neutron/agent/ovsdb/impl_idl.py
  48. 3
      neutron/agent/ovsdb/impl_vsctl.py
  49. 90
      neutron/agent/ovsdb/native/commands.py
  50. 61
      neutron/agent/rpc.py
  51. 2
      neutron/agent/windows/utils.py
  52. 5
      neutron/api/extensions.py
  53. 3
      neutron/api/rpc/handlers/dhcp_rpc.py
  54. 22
      neutron/api/v2/attributes.py
  55. 57
      neutron/api/v2/base.py
  56. 6
      neutron/api/v2/resource.py
  57. 4
      neutron/api/v2/resource_helper.py
  58. 4
      neutron/api/v2/router.py
  59. 126
      neutron/cmd/sanity/checks.py
  60. 15
      neutron/cmd/sanity_check.py
  61. 5
      neutron/common/constants.py
  62. 4
      neutron/common/exceptions.py
  63. 23
      neutron/common/log.py
  64. 11
      neutron/common/utils.py
  65. 7
      neutron/context.py
  66. 3
      neutron/db/api.py
  67. 51
      neutron/db/common_db_mixin.py
  68. 34
      neutron/db/db_base_plugin_common.py
  69. 94
      neutron/db/db_base_plugin_v2.py
  70. 11
      neutron/db/dvr_mac_db.py
  71. 356
      neutron/db/flavors_db.py
  72. 144
      neutron/db/ipam_backend_mixin.py
  73. 68
      neutron/db/ipam_non_pluggable_backend.py
  74. 451
      neutron/db/ipam_pluggable_backend.py
  75. 22
      neutron/db/l3_agentschedulers_db.py
  76. 51
      neutron/db/l3_db.py
  77. 12
      neutron/db/l3_dvr_db.py
  78. 11
      neutron/db/l3_hamode_db.py
  79. 4
      neutron/db/metering/metering_db.py
  80. 17
      neutron/db/migration/alembic_migrations/external.py
  81. 3
      neutron/db/migration/alembic_migrations/script.py.mako
  82. 1
      neutron/db/migration/alembic_migrations/versions/HEAD
  83. 3
      neutron/db/migration/alembic_migrations/versions/HEADS
  84. 22
      neutron/db/migration/alembic_migrations/versions/liberty/contract/2a16083502f3_metaplugin_removal.py
  85. 20
      neutron/db/migration/alembic_migrations/versions/liberty/contract/30018084ec99_initial.py
  86. 69
      neutron/db/migration/alembic_migrations/versions/liberty/contract/4ffceebfada_rbac_network.py
  87. 37
      neutron/db/migration/alembic_migrations/versions/liberty/contract/5498d17be016_drop_legacy_ovs_and_lb.py
  88. 35
      neutron/db/migration/alembic_migrations/versions/liberty/expand/1c844d1677f7_dns_nameservers_order.py
  89. 62
      neutron/db/migration/alembic_migrations/versions/liberty/expand/31337ec0ffee_flavors.py
  90. 5
      neutron/db/migration/alembic_migrations/versions/liberty/expand/354db87e3225_nsxv_vdr_metadata.py
  91. 45
      neutron/db/migration/alembic_migrations/versions/liberty/expand/45f955889773_quota_usage.py
  92. 0
      neutron/db/migration/alembic_migrations/versions/liberty/expand/52c5312f6baf_address_scopes.py
  93. 0
      neutron/db/migration/alembic_migrations/versions/liberty/expand/599c6a226151_neutrodb_ipam.py
  94. 47
      neutron/db/migration/alembic_migrations/versions/liberty/expand/8675309a5c4f_rbac_network.py
  95. 192
      neutron/db/migration/cli.py
  96. 515
      neutron/db/migration/migrate_to_ml2.py
  97. 11
      neutron/db/migration/models/head.py
  98. 17
      neutron/db/models_v2.py
  99. 0
      neutron/db/quota/__init__.py
  100. 159
      neutron/db/quota/api.py
  101. Some files were not shown because too many files have changed in this diff Show More

2
doc/source/devref/callbacks.rst

@ -65,7 +65,7 @@ do whatever they are supposed to do. In a callback-less world this would work li
# A gets hold of the references of B and C
# A calls B
# A calls C
B->my_random_method_for_knowning_about_router_created()
B->my_random_method_for_knowing_about_router_created()
C->my_random_very_difficult_to_remember_method_about_router_created()
If B and/or C change, things become sour. In a callback-based world, things become a lot

9
doc/source/devref/client_command_extensions.rst

@ -0,0 +1,9 @@
=================================
Client command extension support
=================================
The client command extension adds support for extending the neutron client while
considering ease of creation.
The full document can be found in the python-neutronclient repository:
http://docs.openstack.org/developer/python-neutronclient/devref/client_command_extensions.html

13
doc/source/devref/contribute.rst

@ -1,14 +1,11 @@
Contributing new extensions to Neutron
======================================
**NOTE!**
---------
.. note:: **Third-party plugins/drivers which do not start decomposition in
Liberty will be marked as deprecated and removed before the Mitaka-3
milestone.**
**Third-party plugins/drivers which do not start decomposition in Liberty will
be marked as deprecated, and they will be removed before the Mxxx-3
milestone.**
Read on for details ...
Read on for details ...
Introduction
@ -46,7 +43,7 @@ by allowing third-party code to exist entirely out of tree. Further extension
mechanisms have been provided to better support external plugins and drivers
that alter the API and/or the data model.
In the Mxxx cycle we will **require** all third-party code to be moved out of
In the Mitaka cycle we will **require** all third-party code to be moved out of
the neutron tree completely.
'Outside the tree' can be anything that is publicly available: it may be a repo

146
doc/source/devref/db_layer.rst

@ -23,6 +23,152 @@ should also be added in model. If default value in database is not needed,
business logic.
How we manage database migration rules
--------------------------------------
Since Liberty, Neutron maintains two parallel alembic migration branches.
The first one, called 'expand', is used to store expansion-only migration
rules. Those rules are strictly additive and can be applied while
neutron-server is running. Examples of additive database schema changes are:
creating a new table, adding a new table column, adding a new index, etc.
The second branch, called 'contract', is used to store those migration rules
that are not safe to apply while neutron-server is running. Those include:
column or table removal, moving data from one part of the database into another
(renaming a column, transforming single table into multiple, etc.), introducing
or modifying constraints, etc.
The intent of the split is to allow invoking those safe migrations from
'expand' branch while neutron-server is running, reducing downtime needed to
upgrade the service.
To apply just expansion rules, execute:
- neutron-db-manage upgrade liberty_expand@head
After the first step is done, you can stop neutron-server, apply remaining
non-expansive migration rules, if any:
- neutron-db-manage upgrade liberty_contract@head
and finally, start your neutron-server again.
If you are not interested in applying safe migration rules while the service is
running, you can still upgrade database the old way, by stopping the service,
and then applying all available rules:
- neutron-db-manage upgrade head[s]
It will apply all the rules from both the expand and the contract branches, in
proper order.
Expand and Contract Scripts
---------------------------
Under the obsolete "branchless" design, a migration script indicated a
specific "version" of the schema and included directives that applied all
necessary changes to the database at once. If we look, for example, at
the script ``2d2a8a565438_hierarchical_binding.py``, we will see::
# .../alembic_migrations/versions/2d2a8a565438_hierarchical_binding.py
def upgrade():
# .. inspection code ...
op.create_table(
'ml2_port_binding_levels',
sa.Column('port_id', sa.String(length=36), nullable=False),
sa.Column('host', sa.String(length=255), nullable=False),
# ... more columns ...
)
for table in port_binding_tables:
op.execute((
"INSERT INTO ml2_port_binding_levels "
"SELECT port_id, host, 0 AS level, driver, segment AS segment_id "
"FROM %s "
"WHERE host <> '' "
"AND driver <> '';"
) % table)
op.drop_constraint(fk_name_dvr[0], 'ml2_dvr_port_bindings', 'foreignkey')
op.drop_column('ml2_dvr_port_bindings', 'cap_port_filter')
op.drop_column('ml2_dvr_port_bindings', 'segment')
op.drop_column('ml2_dvr_port_bindings', 'driver')
# ... more DROP instructions ...
The above script contains directives that are both under the "expand"
and "contract" categories, as well as some data migrations. The ``op.create_table``
directive is an "expand"; it may be run safely while the old version of the
application still runs, as the old code simply doesn't look for this table.
The ``op.drop_constraint`` and ``op.drop_column`` directives are
"contract" directives (the drop column more so than the drop constraint); running
at least the ``op.drop_column`` directives means that the old version of the
application will fail, as it will attempt to access these columns which no longer
exist.
The data migrations in this script are adding new
rows to the newly added ``ml2_port_binding_levels`` table.
Under the new migration script directory structure, the above script would be
stated as two scripts; an "expand" and a "contract" script::
# expansion operations
# .../alembic_migrations/versions/liberty/expand/2bde560fc638_hierarchical_binding.py
def upgrade():
op.create_table(
'ml2_port_binding_levels',
sa.Column('port_id', sa.String(length=36), nullable=False),
sa.Column('host', sa.String(length=255), nullable=False),
# ... more columns ...
)
# contraction operations
# .../alembic_migrations/versions/liberty/contract/4405aedc050e_hierarchical_binding.py
def upgrade():
for table in port_binding_tables:
op.execute((
"INSERT INTO ml2_port_binding_levels "
"SELECT port_id, host, 0 AS level, driver, segment AS segment_id "
"FROM %s "
"WHERE host <> '' "
"AND driver <> '';"
) % table)
op.drop_constraint(fk_name_dvr[0], 'ml2_dvr_port_bindings', 'foreignkey')
op.drop_column('ml2_dvr_port_bindings', 'cap_port_filter')
op.drop_column('ml2_dvr_port_bindings', 'segment')
op.drop_column('ml2_dvr_port_bindings', 'driver')
# ... more DROP instructions ...
The two scripts would be present in different subdirectories and also part of
entirely separate versioning streams. The "expand" operations are in the
"expand" script, and the "contract" operations are in the "contract" script.
For the time being, data migration rules also belong to the contract branch.
There is an expectation that eventually live data migrations will move into
middleware that
will be aware about different database schema elements to converge on, but
Neutron is still not there.
Scripts that contain only expansion or contraction rules do not require a split
into two parts.
If a contraction script depends on a script from expansion stream, the
following directive should be added in the contraction script::
depends_on = ('<expansion-revision>',)
Tests to verify that database migrations and models are in sync
---------------------------------------------------------------

74
doc/source/devref/dns_order.rst

@ -0,0 +1,74 @@
Keep DNS Nameserver Order Consistency In Neutron
================================================
In Neutron subnets, DNS nameservers are given priority when created or updated.
This means if you create a subnet with multiple DNS servers, the order will
be retained and guests will receive the DNS servers in the order you
created them in when the subnet was created. The same thing applies for update
operations on subnets to add, remove, or update DNS servers.
Get Subnet Details Info
-----------------------
::
changzhi@stack:~/devstack$ neutron subnet-list
+--------------------------------------+------+-------------+--------------------------------------------+
| id | name | cidr | allocation_pools |
+--------------------------------------+------+-------------+--------------------------------------------+
| 1a2d261b-b233-3ab9-902e-88576a82afa6 | | 10.0.0.0/24 | {"start": "10.0.0.2", "end": "10.0.0.254"} |
+--------------------------------------+------+-------------+--------------------------------------------+
changzhi@stack:~/devstack$ neutron subnet-show 1a2d261b-b233-3ab9-902e-88576a82afa6
+------------------+--------------------------------------------+
| Field | Value |
+------------------+--------------------------------------------+
| allocation_pools | {"start": "10.0.0.2", "end": "10.0.0.254"} |
| cidr | 10.0.0.0/24 |
| dns_nameservers | 1.1.1.1 |
| | 2.2.2.2 |
| | 3.3.3.3 |
| enable_dhcp | True |
| gateway_ip | 10.0.0.1 |
| host_routes | |
| id | 1a2d26fb-b733-4ab3-992e-88554a87afa6 |
| ip_version | 4 |
| name | |
| network_id | a404518c-800d-2353-9193-57dbb42ac5ee |
| tenant_id | 3868290ab10f417390acbb754160dbb2 |
+------------------+--------------------------------------------+
Update Subnet DNS Nameservers
-----------------------------
::
neutron subnet-update 1a2d261b-b233-3ab9-902e-88576a82afa6 \
--dns_nameservers list=true 3.3.3.3 2.2.2.2 1.1.1.1
changzhi@stack:~/devstack$ neutron subnet-show 1a2d261b-b233-3ab9-902e-88576a82afa6
+------------------+--------------------------------------------+
| Field | Value |
+------------------+--------------------------------------------+
| allocation_pools | {"start": "10.0.0.2", "end": "10.0.0.254"} |
| cidr | 10.0.0.0/24 |
| dns_nameservers | 3.3.3.3 |
| | 2.2.2.2 |
| | 1.1.1.1 |
| enable_dhcp | True |
| gateway_ip | 10.0.0.1 |
| host_routes | |
| id | 1a2d26fb-b733-4ab3-992e-88554a87afa6 |
| ip_version | 4 |
| name | |
| network_id | a404518c-800d-2353-9193-57dbb42ac5ee |
| tenant_id | 3868290ab10f417390acbb754160dbb2 |
+------------------+--------------------------------------------+
As shown in above output, the order of the DNS nameservers has been updated.
New virtual machines deployed to this subnet will receive the DNS nameservers
in this new priority order. Existing virtual machines that have already been
deployed will not be immediately affected by changing the DNS nameserver order
on the neutron subnet. Virtual machines that are configured to get their IP
address via DHCP will detect the DNS nameserver order change
when their DHCP lease expires or when the virtual machine is restarted.
Existing virtual machines configured with a static IP address will never
detect the updated DNS nameserver order.

19
doc/source/devref/fullstack_testing.rst

@ -29,11 +29,11 @@ Since the test runs on the machine itself, full stack testing enables
through the API and then assert that a namespace was created for it.
Full stack tests run in the Neutron tree with Neutron resources alone. You
may use the Neutron API (Keystone is set to NOAUTH so that it's out of the
picture). VMs may be simulated with a helper class that contains a container-
like object in its own namespace and IP address. It has helper methods to send
different kinds of traffic. The "VM" may be connected to br-int or br-ex,
to simulate internal or external traffic.
may use the Neutron API (The Neutron server is set to NOAUTH so that Keystone
is out of the picture). Instances may be simulated with a helper class that
contains a container-like object in its own namespace and IP address. It has
helper methods to send different kinds of traffic. The "instance" may be
connected to br-int or br-ex, to simulate internal or external traffic.
Full stack testing can simulate multi node testing by starting an agent
multiple times. Specifically, each node would have its own copy of the
@ -84,9 +84,12 @@ Long Term Goals
* Currently we configure the OVS agent with VLANs segmentation (Only because
it's easier). This allows us to validate most functionality, but we might
need to support tunneling somehow.
* How do advanced services use the full stack testing infrastructure? I'd
assume we treat all of the infrastructure classes as a publicly consumed
API and have the XaaS repos import and use them.
* How will advanced services use the full stack testing infrastructure? Full
stack tests infrastructure classes are expected to change quite a bit over
the next coming months. This means that other repositories may import these
classes and break from time to time, or copy them in their repositories
instead. Since changes to full stack testing infrastructure is a given,
XaaS repositories should be copying it and not importing it directly.
* Currently we configure the Neutron server with the ML2 plugin and the OVS
mechanism driver. We may modularize the topology configuration further to
allow to rerun full stack tests against different Neutron plugins or ML2

2
doc/source/devref/index.rst

@ -34,6 +34,7 @@ Programming HowTos and Tutorials
contribute
neutron_api
sub_projects
client_command_extensions
Neutron Internals
@ -52,6 +53,7 @@ Neutron Internals
advanced_services
oslo-incubator
callbacks
dns_order
Testing
-------

1
doc/source/devref/l2_agents.rst

@ -5,3 +5,4 @@ L2 Agent Networking
openvswitch_agent
linuxbridge_agent
sriov_nic_agent

33
doc/source/devref/neutron_api.rst

@ -2,7 +2,7 @@ Neutron public API
==================
Neutron main tree serves as a library for multiple subprojects that rely on
different modules from neutron.* namespace to accomodate their needs.
different modules from neutron.* namespace to accommodate their needs.
Specifically, advanced service repositories and open source or vendor
plugin/driver repositories do it.
@ -33,3 +33,34 @@ incompatible changes that could or are known to trigger those breakages.
- commit: 6e693fc91dd79cfbf181e3b015a1816d985ad02c
- solution: switch using oslo_service.* namespace; stop using ANY neutron.openstack.* contents.
- severity: low (plugins must not rely on that subtree).
* change: oslo.utils.fileutils adopted.
- commit: I933d02aa48260069149d16caed02b020296b943a
- solution: switch using oslo_utils.fileutils module; stop using neutron.openstack.fileutils module.
- severity: low (plugins must not rely on that subtree).
* change: Reuse caller's session in DB methods.
- commit: 47dd65cf986d712e9c6ca5dcf4420dfc44900b66
- solution: Add context to args and reuse.
- severity: High (mostly undetected, because 3rd party CI run Tempest tests only).
* change: switches to oslo.log, removes neutron.openstack.common.log.
- commit: 22328baf1f60719fcaa5b0fbd91c0a3158d09c31
- solution: a) switch to oslo.log; b) copy log module into your tree and use it
(may not work due to conflicts between the module and oslo.log configuration options).
- severity: High (most CI systems are affected).
* change: Implements reorganize-unit-test-tree spec.
- commit: 1105782e3914f601b8f4be64939816b1afe8fb54
- solution: Code affected need to update existing unit tests to reflect new locations.
- severity: High (mostly undetected, because 3rd party CI run Tempest tests only).
* change: drop linux/ovs_lib compat layer.
- commit: 3bbf473b49457c4afbfc23fd9f59be8aa08a257d
- solution: switch to using neutron/agent/common/ovs_lib.py.
- severity: High (most CI systems are affected).

27
doc/source/devref/sriov_nic_agent.rst

@ -0,0 +1,27 @@
======================================
L2 Networking with SR-IOV enabled NICs
======================================
SR-IOV (Single Root I/O Virtualization) is a specification that allows
a PCIe device to appear to be multiple separate physical PCIe devices.
SR-IOV works by introducing the idea of physical functions (PFs) and virtual functions (VFs).
Physical functions (PFs) are full-featured PCIe functions.
Virtual functions (VFs) are “lightweight” functions that lack configuration resources.
SR-IOV supports VLANs for L2 network isolation, other networking technologies
such as VXLAN/GRE may be supported in the future.
SR-IOV NIC agent manages configuration of SR-IOV Virtual Functions that connect
VM instances running on the compute node to the public network.
In the most common deployments, there are compute nodes and a network node.
Compute node can support VM connectivity via SR-IOV enabled NIC. SR-IOV NIC Agent manages
Virtual Functions admin state. In the future it will manage additional settings, such as
quality of service, rate limit settings, spoofcheck and more.
Network node will be usually deployed with either Open vSwitch or Linux Bridge to support network node functionality.
Further Reading
---------------
* `Nir Yechiel - SR-IOV Networking – Part I: Understanding the Basics <http://redhatstackblog.redhat.com/2015/03/05/red-hat-enterprise-linux-openstack-platform-6-sr-iov-networking-part-i-understanding-the-basics/>`_
* `SR-IOV Passthrough For Networking <https://wiki.openstack.org/wiki/SR-IOV-Passthrough-For-Networking/>`_

33
doc/source/devref/sub_projects.rst

@ -7,10 +7,10 @@ part of the overall Neutron project.
Inclusion Process
-----------------
The process for proposing the move of a repo into openstack/ and under
the Neutron project is to propose a patch to the openstack/governance
repository. For example, to propose moving networking-foo, one
would add the following entry under Neutron in reference/projects.yaml::
The process for proposing a repo into openstack/ and under the Neutron
project is to propose a patch to the openstack/governance repository.
For example, to propose networking-foo, one would add the following entry
under Neutron in reference/projects.yaml::
- repo: openstack/networking-foo
tags:
@ -28,6 +28,11 @@ repositories are within the existing approved scope of the project.
http://git.openstack.org/cgit/openstack/governance/commit/?id=321a020cbcaada01976478ea9f677ebb4df7bd6d
In order to create a project, in case it does not exist, follow steps
as explained in:
http://docs.openstack.org/infra/manual/creators.html
Responsibilities
----------------
@ -86,14 +91,14 @@ repo but are summarized here to describe the functionality they provide.
+-------------------------------+-----------------------+
| networking-edge-vpn_ | vpn |
+-------------------------------+-----------------------+
| networking-fujitsu_ | ml2 |
+-------------------------------+-----------------------+
| networking-hyperv_ | ml2 |
+-------------------------------+-----------------------+
| networking-ibm_ | ml2,l3 |
+-------------------------------+-----------------------+
| networking-l2gw_ | l2 |
+-------------------------------+-----------------------+
| networking-metaplugin_ | core |
+-------------------------------+-----------------------+
| networking-midonet_ | core,lb |
+-------------------------------+-----------------------+
| networking-mlnx_ | ml2 |
@ -205,6 +210,15 @@ Edge VPN
* Git: https://git.openstack.org/cgit/stackforge/networking-edge-vpn
* Launchpad: https://launchpad.net/edge-vpn
.. _networking-fujitsu:
FUJITSU
-------
* Git: https://git.openstack.org/cgit/openstack/networking-fujitsu
* Launchpad: https://launchpad.net/networking-fujitsu
* PyPI: https://pypi.python.org/pypi/networking-fujitsu
.. _networking-hyperv:
Hyper-V
@ -239,13 +253,6 @@ L2 Gateway
* Git: https://git.openstack.org/cgit/openstack/networking-l2gw
* Launchpad: https://launchpad.net/networking-l2gw
.. _networking-metaplugin:
Metaplugin
----------
* Git: https://github.com/ntt-sic/networking-metaplugin
.. _networking-midonet:
MidoNet

2
doc/source/policies/bugs.rst

@ -13,7 +13,7 @@ triaging. The bug czar is expected to communicate with the various Neutron teams
been triaged. In addition, the bug czar should be reporting "High" and "Critical" priority bugs
to both the PTL and the core reviewer team during each weekly Neutron meeting.
The current Neutron bug czar is Eugene Nikanorov (IRC nick enikanorov).
The current Neutron bug czar is Kyle Mestery (IRC nick mestery).
Plugin and Driver Repositories
------------------------------

9
doc/source/policies/core-reviewers.rst

@ -100,9 +100,14 @@ updating the core review team for the sub-project's repositories.
| Area | Lieutenant | IRC nick |
+========================+===========================+======================+
| dragonflow | Eran Gampel | gampel |
| | Gal Sagie | gsagie |
+------------------------+---------------------------+----------------------+
| networking-l2gw | Sukhdev Kapur | sukhdev |
+------------------------+---------------------------+----------------------+
| networking-midonet | Ryu Ishimoto | ryu_ishimoto |
| | Jaume Devesa | devvesa |
| | YAMAMOTO Takashi | yamamoto |
+------------------------+---------------------------+----------------------+
| networking-odl | Flavio Fernandes | flaviof |
| | Kyle Mestery | mestery |
+------------------------+---------------------------+----------------------+
@ -110,6 +115,10 @@ updating the core review team for the sub-project's repositories.
+------------------------+---------------------------+----------------------+
| networking-ovn | Russell Bryant | russellb |
+------------------------+---------------------------+----------------------+
| networking-plumgrid | Fawad Khaliq | fawadkhaliq |
+------------------------+---------------------------+----------------------+
| networking-sfc | Cathy Zhang | cathy |
+------------------------+---------------------------+----------------------+
| networking-vsphere | Vivekanandan Narasimhan | viveknarasimhan |
+------------------------+---------------------------+----------------------+
| octavia | German Eichberger | xgerman |

2
etc/metadata_agent.ini

@ -45,7 +45,7 @@ admin_password = %SERVICE_PASSWORD%
# Location of Metadata Proxy UNIX domain socket
# metadata_proxy_socket = $state_path/metadata_proxy
# Metadata Proxy UNIX domain socket mode, 3 values allowed:
# Metadata Proxy UNIX domain socket mode, 4 values allowed:
# 'deduce': deduce mode from metadata_proxy_user/group values,
# 'user': set metadata proxy socket mode to 0o644, to use when
# metadata_proxy_user is agent effective user or root,

2
etc/neutron.conf

@ -593,7 +593,7 @@
[quotas]
# Default driver to use for quota checks
# quota_driver = neutron.db.quota_db.DbQuotaDriver
# quota_driver = neutron.db.quota.driver.DbQuotaDriver
# Resource name(s) that are supported in quota features
# This option is deprecated for removal in the M release, please refrain from using it

31
etc/neutron/plugins/metaplugin/metaplugin.ini

@ -1,31 +0,0 @@
# Config file for Metaplugin
[meta]
# Comma separated list of flavor:neutron_plugin for plugins to load.
# Extension method is searched in the list order and the first one is used.
plugin_list = 'ml2:neutron.plugins.ml2.plugin.Ml2Plugin,nvp:neutron.plugins.vmware.plugin.NsxPluginV2'
# Comma separated list of flavor:neutron_plugin for L3 service plugins
# to load.
# This is intended for specifying L2 plugins which support L3 functions.
# If you use a router service plugin, set this blank.
l3_plugin_list =
# Default flavor to use, when flavor:network is not specified at network
# creation.
default_flavor = 'nvp'
# Default L3 flavor to use, when flavor:router is not specified at router
# creation.
# Ignored if 'l3_plugin_list' is blank.
default_l3_flavor =
# Comma separated list of supported extension aliases.
supported_extension_aliases = 'provider,binding,agent,dhcp_agent_scheduler'
# Comma separated list of method:flavor to select specific plugin for a method.
# This has priority over method search order based on 'plugin_list'.
extension_map = 'get_port_stats:nvp'
# Specifies flavor for plugin to handle 'q-plugin' RPC requests.
rpc_flavor = 'ml2'

70
etc/neutron/plugins/ml2/ml2_conf_cisco.ini

@ -137,76 +137,6 @@
# mcast_ranges =
# Example: mcast_ranges = 224.0.0.1:224.0.0.3,224.0.1.1:224.0.1.
[ml2_cisco_apic]
# Hostname:port list of APIC controllers
# apic_hosts = 1.1.1.1:80, 1.1.1.2:8080, 1.1.1.3:80
# Username for the APIC controller
# apic_username = user
# Password for the APIC controller
# apic_password = password
# Whether use SSl for connecting to the APIC controller or not
# apic_use_ssl = True
# How to map names to APIC: use_uuid or use_name
# apic_name_mapping = use_name
# Names for APIC objects used by Neutron
# Note: When deploying multiple clouds against one APIC,
# these names must be unique between the clouds.
# apic_vmm_domain = openstack
# apic_vlan_ns_name = openstack_ns
# apic_node_profile = openstack_profile
# apic_entity_profile = openstack_entity
# apic_function_profile = openstack_function
# apic_app_profile_name = openstack_app
# Agent timers for State reporting and topology discovery
# apic_sync_interval = 30
# apic_agent_report_interval = 30
# apic_agent_poll_interval = 2
# Specify your network topology.
# This section indicates how your compute nodes are connected to the fabric's
# switches and ports. The format is as follows:
#
# [apic_switch:<swich_id_from_the_apic>]
# <compute_host>,<compute_host> = <switchport_the_host(s)_are_connected_to>
#
# You can have multiple sections, one for each switch in your fabric that is
# participating in OpenStack. e.g.
#
# [apic_switch:17]
# ubuntu,ubuntu1 = 1/10
# ubuntu2,ubuntu3 = 1/11
#
# [apic_switch:18]
# ubuntu5,ubuntu6 = 1/1
# ubuntu7,ubuntu8 = 1/2
# Describe external connectivity.
# In this section you can specify the external network configuration in order
# for the plugin to be able to teach the fabric how to route the internal
# traffic to the outside world. The external connectivity configuration
# format is as follows:
#
# [apic_external_network:<externalNetworkName>]
# switch = <switch_id_from_the_apic>
# port = <switchport_the_external_router_is_connected_to>
# encap = <encapsulation>
# cidr_exposed = <cidr_exposed_to_the_external_router>
# gateway_ip = <ip_of_the_external_gateway>
#
# An example follows:
# [apic_external_network:network_ext]
# switch=203
# port=1/34
# encap=vlan-100
# cidr_exposed=10.10.40.2/16
# gateway_ip=10.10.40.1
[ml2_cisco_ucsm]
# Cisco UCS Manager IP address

17
etc/neutron/rootwrap.d/cisco-apic.filters

@ -1,17 +0,0 @@
# neutron-rootwrap command filters for nodes on which neutron is
# expected to control network
#
# This file should be owned by (and only-writeable by) the root user
# format seems to be
# cmd-name: filter-name, raw-command, user, args
[Filters]
# cisco-apic filters
lldpctl: CommandFilter, lldpctl, root
# ip_lib filters
ip: IpFilter, ip, root
find: RegExpFilter, find, root, find, /sys/class/net, -maxdepth, 1, -type, l, -printf, %.*
ip_exec: IpNetnsExecFilter, ip, root

13
etc/policy.json

@ -163,5 +163,16 @@
"get_service_provider": "rule:regular_user",
"get_lsn": "rule:admin_only",
"create_lsn": "rule:admin_only"
"create_lsn": "rule:admin_only",
"create_flavor": "rule:admin_only",
"update_flavor": "rule:admin_only",
"delete_flavor": "rule:admin_only",
"get_flavors": "rule:regular_user",
"get_flavor": "rule:regular_user",
"create_service_profile": "rule:admin_only",
"update_service_profile": "rule:admin_only",
"delete_service_profile": "rule:admin_only",
"get_service_profiles": "rule:admin_only",
"get_service_profile": "rule:admin_only"
}

7
neutron/agent/common/ovs_lib.py

@ -304,7 +304,12 @@ class OVSBridge(BaseOVS):
('options', {'peer': remote_name})]
return self.add_port(local_name, *attrs)
def get_iface_name_list(self):
# get the interface name list for this bridge
return self.ovsdb.list_ifaces(self.br_name).execute(check_error=True)
def get_port_name_list(self):
# get the port name list for this bridge
return self.ovsdb.list_ports(self.br_name).execute(check_error=True)
def get_port_stats(self, port_name):
@ -557,7 +562,7 @@ class DeferredOVSBridge(object):
key=operator.itemgetter(0))
itemgetter_1 = operator.itemgetter(1)
for action, action_flow_list in grouped:
flows = map(itemgetter_1, action_flow_list)
flows = list(map(itemgetter_1, action_flow_list))
self.br.do_action_flows(action, flows)
def __enter__(self):

23
neutron/agent/common/utils.py

@ -15,10 +15,33 @@
import os
from oslo_log import log as logging
from oslo_utils import importutils
from neutron.i18n import _LE
if os.name == 'nt':
from neutron.agent.windows import utils
else:
from neutron.agent.linux import utils
LOG = logging.getLogger(__name__)
execute = utils.execute
def load_interface_driver(conf):
    """Instantiate the interface driver named by ``conf.interface_driver``.

    :param conf: configuration object carrying ``interface_driver``
    :returns: the driver instance, constructed with ``conf``
    :raises SystemExit: when no driver is configured or the configured
        driver class cannot be imported
    """
    driver_name = conf.interface_driver
    if not driver_name:
        LOG.error(_LE('An interface driver must be specified'))
        raise SystemExit(1)
    try:
        return importutils.import_object(driver_name, conf)
    except ImportError as err:
        # Surface the import failure in the log, then exit: the agent
        # cannot operate without a working interface driver.
        LOG.error(_LE("Error importing interface driver "
                      "'%(driver)s': %(inner)s"),
                  {'driver': driver_name,
                   'inner': err})
        raise SystemExit(1)

3
neutron/agent/dhcp/agent.py

@ -26,7 +26,6 @@ from oslo_utils import importutils
from neutron.agent.linux import dhcp
from neutron.agent.linux import external_process
from neutron.agent.linux import utils as linux_utils
from neutron.agent.metadata import driver as metadata_driver
from neutron.agent import rpc as agent_rpc
from neutron.common import constants
@ -63,7 +62,7 @@ class DhcpAgent(manager.Manager):
ctx, self.conf.use_namespaces)
# create dhcp dir to store dhcp info
dhcp_dir = os.path.dirname("/%s/dhcp/" % self.conf.state_path)
linux_utils.ensure_dir(dhcp_dir)
utils.ensure_dir(dhcp_dir)
self.dhcp_version = self.dhcp_driver_cls.check_version()
self._populate_networks_cache()
self._process_monitor = external_process.ProcessMonitor(

4
neutron/agent/firewall.py

@ -19,6 +19,10 @@ import contextlib
import six
INGRESS_DIRECTION = 'ingress'
EGRESS_DIRECTION = 'egress'
@six.add_metaclass(abc.ABCMeta)
class FirewallDriver(object):
"""Firewall Driver base class.

12
neutron/agent/l3/agent.py

@ -21,9 +21,9 @@ import oslo_messaging
from oslo_service import loopingcall
from oslo_service import periodic_task
from oslo_utils import excutils
from oslo_utils import importutils
from oslo_utils import timeutils
from neutron.agent.common import utils as common_utils
from neutron.agent.l3 import dvr
from neutron.agent.l3 import dvr_edge_router as dvr_router
from neutron.agent.l3 import dvr_local_router as dvr_local_router
@ -165,15 +165,7 @@ class L3NATAgent(firewall_l3_agent.FWaaSL3AgentRpcCallback,
config=self.conf,
resource_type='router')
try:
self.driver = importutils.import_object(
self.conf.interface_driver,
self.conf
)
except Exception:
LOG.error(_LE("Error importing interface driver "
"'%s'"), self.conf.interface_driver)
raise SystemExit(1)
self.driver = common_utils.load_interface_driver(self.conf)
self.context = n_context.get_admin_context_without_session()
self.plugin_rpc = L3PluginApi(topics.L3PLUGIN, host)

6
neutron/agent/l3/config.py

@ -84,9 +84,11 @@ OPTS = [
cfg.StrOpt('metadata_access_mark',
default='0x1',
help=_('Iptables mangle mark used to mark metadata valid '
'requests')),
'requests. This mark will be masked with 0xffff so '
'that only the lower 16 bits will be used.')),
cfg.StrOpt('external_ingress_mark',
default='0x2',
help=_('Iptables mangle mark used to mark ingress from '
'external network')),
'external network. This mark will be masked with '
'0xffff so that only the lower 16 bits will be used.')),
]

38
neutron/agent/l3/dvr_edge_router.py

@ -28,13 +28,13 @@ class DvrEdgeRouter(dvr_local_router.DvrLocalRouter):
def __init__(self, agent, host, *args, **kwargs):
super(DvrEdgeRouter, self).__init__(agent, host, *args, **kwargs)
self.snat_namespace = None
self.snat_iptables_manager = None
def external_gateway_added(self, ex_gw_port, interface_name):
super(DvrEdgeRouter, self).external_gateway_added(
ex_gw_port, interface_name)
if self._is_this_snat_host():
snat_ports = self.get_snat_interfaces()
self._create_dvr_gateway(ex_gw_port, interface_name, snat_ports)
self._create_dvr_gateway(ex_gw_port, interface_name)
def external_gateway_updated(self, ex_gw_port, interface_name):
if not self._is_this_snat_host():
@ -70,8 +70,7 @@ class DvrEdgeRouter(dvr_local_router.DvrLocalRouter):
if not self._is_this_snat_host():
return
snat_ports = self.get_snat_interfaces()
sn_port = self._map_internal_interfaces(port, snat_ports)
sn_port = self.get_snat_port_for_internal_port(port)
if not sn_port:
return
@ -92,7 +91,7 @@ class DvrEdgeRouter(dvr_local_router.DvrLocalRouter):
if not self.ex_gw_port:
return
sn_port = self._map_internal_interfaces(port, self.snat_ports)
sn_port = self.get_snat_port_for_internal_port(port)
if not sn_port:
return
@ -108,12 +107,11 @@ class DvrEdgeRouter(dvr_local_router.DvrLocalRouter):
self.driver.unplug(snat_interface, namespace=ns_name,
prefix=prefix)
def _create_dvr_gateway(self, ex_gw_port, gw_interface_name,
snat_ports):
def _create_dvr_gateway(self, ex_gw_port, gw_interface_name):
"""Create SNAT namespace."""
snat_ns = self.create_snat_namespace()
# connect snat_ports to br_int from SNAT namespace
for port in snat_ports:
for port in self.get_snat_interfaces():
# create interface_name
interface_name = self.get_snat_int_device_name(port['id'])
self._internal_network_added(
@ -145,4 +143,26 @@ class DvrEdgeRouter(dvr_local_router.DvrLocalRouter):
return long_name[:self.driver.DEV_NAME_LEN]
def _is_this_snat_host(self):
return self.get_gw_port_host() == self.host
host = self.router.get('gw_port_host')
if not host:
LOG.debug("gw_port_host missing from router: %s",
self.router['id'])
return host == self.host
def _handle_router_snat_rules(self, ex_gw_port, interface_name):
if not self._is_this_snat_host():
return
if not self.get_ex_gw_port():
return
if not self.snat_iptables_manager:
LOG.debug("DVR router: no snat rules to be handled")
return
with self.snat_iptables_manager.defer_apply():
self._empty_snat_chains(self.snat_iptables_manager)
# NOTE DVR doesn't add the jump to float snat like the super class.
self._add_snat_rules(ex_gw_port, self.snat_iptables_manager,
interface_name)

69
neutron/agent/l3/dvr_local_router.py

@ -19,7 +19,7 @@ from oslo_log import log as logging
from oslo_utils import excutils
from neutron.agent.l3 import dvr_fip_ns
from neutron.agent.l3 import router_info as router
from neutron.agent.l3 import dvr_router_base
from neutron.agent.linux import ip_lib
from neutron.common import constants as l3_constants
from neutron.common import exceptions
@ -31,15 +31,11 @@ LOG = logging.getLogger(__name__)
MASK_30 = 0x3fffffff
class DvrLocalRouter(router.RouterInfo):
class DvrLocalRouter(dvr_router_base.DvrRouterBase):
def __init__(self, agent, host, *args, **kwargs):
super(DvrLocalRouter, self).__init__(*args, **kwargs)
self.agent = agent
self.host = host
super(DvrLocalRouter, self).__init__(agent, host, *args, **kwargs)
self.floating_ips_dict = {}
self.snat_iptables_manager = None
# Linklocal subnet for router and floating IP namespace link
self.rtr_fip_subnet = None
self.dist_fip_count = None
@ -50,9 +46,6 @@ class DvrLocalRouter(router.RouterInfo):
floating_ips = super(DvrLocalRouter, self).get_floating_ips()
return [i for i in floating_ips if i['host'] == self.host]
def get_snat_interfaces(self):
return self.router.get(l3_constants.SNAT_ROUTER_INTF_KEY, [])
def _handle_fip_nat_rules(self, interface_name, action):
"""Configures NAT rules for Floating IPs for DVR.
@ -201,17 +194,6 @@ class DvrLocalRouter(router.RouterInfo):
subnet_id,
'add')
def _map_internal_interfaces(self, int_port, snat_ports):
"""Return the SNAT port for the given internal interface port."""
fixed_ip = int_port['fixed_ips'][0]
subnet_id = fixed_ip['subnet_id']
match_port = [p for p in snat_ports if
p['fixed_ips'][0]['subnet_id'] == subnet_id]
if match_port:
return match_port[0]
else:
LOG.error(_LE('DVR: no map match_port found!'))
@staticmethod
def _get_snat_idx(ip_cidr):
"""Generate index for DVR snat rules and route tables.
@ -291,13 +273,6 @@ class DvrLocalRouter(router.RouterInfo):
"""Removes rules and routes for SNAT redirection."""
self._snat_redirect_modify(gateway, sn_port, sn_int, is_add=False)
def get_gw_port_host(self):
host = self.router.get('gw_port_host')
if not host:
LOG.debug("gw_port_host missing from router: %s",
self.router['id'])
return host
def internal_network_added(self, port):
super(DvrLocalRouter, self).internal_network_added(port)
@ -313,8 +288,7 @@ class DvrLocalRouter(router.RouterInfo):
if not ex_gw_port:
return
snat_ports = self.get_snat_interfaces()
sn_port = self._map_internal_interfaces(port, snat_ports)
sn_port = self.get_snat_port_for_internal_port(port)
if not sn_port:
return
@ -325,7 +299,7 @@ class DvrLocalRouter(router.RouterInfo):
if not self.ex_gw_port:
return
sn_port = self._map_internal_interfaces(port, self.snat_ports)
sn_port = self.get_snat_port_for_internal_port(port)
if not sn_port:
return
@ -355,14 +329,13 @@ class DvrLocalRouter(router.RouterInfo):
ip_wrapr = ip_lib.IPWrapper(namespace=self.ns_name)
ip_wrapr.netns.execute(['sysctl', '-w',
'net.ipv4.conf.all.send_redirects=0'])
snat_ports = self.get_snat_interfaces()
for p in self.internal_ports:
gateway = self._map_internal_interfaces(p, snat_ports)
gateway = self.get_snat_port_for_internal_port(p)
id_name = self.get_internal_device_name(p['id'])
if gateway:
self._snat_redirect_add(gateway, p, id_name)
for port in snat_ports:
for port in self.get_snat_interfaces():
for ip in port['fixed_ips']:
self._update_arp_entry(ip['ip_address'],
port['mac_address'],
@ -379,35 +352,13 @@ class DvrLocalRouter(router.RouterInfo):
to_fip_interface_name = (
self.get_external_device_interface_name(ex_gw_port))
self.process_floating_ip_addresses(to_fip_interface_name)
snat_ports = self.get_snat_interfaces()
for p in self.internal_ports:
gateway = self._map_internal_interfaces(p, snat_ports)
gateway = self.get_snat_port_for_internal_port(p)
internal_interface = self.get_internal_device_name(p['id'])
self._snat_redirect_remove(gateway, p, internal_interface)
def _handle_router_snat_rules(self, ex_gw_port,
interface_name, action):
if not self.snat_iptables_manager:
LOG.debug("DVR router: no snat rules to be handled")
return
with self.snat_iptables_manager.defer_apply():
self._empty_snat_chains(self.snat_iptables_manager)
# NOTE DVR doesn't add the jump to float snat like the super class.
self._add_snat_rules(ex_gw_port, self.snat_iptables_manager,
interface_name, action)
def perform_snat_action(self, snat_callback, *args):
# NOTE DVR skips this step in a few cases...
if not self.get_ex_gw_port():
return
if self.get_gw_port_host() != self.host:
return
super(DvrLocalRouter,
self).perform_snat_action(snat_callback, *args)
    def _handle_router_snat_rules(self, ex_gw_port, interface_name):
        # Intentional no-op: a DVR local router installs no SNAT rules in
        # its own namespace — SNAT is handled in the dedicated SNAT
        # namespace (see DvrEdgeRouter._handle_router_snat_rules).
        pass
def process_external(self, agent):
ex_gw_port = self.get_ex_gw_port()

42
neutron/agent/l3/dvr_router_base.py

@ -0,0 +1,42 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from neutron.agent.l3 import router_info as router
from neutron.common import constants as l3_constants
from neutron.i18n import _LE
LOG = logging.getLogger(__name__)
class DvrRouterBase(router.RouterInfo):
    """Shared base class for DVR router flavors (local and edge).

    Stores the owning agent and the host this router instance runs on,
    and centralizes SNAT-interface lookups used by both subclasses.
    """

    def __init__(self, agent, host, *args, **kwargs):
        super(DvrRouterBase, self).__init__(*args, **kwargs)
        # L3 agent managing this router and the hostname it serves.
        self.agent = agent
        self.host = host

    def get_snat_interfaces(self):
        """Return the router's SNAT interface ports ([] when absent)."""
        return self.router.get(l3_constants.SNAT_ROUTER_INTF_KEY, [])

    def get_snat_port_for_internal_port(self, int_port):
        """Return the SNAT port for the given internal interface port.

        The match is made on the subnet of the internal port's first
        fixed IP.  Logs an error and returns None when no SNAT port
        shares that subnet.
        """
        subnet_id = int_port['fixed_ips'][0]['subnet_id']
        match = next((p for p in self.get_snat_interfaces()
                      if p['fixed_ips'][0]['subnet_id'] == subnet_id),
                     None)
        if match is None:
            LOG.error(_LE('DVR: no map match_port found!'))
        return match

3
neutron/agent/l3/ha.py

@ -22,6 +22,7 @@ import webob
from neutron.agent.linux import keepalived
from neutron.agent.linux import utils as agent_utils
from neutron.common import utils as common_utils
from neutron.i18n import _LI
from neutron.notifiers import batch_notifier
@ -157,4 +158,4 @@ class AgentMixin(object):
def _init_ha_conf_path(self):
ha_full_path = os.path.dirname("/%s/" % self.conf.ha_confs_path)
agent_utils.ensure_dir(ha_full_path)
common_utils.ensure_dir(ha_full_path)

11
neutron/agent/l3/ha_router.py

@ -200,6 +200,15 @@ class HaRouter(router.RouterInfo):
if enable_ra_on_gw:
self.driver.configure_ipv6_ra(self.ns_name, interface_name)
def _add_extra_subnet_onlink_routes(self, ex_gw_port, interface_name):
extra_subnets = ex_gw_port.get('extra_subnets', [])
instance = self._get_keepalived_instance()
onlink_route_cidrs = set(s['cidr'] for s in extra_subnets)
instance.virtual_routes.extra_subnets = [
keepalived.KeepalivedVirtualRoute(
onlink_route_cidr, None, interface_name, scope='link') for
onlink_route_cidr in onlink_route_cidrs]
def _should_delete_ipv6_lladdr(self, ipv6_lladdr):
"""Only the master should have any IP addresses configured.
Let keepalived manage IPv6 link local addresses, the same way we let
@ -235,6 +244,7 @@ class HaRouter(router.RouterInfo):
for ip_cidr in common_utils.fixed_ip_cidrs(ex_gw_port['fixed_ips']):
self._add_vip(ip_cidr, interface_name)
self._add_default_gw_virtual_route(ex_gw_port, interface_name)
self._add_extra_subnet_onlink_routes(ex_gw_port, interface_name)
def add_floating_ip(self, fip, interface_name, device):
fip_ip = fip['floating_ip_address']
@ -353,6 +363,7 @@ class HaRouter(router.RouterInfo):
if self.ha_port:
self.enable_keepalived()
@common_utils.synchronized('enable_radvd')
def enable_radvd(self, internal_ports=None):
if (self.keepalived_manager.get_process().active and
self.ha_state == 'master'):

14
neutron/agent/l3/namespace_manager.py

@ -12,6 +12,7 @@
from oslo_log import log as logging
from neutron.agent.l3 import dvr_fip_ns
from neutron.agent.l3 import dvr_snat_ns
from neutron.agent.l3 import namespaces
from neutron.agent.linux import external_process
@ -42,6 +43,12 @@ class NamespaceManager(object):
agent restarts gracefully.
"""
ns_prefix_to_class_map = {
namespaces.NS_PREFIX: namespaces.RouterNamespace,
dvr_snat_ns.SNAT_NS_PREFIX: dvr_snat_ns.SnatNamespace,
dvr_fip_ns.FIP_NS_PREFIX: dvr_fip_ns.FipNamespace,
}
def __init__(self, agent_conf, driver, clean_stale, metadata_driver=None):
"""Initialize the NamespaceManager.
@ -95,7 +102,7 @@ class NamespaceManager(object):
:returns: tuple with prefix and id or None if no prefix matches
"""
prefix = namespaces.get_prefix_from_ns_name(ns_name)
if prefix in (namespaces.NS_PREFIX, dvr_snat_ns.SNAT_NS_PREFIX):
if prefix in self.ns_prefix_to_class_map:
identifier = namespaces.get_id_from_ns_name(ns_name)
return (prefix, identifier)
@ -123,10 +130,7 @@ class NamespaceManager(object):
self._cleanup(ns_prefix, ns_id)
def _cleanup(self, ns_prefix, ns_id):
if ns_prefix == namespaces.NS_PREFIX:
ns_class = namespaces.RouterNamespace
else:
ns_class = dvr_snat_ns.SnatNamespace
ns_class = self.ns_prefix_to_class_map[ns_prefix]
ns = ns_class(ns_id, self.agent_conf, self.driver, use_ipv6=False)
try:
if self.metadata_driver:

81
neutron/agent/l3/router_info.py

@ -30,7 +30,6 @@ LOG = logging.getLogger(__name__)
INTERNAL_DEV_PREFIX = namespaces.INTERNAL_DEV_PREFIX
EXTERNAL_DEV_PREFIX = namespaces.EXTERNAL_DEV_PREFIX
EXTERNAL_INGRESS_MARK_MASK = '0xffffffff'
FLOATINGIP_STATUS_NOCHANGE = object()
@ -45,7 +44,6 @@ class RouterInfo(object):
self.router_id = router_id
self.ex_gw_port = None
self._snat_enabled = None
self._snat_action = None
self.internal_ports = []
self.floating_ips = set()
# Invoke the setter for establishing initial SNAT action
@ -97,13 +95,6 @@ class RouterInfo(object):
return
# enable_snat by default if it wasn't specified by plugin
self._snat_enabled = self._router.get('enable_snat', True)
# Set a SNAT action for the router
if self._router.get('gw_port'):
self._snat_action = ('add_rules' if self._snat_enabled
else 'remove_rules')
elif self.ex_gw_port:
# Gateway port was removed, remove rules
self._snat_action = 'remove_rules'
@property
def is_ha(self):
@ -119,14 +110,6 @@ class RouterInfo(object):
def get_external_device_interface_name(self, ex_gw_port):
return self.get_external_device_name(ex_gw_port['id'])
def perform_snat_action(self, snat_callback, *args):
# Process SNAT rules for attached subnets
if self._snat_action:
snat_callback(self._router.get('gw_port'),
*args,
action=self._snat_action)
self._snat_action = None
def _update_routing_table(self, operation, route):
cmd = ['ip', 'route', operation, 'to', route['destination'],
'via', route['nexthop']]
@ -534,27 +517,38 @@ class RouterInfo(object):
prefix=EXTERNAL_DEV_PREFIX)
# Process SNAT rules for external gateway
self.perform_snat_action(self._handle_router_snat_rules,
interface_name)
gw_port = self._router.get('gw_port')
self._handle_router_snat_rules(gw_port, interface_name)
def external_gateway_nat_rules(self, ex_gw_ip, interface_name):
mark = self.agent_conf.external_ingress_mark
rules = [('POSTROUTING', '! -i %(interface_name)s '
'! -o %(interface_name)s -m conntrack ! '
'--ctstate DNAT -j ACCEPT' %
{'interface_name': interface_name}),
('snat', '-o %s -j SNAT --to-source %s' %
(interface_name, ex_gw_ip)),
('snat', '-m mark ! --mark %s '
'-m conntrack --ctstate DNAT '
'-j SNAT --to-source %s' % (mark, ex_gw_ip))]