Browse Source

Remove DHCP and L3 agent remnants.

Both the L3 and DHCP agents are no longer supported.  Remove many more
remnants from when they were supported.

Change-Id: If71489f9ac56c7c26746d0fbfb4640e570591f26
Signed-off-by: Russell Bryant <rbryant@redhat.com>
changes/20/454320/2
Russell Bryant 5 years ago
parent
commit
ac226200ab
  1. 22
      devstack/computenode-local.conf.sample
  2. 1
      devstack/devstackgatekuryrrc
  3. 2
      devstack/devstackgaterc
  4. 37
      devstack/lib/networking-ovn
  5. 13
      devstack/local.conf.sample
  6. 6
      devstack/override-defaults
  7. 3
      doc/source/design/native_dhcp.rst
  8. 35
      doc/source/features.rst
  9. 31
      doc/source/install.rst
  10. 3
      networking_ovn/common/acl.py
  11. 11
      networking_ovn/common/config.py
  12. 18
      networking_ovn/ml2/mech_driver.py
  13. 6
      networking_ovn/ovn_db_sync.py
  14. 12
      networking_ovn/tests/unit/ml2/test_mech_driver.py
  15. 5
      vagrant/provisioning/setup-compute.sh
  16. 6
      vagrant/provisioning/setup-controller.sh

22
devstack/computenode-local.conf.sample

@ -30,17 +30,6 @@ enable_service n-cpu
enable_service placement-api
enable_service ovn-controller
# For multi-node deployments using conventional DHCP and metadata agents,
# we recommend testing multiple instances of those agents. In a small
# lab environment with one controller node, you can run them on two or
# more compute nodes to simulate a more realistic environment. However,
# in a large lab that potentially simulates a production environment, you
# should determine whether to run them on controller nodes, network nodes,
# or a subset of compute nodes. Deploying too many agents can cause
# scaling issues, particularly with the message bus.
#enable_service q-dhcp
#enable_service q-meta
# Set this to the address of the main DevStack host running the rest of the
# OpenStack services.
SERVICE_HOST=<IP address of host running everything else>
@ -80,14 +69,3 @@ VNCSERVER_PROXYCLIENT_ADDRESS=$VNCSERVER_LISTEN
#PHYSICAL_NETWORK=providernet
#OVS_PHYSICAL_BRIDGE=br-provider
#PUBLIC_INTERFACE=<public interface>
# NOTE: DO NOT MOVE THIS SECTION FROM THE END OF THE FILE, AS IT
# WON'T WORK OTHERWISE
#
# If you enable the DHCP agent, you can configure the availability
# zone name (default is nova).
#[[post-config|$Q_DHCP_CONF_FILE]]
#[AGENT]
#availability_zone = nova

1
devstack/devstackgatekuryrrc

@ -23,4 +23,3 @@ export OVERRIDE_ENABLED_SERVICES=kuryr,etcd-server,docker-engine,key,n-api,n-cpu
export PROJECTS="openstack/networking-ovn openstack/kuryr $PROJECTS"
export DEVSTACK_LOCAL_CONFIG="enable_plugin networking-ovn git://git.openstack.org/openstack/networking-ovn"
export DEVSTACK_LOCAL_CONFIG+=$'\n'"enable_plugin kuryr http://git.openstack.org/openstack/kuryr"
export DEVSTACK_LOCAL_CONFIG+=$'\n'"OVN_NATIVE_DHCP=True"

2
devstack/devstackgaterc

@ -25,7 +25,6 @@ if [ -z "${RALLY_SCENARIO}" ] ; then
fi
export DEVSTACK_LOCAL_CONFIG+=$'\n'"Q_USE_PROVIDERNET_FOR_PUBLIC=True"
export DEVSTACK_LOCAL_CONFIG+=$'\n'"PHYSICAL_NETWORK=public"
export DEVSTACK_LOCAL_CONFIG+=$'\n'"OVN_NATIVE_DHCP=True"
OVS_BRANCH=$1
if [[ "${OVS_BRANCH}" == "latest-release" ]] ; then
@ -47,7 +46,6 @@ if [[ "$DEVSTACK_GATE_TOPOLOGY" == "multinode" ]] ; then
export DEVSTACK_SUBNODE_CONFIG+=$'\n'"ENABLED_SERVICES=n-cpu,dstat,c-vol,c-bak,ovn-controller"
# NOTE(rtheis): Configure OVN on the compute node.
export DEVSTACK_SUBNODE_CONFIG+=$'\n'"OVN_NATIVE_DHCP=True"
export DEVSTACK_SUBNODE_CONFIG+=$'\n'"OVN_SB_REMOTE=tcp:\$SERVICE_HOST:6642"
export DEVSTACK_SUBNODE_CONFIG+=$'\n'"OVN_NB_REMOTE=tcp:\$SERVICE_HOST:6641"

37
devstack/lib/networking-ovn

@ -72,7 +72,7 @@ OVN_INSTALL_OVS_PYTHON_MODULE=$(trueorfalse False OVN_INSTALL_OVS_PYTHON_MODULE)
# GENEVE overlay protocol overhead. Defaults to 38 bytes plus the IP version
# overhead (20 bytes for IPv4 (default) or 40 bytes for IPv6) which is determined
# based on the ML2 overlay_ip_version option. The ML2 framework will use this to
# configure the DHCP agent MTU option.
# configure the MTU DHCP option.
OVN_GENEVE_OVERHEAD=${OVN_GENEVE_OVERHEAD:-38}
# This sets whether to create a public network and bridge.
@ -174,47 +174,22 @@ function configure_ovn_plugin {
populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_sb_connection="$OVN_SB_REMOTE"
populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn neutron_sync_mode="$OVN_NEUTRON_SYNC_MODE"
populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_l3_scheduler="$OVN_L3_SCHEDULER"
populate_ml2_config /$Q_PLUGIN_CONF_FILE ovn ovn_native_dhcp="$OVN_NATIVE_DHCP"
populate_ml2_config /$Q_PLUGIN_CONF_FILE securitygroup enable_security_group="$Q_USE_SECGROUP"
inicomment /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver
fi
# NOTE(rtheis): Conventional and native DHCP are not supported at the
# same time since there aren't any confirmed benefits to supporting
# both. Also, there are a couple problems with such a setup:
# 1) Neutron still creates DHCP provisioning blocks when launching
# instances. As a result, the conventional DHCP agent must be
# up and remove the DHCP provisioning blocks or instances will
# fail to launch.
# 2) Even though the conventional DHCP agent can setup a metadata
# proxy for isolated networks, host routes are not added to an
# instance. As a result, instances aren't able to access the proxy
# via "curl http://169.254.169.254".
if is_service_enabled q-dhcp ; then
if [[ "$OVN_NATIVE_DHCP" == "True" ]]; then
die $LINENO "The q-dhcp service must be disabled with OVN_NATIVE_DHCP set to True."
fi
die $LINENO "The q-dhcp service must be disabled with OVN."
fi
if is_service_enabled q-l3 ; then
die $LINENO "The q-l3 service must be disabled with OVN."
fi
# NOTE(rtheis): OVN native layer-3 and DHCP services currently lack support
# for metadata. Thus, enabling both native services also requires enabling
# config drive to provide instances with metadata. However, if you choose the
# conventional DHCP agent instead of the native DHCP service, you can
# configure it to provide instances with metadata.
if is_service_enabled q-dhcp ; then
# Conventional DHCP agent must provide all metadata when OVN native
# layer-3 is enabled. So for consistency, regardless of the layer-3
# support, the conventional DHCP agent will be forced to provide
# metadata for all networks.
iniset $Q_DHCP_CONF_FILE DEFAULT force_metadata True
elif [[ "$OVN_NATIVE_DHCP" == "True" ]]; then
if is_service_enabled n-cpu ; then
iniset $NOVA_CONF DEFAULT force_config_drive True
fi
# NOTE(rtheis): OVN currently lacks support for metadata so enabling
# config drive is required to provide metadata to instances.
if is_service_enabled n-cpu ; then
iniset $NOVA_CONF DEFAULT force_config_drive True
fi
if is_service_enabled q-qos ; then

13
devstack/local.conf.sample

@ -108,19 +108,6 @@ disable_service q-meta
# NOTE: DO NOT MOVE THESE SECTIONS FROM THE END OF THIS FILE
# IF YOU DO, THEY WON'T WORK!!!!!
#
# Enable two DHCP agents per neutron subnet with support for availability
# zones. Requires a multi-node deployment.
#[[post-config|/$NEUTRON_CONF]]
#[DEFAULT]
#network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.AZAwareWeightScheduler
#dhcp_load_type = networks
#dhcp_agents_per_network = 2
# If you enable the DHCP agent, you can configure the availability
# zone name (default is nova).
#[[post-config|$Q_DHCP_CONF_FILE]]
#[AGENT]
#availability_zone = nova
# Enable Nova automatic host discovery for cell every 2 seconds
[[post-config|$NOVA_CONF]]

6
devstack/override-defaults

@ -6,12 +6,6 @@ Q_ML2_TENANT_NETWORK_TYPE=${Q_ML2_TENANT_NETWORK_TYPE:-"geneve"}
Q_ML2_PLUGIN_GENEVE_TYPE_OPTIONS=${Q_ML2_PLUGIN_GENEVE_TYPE_OPTIONS:-"vni_ranges=1:65536"}
ML2_L3_PLUGIN="networking_ovn.l3.l3_ovn.OVNL3RouterPlugin"
# Whether to enable using OVN's native dhcp support. If this value is disabled,
# OpenStack will use the q-dhcp functionality. If you set
# OVN_NATIVE_DHCP to False you must also enable the q-dhcp service.
# By default OVN_NATIVE_DHCP is True
OVN_NATIVE_DHCP=$(trueorfalse True OVN_NATIVE_DHCP)
# This function is invoked by DevStack's Neutron plugin setup
# code and is being overridden here since the OVN devstack
# plugin will handle the install.

3
doc/source/design/native_dhcp.rst

@ -10,9 +10,6 @@ DHCPv4 requests based on statically configured address mappings. To do this it
allows a short list of DHCPv4 options to be configured and applied at each
compute host running ovn-controller.
OVN ML2 driver uses this feature if the config option 'ovn_native_dhcp' is
set to True.
OVN northbound db provides a table 'DHCP_Options' to store the DHCP options.
Logical switch port has a reference to this table.

35
doc/source/features.rst

@ -13,18 +13,14 @@ services:
* Layer-3 (routing)
Native implementation that supports distributed routing.
Native implementation that supports distributed routing. Replaces the
conventional Neutron L3 agent.
* DHCP
Native implementation or conventional DHCP agent. The native implementation
supports distributed DHCP. However, it currently lacks
support for the Neutron internal DNS and metadata proxy features.
* Metadata
The metadata API is currently only supported in conjunction with the Neutron
DHCP agent.
Native distributed implementation. Replaces the conventional Neutron DHCP
agent. Note that the native implementation does not yet support DNS or
Metadata features.
* DPDK
@ -45,8 +41,6 @@ The following Neutron API extensions are supported with OVN:
+==================================+===========================+
| agent | agent |
+----------------------------------+---------------------------+
| Address Scopes * | address-scope |
+----------------------------------+---------------------------+
| Allowed Address Pairs | allowed-address-pairs |
+----------------------------------+---------------------------+
| Auto Allocated Topology Services | auto-allocated-topology |
@ -55,20 +49,8 @@ The following Neutron API extensions are supported with OVN:
+----------------------------------+---------------------------+
| Default Subnetpools | default-subnetpools |
+----------------------------------+---------------------------+
| DHCP Agent Scheduler ** | dhcp_agent_scheduler |
+----------------------------------+---------------------------+
| Distributed Virtual Router * | dvr |
+----------------------------------+---------------------------+
| DNS Integration * | dns-integration |
+----------------------------------+---------------------------+
| HA Router extension * | l3-ha |
+----------------------------------+---------------------------+
| L3 Agent Scheduler * | l3_agent_scheduler |
+----------------------------------+---------------------------+
| Multi Provider Network | multi-provider |
+----------------------------------+---------------------------+
| Network Availability Zone ** | network_availability_zone |
+----------------------------------+---------------------------+
| Network IP Availability | network-ip-availability |
+----------------------------------+---------------------------+
| Neutron external network | external-net |
@ -97,8 +79,6 @@ The following Neutron API extensions are supported with OVN:
+----------------------------------+---------------------------+
| Resource revision numbers | revisions |
+----------------------------------+---------------------------+
| Router Availability Zone * | router_availability_zone |
+----------------------------------+---------------------------+
| security-group | security-group |
+----------------------------------+---------------------------+
| standard-attr-description | standard-attr-description |
@ -109,8 +89,3 @@ The following Neutron API extensions are supported with OVN:
+----------------------------------+---------------------------+
| Time Stamp Fields | timestamp_core |
+----------------------------------+---------------------------+
(\*) Only applicable when conventional layer-3 agent enabled.
(\*\*) Only applicable when conventional DHCP agent enabled.

31
doc/source/install.rst

@ -141,22 +141,13 @@ primary node. See the :ref:`faq` for more information.
...
notification_drivers = ovn-qos
* (Optional) Enable the native or conventional layer-3 service.
* Enable the OVN layer-3 service.
.. code-block:: ini
[DEFAULT]
...
service_plugins = L3_SERVICE
.. note::
Replace ``L3_SERVICE`` with
``networking_ovn.l3.l3_ovn.OVNL3RouterPlugin``
to enable the native layer-3 service or with
``neutron.services.l3_router.l3_router_plugin.L3RouterPlugin``
to enable the conventional layer-3 service.
See :ref:`features` and :ref:`faq` for more information.
service_plugins = networking_ovn.l3.l3_ovn.OVNL3RouterPlugin
#. Configure the ML2 plug-in. Edit the
``/etc/neutron/plugins/ml2/ml2_conf.ini`` file:
@ -243,7 +234,7 @@ primary node. See the :ref:`faq` for more information.
The ``firewall_driver`` option under ``[securitygroup]`` is ignored
since the OVN ML2 driver itself handles security groups.
* Configure OVS database access, L3 scheduler and OVN DHCP mode
* Configure OVS database access and L3 scheduler
.. code-block:: ini
@ -252,7 +243,6 @@ primary node. See the :ref:`faq` for more information.
ovn_nb_connection = tcp:IP_ADDRESS:6641
ovn_sb_connection = tcp:IP_ADDRESS:6642
ovn_l3_scheduler = OVN_L3_SCHEDULER
ovn_native_dhcp = OVN_NATIVE_DHCP
.. note::
@ -261,9 +251,7 @@ primary node. See the :ref:`faq` for more information.
``leastloaded`` if you want the scheduler to select a compute node with
the least number of gateway ports or ``chance`` if you want the
scheduler to randomly select a compute node from the available list of
compute nodes. And finally, replace ``OVN_NATIVE_DHCP`` with ``True``
if you want to enable the native DHCP service else ``False`` to use the
conventional DHCP agent.
compute nodes.
#. Start the ``neutron-server`` service.
@ -273,9 +261,6 @@ Network nodes
Deployments using OVN native layer-3 and DHCP services do not require
conventional network nodes because connectivity to external networks
(including VTEP gateways) and routing occurs on compute nodes.
OVN currently relies on the conventional metadata agent that typically
operates on network nodes. However, you can deploy this agent on
controller or compute nodes.
Compute nodes
-------------
@ -322,14 +307,6 @@ Each compute node runs the OVS and ``ovn-controller`` services. The
Deployments without VTEP gateways can safely enable both protocols.
.. note::
Overlay network protocols generally require reducing MTU on VM
interfaces to account for additional packet overhead. See the
DHCP agent configuration in the
`Installation Guide <http://docs.openstack.org/liberty/install-guide-ubuntu/neutron-controller-install-option2.html>`_
for more information.
* Configure the overlay network local endpoint IP address.
.. code-block:: console

3
networking_ovn/common/acl.py

@ -17,7 +17,6 @@ import netaddr
from neutron_lib import constants as const
from oslo_config import cfg
from networking_ovn.common import config
from networking_ovn.common import constants as ovn_const
from networking_ovn.common import utils
@ -307,7 +306,7 @@ def add_acls(plugin, admin_context, port, sg_cache, subnet_cache):
ip['subnet_id'])
# Ignore duplicate DHCP ACLs for the subnet.
if subnet['id'] not in port_subnet_ids:
acl_list += add_acl_dhcp(port, subnet, config.is_ovn_dhcp())
acl_list += add_acl_dhcp(port, subnet, True)
port_subnet_ids.add(subnet['id'])
# We create an ACL entry for each rule on each security group applied

11
networking_ovn/common/config.py

@ -74,13 +74,10 @@ ovn_opts = [
default="/var/run/openvswitch",
help=_("The directory in which vhost virtio socket "
"is created by all the vswitch daemons")),
cfg.BoolOpt('ovn_native_dhcp',
default=True,
help=_('Whether to use OVN native dhcp support')),
cfg.IntOpt('dhcp_default_lease_time',
default=(12 * 60 * 60),
help=_('Default least time (in seconds ) to use when '
'ovn_native_dhcp is enabled.'))
help=_('Default lease time (in seconds) to use with '
'OVN\'s native DHCP service.'))
]
cfg.CONF.register_opts(ovn_opts, group='ovn')
@ -120,9 +117,5 @@ def get_ovn_vhost_sock_dir():
return cfg.CONF.ovn.vhost_sock_dir
def is_ovn_dhcp():
return cfg.CONF.ovn.ovn_native_dhcp
def get_ovn_dhcp_default_lease_time():
return cfg.CONF.ovn.dhcp_default_lease_time

18
networking_ovn/ml2/mech_driver.py

@ -366,26 +366,24 @@ class OVNMechanismDriver(driver_api.MechanismDriver):
def create_subnet_postcommit(self, context):
subnet = context.current
if subnet['enable_dhcp'] and config.is_ovn_dhcp():
if subnet['enable_dhcp']:
self.add_subnet_dhcp_options_in_ovn(subnet,
context.network.current)
def update_subnet_postcommit(self, context):
subnet = context.current
if config.is_ovn_dhcp() and (
subnet['enable_dhcp'] or context.original['enable_dhcp']):
if subnet['enable_dhcp'] or context.original['enable_dhcp']:
self.add_subnet_dhcp_options_in_ovn(subnet,
context.network.current)
def delete_subnet_postcommit(self, context):
subnet = context.current
if config.is_ovn_dhcp():
with self._nb_ovn.transaction(check_error=True) as txn:
subnet_dhcp_options = self._nb_ovn.get_subnet_dhcp_options(
subnet['id'])
if subnet_dhcp_options:
txn.add(self._nb_ovn.delete_dhcp_options(
subnet_dhcp_options['uuid']))
with self._nb_ovn.transaction(check_error=True) as txn:
subnet_dhcp_options = self._nb_ovn.get_subnet_dhcp_options(
subnet['id'])
if subnet_dhcp_options:
txn.add(self._nb_ovn.delete_dhcp_options(
subnet_dhcp_options['uuid']))
def add_subnet_dhcp_options_in_ovn(self, subnet, network,
ovn_dhcp_options=None):

6
networking_ovn/ovn_db_sync.py

@ -571,9 +571,6 @@ class OvnNbSynchronizer(OvnDbSynchronizer):
def _sync_subnet_dhcp_options(self, ctx, db_networks,
ovn_subnet_dhcp_options):
if not config.is_ovn_dhcp():
return
LOG.debug('OVN-NB Sync DHCP options for Neutron subnets started')
db_subnets = {}
@ -637,9 +634,6 @@ class OvnNbSynchronizer(OvnDbSynchronizer):
def _sync_port_dhcp_options(self, ctx, ports_need_sync_dhcp_opts,
ovn_port_dhcpv4_opts, ovn_port_dhcpv6_opts):
if not config.is_ovn_dhcp():
return
LOG.debug('OVN-NB Sync DHCP options for Neutron ports with extra '
'dhcp options assigned started')

12
networking_ovn/tests/unit/ml2/test_mech_driver.py

@ -215,12 +215,6 @@ class TestOVNMechanismDriver(test_plugin.Ml2PluginV2TestCase):
def test_add_acls_with_sec_group_native_dhcp_enabled(self):
self._test_add_acls_with_sec_group_helper()
def test_add_acls_with_sec_group_native_dhcp_disabled(self):
config.cfg.CONF.set_override('ovn_native_dhcp',
False,
group='ovn')
self._test_add_acls_with_sec_group_helper(native_dhcp=False)
def test_port_invalid_binding_profile(self):
invalid_binding_profiles = [
{'tag': 0,
@ -454,12 +448,6 @@ class TestOVNMechanismDriver(test_plugin.Ml2PluginV2TestCase):
def test_create_port_with_security_groups_native_dhcp_enabled(self):
self._test_create_port_with_security_groups_helper(7)
def test_create_port_with_security_groups_native_dhcp_disabled(self):
config.cfg.CONF.set_override('ovn_native_dhcp',
False,
group='ovn')
self._test_create_port_with_security_groups_helper(8)
def test_update_port_changed_security_groups(self):
with self.network(set_context=True, tenant_id='test') as net1:
with self.subnet(network=net1) as subnet1:

5
vagrant/provisioning/setup-compute.sh

@ -31,11 +31,6 @@ OVN_NB_REMOTE=tcp:$OVN_DB_IP:6641
LOGFILE=/opt/stack/log/stack.sh.log
SCREEN_LOGDIR=/opt/stack/log/data
# Disable the OVN native DHCP service and enable the conventional DHCP and
# metadata agents on the compute node.
OVN_NATIVE_DHCP=False
enable_service q-dhcp q-meta
# Use provider network for public.
Q_USE_PROVIDERNET_FOR_PUBLIC=True
OVS_PHYSICAL_BRIDGE=br-provider

6
vagrant/provisioning/setup-controller.sh

@ -42,12 +42,6 @@ disable_service ovn-northd
# on the controller node that depend on it.
disable_service ovn-controller
# Disable the OVN native DHCP service and conventional DHCP and metadata
# agents on the controller node because the architecture deploys them on
# compute nodes.
OVN_NATIVE_DHCP=False
disable_service q-dhcp q-meta
# Disable the nova compute service on the controller node because the
# architecture only deploys it on separate compute nodes.
disable_service n-cpu

Loading…
Cancel
Save