Browse Source

update files under etc for tacker

Change-Id: Ic11cfd8786588d91cec7ba135c16da95d95ddffc
changes/30/104830/1
Isaku Yamahata 8 years ago
parent
commit
91ce4886a9
  1. 30
      etc/api-paste.ini
  2. 88
      etc/dhcp_agent.ini
  3. 3
      etc/fwaas_driver.ini
  4. 30
      etc/init.d/tacker-server
  5. 79
      etc/l3_agent.ini
  6. 42
      etc/lbaas_agent.ini
  7. 59
      etc/metadata_agent.ini
  8. 15
      etc/metering_agent.ini
  9. 114
      etc/neutron/plugins/bigswitch/restproxy.ini
  10. 3
      etc/neutron/plugins/bigswitch/ssl/ca_certs/README
  11. 6
      etc/neutron/plugins/bigswitch/ssl/host_certs/README
  12. 29
      etc/neutron/plugins/brocade/brocade.ini
  13. 138
      etc/neutron/plugins/cisco/cisco_plugins.ini
  14. 22
      etc/neutron/plugins/cisco/cisco_vpn_agent.ini
  15. 41
      etc/neutron/plugins/embrane/heleos_conf.ini
  16. 63
      etc/neutron/plugins/hyperv/hyperv_neutron_plugin.ini
  17. 50
      etc/neutron/plugins/ibm/sdnve_neutron_plugin.ini
  18. 78
      etc/neutron/plugins/linuxbridge/linuxbridge_conf.ini
  19. 31
      etc/neutron/plugins/metaplugin/metaplugin.ini
  20. 19
      etc/neutron/plugins/midonet/midonet.ini
  21. 62
      etc/neutron/plugins/ml2/ml2_conf.ini
  22. 45
      etc/neutron/plugins/ml2/ml2_conf_arista.ini
  23. 13
      etc/neutron/plugins/ml2/ml2_conf_brocade.ini
  24. 94
      etc/neutron/plugins/ml2/ml2_conf_cisco.ini
  25. 52
      etc/neutron/plugins/ml2/ml2_conf_fslsdn.ini
  26. 6
      etc/neutron/plugins/ml2/ml2_conf_mlnx.ini
  27. 28
      etc/neutron/plugins/ml2/ml2_conf_ncs.ini
  28. 30
      etc/neutron/plugins/ml2/ml2_conf_odl.ini
  29. 13
      etc/neutron/plugins/ml2/ml2_conf_ofa.ini
  30. 79
      etc/neutron/plugins/mlnx/mlnx_conf.ini
  31. 60
      etc/neutron/plugins/nec/nec.ini
  32. 10
      etc/neutron/plugins/nuage/nuage_plugin.ini
  33. 35
      etc/neutron/plugins/oneconvergence/nvsdplugin.ini
  34. 179
      etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini
  35. 14
      etc/neutron/plugins/plumgrid/plumgrid.ini
  36. 44
      etc/neutron/plugins/ryu/ryu.ini
  37. 202
      etc/neutron/plugins/vmware/nsx.ini
  38. 14
      etc/neutron/rootwrap.d/debug.filters
  39. 38
      etc/neutron/rootwrap.d/dhcp.filters
  40. 21
      etc/neutron/rootwrap.d/iptables-firewall.filters
  41. 41
      etc/neutron/rootwrap.d/l3.filters
  42. 26
      etc/neutron/rootwrap.d/lbaas-haproxy.filters
  43. 19
      etc/neutron/rootwrap.d/linuxbridge-plugin.filters
  44. 12
      etc/neutron/rootwrap.d/nec-plugin.filters
  45. 22
      etc/neutron/rootwrap.d/openvswitch-plugin.filters
  46. 21
      etc/neutron/rootwrap.d/ryu-plugin.filters
  47. 13
      etc/neutron/rootwrap.d/vpnaas.filters
  48. 40
      etc/services.conf
  49. 30
      etc/tacker/api-paste.ini
  50. 0
      etc/tacker/policy.json
  51. 4
      etc/tacker/rootwrap.conf
  52. 21
      etc/tacker/rootwrap.d/servicevm.filters
  53. 114
      etc/tacker/tacker.conf
  54. 14
      etc/vpn_agent.ini

30
etc/api-paste.ini

@@ -1,30 +0,0 @@
[composite:neutron]
use = egg:Paste#urlmap
/: neutronversions
/v2.0: neutronapi_v2_0
[composite:neutronapi_v2_0]
use = call:neutron.auth:pipeline_factory
noauth = request_id catch_errors extensions neutronapiapp_v2_0
keystone = request_id catch_errors authtoken keystonecontext extensions neutronapiapp_v2_0
[filter:request_id]
paste.filter_factory = neutron.openstack.common.middleware.request_id:RequestIdMiddleware.factory
[filter:catch_errors]
paste.filter_factory = neutron.openstack.common.middleware.catch_errors:CatchErrorsMiddleware.factory
[filter:keystonecontext]
paste.filter_factory = neutron.auth:NeutronKeystoneContext.factory
[filter:authtoken]
paste.filter_factory = keystoneclient.middleware.auth_token:filter_factory
[filter:extensions]
paste.filter_factory = neutron.api.extensions:plugin_aware_extension_middleware_factory
[app:neutronversions]
paste.app_factory = neutron.api.versions:Versions.factory
[app:neutronapiapp_v2_0]
paste.app_factory = neutron.api.v2.router:APIRouter.factory

88
etc/dhcp_agent.ini

@@ -1,88 +0,0 @@
[DEFAULT]
# Show debugging output in log (sets DEBUG log level output)
# debug = False
# The DHCP agent will resync its state with Neutron to recover from any
# transient notification or rpc errors. The interval is number of
# seconds between attempts.
# resync_interval = 5
# The DHCP agent requires an interface driver be set. Choose the one that best
# matches your plugin.
# interface_driver =
# Example of interface_driver option for OVS based plugins(OVS, Ryu, NEC, NVP,
# BigSwitch/Floodlight)
# interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
# Name of Open vSwitch bridge to use
# ovs_integration_bridge = br-int
# Use veth for an OVS interface or not.
# Support kernels with limited namespace support
# (e.g. RHEL 6.5) so long as ovs_use_veth is set to True.
# ovs_use_veth = False
# Example of interface_driver option for LinuxBridge
# interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver
# The agent can use other DHCP drivers. Dnsmasq is the simplest and requires
# no additional setup of the DHCP server.
# dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
# Allow overlapping IP (Must have kernel build with CONFIG_NET_NS=y and
# iproute2 package that supports namespaces).
# use_namespaces = True
# The DHCP server can assist with providing metadata support on isolated
# networks. Setting this value to True will cause the DHCP server to append
# specific host routes to the DHCP request. The metadata service will only
# be activated when the subnet does not contain any router port. The guest
# instance must be configured to request host routes via DHCP (Option 121).
# enable_isolated_metadata = False
# Allows for serving metadata requests coming from a dedicated metadata
# access network whose cidr is 169.254.169.254/16 (or larger prefix), and
# is connected to a Neutron router from which the VMs send metadata
# request. In this case DHCP Option 121 will not be injected in VMs, as
# they will be able to reach 169.254.169.254 through a router.
# This option requires enable_isolated_metadata = True
# enable_metadata_network = False
# Number of threads to use during sync process. Should not exceed connection
# pool size configured on server.
# num_sync_threads = 4
# Location to store DHCP server config files
# dhcp_confs = $state_path/dhcp
# Domain to use for building the hostnames
# dhcp_domain = openstacklocal
# Override the default dnsmasq settings with this file
# dnsmasq_config_file =
# Comma-separated list of DNS servers which will be used by dnsmasq
# as forwarders.
# dnsmasq_dns_servers =
# Limit number of leases to prevent a denial-of-service.
# dnsmasq_lease_max = 16777216
# Location to DHCP lease relay UNIX domain socket
# dhcp_lease_relay_socket = $state_path/dhcp/lease_relay
# Location of Metadata Proxy UNIX domain socket
# metadata_proxy_socket = $state_path/metadata_proxy
# dhcp_delete_namespaces, which is false by default, can be set to True if
# namespaces can be deleted cleanly on the host running the dhcp agent.
# Do not enable this until you understand the problem with the Linux iproute
# utility mentioned in https://bugs.launchpad.net/neutron/+bug/1052535 and
# you are sure that your version of iproute does not suffer from the problem.
# If True, namespaces will be deleted when a dhcp server is disabled.
# dhcp_delete_namespaces = False
# Timeout for ovs-vsctl commands.
# If the timeout expires, ovs commands will fail with ALARMCLOCK error.
# ovs_vsctl_timeout = 10

3
etc/fwaas_driver.ini

@@ -1,3 +0,0 @@
[fwaas]
#driver = neutron.services.firewall.drivers.linux.iptables_fwaas.IptablesFwaasDriver
#enabled = True

30
etc/init.d/neutron-server → etc/init.d/tacker-server

@@ -1,36 +1,36 @@
#! /bin/sh
### BEGIN INIT INFO
# Provides: neutron-server
# Provides: tacker-server
# Required-Start: $remote_fs $syslog
# Required-Stop: $remote_fs $syslog
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: neutron-server
# Description: Provides the Neutron networking service
# Short-Description: tacker-server
# Description: Provides the Tacker servicevm/device manager service
### END INIT INFO
set -e
PIDFILE=/var/run/neutron/neutron-server.pid
LOGFILE=/var/log/neutron/neutron-server.log
PIDFILE=/var/run/tacker/tacker-server.pid
LOGFILE=/var/log/tacker/tacker-server.log
DAEMON=/usr/bin/neutron-server
DAEMON=/usr/bin/tacker-server
DAEMON_ARGS="--log-file=$LOGFILE"
DAEMON_DIR=/var/run
ENABLED=true
if test -f /etc/default/neutron-server; then
. /etc/default/neutron-server
if test -f /etc/default/tacker-server; then
. /etc/default/tacker-server
fi
mkdir -p /var/run/neutron
mkdir -p /var/log/neutron
mkdir -p /var/run/tacker
mkdir -p /var/log/tacker
. /lib/lsb/init-functions
export PATH="${PATH:+$PATH:}/usr/sbin:/sbin"
export TMPDIR=/var/lib/neutron/tmp
export TMPDIR=/var/lib/tacker/tmp
if [ ! -x ${DAEMON} ] ; then
exit 0
@@ -39,13 +39,13 @@ fi
case "$1" in
start)
test "$ENABLED" = "true" || exit 0
log_daemon_msg "Starting neutron server" "neutron-server"
log_daemon_msg "Starting tacker server" "tacker-server"
start-stop-daemon -Sbmv --pidfile $PIDFILE --chdir $DAEMON_DIR --exec $DAEMON -- $DAEMON_ARGS
log_end_msg $?
;;
stop)
test "$ENABLED" = "true" || exit 0
log_daemon_msg "Stopping neutron server" "neutron-server"
log_daemon_msg "Stopping tacker server" "tacker-server"
start-stop-daemon --stop --oknodo --pidfile ${PIDFILE}
log_end_msg $?
;;
@@ -57,10 +57,10 @@ case "$1" in
;;
status)
test "$ENABLED" = "true" || exit 0
status_of_proc -p $PIDFILE $DAEMON neutron-server && exit 0 || exit $?
status_of_proc -p $PIDFILE $DAEMON tacker-server && exit 0 || exit $?
;;
*)
log_action_msg "Usage: /etc/init.d/neutron-server {start|stop|restart|force-reload|status}"
log_action_msg "Usage: /etc/init.d/tacker-server {start|stop|restart|force-reload|status}"
exit 1
;;
esac

79
etc/l3_agent.ini

@@ -1,79 +0,0 @@
[DEFAULT]
# Show debugging output in log (sets DEBUG log level output)
# debug = False
# L3 requires that an interface driver be set. Choose the one that best
# matches your plugin.
# interface_driver =
# Example of interface_driver option for OVS based plugins (OVS, Ryu, NEC)
# that supports L3 agent
# interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
# Use veth for an OVS interface or not.
# Support kernels with limited namespace support
# (e.g. RHEL 6.5) so long as ovs_use_veth is set to True.
# ovs_use_veth = False
# Example of interface_driver option for LinuxBridge
# interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver
# Allow overlapping IP (Must have kernel build with CONFIG_NET_NS=y and
# iproute2 package that supports namespaces).
# use_namespaces = True
# If use_namespaces is set as False then the agent can only configure one router.
# This is done by setting the specific router_id.
# router_id =
# When external_network_bridge is set, each L3 agent can be associated
# with no more than one external network. This value should be set to the UUID
# of that external network. To allow L3 agent support multiple external
# networks, both the external_network_bridge and gateway_external_network_id
# must be left empty.
# gateway_external_network_id =
# Indicates that this L3 agent should also handle routers that do not have
# an external network gateway configured. This option should be True only
# for a single agent in a Neutron deployment, and may be False for all agents
# if all routers must have an external network gateway
# handle_internal_only_routers = True
# Name of bridge used for external network traffic. This should be set to
# empty value for the linux bridge. when this parameter is set, each L3 agent
# can be associated with no more than one external network.
# external_network_bridge = br-ex
# TCP Port used by Neutron metadata server
# metadata_port = 9697
# Send this many gratuitous ARPs for HA setup. Set it below or equal to 0
# to disable this feature.
# send_arp_for_ha = 3
# seconds between re-sync routers' data if needed
# periodic_interval = 40
# seconds to start to sync routers' data after
# starting agent
# periodic_fuzzy_delay = 5
# enable_metadata_proxy, which is true by default, can be set to False
# if the Nova metadata server is not available
# enable_metadata_proxy = True
# Location of Metadata Proxy UNIX domain socket
# metadata_proxy_socket = $state_path/metadata_proxy
# router_delete_namespaces, which is false by default, can be set to True if
# namespaces can be deleted cleanly on the host running the L3 agent.
# Do not enable this until you understand the problem with the Linux iproute
# utility mentioned in https://bugs.launchpad.net/neutron/+bug/1052535 and
# you are sure that your version of iproute does not suffer from the problem.
# If True, namespaces will be deleted when a router is destroyed.
# router_delete_namespaces = False
# Timeout for ovs-vsctl commands.
# If the timeout expires, ovs commands will fail with ALARMCLOCK error.
# ovs_vsctl_timeout = 10

42
etc/lbaas_agent.ini

@@ -1,42 +0,0 @@
[DEFAULT]
# Show debugging output in log (sets DEBUG log level output).
# debug = False
# The LBaaS agent will resync its state with Neutron to recover from any
# transient notification or rpc errors. The interval is number of
# seconds between attempts.
# periodic_interval = 10
# LBaas requires an interface driver be set. Choose the one that best
# matches your plugin.
# interface_driver =
# Example of interface_driver option for OVS based plugins (OVS, Ryu, NEC, NVP,
# BigSwitch/Floodlight)
# interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
# Use veth for an OVS interface or not.
# Support kernels with limited namespace support
# (e.g. RHEL 6.5) so long as ovs_use_veth is set to True.
# ovs_use_veth = False
# Example of interface_driver option for LinuxBridge
# interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver
# The agent requires drivers to manage the loadbalancer. HAProxy is the opensource version.
# Multiple device drivers reflecting different service providers could be specified:
# device_driver = path.to.provider1.driver.Driver
# device_driver = path.to.provider2.driver.Driver
# Default is:
# device_driver = neutron.services.loadbalancer.drivers.haproxy.namespace_driver.HaproxyNSDriver
[haproxy]
# Location to store config and state files
# loadbalancer_state_path = $state_path/lbaas
# The user group
# user_group = nogroup
# When delete and re-add the same vip, send this many gratuitous ARPs to flush
# the ARP cache in the Router. Set it below or equal to 0 to disable this feature.
# send_gratuitous_arp = 3

59
etc/metadata_agent.ini

@@ -1,59 +0,0 @@
[DEFAULT]
# Show debugging output in log (sets DEBUG log level output)
# debug = True
# The Neutron user information for accessing the Neutron API.
auth_url = http://localhost:5000/v2.0
auth_region = RegionOne
# Turn off verification of the certificate for ssl
# auth_insecure = False
# Certificate Authority public key (CA cert) file for ssl
# auth_ca_cert =
admin_tenant_name = %SERVICE_TENANT_NAME%
admin_user = %SERVICE_USER%
admin_password = %SERVICE_PASSWORD%
# Network service endpoint type to pull from the keystone catalog
# endpoint_type = adminURL
# IP address used by Nova metadata server
# nova_metadata_ip = 127.0.0.1
# TCP Port used by Nova metadata server
# nova_metadata_port = 8775
# Which protocol to use for requests to Nova metadata server, http or https
# nova_metadata_protocol = http
# Whether insecure SSL connection should be accepted for Nova metadata server
# requests
# nova_metadata_insecure = False
# Client certificate for nova api, needed when nova api requires client
# certificates
# nova_client_cert =
# Private key for nova client certificate
# nova_client_priv_key =
# When proxying metadata requests, Neutron signs the Instance-ID header with a
# shared secret to prevent spoofing. You may select any string for a secret,
# but it must match here and in the configuration used by the Nova Metadata
# Server. NOTE: Nova uses a different key: neutron_metadata_proxy_shared_secret
# metadata_proxy_shared_secret =
# Location of Metadata Proxy UNIX domain socket
# metadata_proxy_socket = $state_path/metadata_proxy
# Number of separate worker processes for metadata server. Defaults to
# half the number of CPU cores
# metadata_workers =
# Number of backlog requests to configure the metadata server socket with
# metadata_backlog = 4096
# URL to connect to the cache backend.
# default_ttl=0 parameter will cause cache entries to never expire.
# Otherwise default_ttl specifies time in seconds a cache entry is valid for.
# No cache is used in case no value is passed.
# cache_url = memory://?default_ttl=5

15
etc/metering_agent.ini

@@ -1,15 +0,0 @@
[DEFAULT]
# Show debugging output in log (sets DEBUG log level output)
# debug = True
# driver = neutron.services.metering.drivers.iptables.iptables_driver.IptablesMeteringDriver
# Interval between two metering measures
# measure_interval = 30
# Interval between two metering reports
# report_interval = 300
# interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
# use_namespaces = True

114
etc/neutron/plugins/bigswitch/restproxy.ini

@@ -1,114 +0,0 @@
# Config file for neutron-proxy-plugin.
[restproxy]
# All configuration for this plugin is in section '[restproxy]'
#
# The following parameters are supported:
# servers : <host:port>[,<host:port>]* (Error if not set)
# server_auth : <username:password> (default: no auth)
# server_ssl : True | False (default: True)
# ssl_cert_directory : <path> (default: /etc/neutron/plugins/bigswitch/ssl)
# no_ssl_validation : True | False (default: False)
# ssl_sticky : True | False (default: True)
# sync_data : True | False (default: False)
# auto_sync_on_failure : True | False (default: True)
# consistency_interval : <integer> (default: 60 seconds)
# server_timeout : <integer> (default: 10 seconds)
# neutron_id : <string> (default: neutron-<hostname>)
# add_meta_server_route : True | False (default: True)
# thread_pool_size : <int> (default: 4)
# A comma separated list of BigSwitch or Floodlight servers and port numbers. The plugin proxies the requests to the BigSwitch/Floodlight server, which performs the networking configuration. Note that only one server is needed per deployment, but you may wish to deploy multiple servers to support failover.
servers=localhost:8080
# The username and password for authenticating against the BigSwitch or Floodlight controller.
# server_auth=username:password
# Use SSL when connecting to the BigSwitch or Floodlight controller.
# server_ssl=True
# Directory which contains the ca_certs and host_certs to be used to validate
# controller certificates.
# ssl_cert_directory=/etc/neutron/plugins/bigswitch/ssl/
# If a certificate does not exist for a controller, trust and store the first
# certificate received for that controller and use it to validate future
# connections to that controller.
# ssl_sticky=True
# Do not validate the controller certificates for SSL
# Warning: This will not provide protection against man-in-the-middle attacks
# no_ssl_validation=False
# Sync data on connect
# sync_data=False
# If neutron fails to create a resource because the backend controller
# doesn't know of a dependency, automatically trigger a full data
# synchronization to the controller.
# auto_sync_on_failure=True
# Time between verifications that the backend controller
# database is consistent with Neutron. (0 to disable)
# consistency_interval = 60
# Maximum number of seconds to wait for proxy request to connect and complete.
# server_timeout=10
# User defined identifier for this Neutron deployment
# neutron_id =
# Flag to decide if a route to the metadata server should be injected into the VM
# add_meta_server_route = True
# Number of threads to use to handle large volumes of port creation requests
# thread_pool_size = 4
[nova]
# Specify the VIF_TYPE that will be controlled on the Nova compute instances
# options: ivs or ovs
# default: ovs
# vif_type = ovs
# Overrides for vif types based on nova compute node host IDs
# Comma separated list of host IDs to fix to a specific VIF type
# The VIF type is taken from the end of the configuration item
# node_override_vif_<vif_type>
# For example, the following would set the VIF type to IVS for
# host-id1 and host-id2
# node_override_vif_ivs=host-id1,host-id2
[router]
# Specify the default router rules installed in newly created tenant routers
# Specify multiple times for multiple rules
# Format is <tenant>:<source>:<destination>:<action>
# Optionally, a comma-separated list of nexthops may be included after <action>
# Use an * to specify default for all tenants
# Default is any any allow for all tenants
# tenant_default_router_rule=*:any:any:permit
# Maximum number of rules that a single router may have
# Default is 200
# max_router_rules=200
[restproxyagent]
# Specify the name of the bridge used on compute nodes
# for attachment.
# Default: br-int
# integration_bridge=br-int
# Change the frequency of polling by the restproxy agent.
# Value is seconds
# Default: 5
# polling_interval=5
# Virtual switch type on the compute node.
# Options: ovs or ivs
# Default: ovs
# virtual_switch_type = ovs
[securitygroup]
# Controls if neutron security group is enabled or not.
# It should be false when you use nova security group.
# enable_security_group = True

3
etc/neutron/plugins/bigswitch/ssl/ca_certs/README

@@ -1,3 +0,0 @@
Certificates in this folder will be used to
verify signatures for any controllers the plugin
connects to.

6
etc/neutron/plugins/bigswitch/ssl/host_certs/README

@@ -1,6 +0,0 @@
Certificates in this folder must match the name
of the controller they should be used to authenticate
with a .pem extension.
For example, the certificate for the controller
"192.168.0.1" should be named "192.168.0.1.pem".

29
etc/neutron/plugins/brocade/brocade.ini

@@ -1,29 +0,0 @@
[switch]
# username = The SSH username to use
# password = The SSH password to use
# address = The address of the host to SSH to
# ostype = Should be NOS, but is unused otherwise
#
# Example:
# username = admin
# password = password
# address = 10.24.84.38
# ostype = NOS
[physical_interface]
# physical_interface = The network interface to use when creating a port
#
# Example:
# physical_interface = physnet1
[vlans]
# network_vlan_ranges = <physical network name>:nnnn:mmmm
#
# Example:
# network_vlan_ranges = physnet1:1000:2999
[linux_bridge]
# physical_interface_mappings = <physical network name>:<local interface>
#
# Example:
# physical_interface_mappings = physnet1:em1

138
etc/neutron/plugins/cisco/cisco_plugins.ini

@@ -1,138 +0,0 @@
[cisco_plugins]
# (StrOpt) Period-separated module path to the plugin class to use for
# the Cisco Nexus switches.
#
# nexus_plugin = neutron.plugins.cisco.nexus.cisco_nexus_plugin_v2.NexusPlugin
# (StrOpt) Period-separated module path to the plugin class to use for
# the virtual switches on compute nodes.
#
# vswitch_plugin = neutron.plugins.openvswitch.ovs_neutron_plugin.OVSNeutronPluginV2
[cisco]
# (StrOpt) A short prefix to prepend to the VLAN number when creating a
# VLAN interface. For example, if an interface is being created for
# VLAN 2001 it will be named 'q-2001' using the default prefix.
#
# vlan_name_prefix = q-
# Example: vlan_name_prefix = vnet-
# (StrOpt) A short prefix to prepend to the VLAN number when creating a
# provider VLAN interface. For example, if an interface is being created
# for provider VLAN 3003 it will be named 'p-3003' using the default prefix.
#
# provider_vlan_name_prefix = p-
# Example: provider_vlan_name_prefix = PV-
# (BoolOpt) A flag indicating whether Openstack networking should manage the
# creation and removal of VLAN interfaces for provider networks on the Nexus
# switches. If the flag is set to False then Openstack will not create or
# remove VLAN interfaces for provider networks, and the administrator needs
# to manage these interfaces manually or by external orchestration.
#
# provider_vlan_auto_create = True
# (BoolOpt) A flag indicating whether Openstack networking should manage
# the adding and removing of provider VLANs from trunk ports on the Nexus
# switches. If the flag is set to False then Openstack will not add or
# remove provider VLANs from trunk ports, and the administrator needs to
# manage these operations manually or by external orchestration.
#
# provider_vlan_auto_trunk = True
# (StrOpt) Period-separated module path to the model class to use for
# the Cisco neutron plugin.
#
# model_class = neutron.plugins.cisco.models.virt_phy_sw_v2.VirtualPhysicalSwitchModelV2
# (StrOpt) Period-separated module path to the driver class to use for
# the Cisco Nexus switches.
#
# If no value is configured, a fake driver will be used.
# nexus_driver = neutron.plugins.cisco.test.nexus.fake_nexus_driver.CiscoNEXUSFakeDriver
# With real hardware, use the CiscoNEXUSDriver class:
# nexus_driver = neutron.plugins.cisco.nexus.cisco_nexus_network_driver_v2.CiscoNEXUSDriver
# (BoolOpt) A flag to enable Layer 3 support on the Nexus switches.
# Note: This feature is not supported on all models/versions of Cisco
# Nexus switches. To use this feature, all of the Nexus switches in the
# deployment must support it.
# nexus_l3_enable = False
# (BoolOpt) A flag to enable round robin scheduling of routers for SVI.
# svi_round_robin = False
# Cisco Nexus Switch configurations.
# Each switch to be managed by Openstack Neutron must be configured here.
#
# Cisco Nexus Switch Format.
# [NEXUS_SWITCH:<IP address of switch>]
# <hostname>=<port> (1)
# ssh_port=<ssh port> (2)
# username=<credential username> (3)
# password=<credential password> (4)
#
# (1) For each host connected to a port on the switch, specify the hostname
# and the Nexus physical port (interface) it is connected to.
# (2) The TCP port for connecting via SSH to manage the switch. This is
# port number 22 unless the switch has been configured otherwise.
# (3) The username for logging into the switch to manage it.
# (4) The password for logging into the switch to manage it.
#
# Example:
# [NEXUS_SWITCH:1.1.1.1]
# compute1=1/1
# compute2=1/2
# ssh_port=22
# username=admin
# password=mySecretPassword
#
# N1KV Format.
# [N1KV:<IP address of VSM>]
# username=<credential username>
# password=<credential password>
#
# Example:
# [N1KV:2.2.2.2]
# username=admin
# password=mySecretPassword
[cisco_n1k]
# (StrOpt) Specify the name of the integration bridge to which the VIFs are
# attached.
#
# integration_bridge = br-int
# (StrOpt) Name of the policy profile to be associated with a port when no
# policy profile is specified during port creates.
#
# default_policy_profile =
# Example: default_policy_profile = service_profile
# (StrOpt) Name of the policy profile to be associated with a port owned by
# network node (dhcp, router).
#
# network_node_policy_profile =
# Example: network_node_policy_profile = dhcp_pp
# (StrOpt) Name of the network profile to be associated with a network when no
# network profile is specified during network creates. Admin should pre-create
# a network profile with this name.
#
# default_network_profile =
# Example: default_network_profile = network_pool
# (IntOpt) Time in seconds for which the plugin polls the VSM for updates in
# policy profiles.
#
# poll_duration =
# Example: poll_duration = 180
# (IntOpt) Number of threads to use to make HTTP requests to the VSM.
#
# http_pool_size = 4

22
etc/neutron/plugins/cisco/cisco_vpn_agent.ini

@@ -1,22 +0,0 @@
[cisco_csr_ipsec]
# Status check interval in seconds, for VPNaaS IPSec connections used on CSR
# status_check_interval = 60
# Cisco CSR management port information for REST access used by VPNaaS
# TODO(pcm): Remove once CSR is integrated in as a Neutron router.
#
# Format is:
# [cisco_csr_rest:<public IP>]
# rest_mgmt = <mgmt port IP>
# tunnel_ip = <tunnel IP>
# username = <user>
# password = <password>
# timeout = <timeout>
#
# where:
# public IP ----- Public IP address of router used with a VPN service (1:1 with CSR)
# tunnel IP ----- Public IP address of the CSR used for the IPSec tunnel
# mgmt port IP -- IP address of CSR for REST API access (not console port)
# user ---------- Username for REST management port access to Cisco CSR
# password ------ Password for REST management port access to Cisco CSR
# timeout ------- REST request timeout to Cisco CSR (optional)

41
etc/neutron/plugins/embrane/heleos_conf.ini

@@ -1,41 +0,0 @@
[heleos]
#configure the ESM management address
#in the first version of this plugin, only one ESM can be specified
#Example:
#esm_mgmt=
#configure admin username and password
#admin_username=
#admin_password=
#router image id
#Example:
#router_image=932ce713-e210-3d54-a0a5-518b0b5ee1b0
#mgmt shared security zone id
#defines the shared management security zone. Each tenant can have a private one configured through the ESM
#Example:
#mgmt_id=c0bc9b6c-f110-46cf-bb01-733bfe4b5a1a
#in-band shared security zone id
#defines the shared in-band security zone. Each tenant can have a private one configured through the ESM
#Example:
#inband_id=a6b7999d-3806-4b04-81f6-e0c5c8271afc
#oob-band shared security zone id
#defines the shared out-of-band security zone. Each tenant can have a private one configured through the ESM
#Example:
#oob_id=e7eda5cc-b977-46cb-9c14-cab43c1b7871
#dummy security zone id
#defines the dummy security zone ID. this security zone will be used by the DVAs with no neutron interfaces
#Example:
#dummy_utif_id=d9911310-25fc-4733-a2e0-c0eda024ef08
#resource pool id
#define the shared resource pool. Each tenant can have a private one configured through the ESM
#Example
#resource_pool_id=
#define if the requests have to be executed asynchronously by the plugin or not
#async_requests=

63
etc/neutron/plugins/hyperv/hyperv_neutron_plugin.ini

@@ -1,63 +0,0 @@
[hyperv]
# (StrOpt) Type of network to allocate for tenant networks. The
# default value 'local' is useful only for single-box testing and
# provides no connectivity between hosts. You MUST either change this
# to 'vlan' and configure network_vlan_ranges below or to 'flat'.
# Set to 'none' to disable creation of tenant networks.
#
# tenant_network_type = local
# Example: tenant_network_type = vlan
# (ListOpt) Comma-separated list of
# <physical_network>[:<vlan_min>:<vlan_max>] tuples enumerating ranges
# of VLAN IDs on named physical networks that are available for
# allocation. All physical networks listed are available for flat and
# VLAN provider network creation. Specified ranges of VLAN IDs are
# available for tenant network allocation if tenant_network_type is
# 'vlan'. If empty, only gre and local networks may be created.
#
# network_vlan_ranges =
# Example: network_vlan_ranges = physnet1:1000:2999
[agent]
# Agent's polling interval in seconds
# polling_interval = 2
# (ListOpt) Comma separated list of <physical_network>:<vswitch>
# where the physical networks can be expressed with wildcards,
# e.g.: "*:external".
# The referred external virtual switches need to be already present on
# the Hyper-V server.
# If a given physical network name will not match any value in the list
# the plugin will look for a virtual switch with the same name.
#
# physical_network_vswitch_mappings = *:external
# Example: physical_network_vswitch_mappings = net1:external1,net2:external2
# (StrOpt) Private virtual switch name used for local networking.
#
# local_network_vswitch = private
# Example: local_network_vswitch = custom_vswitch
# (BoolOpt) Enables metrics collections for switch ports by using Hyper-V's
# metric APIs. Collected data can by retrieved by other apps and services,
# e.g.: Ceilometer. Requires Hyper-V / Windows Server 2012 and above.
#
# enable_metrics_collection = False
#-----------------------------------------------------------------------------
# Sample Configurations.
#-----------------------------------------------------------------------------
#
# Neutron server:
#
# [HYPERV]
# tenant_network_type = vlan
# network_vlan_ranges = default:2000:3999
#
# Agent running on Hyper-V node:
#
# [AGENT]
# polling_interval = 2
# physical_network_vswitch_mappings = *:external
# local_network_vswitch = private

50
etc/neutron/plugins/ibm/sdnve_neutron_plugin.ini

@ -1,50 +0,0 @@
[sdnve]
# (ListOpt) The IP address of one (or more) SDN-VE controllers
# Default value is: controller_ips = 127.0.0.1
# Example: controller_ips = 127.0.0.1,127.0.0.2
# (StrOpt) The integration bridge for OF based implementation
# The default value for integration_bridge is None
# Example: integration_bridge = br-int
# (ListOpt) The interface mapping connecting the integration
# bridge to external network as a list of physical network names and
# interfaces: <physical_network_name>:<interface_name>
# Example: interface_mappings = default:eth2
# (BoolOpt) Used to reset the integration bridge, if exists
# The default value for reset_bridge is True
# Example: reset_bridge = False
# (BoolOpt) Used to set the OVS controller as out-of-band
# The default value for out_of_band is True
# Example: out_of_band = False
#
# (BoolOpt) The fake controller for testing purposes
# Default value is: use_fake_controller = False
# (StrOpt) The port number for use with controller
# The default value for the port is 8443
# Example: port = 8443
# (StrOpt) The userid for use with controller
# The default value for the userid is admin
# Example: userid = sdnve_user
# (StrOpt) The password for use with controller
# The default value for the password is admin
# Example: password = sdnve_password
#
# (StrOpt) The default type of tenants (and associated resources)
# Available choices are: OVERLAY or OF
# The default value for tenant type is OVERLAY
# Example: default_tenant_type = OVERLAY
# (StrOpt) The string in tenant description that indicates
# Default value for OF tenants: of_signature = SDNVE-OF
# (StrOpt) The string in tenant description that indicates
# Default value for OVERLAY tenants: overlay_signature = SDNVE-OVERLAY
[sdnve_agent]
# (IntOpt) Agent's polling interval in seconds
# polling_interval = 2
# (StrOpt) What to use for root helper
# The default value: root_helper = 'sudo'
# (BoolOpt) Whether to use rpc or not
# The default value: rpc = True
[securitygroup]
# The security group is not supported:
# firewall_driver = neutron.agent.firewall.NoopFirewallDriver

78
etc/neutron/plugins/linuxbridge/linuxbridge_conf.ini

@ -1,78 +0,0 @@
[vlans]
# (StrOpt) Type of network to allocate for tenant networks. The
# default value 'local' is useful only for single-box testing and
# provides no connectivity between hosts. You MUST change this to
# 'vlan' and configure network_vlan_ranges below in order for tenant
# networks to provide connectivity between hosts. Set to 'none' to
# disable creation of tenant networks.
#
# tenant_network_type = local
# Example: tenant_network_type = vlan
# (ListOpt) Comma-separated list of
# <physical_network>[:<vlan_min>:<vlan_max>] tuples enumerating ranges
# of VLAN IDs on named physical networks that are available for
# allocation. All physical networks listed are available for flat and
# VLAN provider network creation. Specified ranges of VLAN IDs are
# available for tenant network allocation if tenant_network_type is
# 'vlan'. If empty, only local networks may be created.
#
# network_vlan_ranges =
# Example: network_vlan_ranges = physnet1:1000:2999
[linux_bridge]
# (ListOpt) Comma-separated list of
# <physical_network>:<physical_interface> tuples mapping physical
# network names to the agent's node-specific physical network
# interfaces to be used for flat and VLAN networks. All physical
# networks listed in network_vlan_ranges on the server should have
# mappings to appropriate interfaces on each agent.
#
# physical_interface_mappings =
# Example: physical_interface_mappings = physnet1:eth1
[vxlan]
# (BoolOpt) enable VXLAN on the agent
# VXLAN support can be enabled when agent is managed by ml2 plugin using
# linuxbridge mechanism driver. Useless if set while using linuxbridge plugin.
# enable_vxlan = False
#
# (IntOpt) use specific TTL for vxlan interface protocol packets
# ttl =
#
# (IntOpt) use specific TOS for vxlan interface protocol packets
# tos =
#
# (StrOpt) multicast group to use for broadcast emulation.
# This group must be the same on all the agents.
# vxlan_group = 224.0.0.1
#
# (StrOpt) Local IP address to use for VXLAN endpoints (required)
# local_ip =
#
# (BoolOpt) Flag to enable l2population extension. This option should be used
# in conjunction with ml2 plugin l2population mechanism driver (in that case,
# both linuxbridge and l2population mechanism drivers should be loaded).
# It enables plugin to populate VXLAN forwarding table, in order to limit
# the use of broadcast emulation (multicast will be turned off if kernel and
# iproute2 supports unicast flooding - requires 3.11 kernel and iproute2 3.10)
# l2_population = False
[agent]
# Agent's polling interval in seconds
# polling_interval = 2
# (BoolOpt) Enable server RPC compatibility with old (pre-havana)
# agents.
#
# rpc_support_old_agents = False
# Example: rpc_support_old_agents = True
[securitygroup]
# Firewall driver for realizing neutron security group function
# firewall_driver = neutron.agent.firewall.NoopFirewallDriver
# Example: firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
# Controls if neutron security group is enabled or not.
# It should be false when you use nova security group.
# enable_security_group = True

31
etc/neutron/plugins/metaplugin/metaplugin.ini

@ -1,31 +0,0 @@
# Config file for Metaplugin
[meta]
# Comma separated list of flavor:neutron_plugin for plugins to load.
# Extension method is searched in the list order and the first one is used.
plugin_list = 'ml2:neutron.plugins.ml2.plugin.Ml2Plugin,nvp:neutron.plugins.vmware.plugin.NsxPluginV2'
# Comma separated list of flavor:neutron_plugin for L3 service plugins
# to load.
# This is intended for specifying L2 plugins which support L3 functions.
# If you use a router service plugin, set this blank.
l3_plugin_list =
# Default flavor to use, when flavor:network is not specified at network
# creation.
default_flavor = 'nvp'
# Default L3 flavor to use, when flavor:router is not specified at router
# creation.
# Ignored if 'l3_plugin_list' is blank.
default_l3_flavor =
# Comma separated list of supported extension aliases.
supported_extension_aliases = 'provider,binding,agent,dhcp_agent_scheduler'
# Comma separated list of method:flavor to select specific plugin for a method.
# This has priority over method search order based on 'plugin_list'.
extension_map = 'get_port_stats:nvp'
# Specifies flavor for plugin to handle 'q-plugin' RPC requests.
rpc_flavor = 'ml2'

19
etc/neutron/plugins/midonet/midonet.ini

@ -1,19 +0,0 @@
[midonet]
# MidoNet API server URI
# midonet_uri = http://localhost:8080/midonet-api
# MidoNet admin username
# username = admin
# MidoNet admin password
# password = passw0rd
# ID of the project that MidoNet admin user belongs to
# project_id = 77777777-7777-7777-7777-777777777777
# Virtual provider router ID
# provider_router_id = 00112233-0011-0011-0011-001122334455
# Path to midonet host uuid file
# midonet_host_uuid_path = /etc/midolman/host_uuid.properties

62
etc/neutron/plugins/ml2/ml2_conf.ini

@ -1,62 +0,0 @@
[ml2]
# (ListOpt) List of network type driver entrypoints to be loaded from
# the neutron.ml2.type_drivers namespace.
#
# type_drivers = local,flat,vlan,gre,vxlan
# Example: type_drivers = flat,vlan,gre,vxlan
# (ListOpt) Ordered list of network_types to allocate as tenant
# networks. The default value 'local' is useful for single-box testing
# but provides no connectivity between hosts.
#
# tenant_network_types = local
# Example: tenant_network_types = vlan,gre,vxlan
# (ListOpt) Ordered list of networking mechanism driver entrypoints
# to be loaded from the neutron.ml2.mechanism_drivers namespace.
# mechanism_drivers =
# Example: mechanism_drivers = openvswitch,mlnx
# Example: mechanism_drivers = arista
# Example: mechanism_drivers = cisco,logger
# Example: mechanism_drivers = openvswitch,brocade
# Example: mechanism_drivers = linuxbridge,brocade
[ml2_type_flat]
# (ListOpt) List of physical_network names with which flat networks
# can be created. Use * to allow flat networks with arbitrary
# physical_network names.
#
# flat_networks =
# Example: flat_networks = physnet1,physnet2
# Example: flat_networks = *
[ml2_type_vlan]
# (ListOpt) List of <physical_network>[:<vlan_min>:<vlan_max>] tuples
# specifying physical_network names usable for VLAN provider and
# tenant networks, as well as ranges of VLAN tags on each
# physical_network available for allocation as tenant networks.
#
# network_vlan_ranges =
# Example: network_vlan_ranges = physnet1:1000:2999,physnet2
[ml2_type_gre]
# (ListOpt) Comma-separated list of <tun_min>:<tun_max> tuples enumerating ranges of GRE tunnel IDs that are available for tenant network allocation
# tunnel_id_ranges =
[ml2_type_vxlan]
# (ListOpt) Comma-separated list of <vni_min>:<vni_max> tuples enumerating
# ranges of VXLAN VNI IDs that are available for tenant network allocation.
#
# vni_ranges =
# (StrOpt) Multicast group for the VXLAN interface. When configured, will
# enable sending all broadcast traffic to this multicast group. When left
# unconfigured, will disable multicast VXLAN mode.
#
# vxlan_group =
# Example: vxlan_group = 239.1.1.1
[securitygroup]
# Controls if neutron security group is enabled or not.
# It should be false when you use nova security group.
# enable_security_group = True

45
etc/neutron/plugins/ml2/ml2_conf_arista.ini

@ -1,45 +0,0 @@
# Defines configuration options specific for Arista ML2 Mechanism driver
[ml2_arista]
# (StrOpt) EOS IP address. This is required field. If not set, all
# communications to Arista EOS will fail
#
# eapi_host =
# Example: eapi_host = 192.168.0.1
#
# (StrOpt) EOS command API username. This is required field.
# if not set, all communications to Arista EOS will fail.
#
# eapi_username =
# Example: arista_eapi_username = admin
#
# (StrOpt) EOS command API password. This is required field.
# if not set, all communications to Arista EOS will fail.
#
# eapi_password =
# Example: eapi_password = my_password
#
# (StrOpt) Defines if hostnames are sent to Arista EOS as FQDNs
# ("node1.domain.com") or as short names ("node1"). This is
# optional. If not set, a value of "True" is assumed.
#
# use_fqdn =
# Example: use_fqdn = True
#
# (IntOpt) Sync interval in seconds between Neutron plugin and EOS.
# This field defines how often the synchronization is performed.
# This is an optional field. If not set, a value of 180 seconds
# is assumed.
#
# sync_interval =
# Example: sync_interval = 60
#
# (StrOpt) Defines Region Name that is assigned to this OpenStack Controller.
# This is useful when multiple OpenStack/Neutron controllers are
# managing the same Arista HW clusters. Note that this name must
# match with the region name registered (or known) to keystone
# service. Authentication with Keystone is performed by EOS.
# This is optional. If not set, a value of "RegionOne" is assumed.
#
# region_name =
# Example: region_name = RegionOne

13
etc/neutron/plugins/ml2/ml2_conf_brocade.ini

@ -1,13 +0,0 @@
[ml2_brocade]
# username = <mgmt admin username>
# password = <mgmt admin password>
# address = <switch mgmt ip address>
# ostype = NOS
# physical_networks = physnet1,physnet2
#
# Example:
# username = admin
# password = password
# address = 10.24.84.38
# ostype = NOS
# physical_networks = physnet1,physnet2

94
etc/neutron/plugins/ml2/ml2_conf_cisco.ini

@ -1,94 +0,0 @@
[ml2_cisco]
# (StrOpt) A short prefix to prepend to the VLAN number when creating a
# VLAN interface. For example, if an interface is being created for
# VLAN 2001 it will be named 'q-2001' using the default prefix.
#
# vlan_name_prefix = q-
# Example: vlan_name_prefix = vnet-
# (BoolOpt) A flag to enable round robin scheduling of routers for SVI.
# svi_round_robin = False
#
# (StrOpt) The name of the physical_network managed via the Cisco Nexus Switch.
# This string value must be present in the ml2_conf.ini network_vlan_ranges
# variable.
#
# managed_physical_network =
# Example: managed_physical_network = physnet1
# Cisco Nexus Switch configurations.
# Each switch to be managed by Openstack Neutron must be configured here.
#
# Cisco Nexus Switch Format.
# [ml2_mech_cisco_nexus:<IP address of switch>]
# <hostname>=<intf_type:port> (1)
# ssh_port=<ssh port> (2)
# username=<credential username> (3)
# password=<credential password> (4)
#
# (1) For each host connected to a port on the switch, specify the hostname
# and the Nexus physical port (interface) it is connected to.
# Valid intf_type's are 'ethernet' and 'port-channel'.
# The default setting for <intf_type:> is 'ethernet' and need not be
# added to this setting.
# (2) The TCP port for connecting via SSH to manage the switch. This is
# port number 22 unless the switch has been configured otherwise.
# (3) The username for logging into the switch to manage it.
# (4) The password for logging into the switch to manage it.
#
# Example:
# [ml2_mech_cisco_nexus:1.1.1.1]
# compute1=1/1
# compute2=ethernet:1/2
# compute3=port-channel:1
# ssh_port=22
# username=admin
# password=mySecretPassword
[ml2_cisco_apic]
# Hostname for the APIC controller
# apic_host=1.1.1.1
# Username for the APIC controller
# apic_username=user
# Password for the APIC controller
# apic_password=password
# Port for the APIC Controller
# apic_port=80
# Names for APIC objects used by Neutron
# Note: When deploying multiple clouds against one APIC,
# these names must be unique between the clouds.
# apic_vmm_domain=openstack
# apic_vlan_ns_name=openstack_ns
# apic_node_profile=openstack_profile
# apic_entity_profile=openstack_entity
# apic_function_profile=openstack_function
# The following flag will cause all the node profiles on the APIC to
# be cleared when neutron-server starts. This is typically used only
# for test environments that require clean-slate startup conditions.
# apic_clear_node_profiles=False
# Specify your network topology.
# This section indicates how your compute nodes are connected to the fabric's
# switches and ports. The format is as follows:
#
# [switch:<switch_id_from_the_apic>]
# <compute_host>,<compute_host>=<switchport_the_host(s)_are_connected_to>
#
# You can have multiple sections, one for each switch in your fabric that is
# participating in Openstack. e.g.
#
# [switch:17]
# ubuntu,ubuntu1=1/10
# ubuntu2,ubuntu3=1/11
#
# [switch:18]
# ubuntu5,ubuntu6=1/1
# ubuntu7,ubuntu8=1/2

52
etc/neutron/plugins/ml2/ml2_conf_fslsdn.ini

@ -1,52 +0,0 @@
# Defines Configuration options for FSL SDN OS Mechanism Driver
# Cloud Resource Discovery (CRD) authorization credentials
[ml2_fslsdn]
#(StrOpt) User name for authentication to CRD.
# e.g.: user12
#
# crd_user_name =
#(StrOpt) Password for authentication to CRD.
# e.g.: secret
#
# crd_password =
#(StrOpt) Tenant name for CRD service.
# e.g.: service
#
# crd_tenant_name =
#(StrOpt) CRD auth URL.
# e.g.: http://127.0.0.1:5000/v2.0/
#
# crd_auth_url =
#(StrOpt) URL for connecting to CRD Service.
# e.g.: http://127.0.0.1:9797
#
# crd_url=
#(IntOpt) Timeout value for connecting to CRD service
# in seconds, e.g.: 30
#
# crd_url_timeout=
#(StrOpt) Region name for connecting to CRD in
# admin context, e.g.: RegionOne
#
# crd_region_name=
#(BoolOpt)If set, ignore any SSL validation issues (boolean value)
# e.g.: False
#
# crd_api_insecure=
#(StrOpt)Authorization strategy for connecting to CRD in admin
# context, e.g.: keystone
#
# crd_auth_strategy=
#(StrOpt)Location of CA certificates file to use for CRD client
# requests.
#
# crd_ca_certificates_file=

6
etc/neutron/plugins/ml2/ml2_conf_mlnx.ini

@ -1,6 +0,0 @@
[eswitch]
# (StrOpt) Type of Network Interface to allocate for VM:
# mlnx_direct or hostdev according to libvirt terminology
# vnic_type = mlnx_direct
# (BoolOpt) Enable server compatibility with old nova
# apply_profile_patch = False

28
etc/neutron/plugins/ml2/ml2_conf_ncs.ini

@ -1,28 +0,0 @@
# Defines configuration options specific to the Tail-f NCS Mechanism Driver
[ml2_ncs]
# (StrOpt) Tail-f NCS HTTP endpoint for REST access to the OpenStack
# subtree.
# If this is not set then no HTTP requests will be made.
#
# url =
# Example: url = http://ncs/api/running/services/openstack
# (StrOpt) Username for HTTP basic authentication to NCS.
# This is an optional parameter. If unspecified then no authentication is used.
#
# username =
# Example: username = admin
# (StrOpt) Password for HTTP basic authentication to NCS.
# This is an optional parameter. If unspecified then no authentication is used.
#
# password =
# Example: password = admin
# (IntOpt) Timeout in seconds to wait for NCS HTTP request completion.
# This is an optional parameter, default value is 10 seconds.
#
# timeout =
# Example: timeout = 15

30
etc/neutron/plugins/ml2/ml2_conf_odl.ini

@ -1,30 +0,0 @@
# Configuration for the OpenDaylight MechanismDriver
[ml2_odl]
# (StrOpt) OpenDaylight REST URL
# If this is not set then no HTTP requests will be made.
#
# url =
# Example: url = http://192.168.56.1:8080/controller/nb/v2/neutron
# (StrOpt) Username for HTTP basic authentication to ODL.
#
# username =
# Example: username = admin
# (StrOpt) Password for HTTP basic authentication to ODL.
#
# password =
# Example: password = admin
# (IntOpt) Timeout in seconds to wait for ODL HTTP request completion.
# This is an optional parameter, default value is 10 seconds.
#
# timeout = 10
# Example: timeout = 15
# (IntOpt) Timeout in minutes to wait for a Tomcat session timeout.
# This is an optional parameter, default value is 30 minutes.
#
# session_timeout = 30
# Example: session_timeout = 60

13
etc/neutron/plugins/ml2/ml2_conf_ofa.ini

@ -1,13 +0,0 @@
# Defines configuration options specific to the OpenFlow Agent Mechanism Driver
[ovs]
# Please refer to configuration options to the OpenvSwitch
[agent]
# (IntOpt) Number of seconds to retry acquiring an Open vSwitch datapath.
# This is an optional parameter, default value is 60 seconds.
#
# get_datapath_retry_times =
# Example: get_datapath_retry_times = 30
# Please refer to configuration options to the OpenvSwitch else the above.

79
etc/neutron/plugins/mlnx/mlnx_conf.ini

@ -1,79 +0,0 @@
[mlnx]
# (StrOpt) Type of network to allocate for tenant networks. The
# default value is 'vlan'. You MUST configure network_vlan_ranges below
# in order for tenant networks to provide connectivity between hosts.
# Set to 'none' to disable creation of tenant networks.
#
# tenant_network_type = vlan
# Example: tenant_network_type = vlan
# (ListOpt) Comma-separated list of
# <physical_network>[:<vlan_min>:<vlan_max>] tuples enumerating ranges
# of VLAN IDs on named physical networks that are available for
# allocation. All physical networks listed are available for flat and
# VLAN provider network creation. Specified ranges of VLAN IDs are
# available for tenant network allocation if tenant_network_type is
# 'vlan'. If empty, only local networks may be created.
#
# network_vlan_ranges =
# Example: network_vlan_ranges = default:1:100
# (ListOpt) Comma-separated list of
# <physical_network>:<physical_network_type> tuples mapping physical
# network names to physical network types. All physical
# networks listed in network_vlan_ranges should have
# mappings to appropriate physical network type.
# Type of the physical network can be either eth (Ethernet) or
# ib (InfiniBand). If empty, physical network eth type is assumed.
#
# physical_network_type_mappings =
# Example: physical_network_type_mappings = default:eth
# (StrOpt) Type of the physical network, can be either 'eth' or 'ib'
# The default value is 'eth'
# physical_network_type = eth
[eswitch]
# (ListOpt) Comma-separated list of
# <physical_network>:<physical_interface> tuples mapping physical
# network names to the agent's node-specific physical network
# interfaces to be used for flat and VLAN networks. All physical
# networks listed in network_vlan_ranges on the server should have
# mappings to appropriate interfaces on each agent.
#
# physical_interface_mappings =
# Example: physical_interface_mappings = default:eth2
# (StrOpt) Type of Network Interface to allocate for VM:
# direct or hostdev according to libvirt terminology
# vnic_type = mlnx_direct
# (StrOpt) Eswitch daemon end point connection url
# daemon_endpoint = 'tcp://127.0.0.1:60001'
# The number of milliseconds the agent will wait for
# response on request to daemon
# request_timeout = 3000
# The number of retries the agent will send request
# to daemon before giving up
# retries = 3
# The backoff rate multiplier for waiting period between retries
# on request to daemon, i.e. value of 2 will double
# the request timeout each retry
# backoff_rate = 2
[agent]
# Agent's polling interval in seconds
# polling_interval = 2
# (BoolOpt) Enable server RPC compatibility with old (pre-havana)
# agents.
#
# rpc_support_old_agents = False
[securitygroup]
# Controls if neutron security group is enabled or not.
# It should be false when you use nova security group.
# enable_security_group = True

60
etc/neutron/plugins/nec/nec.ini

@ -1,60 +0,0 @@
# Sample Configurations
[ovs]
# Do not change this parameter unless you have a good reason to.
# This is the name of the OVS integration bridge. There is one per hypervisor.
# The integration bridge acts as a virtual "patch port". All VM VIFs are
# attached to this bridge and then "patched" according to their network
# connectivity.
# integration_bridge = br-int
[agent]
# Agent's polling interval in seconds
# polling_interval = 2
[securitygroup]
# Firewall driver for realizing neutron security group function
firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
# Controls if neutron security group is enabled or not.
# It should be false when you use nova security group.
# enable_security_group = True
[ofc]
# Specify OpenFlow Controller Host, Port and Driver to connect.
# host = 127.0.0.1
# port = 8888
# Base URL of OpenFlow Controller REST API.
# It is prepended to a path of each API request.
# path_prefix =
# Drivers are in neutron/plugins/nec/drivers/ .
# driver = trema
# PacketFilter is available when it's enabled in this configuration
# and supported by the driver.
# enable_packet_filter = true
# Use SSL to connect
# use_ssl = false
# Key file
# key_file =
# Certificate file
# cert_file =
# Disable SSL certificate verification
# insecure_ssl = false
# Maximum attempts per OFC API request. NEC plugin retries
# API request to OFC when OFC returns ServiceUnavailable (503).
# The value must be greater than 0.
# api_max_attempts = 3
[provider]
# Default router provider to use.
# default_router_provider = l3-agent
# List of enabled router providers.
# router_providers = l3-agent,openflow

10
etc/neutron/plugins/nuage/nuage_plugin.ini

@ -1,10 +0,0 @@
# Please fill in the correct data for all the keys below and uncomment key-value pairs
[restproxy]
#default_net_partition_name = <default-net-partition-name>
#auth_resource = /auth
#server = ip:port
#organization = org
#serverauth = uname:pass
#serverssl = True
#base_uri = /base

35
etc/neutron/plugins/oneconvergence/nvsdplugin.ini

@ -1,35 +0,0 @@
[nvsd]
# Configure the NVSD controller. The plugin proxies the api calls
# to the NVSD controller, which implements the required functionality.
# IP address of NVSD controller api server
# nvsd_ip = <ip address of nvsd controller>
# Port number of NVSD controller api server
# nvsd_port = 8082
# Authentication credentials to access the api server
# nvsd_user = <nvsd controller username>