update files under etc for tacker

Change-Id: Ic11cfd8786588d91cec7ba135c16da95d95ddffc
Isaku Yamahata 2014-06-26 17:36:47 +09:00
parent e940f1e597
commit 91ce4886a9
54 changed files with 147 additions and 2209 deletions


@ -1,30 +0,0 @@
[composite:neutron]
use = egg:Paste#urlmap
/: neutronversions
/v2.0: neutronapi_v2_0
[composite:neutronapi_v2_0]
use = call:neutron.auth:pipeline_factory
noauth = request_id catch_errors extensions neutronapiapp_v2_0
keystone = request_id catch_errors authtoken keystonecontext extensions neutronapiapp_v2_0
[filter:request_id]
paste.filter_factory = neutron.openstack.common.middleware.request_id:RequestIdMiddleware.factory
[filter:catch_errors]
paste.filter_factory = neutron.openstack.common.middleware.catch_errors:CatchErrorsMiddleware.factory
[filter:keystonecontext]
paste.filter_factory = neutron.auth:NeutronKeystoneContext.factory
[filter:authtoken]
paste.filter_factory = keystoneclient.middleware.auth_token:filter_factory
[filter:extensions]
paste.filter_factory = neutron.api.extensions:plugin_aware_extension_middleware_factory
[app:neutronversions]
paste.app_factory = neutron.api.versions:Versions.factory
[app:neutronapiapp_v2_0]
paste.app_factory = neutron.api.v2.router:APIRouter.factory
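The composite sections above only define the two possible pipelines; which one is actually used is selected at runtime by neutron.auth:pipeline_factory from the server's auth_strategy option, not by editing this file. A minimal hedged check, assuming the standard /etc/neutron/neutron.conf location:

# auth_strategy should name one of the composite keys above ("keystone" or "noauth")
grep -E '^[[:space:]]*auth_strategy[[:space:]]*=' /etc/neutron/neutron.conf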


@ -1,88 +0,0 @@
[DEFAULT]
# Show debugging output in log (sets DEBUG log level output)
# debug = False
# The DHCP agent will resync its state with Neutron to recover from any
# transient notification or rpc errors. The interval is number of
# seconds between attempts.
# resync_interval = 5
# The DHCP agent requires an interface driver be set. Choose the one that best
# matches your plugin.
# interface_driver =
# Example of interface_driver option for OVS based plugins (OVS, Ryu, NEC, NVP,
# BigSwitch/Floodlight)
# interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
# Name of Open vSwitch bridge to use
# ovs_integration_bridge = br-int
# Use veth for an OVS interface or not.
# Support kernels with limited namespace support
# (e.g. RHEL 6.5) so long as ovs_use_veth is set to True.
# ovs_use_veth = False
# Example of interface_driver option for LinuxBridge
# interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver
# The agent can use other DHCP drivers. Dnsmasq is the simplest and requires
# no additional setup of the DHCP server.
# dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
# Allow overlapping IP (Must have kernel built with CONFIG_NET_NS=y and
# iproute2 package that supports namespaces).
# use_namespaces = True
# The DHCP server can assist with providing metadata support on isolated
# networks. Setting this value to True will cause the DHCP server to append
# specific host routes to the DHCP request. The metadata service will only
# be activated when the subnet does not contain any router port. The guest
# instance must be configured to request host routes via DHCP (Option 121).
# enable_isolated_metadata = False
# Allows for serving metadata requests coming from a dedicated metadata
# access network whose cidr is 169.254.169.254/16 (or larger prefix), and
# is connected to a Neutron router from which the VMs send metadata
# request. In this case DHCP Option 121 will not be injected in VMs, as
# they will be able to reach 169.254.169.254 through a router.
# This option requires enable_isolated_metadata = True
# enable_metadata_network = False
# Number of threads to use during sync process. Should not exceed connection
# pool size configured on server.
# num_sync_threads = 4
# Location to store DHCP server config files
# dhcp_confs = $state_path/dhcp
# Domain to use for building the hostnames
# dhcp_domain = openstacklocal
# Override the default dnsmasq settings with this file
# dnsmasq_config_file =
# Comma-separated list of DNS servers which will be used by dnsmasq
# as forwarders.
# dnsmasq_dns_servers =
# Limit number of leases to prevent a denial-of-service.
# dnsmasq_lease_max = 16777216
# Location to DHCP lease relay UNIX domain socket
# dhcp_lease_relay_socket = $state_path/dhcp/lease_relay
# Location of Metadata Proxy UNIX domain socket
# metadata_proxy_socket = $state_path/metadata_proxy
# dhcp_delete_namespaces, which is false by default, can be set to True if
# namespaces can be deleted cleanly on the host running the dhcp agent.
# Do not enable this until you understand the problem with the Linux iproute
# utility mentioned in https://bugs.launchpad.net/neutron/+bug/1052535 and
# you are sure that your version of iproute does not suffer from the problem.
# If True, namespaces will be deleted when a dhcp server is disabled.
# dhcp_delete_namespaces = False
# Timeout for ovs-vsctl commands.
# If the timeout expires, ovs commands will fail with ALARMCLOCK error.
# ovs_vsctl_timeout = 10
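For context, the options above are read by the DHCP agent alongside the main configuration file. A typical invocation, assuming the default /etc/neutron paths, is sketched below:

# Hedged example: start the DHCP agent with the config file shown above
neutron-dhcp-agent --config-file /etc/neutron/neutron.conf \
                   --config-file /etc/neutron/dhcp_agent.ini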


@ -1,3 +0,0 @@
[fwaas]
#driver = neutron.services.firewall.drivers.linux.iptables_fwaas.IptablesFwaasDriver
#enabled = True

etc/init.d/neutron-server → etc/init.d/tacker-server Executable file → Normal file

@ -1,36 +1,36 @@
#! /bin/sh
### BEGIN INIT INFO
# Provides: neutron-server
# Provides: tacker-server
# Required-Start: $remote_fs $syslog
# Required-Stop: $remote_fs $syslog
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: neutron-server
# Description: Provides the Neutron networking service
# Short-Description: tacker-server
# Description: Provides the Tacker servicevm/device manager service
### END INIT INFO
set -e
PIDFILE=/var/run/neutron/neutron-server.pid
LOGFILE=/var/log/neutron/neutron-server.log
PIDFILE=/var/run/tacker/tacker-server.pid
LOGFILE=/var/log/tacker/tacker-server.log
DAEMON=/usr/bin/neutron-server
DAEMON=/usr/bin/tacker-server
DAEMON_ARGS="--log-file=$LOGFILE"
DAEMON_DIR=/var/run
ENABLED=true
if test -f /etc/default/neutron-server; then
. /etc/default/neutron-server
if test -f /etc/default/tacker-server; then
. /etc/default/tacker-server
fi
mkdir -p /var/run/neutron
mkdir -p /var/log/neutron
mkdir -p /var/run/tacker
mkdir -p /var/log/tacker
. /lib/lsb/init-functions
export PATH="${PATH:+$PATH:}/usr/sbin:/sbin"
export TMPDIR=/var/lib/neutron/tmp
export TMPDIR=/var/lib/tacker/tmp
if [ ! -x ${DAEMON} ] ; then
exit 0
@ -39,13 +39,13 @@ fi
case "$1" in
start)
test "$ENABLED" = "true" || exit 0
log_daemon_msg "Starting neutron server" "neutron-server"
log_daemon_msg "Starting tacker server" "tacker-server"
start-stop-daemon -Sbmv --pidfile $PIDFILE --chdir $DAEMON_DIR --exec $DAEMON -- $DAEMON_ARGS
log_end_msg $?
;;
stop)
test "$ENABLED" = "true" || exit 0
log_daemon_msg "Stopping neutron server" "neutron-server"
log_daemon_msg "Stopping tacker server" "tacker-server"
start-stop-daemon --stop --oknodo --pidfile ${PIDFILE}
log_end_msg $?
;;
@ -57,10 +57,10 @@ case "$1" in
;;
status)
test "$ENABLED" = "true" || exit 0
status_of_proc -p $PIDFILE $DAEMON neutron-server && exit 0 || exit $?
status_of_proc -p $PIDFILE $DAEMON tacker-server && exit 0 || exit $?
;;
*)
log_action_msg "Usage: /etc/init.d/neutron-server {start|stop|restart|force-reload|status}"
log_action_msg "Usage: /etc/init.d/tacker-server {start|stop|restart|force-reload|status}"
exit 1
;;
esac
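A short usage sketch for the renamed script, assuming it is installed as /etc/init.d/tacker-server on a Debian-style host and that /usr/bin/tacker-server exists (otherwise the script exits immediately, as shown above); the update-rc.d registration is an assumption, not part of this change:

# Register the service via its LSB headers, then drive it manually
sudo update-rc.d tacker-server defaults
sudo service tacker-server start
sudo service tacker-server status
sudo service tacker-server stop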


@ -1,79 +0,0 @@
[DEFAULT]
# Show debugging output in log (sets DEBUG log level output)
# debug = False
# L3 requires that an interface driver be set. Choose the one that best
# matches your plugin.
# interface_driver =
# Example of interface_driver option for OVS based plugins (OVS, Ryu, NEC)
# that supports L3 agent
# interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
# Use veth for an OVS interface or not.
# Support kernels with limited namespace support
# (e.g. RHEL 6.5) so long as ovs_use_veth is set to True.
# ovs_use_veth = False
# Example of interface_driver option for LinuxBridge
# interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver
# Allow overlapping IP (Must have kernel built with CONFIG_NET_NS=y and
# iproute2 package that supports namespaces).
# use_namespaces = True
# If use_namespaces is set as False then the agent can only configure one router.
# This is done by setting the specific router_id.
# router_id =
# When external_network_bridge is set, each L3 agent can be associated
# with no more than one external network. This value should be set to the UUID
# of that external network. To allow the L3 agent to support multiple external
# networks, both the external_network_bridge and gateway_external_network_id
# must be left empty.
# gateway_external_network_id =
# Indicates that this L3 agent should also handle routers that do not have
# an external network gateway configured. This option should be True only
# for a single agent in a Neutron deployment, and may be False for all agents
# if all routers must have an external network gateway
# handle_internal_only_routers = True
# Name of bridge used for external network traffic. This should be set to an
# empty value for the Linux bridge. When this parameter is set, each L3 agent
# can be associated with no more than one external network.
# external_network_bridge = br-ex
# TCP Port used by Neutron metadata server
# metadata_port = 9697
# Send this many gratuitous ARPs for HA setup. Set it below or equal to 0
# to disable this feature.
# send_arp_for_ha = 3
# seconds between re-sync routers' data if needed
# periodic_interval = 40
# seconds to start to sync routers' data after
# starting agent
# periodic_fuzzy_delay = 5
# enable_metadata_proxy, which is true by default, can be set to False
# if the Nova metadata server is not available
# enable_metadata_proxy = True
# Location of Metadata Proxy UNIX domain socket
# metadata_proxy_socket = $state_path/metadata_proxy
# router_delete_namespaces, which is false by default, can be set to True if
# namespaces can be deleted cleanly on the host running the L3 agent.
# Do not enable this until you understand the problem with the Linux iproute
# utility mentioned in https://bugs.launchpad.net/neutron/+bug/1052535 and
# you are sure that your version of iproute does not suffer from the problem.
# If True, namespaces will be deleted when a router is destroyed.
# router_delete_namespaces = False
# Timeout for ovs-vsctl commands.
# If the timeout expires, ovs commands will fail with ALARMCLOCK error.
# ovs_vsctl_timeout = 10


@ -1,42 +0,0 @@
[DEFAULT]
# Show debugging output in log (sets DEBUG log level output).
# debug = False
# The LBaaS agent will resync its state with Neutron to recover from any
# transient notification or rpc errors. The interval is number of
# seconds between attempts.
# periodic_interval = 10
# LBaas requires an interface driver be set. Choose the one that best
# matches your plugin.
# interface_driver =
# Example of interface_driver option for OVS based plugins (OVS, Ryu, NEC, NVP,
# BigSwitch/Floodlight)
# interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
# Use veth for an OVS interface or not.
# Support kernels with limited namespace support
# (e.g. RHEL 6.5) so long as ovs_use_veth is set to True.
# ovs_use_veth = False
# Example of interface_driver option for LinuxBridge
# interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver
# The agent requires drivers to manage the loadbalancer. HAProxy is the open-source version.
# Multiple device drivers reflecting different service providers could be specified:
# device_driver = path.to.provider1.driver.Driver
# device_driver = path.to.provider2.driver.Driver
# Default is:
# device_driver = neutron.services.loadbalancer.drivers.haproxy.namespace_driver.HaproxyNSDriver
[haproxy]
# Location to store config and state files
# loadbalancer_state_path = $state_path/lbaas
# The user group
# user_group = nogroup
# When deleting and re-adding the same vip, send this many gratuitous ARPs to flush
# the ARP cache in the Router. Set it below or equal to 0 to disable this feature.
# send_gratuitous_arp = 3


@ -1,59 +0,0 @@
[DEFAULT]
# Show debugging output in log (sets DEBUG log level output)
# debug = True
# The Neutron user information for accessing the Neutron API.
auth_url = http://localhost:5000/v2.0
auth_region = RegionOne
# Turn off verification of the certificate for ssl
# auth_insecure = False
# Certificate Authority public key (CA cert) file for ssl
# auth_ca_cert =
admin_tenant_name = %SERVICE_TENANT_NAME%
admin_user = %SERVICE_USER%
admin_password = %SERVICE_PASSWORD%
# Network service endpoint type to pull from the keystone catalog
# endpoint_type = adminURL
# IP address used by Nova metadata server
# nova_metadata_ip = 127.0.0.1
# TCP Port used by Nova metadata server
# nova_metadata_port = 8775
# Which protocol to use for requests to Nova metadata server, http or https
# nova_metadata_protocol = http
# Whether insecure SSL connection should be accepted for Nova metadata server
# requests
# nova_metadata_insecure = False
# Client certificate for nova api, needed when nova api requires client
# certificates
# nova_client_cert =
# Private key for nova client certificate
# nova_client_priv_key =
# When proxying metadata requests, Neutron signs the Instance-ID header with a
# shared secret to prevent spoofing. You may select any string for a secret,
# but it must match here and in the configuration used by the Nova Metadata
# Server. NOTE: Nova uses a different key: neutron_metadata_proxy_shared_secret
# metadata_proxy_shared_secret =
# Location of Metadata Proxy UNIX domain socket
# metadata_proxy_socket = $state_path/metadata_proxy
# Number of separate worker processes for metadata server. Defaults to
# half the number of CPU cores
# metadata_workers =
# Number of backlog requests to configure the metadata server socket with
# metadata_backlog = 4096
# URL to connect to the cache backend.
# default_ttl=0 parameter will cause cache entries to never expire.
# Otherwise default_ttl specifies time in seconds a cache entry is valid for.
# No cache is used in case no value is passed.
# cache_url = memory://?default_ttl=5
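As the NOTE above says, the proxy shared secret must be identical on the Neutron and Nova sides even though the option names differ. A hedged sketch of keeping them in sync, assuming the crudini helper and the default config paths (option names are taken from the comments above):

# Generate one secret and write it under both option names
SECRET=$(openssl rand -hex 16)
sudo crudini --set /etc/neutron/metadata_agent.ini DEFAULT metadata_proxy_shared_secret "$SECRET"
sudo crudini --set /etc/nova/nova.conf DEFAULT neutron_metadata_proxy_shared_secret "$SECRET"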


@ -1,15 +0,0 @@
[DEFAULT]
# Show debugging output in log (sets DEBUG log level output)
# debug = True
# driver = neutron.services.metering.drivers.iptables.iptables_driver.IptablesMeteringDriver
# Interval between two metering measures
# measure_interval = 30
# Interval between two metering reports
# report_interval = 300
# interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
# use_namespaces = True


@ -1,114 +0,0 @@
# Config file for neutron-proxy-plugin.
[restproxy]
# All configuration for this plugin is in section '[restproxy]'
#
# The following parameters are supported:
# servers : <host:port>[,<host:port>]* (Error if not set)
# server_auth : <username:password> (default: no auth)
# server_ssl : True | False (default: True)
# ssl_cert_directory : <path> (default: /etc/neutron/plugins/bigswitch/ssl)
# no_ssl_validation : True | False (default: False)
# ssl_sticky : True | False (default: True)
# sync_data : True | False (default: False)
# auto_sync_on_failure : True | False (default: True)
# consistency_interval : <integer> (default: 60 seconds)
# server_timeout : <integer> (default: 10 seconds)
# neutron_id : <string> (default: neutron-<hostname>)
# add_meta_server_route : True | False (default: True)
# thread_pool_size : <int> (default: 4)
# A comma separated list of BigSwitch or Floodlight servers and port numbers. The plugin proxies the requests to the BigSwitch/Floodlight server, which performs the networking configuration. Note that only one server is needed per deployment, but you may wish to deploy multiple servers to support failover.
servers=localhost:8080
# The username and password for authenticating against the BigSwitch or Floodlight controller.
# server_auth=username:password
# Use SSL when connecting to the BigSwitch or Floodlight controller.
# server_ssl=True
# Directory which contains the ca_certs and host_certs to be used to validate
# controller certificates.
# ssl_cert_directory=/etc/neutron/plugins/bigswitch/ssl/
# If a certificate does not exist for a controller, trust and store the first
# certificate received for that controller and use it to validate future
# connections to that controller.
# ssl_sticky=True
# Do not validate the controller certificates for SSL
# Warning: This will not provide protection against man-in-the-middle attacks
# no_ssl_validation=False
# Sync data on connect
# sync_data=False
# If neutron fails to create a resource because the backend controller
# doesn't know of a dependency, automatically trigger a full data
# synchronization to the controller.
# auto_sync_on_failure=True
# Time between verifications that the backend controller
# database is consistent with Neutron. (0 to disable)
# consistency_interval = 60
# Maximum number of seconds to wait for proxy request to connect and complete.
# server_timeout=10
# User defined identifier for this Neutron deployment
# neutron_id =
# Flag to decide if a route to the metadata server should be injected into the VM
# add_meta_server_route = True
# Number of threads to use to handle large volumes of port creation requests
# thread_pool_size = 4
[nova]
# Specify the VIF_TYPE that will be controlled on the Nova compute instances
# options: ivs or ovs
# default: ovs
# vif_type = ovs
# Overrides for vif types based on nova compute node host IDs
# Comma separated list of host IDs to fix to a specific VIF type
# The VIF type is taken from the end of the configuration item
# node_override_vif_<vif_type>
# For example, the following would set the VIF type to IVS for
# host-id1 and host-id2
# node_override_vif_ivs=host-id1,host-id2
[router]
# Specify the default router rules installed in newly created tenant routers
# Specify multiple times for multiple rules
# Format is <tenant>:<source>:<destination>:<action>
# Optionally, a comma-separated list of nexthops may be included after <action>
# Use an * to specify default for all tenants
# Default is any any allow for all tenants
# tenant_default_router_rule=*:any:any:permit
# Maximum number of rules that a single router may have
# Default is 200
# max_router_rules=200
[restproxyagent]
# Specify the name of the bridge used on compute nodes
# for attachment.
# Default: br-int
# integration_bridge=br-int
# Change the frequency of polling by the restproxy agent.
# Value is seconds
# Default: 5
# polling_interval=5
# Virtual switch type on the compute node.
# Options: ovs or ivs
# Default: ovs
# virtual_switch_type = ovs
[securitygroup]
# Controls if neutron security group is enabled or not.
# It should be false when you use nova security group.
# enable_security_group = True


@ -1,3 +0,0 @@
Certificates in this folder will be used to
verify signatures for any controllers the plugin
connects to.


@ -1,6 +0,0 @@
Certificates in this folder must match the name of
the controller they are used to authenticate, and must
have a .pem extension.
For example, the certificate for the controller
"192.168.0.1" should be named "192.168.0.1.pem".


@ -1,29 +0,0 @@
[switch]
# username = The SSH username to use
# password = The SSH password to use
# address = The address of the host to SSH to
# ostype = Should be NOS, but is unused otherwise
#
# Example:
# username = admin
# password = password
# address = 10.24.84.38
# ostype = NOS
[physical_interface]
# physical_interface = The network interface to use when creating a port
#
# Example:
# physical_interface = physnet1
[vlans]
# network_vlan_ranges = <physical network name>:nnnn:mmmm
#
# Example:
# network_vlan_ranges = physnet1:1000:2999
[linux_bridge]
# physical_interface_mappings = <physical network name>:<local interface>
#
# Example:
# physical_interface_mappings = physnet1:em1


@ -1,138 +0,0 @@
[cisco_plugins]
# (StrOpt) Period-separated module path to the plugin class to use for
# the Cisco Nexus switches.
#
# nexus_plugin = neutron.plugins.cisco.nexus.cisco_nexus_plugin_v2.NexusPlugin
# (StrOpt) Period-separated module path to the plugin class to use for
# the virtual switches on compute nodes.
#
# vswitch_plugin = neutron.plugins.openvswitch.ovs_neutron_plugin.OVSNeutronPluginV2
[cisco]
# (StrOpt) A short prefix to prepend to the VLAN number when creating a
# VLAN interface. For example, if an interface is being created for
# VLAN 2001 it will be named 'q-2001' using the default prefix.
#
# vlan_name_prefix = q-
# Example: vlan_name_prefix = vnet-
# (StrOpt) A short prefix to prepend to the VLAN number when creating a
# provider VLAN interface. For example, if an interface is being created
# for provider VLAN 3003 it will be named 'p-3003' using the default prefix.
#
# provider_vlan_name_prefix = p-
# Example: provider_vlan_name_prefix = PV-
# (BoolOpt) A flag indicating whether Openstack networking should manage the
# creation and removal of VLAN interfaces for provider networks on the Nexus
# switches. If the flag is set to False then Openstack will not create or
# remove VLAN interfaces for provider networks, and the administrator needs
# to manage these interfaces manually or by external orchestration.
#
# provider_vlan_auto_create = True
# (BoolOpt) A flag indicating whether Openstack networking should manage
# the adding and removing of provider VLANs from trunk ports on the Nexus
# switches. If the flag is set to False then Openstack will not add or
# remove provider VLANs from trunk ports, and the administrator needs to
# manage these operations manually or by external orchestration.
#
# provider_vlan_auto_trunk = True
# (StrOpt) Period-separated module path to the model class to use for
# the Cisco neutron plugin.
#
# model_class = neutron.plugins.cisco.models.virt_phy_sw_v2.VirtualPhysicalSwitchModelV2
# (StrOpt) Period-separated module path to the driver class to use for
# the Cisco Nexus switches.
#
# If no value is configured, a fake driver will be used.
# nexus_driver = neutron.plugins.cisco.test.nexus.fake_nexus_driver.CiscoNEXUSFakeDriver
# With real hardware, use the CiscoNEXUSDriver class:
# nexus_driver = neutron.plugins.cisco.nexus.cisco_nexus_network_driver_v2.CiscoNEXUSDriver
# (BoolOpt) A flag to enable Layer 3 support on the Nexus switches.
# Note: This feature is not supported on all models/versions of Cisco
# Nexus switches. To use this feature, all of the Nexus switches in the
# deployment must support it.
# nexus_l3_enable = False
# (BoolOpt) A flag to enable round robin scheduling of routers for SVI.
# svi_round_robin = False
# Cisco Nexus Switch configurations.
# Each switch to be managed by Openstack Neutron must be configured here.
#
# Cisco Nexus Switch Format.
# [NEXUS_SWITCH:<IP address of switch>]
# <hostname>=<port> (1)
# ssh_port=<ssh port> (2)
# username=<credential username> (3)
# password=<credential password> (4)
#
# (1) For each host connected to a port on the switch, specify the hostname
# and the Nexus physical port (interface) it is connected to.
# (2) The TCP port for connecting via SSH to manage the switch. This is
# port number 22 unless the switch has been configured otherwise.
# (3) The username for logging into the switch to manage it.
# (4) The password for logging into the switch to manage it.
#
# Example:
# [NEXUS_SWITCH:1.1.1.1]
# compute1=1/1
# compute2=1/2
# ssh_port=22
# username=admin
# password=mySecretPassword
#
# N1KV Format.
# [N1KV:<IP address of VSM>]
# username=<credential username>
# password=<credential password>
#
# Example:
# [N1KV:2.2.2.2]
# username=admin
# password=mySecretPassword
[cisco_n1k]
# (StrOpt) Specify the name of the integration bridge to which the VIFs are
# attached.
#
# integration_bridge = br-int
# (StrOpt) Name of the policy profile to be associated with a port when no
# policy profile is specified during port creates.
#
# default_policy_profile =
# Example: default_policy_profile = service_profile
# (StrOpt) Name of the policy profile to be associated with a port owned by
# network node (dhcp, router).
#
# network_node_policy_profile =
# Example: network_node_policy_profile = dhcp_pp
# (StrOpt) Name of the network profile to be associated with a network when no
# network profile is specified during network creates. Admin should pre-create
# a network profile with this name.
#
# default_network_profile =
# Example: default_network_profile = network_pool
# (IntOpt) Time in seconds for which the plugin polls the VSM for updates in
# policy profiles.
#
# poll_duration =
# Example: poll_duration = 180
# (IntOpt) Number of threads to use to make HTTP requests to the VSM.
#
# http_pool_size = 4


@ -1,22 +0,0 @@
[cisco_csr_ipsec]
# Status check interval in seconds, for VPNaaS IPSec connections used on CSR
# status_check_interval = 60
# Cisco CSR management port information for REST access used by VPNaaS
# TODO(pcm): Remove once CSR is integrated in as a Neutron router.
#
# Format is:
# [cisco_csr_rest:<public IP>]
# rest_mgmt = <mgmt port IP>
# tunnel_ip = <tunnel IP>
# username = <user>
# password = <password>
# timeout = <timeout>
#
# where:
# public IP ----- Public IP address of router used with a VPN service (1:1 with CSR)
# tunnel IP ----- Public IP address of the CSR used for the IPSec tunnel
# mgmt port IP -- IP address of CSR for REST API access (not console port)
# user ---------- Username for REST management port access to Cisco CSR
# password ------ Password for REST management port access to Cisco CSR
# timeout ------- REST request timeout to Cisco CSR (optional)


@ -1,41 +0,0 @@
[heleos]
#configure the ESM management address
#in the first version of this plugin, only one ESM can be specified
#Example:
#esm_mgmt=
#configure admin username and password
#admin_username=
#admin_password=
#router image id
#Example:
#router_image=932ce713-e210-3d54-a0a5-518b0b5ee1b0
#mgmt shared security zone id
#defines the shared management security zone. Each tenant can have a private one configured through the ESM
#Example:
#mgmt_id=c0bc9b6c-f110-46cf-bb01-733bfe4b5a1a
#in-band shared security zone id
#defines the shared in-band security zone. Each tenant can have a private one configured through the ESM
#Example:
#inband_id=a6b7999d-3806-4b04-81f6-e0c5c8271afc
#oob-band shared security zone id
#defines the shared out-of-band security zone. Each tenant can have a private one configured through the ESM
#Example:
#oob_id=e7eda5cc-b977-46cb-9c14-cab43c1b7871
#dummy security zone id
#defines the dummy security zone ID. this security zone will be used by the DVAs with no neutron interfaces
#Example:
#dummy_utif_id=d9911310-25fc-4733-a2e0-c0eda024ef08
#resource pool id
#define the shared resource pool. Each tenant can have a private one configured through the ESM
#Example
#resource_pool_id=
#define if the requests have to be executed asynchronously by the plugin or not
#async_requests=


@ -1,63 +0,0 @@
[hyperv]
# (StrOpt) Type of network to allocate for tenant networks. The
# default value 'local' is useful only for single-box testing and
# provides no connectivity between hosts. You MUST either change this
# to 'vlan' and configure network_vlan_ranges below or to 'flat'.
# Set to 'none' to disable creation of tenant networks.
#
# tenant_network_type = local
# Example: tenant_network_type = vlan
# (ListOpt) Comma-separated list of
# <physical_network>[:<vlan_min>:<vlan_max>] tuples enumerating ranges
# of VLAN IDs on named physical networks that are available for
# allocation. All physical networks listed are available for flat and
# VLAN provider network creation. Specified ranges of VLAN IDs are
# available for tenant network allocation if tenant_network_type is
# 'vlan'. If empty, only gre and local networks may be created.
#
# network_vlan_ranges =
# Example: network_vlan_ranges = physnet1:1000:2999
[agent]
# Agent's polling interval in seconds
# polling_interval = 2
# (ListOpt) Comma separated list of <physical_network>:<vswitch>
# where the physical networks can be expressed with wildcards,
# e.g.: "*:external".
# The referred external virtual switches need to be already present on
# the Hyper-V server.
# If a given physical network name will not match any value in the list
# the plugin will look for a virtual switch with the same name.
#
# physical_network_vswitch_mappings = *:external
# Example: physical_network_vswitch_mappings = net1:external1,net2:external2
# (StrOpt) Private virtual switch name used for local networking.
#
# local_network_vswitch = private
# Example: local_network_vswitch = custom_vswitch
# (BoolOpt) Enables metrics collections for switch ports by using Hyper-V's
# metric APIs. Collected data can be retrieved by other apps and services,
# e.g.: Ceilometer. Requires Hyper-V / Windows Server 2012 and above.
#
# enable_metrics_collection = False
#-----------------------------------------------------------------------------
# Sample Configurations.
#-----------------------------------------------------------------------------
#
# Neutron server:
#
# [HYPERV]
# tenant_network_type = vlan
# network_vlan_ranges = default:2000:3999
#
# Agent running on Hyper-V node:
#
# [AGENT]
# polling_interval = 2
# physical_network_vswitch_mappings = *:external
# local_network_vswitch = private


@ -1,50 +0,0 @@
[sdnve]
# (ListOpt) The IP address of one (or more) SDN-VE controllers
# Default value is: controller_ips = 127.0.0.1
# Example: controller_ips = 127.0.0.1,127.0.0.2
# (StrOpt) The integration bridge for OF based implementation
# The default value for integration_bridge is None
# Example: integration_bridge = br-int
# (ListOpt) The interface mapping connecting the integration
# bridge to external network as a list of physical network names and
# interfaces: <physical_network_name>:<interface_name>
# Example: interface_mappings = default:eth2
# (BoolOpt) Used to reset the integration bridge, if exists
# The default value for reset_bridge is True
# Example: reset_bridge = False
# (BoolOpt) Used to set the OVS controller as out-of-band
# The default value for out_of_band is True
# Example: out_of_band = False
#
# (BoolOpt) The fake controller for testing purposes
# Default value is: use_fake_controller = False
# (StrOpt) The port number for use with controller
# The default value for the port is 8443
# Example: port = 8443
# (StrOpt) The userid for use with controller
# The default value for the userid is admin
# Example: userid = sdnve_user
# (StrOpt) The password for use with controller
# The default value for the password is admin
# Example: password = sdnve_password
#
# (StrOpt) The default type of tenants (and associated resources)
# Available choices are: OVERLAY or OF
# The default value for tenant type is OVERLAY
# Example: default_tenant_type = OVERLAY
# (StrOpt) The string in the tenant description that indicates an OF tenant
# Default value for OF tenants: of_signature = SDNVE-OF
# (StrOpt) The string in the tenant description that indicates an OVERLAY tenant
# Default value for OVERLAY tenants: overlay_signature = SDNVE-OVERLAY
[sdnve_agent]
# (IntOpt) Agent's polling interval in seconds
# polling_interval = 2
# (StrOpt) What to use for root helper
# The default value: root_helper = 'sudo'
# (BoolOpt) Whether to use rpc or not
# The default value: rpc = True
[securitygroup]
# The security group is not supported:
# firewall_driver = neutron.agent.firewall.NoopFirewallDriver


@ -1,78 +0,0 @@
[vlans]
# (StrOpt) Type of network to allocate for tenant networks. The
# default value 'local' is useful only for single-box testing and
# provides no connectivity between hosts. You MUST change this to
# 'vlan' and configure network_vlan_ranges below in order for tenant
# networks to provide connectivity between hosts. Set to 'none' to
# disable creation of tenant networks.
#
# tenant_network_type = local
# Example: tenant_network_type = vlan
# (ListOpt) Comma-separated list of
# <physical_network>[:<vlan_min>:<vlan_max>] tuples enumerating ranges
# of VLAN IDs on named physical networks that are available for
# allocation. All physical networks listed are available for flat and
# VLAN provider network creation. Specified ranges of VLAN IDs are
# available for tenant network allocation if tenant_network_type is
# 'vlan'. If empty, only local networks may be created.
#
# network_vlan_ranges =
# Example: network_vlan_ranges = physnet1:1000:2999
[linux_bridge]
# (ListOpt) Comma-separated list of
# <physical_network>:<physical_interface> tuples mapping physical
# network names to the agent's node-specific physical network
# interfaces to be used for flat and VLAN networks. All physical
# networks listed in network_vlan_ranges on the server should have
# mappings to appropriate interfaces on each agent.
#
# physical_interface_mappings =
# Example: physical_interface_mappings = physnet1:eth1
[vxlan]
# (BoolOpt) enable VXLAN on the agent
# VXLAN support can be enabled when agent is managed by ml2 plugin using
# linuxbridge mechanism driver. Useless if set while using linuxbridge plugin.
# enable_vxlan = False
#
# (IntOpt) use specific TTL for vxlan interface protocol packets
# ttl =
#
# (IntOpt) use specific TOS for vxlan interface protocol packets
# tos =
#
# (StrOpt) multicast group to use for broadcast emulation.
# This group must be the same on all the agents.
# vxlan_group = 224.0.0.1
#
# (StrOpt) Local IP address to use for VXLAN endpoints (required)
# local_ip =
#
# (BoolOpt) Flag to enable l2population extension. This option should be used
# in conjunction with ml2 plugin l2population mechanism driver (in that case,
# both linuxbridge and l2population mechanism drivers should be loaded).
# It enables the plugin to populate the VXLAN forwarding table, in order to limit
# the use of broadcast emulation (multicast will be turned off if the kernel and
# iproute2 support unicast flooding; requires kernel 3.11 and iproute2 3.10)
# l2_population = False
[agent]
# Agent's polling interval in seconds
# polling_interval = 2
# (BoolOpt) Enable server RPC compatibility with old (pre-havana)
# agents.
#
# rpc_support_old_agents = False
# Example: rpc_support_old_agents = True
[securitygroup]
# Firewall driver for realizing neutron security group function
# firewall_driver = neutron.agent.firewall.NoopFirewallDriver
# Example: firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
# Controls if neutron security group is enabled or not.
# It should be false when you use nova security group.
# enable_security_group = True


@ -1,31 +0,0 @@
# Config file for Metaplugin
[meta]
# Comma separated list of flavor:neutron_plugin for plugins to load.
# Extension method is searched in the list order and the first one is used.
plugin_list = 'ml2:neutron.plugins.ml2.plugin.Ml2Plugin,nvp:neutron.plugins.vmware.plugin.NsxPluginV2'
# Comma separated list of flavor:neutron_plugin for L3 service plugins
# to load.
# This is intended for specifying L2 plugins which support L3 functions.
# If you use a router service plugin, set this blank.
l3_plugin_list =
# Default flavor to use, when flavor:network is not specified at network
# creation.
default_flavor = 'nvp'
# Default L3 flavor to use, when flavor:router is not specified at router
# creation.
# Ignored if 'l3_plugin_list' is blank.
default_l3_flavor =
# Comma separated list of supported extension aliases.
supported_extension_aliases = 'provider,binding,agent,dhcp_agent_scheduler'
# Comma separated list of method:flavor to select specific plugin for a method.
# This has priority over method search order based on 'plugin_list'.
extension_map = 'get_port_stats:nvp'
# Specifies flavor for plugin to handle 'q-plugin' RPC requests.
rpc_flavor = 'ml2'


@ -1,19 +0,0 @@
[midonet]
# MidoNet API server URI
# midonet_uri = http://localhost:8080/midonet-api
# MidoNet admin username
# username = admin
# MidoNet admin password
# password = passw0rd
# ID of the project that MidoNet admin user belongs to
# project_id = 77777777-7777-7777-7777-777777777777
# Virtual provider router ID
# provider_router_id = 00112233-0011-0011-0011-001122334455
# Path to midonet host uuid file
# midonet_host_uuid_path = /etc/midolman/host_uuid.properties


@ -1,62 +0,0 @@
[ml2]
# (ListOpt) List of network type driver entrypoints to be loaded from
# the neutron.ml2.type_drivers namespace.
#
# type_drivers = local,flat,vlan,gre,vxlan
# Example: type_drivers = flat,vlan,gre,vxlan
# (ListOpt) Ordered list of network_types to allocate as tenant
# networks. The default value 'local' is useful for single-box testing
# but provides no connectivity between hosts.
#
# tenant_network_types = local
# Example: tenant_network_types = vlan,gre,vxlan
# (ListOpt) Ordered list of networking mechanism driver entrypoints
# to be loaded from the neutron.ml2.mechanism_drivers namespace.
# mechanism_drivers =
# Example: mechanism_drivers = openvswitch,mlnx
# Example: mechanism_drivers = arista
# Example: mechanism_drivers = cisco,logger
# Example: mechanism_drivers = openvswitch,brocade
# Example: mechanism_drivers = linuxbridge,brocade
[ml2_type_flat]
# (ListOpt) List of physical_network names with which flat networks
# can be created. Use * to allow flat networks with arbitrary
# physical_network names.
#
# flat_networks =
# Example:flat_networks = physnet1,physnet2
# Example:flat_networks = *
[ml2_type_vlan]
# (ListOpt) List of <physical_network>[:<vlan_min>:<vlan_max>] tuples
# specifying physical_network names usable for VLAN provider and
# tenant networks, as well as ranges of VLAN tags on each
# physical_network available for allocation as tenant networks.
#
# network_vlan_ranges =
# Example: network_vlan_ranges = physnet1:1000:2999,physnet2
[ml2_type_gre]
# (ListOpt) Comma-separated list of <tun_min>:<tun_max> tuples enumerating ranges of GRE tunnel IDs that are available for tenant network allocation
# tunnel_id_ranges =
[ml2_type_vxlan]
# (ListOpt) Comma-separated list of <vni_min>:<vni_max> tuples enumerating
# ranges of VXLAN VNI IDs that are available for tenant network allocation.
#
# vni_ranges =
# (StrOpt) Multicast group for the VXLAN interface. When configured, will
# enable sending all broadcast traffic to this multicast group. When left
# unconfigured, will disable multicast VXLAN mode.
#
# vxlan_group =
# Example: vxlan_group = 239.1.1.1
[securitygroup]
# Controls if neutron security group is enabled or not.
# It should be false when you use nova security group.
# enable_security_group = True
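Tying the commented options together, a minimal VLAN-based ML2 layout could look like the sketch below; the values mirror the examples in the comments and are illustrative only, and the target path is the conventional one rather than anything mandated by this file:

# Write an example ML2 configuration (overwrites the target file; adjust before use)
sudo tee /etc/neutron/plugins/ml2/ml2_conf.ini <<'EOF'
[ml2]
type_drivers = flat,vlan
tenant_network_types = vlan
mechanism_drivers = openvswitch

[ml2_type_vlan]
network_vlan_ranges = physnet1:1000:2999

[securitygroup]
enable_security_group = True
EOF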


@ -1,45 +0,0 @@
# Defines configuration options specific for Arista ML2 Mechanism driver
[ml2_arista]
# (StrOpt) EOS IP address. This is a required field. If not set, all
# communications to Arista EOS will fail
#
# eapi_host =
# Example: eapi_host = 192.168.0.1
#
# (StrOpt) EOS command API username. This is a required field.
# If not set, all communications to Arista EOS will fail.
#
# eapi_username =
# Example: arista_eapi_username = admin
#
# (StrOpt) EOS command API password. This is a required field.
# If not set, all communications to Arista EOS will fail.
#
# eapi_password =
# Example: eapi_password = my_password
#
# (StrOpt) Defines if hostnames are sent to Arista EOS as FQDNs
# ("node1.domain.com") or as short names ("node1"). This is
# optional. If not set, a value of "True" is assumed.
#
# use_fqdn =
# Example: use_fqdn = True
#
# (IntOpt) Sync interval in seconds between Neutron plugin and EOS.
# This field defines how often the synchronization is performed.
# This is an optional field. If not set, a value of 180 seconds
# is assumed.
#
# sync_interval =
# Example: sync_interval = 60
#
# (StrOpt) Defines Region Name that is assigned to this OpenStack Controller.
# This is useful when multiple OpenStack/Neutron controllers are
# managing the same Arista HW clusters. Note that this name must
# match the region name registered (or known) to the keystone
# service. Authentication with Keystone is performed by EOS.
# This is optional. If not set, a value of "RegionOne" is assumed.
#
# region_name =
# Example: region_name = RegionOne


@ -1,13 +0,0 @@
[ml2_brocade]
# username = <mgmt admin username>
# password = <mgmt admin password>
# address = <switch mgmt ip address>
# ostype = NOS
# physical_networks = physnet1,physnet2
#
# Example:
# username = admin
# password = password
# address = 10.24.84.38
# ostype = NOS
# physical_networks = physnet1,physnet2


@ -1,94 +0,0 @@
[ml2_cisco]
# (StrOpt) A short prefix to prepend to the VLAN number when creating a
# VLAN interface. For example, if an interface is being created for
# VLAN 2001 it will be named 'q-2001' using the default prefix.
#
# vlan_name_prefix = q-
# Example: vlan_name_prefix = vnet-
# (BoolOpt) A flag to enable round robin scheduling of routers for SVI.
# svi_round_robin = False
#
# (StrOpt) The name of the physical_network managed via the Cisco Nexus Switch.
# This string value must be present in the ml2_conf.ini network_vlan_ranges
# variable.
#
# managed_physical_network =
# Example: managed_physical_network = physnet1
# Cisco Nexus Switch configurations.
# Each switch to be managed by Openstack Neutron must be configured here.
#
# Cisco Nexus Switch Format.
# [ml2_mech_cisco_nexus:<IP address of switch>]
# <hostname>=<intf_type:port> (1)
# ssh_port=<ssh port> (2)
# username=<credential username> (3)
# password=<credential password> (4)
#
# (1) For each host connected to a port on the switch, specify the hostname
# and the Nexus physical port (interface) it is connected to.
# Valid intf_type's are 'ethernet' and 'port-channel'.
# The default setting for <intf_type:> is 'ethernet' and need not be
# added to this setting.
# (2) The TCP port for connecting via SSH to manage the switch. This is
# port number 22 unless the switch has been configured otherwise.
# (3) The username for logging into the switch to manage it.
# (4) The password for logging into the switch to manage it.
#
# Example:
# [ml2_mech_cisco_nexus:1.1.1.1]
# compute1=1/1
# compute2=ethernet:1/2
# compute3=port-channel:1
# ssh_port=22
# username=admin
# password=mySecretPassword
[ml2_cisco_apic]
# Hostname for the APIC controller
# apic_host=1.1.1.1
# Username for the APIC controller
# apic_username=user
# Password for the APIC controller
# apic_password=password
# Port for the APIC Controller
# apic_port=80
# Names for APIC objects used by Neutron
# Note: When deploying multiple clouds against one APIC,
# these names must be unique between the clouds.
# apic_vmm_domain=openstack
# apic_vlan_ns_name=openstack_ns
# apic_node_profile=openstack_profile
# apic_entity_profile=openstack_entity
# apic_function_profile=openstack_function
# The following flag will cause all the node profiles on the APIC to
# be cleared when neutron-server starts. This is typically used only
# for test environments that require clean-slate startup conditions.
# apic_clear_node_profiles=False
# Specify your network topology.
# This section indicates how your compute nodes are connected to the fabric's
# switches and ports. The format is as follows:
#
# [switch:<switch_id_from_the_apic>]
# <compute_host>,<compute_host>=<switchport_the_host(s)_are_connected_to>
#
# You can have multiple sections, one for each switch in your fabric that is
# participating in Openstack. e.g.
#
# [switch:17]
# ubuntu,ubuntu1=1/10
# ubuntu2,ubuntu3=1/11
#
# [switch:18]
# ubuntu5,ubuntu6=1/1
# ubuntu7,ubuntu8=1/2


@ -1,52 +0,0 @@
# Defines Configuration options for FSL SDN OS Mechanism Driver
# Cloud Resource Discovery (CRD) authorization credentials
[ml2_fslsdn]
#(StrOpt) User name for authentication to CRD.
# e.g.: user12
#
# crd_user_name =
#(StrOpt) Password for authentication to CRD.
# e.g.: secret
#
# crd_password =
#(StrOpt) Tenant name for CRD service.
# e.g.: service
#
# crd_tenant_name =
#(StrOpt) CRD auth URL.
# e.g.: http://127.0.0.1:5000/v2.0/
#
# crd_auth_url =
#(StrOpt) URL for connecting to CRD Service.
# e.g.: http://127.0.0.1:9797
#
# crd_url=
#(IntOpt) Timeout value for connecting to CRD service
# in seconds, e.g.: 30
#
# crd_url_timeout=
#(StrOpt) Region name for connecting to CRD in
# admin context, e.g.: RegionOne
#
# crd_region_name=
#(BoolOpt)If set, ignore any SSL validation issues (boolean value)
# e.g.: False
#
# crd_api_insecure=
#(StrOpt)Authorization strategy for connecting to CRD in admin
# context, e.g.: keystone
#
# crd_auth_strategy=
#(StrOpt)Location of CA certificates file to use for CRD client
# requests.
#
# crd_ca_certificates_file=


@ -1,6 +0,0 @@
[eswitch]
# (StrOpt) Type of Network Interface to allocate for VM:
# mlnx_direct or hostdev according to libvirt terminology
# vnic_type = mlnx_direct
# (BoolOpt) Enable server compatibility with old nova
# apply_profile_patch = False


@ -1,28 +0,0 @@
# Defines configuration options specific to the Tail-f NCS Mechanism Driver
[ml2_ncs]
# (StrOpt) Tail-f NCS HTTP endpoint for REST access to the OpenStack
# subtree.
# If this is not set then no HTTP requests will be made.
#
# url =
# Example: url = http://ncs/api/running/services/openstack
# (StrOpt) Username for HTTP basic authentication to NCS.
# This is an optional parameter. If unspecified then no authentication is used.
#
# username =
# Example: username = admin
# (StrOpt) Password for HTTP basic authentication to NCS.
# This is an optional parameter. If unspecified then no authentication is used.
#
# password =
# Example: password = admin
# (IntOpt) Timeout in seconds to wait for NCS HTTP request completion.
# This is an optional parameter, default value is 10 seconds.
#
# timeout =
# Example: timeout = 15


@ -1,30 +0,0 @@
# Configuration for the OpenDaylight MechanismDriver
[ml2_odl]
# (StrOpt) OpenDaylight REST URL
# If this is not set then no HTTP requests will be made.
#
# url =
# Example: url = http://192.168.56.1:8080/controller/nb/v2/neutron
# (StrOpt) Username for HTTP basic authentication to ODL.
#
# username =
# Example: username = admin
# (StrOpt) Password for HTTP basic authentication to ODL.
#
# password =
# Example: password = admin
# (IntOpt) Timeout in seconds to wait for ODL HTTP request completion.
# This is an optional parameter, default value is 10 seconds.
#
# timeout = 10
# Example: timeout = 15
# (IntOpt) Timeout in minutes to wait for a Tomcat session timeout.
# This is an optional parameter, default value is 30 minutes.
#
# session_timeout = 30
# Example: session_timeout = 60


@ -1,13 +0,0 @@
# Defines configuration options specific to the OpenFlow Agent Mechanism Driver
[ovs]
# Please refer to the configuration options of the Open vSwitch plugin.
[agent]
# (IntOpt) Number of seconds to retry acquiring an Open vSwitch datapath.
# This is an optional parameter, default value is 60 seconds.
#
# get_datapath_retry_times =
# Example: get_datapath_retry_times = 30
# For options other than the above, refer to the configuration options of the Open vSwitch plugin.


@ -1,79 +0,0 @@
[mlnx]
# (StrOpt) Type of network to allocate for tenant networks. The
# default value is 'vlan'. You MUST configure network_vlan_ranges below
# in order for tenant networks to provide connectivity between hosts.
# Set to 'none' to disable creation of tenant networks.
#
# tenant_network_type = vlan
# Example: tenant_network_type = vlan
# (ListOpt) Comma-separated list of
# <physical_network>[:<vlan_min>:<vlan_max>] tuples enumerating ranges
# of VLAN IDs on named physical networks that are available for
# allocation. All physical networks listed are available for flat and
# VLAN provider network creation. Specified ranges of VLAN IDs are
# available for tenant network allocation if tenant_network_type is
# 'vlan'. If empty, only local networks may be created.
#
# network_vlan_ranges =
# Example: network_vlan_ranges = default:1:100
# (ListOpt) Comma-separated list of
# <physical_network>:<physical_network_type> tuples mapping physical
# network names to physical network types. All physical
# networks listed in network_vlan_ranges should have
# mappings to appropriate physical network type.
# Type of the physical network can be either eth (Ethernet) or
# ib (InfiniBand). If empty, physical network eth type is assumed.
#
# physical_network_type_mappings =
# Example: physical_network_type_mappings = default:eth
# (StrOpt) Type of the physical network, can be either 'eth' or 'ib'
# The default value is 'eth'
# physical_network_type = eth
[eswitch]
# (ListOpt) Comma-separated list of
# <physical_network>:<physical_interface> tuples mapping physical
# network names to the agent's node-specific physical network
# interfaces to be used for flat and VLAN networks. All physical
# networks listed in network_vlan_ranges on the server should have
# mappings to appropriate interfaces on each agent.
#
# physical_interface_mappings =
# Example: physical_interface_mappings = default:eth2
# (StrOpt) Type of Network Interface to allocate for VM:
# direct or hostdev according to libvirt terminology
# vnic_type = mlnx_direct
# (StrOpt) Eswitch daemon end point connection url
# daemon_endpoint = 'tcp://127.0.0.1:60001'
# The number of milliseconds the agent will wait for
# response on request to daemon
# request_timeout = 3000
# The number of retries the agent will send request
# to daemon before giving up
# retries = 3
# The backoff rate multiplier for waiting period between retries
# on request to daemon, i.e. value of 2 will double
# the request timeout each retry
# backoff_rate = 2
[agent]
# Agent's polling interval in seconds
# polling_interval = 2
# (BoolOpt) Enable server RPC compatibility with old (pre-havana)
# agents.
#
# rpc_support_old_agents = False
[securitygroup]
# Controls if neutron security group is enabled or not.
# It should be false when you use nova security group.
# enable_security_group = True


@ -1,60 +0,0 @@
# Sample Configurations
[ovs]
# Do not change this parameter unless you have a good reason to.
# This is the name of the OVS integration bridge. There is one per hypervisor.
# The integration bridge acts as a virtual "patch port". All VM VIFs are
# attached to this bridge and then "patched" according to their network
# connectivity.
# integration_bridge = br-int
[agent]
# Agent's polling interval in seconds
# polling_interval = 2
[securitygroup]
# Firewall driver for realizing neutron security group function
firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
# Controls if neutron security group is enabled or not.
# It should be false when you use nova security group.
# enable_security_group = True
[ofc]
# Specify OpenFlow Controller Host, Port and Driver to connect.
# host = 127.0.0.1
# port = 8888
# Base URL of OpenFlow Controller REST API.
# It is prepended to a path of each API request.
# path_prefix =
# Drivers are in neutron/plugins/nec/drivers/ .
# driver = trema
# PacketFilter is available when it's enabled in this configuration
# and supported by the driver.
# enable_packet_filter = true
# Use SSL to connect
# use_ssl = false
# Key file
# key_file =
# Certificate file
# cert_file =
# Disable SSL certificate verification
# insecure_ssl = false
# Maximum attempts per OFC API request. NEC plugin retries
# API request to OFC when OFC returns ServiceUnavailable (503).
# The value must be greater than 0.
# api_max_attempts = 3
[provider]
# Default router provider to use.
# default_router_provider = l3-agent
# List of enabled router providers.
# router_providers = l3-agent,openflow


@ -1,10 +0,0 @@
# Please fill in the correct data for all the keys below and uncomment key-value pairs
[restproxy]
#default_net_partition_name = <default-net-partition-name>
#auth_resource = /auth
#server = ip:port
#organization = org
#serverauth = uname:pass
#serverssl = True
#base_uri = /base


@ -1,35 +0,0 @@
[nvsd]
# Configure the NVSD controller. The plugin proxies the API calls to the
# NVSD controller, which implements the required functionality.
# IP address of NVSD controller api server
# nvsd_ip = <ip address of nvsd controller>
# Port number of NVSD controller api server
# nvsd_port = 8082
# Authentication credentials to access the api server
# nvsd_user = <nvsd controller username>
# nvsd_passwd = <password>
# API request timeout in seconds
# request_timeout = <default request timeout>
# Maximum number of retry attempts to login to the NVSD controller
# Specify 0 to retry until success (default)
# nvsd_retries = 0
[securitygroup]
# Specify the firewall_driver option: if neutron security groups are disabled,
# use NoopFirewallDriver; otherwise use OVSHybridIptablesFirewallDriver.
# firewall_driver = neutron.agent.firewall.NoopFirewallDriver
# Controls if neutron security group is enabled or not.
# It should be false when you use nova security group.
# enable_security_group = True
[agent]
# root_helper = sudo /usr/local/bin/neutron-rootwrap /etc/neutron/rootwrap.conf
[database]
# connection = mysql://root:<passwd>@127.0.0.1/<neutron_db>?charset=utf8


@ -1,179 +0,0 @@
[ovs]
# (StrOpt) Type of network to allocate for tenant networks. The
# default value 'local' is useful only for single-box testing and
# provides no connectivity between hosts. You MUST either change this
# to 'vlan' and configure network_vlan_ranges below or change this to
# 'gre' or 'vxlan' and configure tunnel_id_ranges below in order for
# tenant networks to provide connectivity between hosts. Set to 'none'
# to disable creation of tenant networks.
#
# tenant_network_type = local
# Example: tenant_network_type = gre
# Example: tenant_network_type = vxlan
# (ListOpt) Comma-separated list of
# <physical_network>[:<vlan_min>:<vlan_max>] tuples enumerating ranges
# of VLAN IDs on named physical networks that are available for
# allocation. All physical networks listed are available for flat and
# VLAN provider network creation. Specified ranges of VLAN IDs are
# available for tenant network allocation if tenant_network_type is
# 'vlan'. If empty, only gre, vxlan and local networks may be created.
#
# network_vlan_ranges =
# Example: network_vlan_ranges = physnet1:1000:2999
# (BoolOpt) Set to True in the server and the agents to enable support
# for GRE or VXLAN networks. Requires kernel support for OVS patch ports and
# GRE or VXLAN tunneling.
#
# WARNING: This option will be deprecated in the Icehouse release, at which
# point setting tunnel_type below will be required to enable
# tunneling.
#
# enable_tunneling = False
# (StrOpt) The type of tunnel network, if any, supported by the plugin. If
# this is set, it will cause tunneling to be enabled. If this is not set and
# the option enable_tunneling is set, this will default to 'gre'.
#
# tunnel_type =
# Example: tunnel_type = gre
# Example: tunnel_type = vxlan
# (ListOpt) Comma-separated list of <tun_min>:<tun_max> tuples
# enumerating ranges of GRE or VXLAN tunnel IDs that are available for
# tenant network allocation if tenant_network_type is 'gre' or 'vxlan'.
#
# tunnel_id_ranges =
# Example: tunnel_id_ranges = 1:1000
# Do not change this parameter unless you have a good reason to.
# This is the name of the OVS integration bridge. There is one per hypervisor.
# The integration bridge acts as a virtual "patch bay". All VM VIFs are
# attached to this bridge and then "patched" according to their network
# connectivity.
#
# integration_bridge = br-int
# Only used for the agent if tunnel_id_ranges (above) is not empty for
# the server. In most cases, the default value should be fine.
#
# tunnel_bridge = br-tun
# Peer patch port in integration bridge for tunnel bridge
# int_peer_patch_port = patch-tun
# Peer patch port in tunnel bridge for integration bridge
# tun_peer_patch_port = patch-int
# Uncomment this line for the agent if tunnel_id_ranges (above) is not
# empty for the server. Set local-ip to be the local IP address of
# this hypervisor.
#
# local_ip =
# (ListOpt) Comma-separated list of <physical_network>:<bridge> tuples
# mapping physical network names to the agent's node-specific OVS
# bridge names to be used for flat and VLAN networks. The length of
# bridge names should be no more than 11. Each bridge must
# exist, and should have a physical network interface configured as a
# port. All physical networks listed in network_vlan_ranges on the
# server should have mappings to appropriate bridges on each agent.
#
# bridge_mappings =
# Example: bridge_mappings = physnet1:br-eth1
[agent]
# Agent's polling interval in seconds
# polling_interval = 2
# Minimize polling by monitoring ovsdb for interface changes
# minimize_polling = True
# When minimize_polling = True, the number of seconds to wait before
# respawning the ovsdb monitor after losing communication with it
# ovsdb_monitor_respawn_interval = 30
# (ListOpt) The types of tenant network tunnels supported by the agent.
# Setting this will enable tunneling support in the agent. This can be set to
# either 'gre' or 'vxlan'. If this is unset, it will default to [] and
# disable tunneling support in the agent. When running the agent with the OVS
# plugin, this value must be the same as "tunnel_type" in the "[ovs]" section.
# When running the agent with ML2, you can specify as many values here as
# your compute hosts support.
#
# tunnel_types =
# Example: tunnel_types = gre
# Example: tunnel_types = vxlan
# Example: tunnel_types = vxlan, gre
# (IntOpt) The UDP port number to use if tunnel_types includes 'vxlan'. If not
# specified, the Open vSwitch default value of 4789 is used.
#
# vxlan_udp_port =
# Example: vxlan_udp_port = 8472
# (IntOpt) This is the MTU size of veth interfaces.
# Do not change unless you have a good reason to.
# The default MTU size of veth interfaces is 1500.
# veth_mtu =
# Example: veth_mtu = 1504
# (BoolOpt) Flag to enable the l2-population extension. This option should only be
# used in conjunction with the ml2 plugin and the l2population mechanism driver.
# It enables the plugin to populate the MAC and IP addresses of remote ports
# (using fdb_add/remove RPC callbacks instead of tunnel_sync/update) on OVS
# agents in order to optimize tunnel management.
#
# l2_population = False
# Enable local ARP responder. Requires OVS 2.1. This is only used by the l2
# population ML2 MechanismDriver.
#
# arp_responder = False
# (BoolOpt) Set or un-set the don't fragment (DF) bit on outgoing IP packets
# carrying GRE/VXLAN tunnels. The default value is True.
#
# dont_fragment = True
[securitygroup]
# Firewall driver for realizing neutron security group function.
# firewall_driver = neutron.agent.firewall.NoopFirewallDriver
# Example: firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
# Controls if neutron security group is enabled or not.
# It should be false when you use nova security group.
# enable_security_group = True
#-----------------------------------------------------------------------------
# Sample Configurations.
#-----------------------------------------------------------------------------
#
# 1. With VLANs on eth1.
# [ovs]
# network_vlan_ranges = default:2000:3999
# tunnel_id_ranges =
# integration_bridge = br-int
# bridge_mappings = default:br-eth1
#
# 2. With GRE tunneling.
# [ovs]
# network_vlan_ranges =
# tunnel_id_ranges = 1:1000
# integration_bridge = br-int
# tunnel_bridge = br-tun
# local_ip = 10.0.0.3
#
# 3. With VXLAN tunneling.
# [ovs]
# network_vlan_ranges =
# tenant_network_type = vxlan
# tunnel_type = vxlan
# tunnel_id_ranges = 1:1000
# integration_bridge = br-int
# tunnel_bridge = br-tun
# local_ip = 10.0.0.3
# [agent]
# tunnel_types = vxlan
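#
# 4. (Illustrative) VLAN provider networks combined with VXLAN tenant tunnels;
# values are hypothetical and simply combine the settings from samples 1 and 3.
# [ovs]
# network_vlan_ranges = default:2000:3999
# tenant_network_type = vxlan
# tunnel_type = vxlan
# tunnel_id_ranges = 1:1000
# integration_bridge = br-int
# tunnel_bridge = br-tun
# bridge_mappings = default:br-eth1
# local_ip = 10.0.0.3
# [agent]
# tunnel_types = vxlan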

View File

@ -1,14 +0,0 @@
# Config file for Neutron PLUMgrid Plugin
[plumgriddirector]
# These settings should point to the PLUMgrid Director
# of the PLUMgrid platform.
# director_server=<director-ip-address>
# director_server_port=<director-port>
# Authentication parameters for the Director.
# These are the admin credentials to manage and control
# the PLUMgrid Director server.
# username=<director-admin-username>
# password=<director-admin-password>
# servertimeout=5
# driver=<plugin-driver>
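# An illustrative set of values (addresses and credentials below are
# hypothetical placeholders, not defaults):
# director_server = 192.0.2.10
# director_server_port = 443
# username = plumgrid
# password = plumgrid-password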

View File

@ -1,44 +0,0 @@
[ovs]
# integration_bridge = br-int
# openflow_rest_api = <host IP address of ofp rest api service>:<port: 8080>
# openflow_rest_api = 127.0.0.1:8080
# tunnel key range: 0 < tunnel_key_min < tunnel_key_max
# VLAN: 12 bits; GRE, VXLAN: 24 bits
# tunnel_key_min = 1
# tunnel_key_max = 0xffffff
# tunnel_ip = <ip address for tunneling>
# tunnel_interface = interface for tunneling
# when tunnel_ip is NOT specified, the IP address is read
# from this interface
# tunnel_ip =
# tunnel_interface =
tunnel_interface = eth0
# ovsdb_port = port number on which ovsdb is listening
# ryu-agent uses this parameter to setup ovsdb.
# ovs-vsctl set-manager ptcp:<ovsdb_port>
# See set-manager section of man ovs-vsctl for details.
# currently ptcp is only supported.
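# For example, to match the default ovsdb_port below, the manager could be
# configured with: ovs-vsctl set-manager ptcp:6634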
# ovsdb_ip = <host IP address on which ovsdb is listening>
# ovsdb_interface = interface for ovsdb
# when ovsdb_ip is NOT specified, the IP address is obtained
# from this interface
# ovsdb_port = 6634
# ovsdb_ip =
# ovsdb_interface =
ovsdb_interface = eth0
[securitygroup]
# Firewall driver for realizing neutron security group function
# firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
# Controls if neutron security group is enabled or not.
# It should be false when you use nova security group.
# enable_security_group = True
[agent]
# Agent's polling interval in seconds
# polling_interval = 2

View File

@ -1,202 +0,0 @@
[DEFAULT]
# User name for NSX controller
# nsx_user = admin
# Password for NSX controller
# nsx_password = admin
# Total time limit for a cluster request
# (including retries across different controllers)
# req_timeout = 30
# Time before aborting a request on an unresponsive controller
# http_timeout = 30
# Maximum number of times a particular request should be retried
# retries = 2
# Maximum number of times a redirect response should be followed
# redirects = 2
# Comma-separated list of NSX controller endpoints (<ip>:<port>). When port
# is omitted, 443 is assumed. This option MUST be specified, e.g.:
# nsx_controllers = xx.yy.zz.ww:443, aa.bb.cc.dd, ee.ff.gg.hh:80
# UUID of the pre-existing default NSX Transport zone to be used for creating
# tunneled isolated "Neutron" networks. This option MUST be specified, e.g.:
# default_tz_uuid = 1e8e52cf-fa7f-46b0-a14a-f99835a9cb53
# (Optional) UUID for the default l3 gateway service to use with this cluster.
# To be specified if planning to use logical routers with external gateways.
# default_l3_gw_service_uuid =
# (Optional) UUID for the default l2 gateway service to use with this cluster.
# To be specified if tenants are to be provided with a predefined gateway for connecting their networks.
# default_l2_gw_service_uuid =
# (Optional) UUID for the default service cluster. A service cluster is introduced to
# represent a group of gateways and it is needed in order to use Logical Services like
# dhcp and metadata in the logical space. NOTE: If agent_mode is set to 'agentless' this
# config parameter *MUST BE* set to a valid pre-existent service cluster uuid.
# default_service_cluster_uuid =
# Default interface name to be used on network gateways. This value
# will be used for any device associated with a network gateway for which an
# interface name was not specified
# default_interface_name = breth0
[quotas]
# number of network gateways allowed per tenant, -1 means unlimited
# quota_network_gateway = 5
[vcns]
# URL for VCNS manager
# manager_uri = https://management_ip
# User name for VCNS manager
# user = admin
# Password for VCNS manager
# password = default
# (Optional) Datacenter ID for Edge deployment
# datacenter_moid =
# (Optional) Deployment Container ID for NSX Edge deployment
# If not specified, either a default global container will be used, or
# the resource pool and datastore specified below will be used
# deployment_container_id =
# (Optional) Resource pool ID for NSX Edge deployment
# resource_pool_id =
# (Optional) Datastore ID for NSX Edge deployment
# datastore_id =
# (Required) UUID of logic switch for physical network connectivity
# external_network =
# (Optional) Asynchronous task status check interval
# default is 2000 (milliseconds)
# task_status_check_interval = 2000
[nsx]
# Maximum number of ports for each bridged logical switch
# The recommended value for this parameter varies with NSX version
# Please use:
# NSX 2.x -> 64
# NSX 3.0, 3.1 -> 5000
# NSX 3.2 -> 10000
# max_lp_per_bridged_ls = 5000
# Maximum number of ports for each overlay (stt, gre) logical switch
# max_lp_per_overlay_ls = 256
# Number of connections to each controller node.
# default is 10
# concurrent_connections = 10
# Number of seconds a generation id should be valid for (default -1 meaning do not time out)
# nsx_gen_timeout = -1
# Acceptable values for 'metadata_mode' are:
# - 'access_network': this enables a dedicated connection to the metadata
# proxy for metadata server access via Neutron router.
# - 'dhcp_host_route': this enables host route injection via the dhcp agent.
# This option is only useful if running on a host that does not support
# namespaces; otherwise access_network should be used.
# metadata_mode = access_network
# The default network transport type to use (stt, gre, bridge, ipsec_gre, or ipsec_stt)
# default_transport_type = stt
# Specifies in which mode the plugin needs to operate in order to provide DHCP and
# metadata proxy services to tenant instances. If 'agent' is chosen (default)
# the NSX plugin relies on external RPC agents (i.e. dhcp and metadata agents) to
# provide such services. In this mode, the plugin supports API extensions 'agent'
# and 'dhcp_agent_scheduler'. If 'agentless' is chosen (experimental in Icehouse),
# the plugin will use NSX logical services for DHCP and metadata proxy. This
# simplifies the deployment model for Neutron, in that the plugin no longer requires
# the RPC agents to operate. When 'agentless' is chosen, the config option metadata_mode
# becomes ineffective. The 'agentless' mode is supported from NSX 4.2 or above.
# Furthermore, a 'combined' mode is also provided and is used to support existing
# deployments that want to adopt the agentless mode going forward. With this mode,
# existing networks keep being served by the existing infrastructure (thus preserving
# backward compatibility), whereas new networks will be served by the new infrastructure.
# Migration tools are provided to 'move' one network from one model to another; with
# agent_mode set to 'combined', option 'network_auto_schedule' in neutron.conf is
# ignored, as new networks will no longer be scheduled to existing dhcp agents.
# agent_mode = agent
# Specifies which mode packet replication should be done in. If set to 'service',
# a service node is required in order to perform packet replication. This can
# also be set to 'source' if one wants replication to be performed locally (NOTE:
# usually only useful for testing if one does not want to deploy a service node).
# replication_mode = service
[nsx_sync]
# Interval in seconds between runs of the status synchronization task.
# The plugin will aim at resynchronizing operational status for all
# resources in this interval, and it should therefore be large enough
# to ensure the task is feasible. Otherwise the plugin will be
# constantly synchronizing resource status, i.e. a new task is started
# as soon as the previous one is completed.
# If this value is set to 0, the state synchronization thread for this
# Neutron instance will be disabled.
# state_sync_interval = 10
# Random additional delay between two runs of the state synchronization task.
# An additional wait time between 0 and max_random_sync_delay seconds
# will be added on top of state_sync_interval.
# max_random_sync_delay = 0
# Minimum delay, in seconds, between two status synchronization requests for NSX.
# Depending on chunk size, controller load, and other factors, state
# synchronization requests might be pretty heavy. This means the
# controller might take time to respond, and its load might increase
# considerably. This parameter allows specifying a minimum
# interval between two subsequent requests.
# The value for this parameter must never exceed state_sync_interval.
# If it does, an error will be raised at startup.
# min_sync_req_delay = 1
# Minimum number of resources to be retrieved from NSX in a single status
# synchronization request.
# The actual size of the chunk will increase if the number of resources is such
# that using the minimum chunk size will cause the interval between two
# requests to be less than min_sync_req_delay
# min_chunk_size = 500
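# As a rough illustration of the behaviour described above (numbers are
# hypothetical): with state_sync_interval = 10, min_sync_req_delay = 1 and
# about 10000 resources, a 500-resource chunk would mean 20 requests per
# interval (one every 0.5 seconds), so the chunk grows to roughly 1000 to keep
# requests at least 1 second apart.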
# Enable this option to allow punctual state synchronization on show
# operations. In this way, show operations will always fetch the operational
# status of the resource from the NSX backend, and this might have
# a considerable impact on overall performance.
# always_read_status = False
[nsx_lsn]
# Pull LSN information from NSX in case it is missing from the local
# data store. This is useful to rebuild the local store in case of
# server recovery
# sync_on_missing_data = False
[nsx_dhcp]
# (Optional) Comma-separated list of additional DNS servers. Default is an empty list
# extra_domain_name_servers =
# Domain to use for building the hostnames
# domain_name = openstacklocal
# Default DHCP lease time
# default_lease_time = 43200
[nsx_metadata]
# IP address used by Metadata server
# metadata_server_address = 127.0.0.1
# TCP Port used by Metadata server
# metadata_server_port = 8775
# When proxying metadata requests, Neutron signs the Instance-ID header with a
# shared secret to prevent spoofing. You may select any string for a secret,
# but it MUST match the configuration used by the Metadata server
# metadata_shared_secret =
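# Example (any random string works, as long as the Metadata server is
# configured with the same value; the value below is purely illustrative):
# metadata_shared_secret = some-random-secret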

View File

@ -1,14 +0,0 @@
# neutron-rootwrap command filters for nodes on which neutron is
# expected to control network
#
# This file should be owned by (and only-writeable by) the root user
# format seems to be
# cmd-name: filter-name, raw-command, user, args
[Filters]
# This is needed because we need to ping
# from inside a namespace, which requires root
ping: RegExpFilter, ping, root, ping, -w, \d+, -c, \d+, [0-9\.]+
ping6: RegExpFilter, ping6, root, ping6, -w, \d+, -c, \d+, [0-9A-Fa-f:]+

View File

@ -1,38 +0,0 @@
# neutron-rootwrap command filters for nodes on which neutron is
# expected to control network
#
# This file should be owned by (and only-writeable by) the root user
# format seems to be
# cmd-name: filter-name, raw-command, user, args
[Filters]
# dhcp-agent
dnsmasq: EnvFilter, dnsmasq, root, NEUTRON_NETWORK_ID=
# dhcp-agent uses kill as well, that's handled by the generic KillFilter
# it looks like these are the only signals needed, per
# neutron/agent/linux/dhcp.py
kill_dnsmasq: KillFilter, root, /sbin/dnsmasq, -9, -HUP
kill_dnsmasq_usr: KillFilter, root, /usr/sbin/dnsmasq, -9, -HUP
ovs-vsctl: CommandFilter, ovs-vsctl, root
ivs-ctl: CommandFilter, ivs-ctl, root
mm-ctl: CommandFilter, mm-ctl, root
dhcp_release: CommandFilter, dhcp_release, root
# metadata proxy
metadata_proxy: CommandFilter, neutron-ns-metadata-proxy, root
metadata_proxy_quantum: CommandFilter, quantum-ns-metadata-proxy, root
# If installed from source (say, by devstack), the prefix will be
# /usr/local instead of /usr/bin.
metadata_proxy_local: CommandFilter, /usr/local/bin/neutron-ns-metadata-proxy, root
metadata_proxy_local_quantum: CommandFilter, /usr/local/bin/quantum-ns-metadata-proxy, root
# RHEL invocation of the metadata proxy will report /usr/bin/python
kill_metadata: KillFilter, root, /usr/bin/python, -9
kill_metadata7: KillFilter, root, /usr/bin/python2.7, -9
kill_metadata6: KillFilter, root, /usr/bin/python2.6, -9
# ip_lib
ip: IpFilter, ip, root
ip_exec: IpNetnsExecFilter, ip, root

View File

@ -1,21 +0,0 @@
# neutron-rootwrap command filters for nodes on which neutron is
# expected to control network
#
# This file should be owned by (and only-writeable by) the root user
# format seems to be
# cmd-name: filter-name, raw-command, user, args
[Filters]
# neutron/agent/linux/iptables_manager.py
# "iptables-save", ...
iptables-save: CommandFilter, iptables-save, root
iptables-restore: CommandFilter, iptables-restore, root
ip6tables-save: CommandFilter, ip6tables-save, root
ip6tables-restore: CommandFilter, ip6tables-restore, root
# neutron/agent/linux/iptables_manager.py
# "iptables", "-A", ...
iptables: CommandFilter, iptables, root
ip6tables: CommandFilter, ip6tables, root

View File

@ -1,41 +0,0 @@
# neutron-rootwrap command filters for nodes on which neutron is
# expected to control network
#
# This file should be owned by (and only-writeable by) the root user
# format seems to be
# cmd-name: filter-name, raw-command, user, args
[Filters]
# arping
arping: CommandFilter, arping, root
# l3_agent
sysctl: CommandFilter, sysctl, root
route: CommandFilter, route, root
# metadata proxy
metadata_proxy: CommandFilter, neutron-ns-metadata-proxy, root
metadata_proxy_quantum: CommandFilter, quantum-ns-metadata-proxy, root
# If installed from source (say, by devstack), the prefix will be
# /usr/local instead of /usr/bin.
metadata_proxy_local: CommandFilter, /usr/local/bin/neutron-ns-metadata-proxy, root
metadata_proxy_local_quantum: CommandFilter, /usr/local/bin/quantum-ns-metadata-proxy, root
# RHEL invocation of the metadata proxy will report /usr/bin/python
kill_metadata: KillFilter, root, /usr/bin/python, -9
kill_metadata7: KillFilter, root, /usr/bin/python2.7, -9
kill_metadata6: KillFilter, root, /usr/bin/python2.6, -9
# ip_lib
ip: IpFilter, ip, root
ip_exec: IpNetnsExecFilter, ip, root
# ovs_lib (if OVSInterfaceDriver is used)
ovs-vsctl: CommandFilter, ovs-vsctl, root
# iptables_manager
iptables-save: CommandFilter, iptables-save, root
iptables-restore: CommandFilter, iptables-restore, root
ip6tables-save: CommandFilter, ip6tables-save, root
ip6tables-restore: CommandFilter, ip6tables-restore, root

View File

@ -1,26 +0,0 @@
# neutron-rootwrap command filters for nodes on which neutron is
# expected to control network
#
# This file should be owned by (and only-writeable by) the root user
# format seems to be
# cmd-name: filter-name, raw-command, user, args
[Filters]
# haproxy
haproxy: CommandFilter, haproxy, root
# lbaas-agent uses kill as well, that's handled by the generic KillFilter
kill_haproxy_usr: KillFilter, root, /usr/sbin/haproxy, -9, -HUP
ovs-vsctl: CommandFilter, ovs-vsctl, root
mm-ctl: CommandFilter, mm-ctl, root
# ip_lib
ip: IpFilter, ip, root
ip_exec: IpNetnsExecFilter, ip, root
route: CommandFilter, route, root
# arping
arping: CommandFilter, arping, root

View File

@ -1,19 +0,0 @@
# neutron-rootwrap command filters for nodes on which neutron is
# expected to control network
#
# This file should be owned by (and only-writeable by) the root user
# format seems to be
# cmd-name: filter-name, raw-command, user, args
[Filters]
# linuxbridge-agent
# unclear whether both variants are necessary, but I'm transliterating
# from the old mechanism
brctl: CommandFilter, brctl, root
bridge: CommandFilter, bridge, root
# ip_lib
ip: IpFilter, ip, root
ip_exec: IpNetnsExecFilter, ip, root

View File

@ -1,12 +0,0 @@
# neutron-rootwrap command filters for nodes on which neutron is
# expected to control network
#
# This file should be owned by (and only-writeable by) the root user
# format seems to be
# cmd-name: filter-name, raw-command, user, args
[Filters]
# nec_neutron_agent
ovs-vsctl: CommandFilter, ovs-vsctl, root

View File

@ -1,22 +0,0 @@
# neutron-rootwrap command filters for nodes on which neutron is
# expected to control network
#
# This file should be owned by (and only-writeable by) the root user
# format seems to be
# cmd-name: filter-name, raw-command, user, args
[Filters]
# openvswitch-agent
# unclear whether both variants are necessary, but I'm transliterating
# from the old mechanism
ovs-vsctl: CommandFilter, ovs-vsctl, root
ovs-ofctl: CommandFilter, ovs-ofctl, root
kill_ovsdb_client: KillFilter, root, /usr/bin/ovsdb-client, -9
ovsdb-client: CommandFilter, ovsdb-client, root
xe: CommandFilter, xe, root
# ip_lib
ip: IpFilter, ip, root
ip_exec: IpNetnsExecFilter, ip, root

View File

@ -1,21 +0,0 @@
# neutron-rootwrap command filters for nodes on which neutron is
# expected to control network
#
# This file should be owned by (and only-writeable by) the root user
# format seems to be
# cmd-name: filter-name, raw-command, user, args
[Filters]
# ryu-agent
# unclear whether both variants are necessary, but I'm transliterating
# from the old mechanism
# neutron/plugins/ryu/agent/ryu_neutron_agent.py:
# "ovs-vsctl", "--timeout=2", ...
ovs-vsctl: CommandFilter, ovs-vsctl, root
# neutron/plugins/ryu/agent/ryu_neutron_agent.py:
# "xe", "vif-param-get", ...
xe: CommandFilter, xe, root

View File

@ -1,13 +0,0 @@
# neutron-rootwrap command filters for nodes on which neutron is
# expected to control network
#
# This file should be owned by (and only-writeable by) the root user
# format seems to be
# cmd-name: filter-name, raw-command, user, args
[Filters]
ip: IpFilter, ip, root
ip_exec: IpNetnsExecFilter, ip, root
openswan: CommandFilter, ipsec, root

View File

@ -1,40 +0,0 @@
[radware]
#vdirect_address = 0.0.0.0
#ha_secondary_address=
#vdirect_user = vDirect
#vdirect_password = radware
#service_ha_pair = False
#service_throughput = 1000
#service_ssl_throughput = 200
#service_compression_throughput = 100
#service_cache = 20
#service_adc_type = VA
#service_adc_version=
#service_session_mirroring_enabled = False
#service_isl_vlan = -1
#service_resource_pool_ids = []
#actions_to_skip = 'setup_l2_l3'
#l4_action_name = 'BaseCreate'
#l2_l3_workflow_name = openstack_l2_l3
#l4_workflow_name = openstack_l4
#l2_l3_ctor_params = service: _REPLACE_, ha_network_name: HA-Network, ha_ip_pool_name: default, allocate_ha_vrrp: True, allocate_ha_ips: True
#l2_l3_setup_params = data_port: 1, data_ip_address: 192.168.200.99, data_ip_mask: 255.255.255.0, gateway: 192.168.200.1, ha_port: 2
[netscaler_driver]
#netscaler_ncc_uri = https://ncc_server.acme.org/ncc/v1/api
#netscaler_ncc_username = admin
#netscaler_ncc_password = secret
[heleoslb]
#esm_mgmt =
#admin_username =
#admin_password =
#lb_image =
#inband_id =
#oob_id =
#mgmt_id =
#dummy_utif_id =
#resource_pool_id =
#async_requests =
#lb_flavor = small
#sync_interval = 60

etc/tacker/api-paste.ini Normal file
View File

@ -0,0 +1,30 @@
[composite:tacker]
use = egg:Paste#urlmap
/: tackerversions
/v1.0: tackerapi_v1_0
[composite:tackerapi_v1_0]
use = call:tacker.auth:pipeline_factory
noauth = request_id catch_errors extensions tackerapiapp_v1_0
keystone = request_id catch_errors authtoken keystonecontext extensions tackerapiapp_v1_0
[filter:request_id]
paste.filter_factory = tacker.openstack.common.middleware.request_id:RequestIdMiddleware.factory
[filter:catch_errors]
paste.filter_factory = tacker.openstack.common.middleware.catch_errors:CatchErrorsMiddleware.factory
[filter:keystonecontext]
paste.filter_factory = tacker.auth:TackerKeystoneContext.factory
[filter:authtoken]
paste.filter_factory = keystoneclient.middleware.auth_token:filter_factory
[filter:extensions]
paste.filter_factory = tacker.api.extensions:extension_middleware_factory
[app:tackerversions]
paste.app_factory = tacker.api.versions:Versions.factory
[app:tackerapiapp_v1_0]
paste.app_factory = tacker.api.v1.router:APIRouter.factory
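# A quick sanity check of this pipeline (illustrative; assumes the server is
# running with the default bind_port 8888 from tacker.conf) is to request the
# version listing served by the tackerversions app:
#   curl http://127.0.0.1:8888/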

View File

@ -1,10 +1,10 @@
# Configuration for neutron-rootwrap
# Configuration for tacker-rootwrap
# This file should be owned by (and only-writeable by) the root user
[DEFAULT]
# List of directories to load filter definitions from (separated by ',').
# These directories MUST all be only writeable by root !
filters_path=/etc/neutron/rootwrap.d,/usr/share/neutron/rootwrap
filters_path=/etc/tacker/rootwrap.d,/usr/share/tacker/rootwrap
# List of directories to search executables in, in case filters do not
# explicitly specify a full path (separated by ',')

View File

@ -0,0 +1,21 @@
# tacker-rootwrap command filters for nodes on which tacker is
# expected to control network
#
# This file should be owned by (and only-writeable by) the root user
# format seems to be
# cmd-name: filter-name, raw-command, user, args
[Filters]
# rpc proxy
servicevm_ns_rpc_proxy: CommandFilter, tacker-servicevm-ns-rpc-proxy, root
# If installed from source (say, by devstack), the prefix will be
# /usr/local instead of /usr/bin.
servicevm_ns_rpc_proxy_local: CommandFilter, /usr/local/bin/tacker-servicevm-ns-rpc-proxy, root
# RHEL invocation of the metadata proxy will report /usr/bin/python
kill_metadata: KillFilter, root, /usr/bin/python, -9
kill_metadata7: KillFilter, root, /usr/bin/python2.7, -9
kill_metadata6: KillFilter, root, /usr/bin/python2.6, -9

View File

@ -5,9 +5,9 @@
# Print debugging output (set logging level to DEBUG instead of default WARNING level).
# debug = False
# Where to store Neutron state files. This directory must be writable by the
# Where to store Tacker state files. This directory must be writable by the
# user executing the agent.
# state_path = /var/lib/neutron
# state_path = /var/lib/tacker
# Where to store lock files
lock_path = $state_path/lock
@ -35,18 +35,18 @@ lock_path = $state_path/lock
# bind_host = 0.0.0.0
# Port to bind the API server to
# bind_port = 9696
# bind_port = 8888
# Path to the extensions. Note that this can be a colon-separated list of
# paths. For example:
# api_extensions_path = extensions:/path/to/more/extensions:/even/more/extensions
# The __path__ of neutron.extensions is appended to this, so if your
# The __path__ of tacker.extensions is appended to this, so if your
# extensions are in there you don't need to specify them here
# api_extensions_path =
# (StrOpt) Neutron core plugin entrypoint to be loaded from the
# neutron.core_plugins namespace. See setup.cfg for the entrypoint names of the
# plugins included in the neutron source distribution. For compatibility with
# (StrOpt) Tacker core plugin entrypoint to be loaded from the
# tacker.core_plugins namespace. See setup.cfg for the entrypoint names of the
# plugins included in the tacker source distribution. For compatibility with
# previous versions, the class name of a plugin can be specified instead of its
# entrypoint name.
#
@ -54,8 +54,8 @@ lock_path = $state_path/lock
# Example: core_plugin = ml2
# (ListOpt) List of service plugin entrypoints to be loaded from the
# neutron.service_plugins namespace. See setup.cfg for the entrypoint names of
# the plugins included in the neutron source distribution. For compatibility
# tacker.service_plugins namespace. See setup.cfg for the entrypoint names of
# the plugins included in the tacker source distribution. For compatibility
# with previous versions, the class name of a plugin can be specified instead
# of its entrypoint name.
#
@ -94,7 +94,7 @@ lock_path = $state_path/lock
# Enable or disable sorting
# allow_sorting = False
# Enable or disable overlapping IPs for subnets
# Attention: the following parameter MUST be set to False if Neutron is
# Attention: the following parameter MUST be set to False if Tacker is
# being used in conjunction with nova security groups
# allow_overlapping_ips = False
# Ensure that configured gateway is on subnet
@ -103,7 +103,7 @@ lock_path = $state_path/lock
# RPC configuration options. Defined in rpc __init__
# The messaging module to use, defaults to kombu.
# rpc_backend = neutron.openstack.common.rpc.impl_kombu
# rpc_backend = tacker.openstack.common.rpc.impl_kombu
# Size of RPC thread pool
# rpc_thread_pool_size = 64
# Size of RPC connection pool
@ -114,9 +114,9 @@ lock_path = $state_path/lock
# rpc_cast_timeout = 30
# Modules of exceptions that are permitted to be recreated
# upon receiving exception data from an rpc call.
# allowed_rpc_exception_modules = neutron.openstack.common.exception, nova.exception
# allowed_rpc_exception_modules = tacker.openstack.common.exception, nova.exception
# AMQP exchange to connect to if using RabbitMQ or QPID
# control_exchange = neutron
# control_exchange = tacker
# If passed, use a fake RabbitMQ provider
# fake_rabbit = False
@ -154,7 +154,7 @@ lock_path = $state_path/lock
# rabbit_ha_queues = false
# QPID
# rpc_backend=neutron.openstack.common.rpc.impl_qpid
# rpc_backend=tacker.openstack.common.rpc.impl_qpid
# Qpid broker hostname
# qpid_hostname = localhost
# Qpid broker port
@ -176,7 +176,7 @@ lock_path = $state_path/lock
# qpid_tcp_nodelay = True
# ZMQ
# rpc_backend=neutron.openstack.common.rpc.impl_zmq
# rpc_backend=tacker.openstack.common.rpc.impl_zmq
# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP.
# The "host" option should point or resolve to this address.
# rpc_zmq_bind_address = *
@ -190,11 +190,11 @@ lock_path = $state_path/lock
# Notification_driver can be defined multiple times
# Do nothing driver
# notification_driver = neutron.openstack.common.notifier.no_op_notifier
# notification_driver = tacker.openstack.common.notifier.no_op_notifier
# Logging driver
# notification_driver = neutron.openstack.common.notifier.log_notifier
# notification_driver = tacker.openstack.common.notifier.log_notifier
# RPC driver.
notification_driver = neutron.openstack.common.notifier.rpc_notifier
notification_driver = tacker.openstack.common.notifier.rpc_notifier
# default_notification_level is used to form actual topic name(s) or to set logging level
# default_notification_level = INFO
@ -231,19 +231,19 @@ notification_driver = neutron.openstack.common.notifier.rpc_notifier
# =========== items for agent scheduler extension =============
# Driver to use for scheduling network to DHCP agent
# network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.ChanceScheduler
# network_scheduler_driver = tacker.scheduler.dhcp_agent_scheduler.ChanceScheduler
# Driver to use for scheduling router to a default L3 agent
# router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.ChanceScheduler
# router_scheduler_driver = tacker.scheduler.l3_agent_scheduler.ChanceScheduler
# Driver to use for scheduling a loadbalancer pool to an lbaas agent
# loadbalancer_pool_scheduler_driver = neutron.services.loadbalancer.agent_scheduler.ChanceScheduler
# loadbalancer_pool_scheduler_driver = tacker.services.loadbalancer.agent_scheduler.ChanceScheduler
# Allow auto scheduling networks to DHCP agent. It will schedule non-hosted
# networks to first DHCP agent which sends get_active_networks message to
# neutron server
# tacker server
# network_auto_schedule = True
# Allow auto scheduling routers to L3 agent. It will schedule non-hosted
# routers to first L3 agent which sends sync_routers message to neutron server
# routers to first L3 agent which sends sync_routers message to tacker server
# router_auto_schedule = True
# Number of DHCP agents scheduled to host a network. This enables redundant
@ -295,7 +295,7 @@ notification_driver = neutron.openstack.common.notifier.rpc_notifier
# ======== end of WSGI parameters related to the API server ==========
# ======== neutron nova interactions ==========
# ======== tacker nova interactions ==========
# Send notification to nova when port status is active.
# notify_nova_on_port_status_changes = True
@ -330,11 +330,11 @@ notification_driver = neutron.openstack.common.notifier.rpc_notifier
# Number of seconds between sending events to nova if there are any events to send
# send_events_interval = 2
# ======== end of neutron nova interactions ==========
# ======== end of tacker nova interactions ==========
[quotas]
# Default driver to use for quota checks
# quota_driver = neutron.db.quota_db.DbQuotaDriver
# quota_driver = tacker.db.quota_db.DbQuotaDriver
# Resource name(s) that are supported in quota features
# quota_items = network,subnet,port
@ -386,7 +386,7 @@ notification_driver = neutron.openstack.common.notifier.rpc_notifier
# quota_floatingip = 50
[agent]
# Use "sudo neutron-rootwrap /etc/neutron/rootwrap.conf" to use the real
# Use "sudo tacker-rootwrap /etc/tacker/rootwrap.conf" to use the real
# root filter facility.
# Change to "sudo" to skip the filtering and just run the comand directly
# root_helper = sudo
@ -409,9 +409,9 @@ admin_password = %SERVICE_PASSWORD%
[database]
# This line MUST be changed to actually run the plugin.
# Example:
# connection = mysql://root:pass@127.0.0.1:3306/neutron
# connection = mysql://root:pass@127.0.0.1:3306/tacker
# Replace 127.0.0.1 above with the IP address of the database used by the
# main neutron server. (Leave it as is if the database runs on this host.)
# main tacker server. (Leave it as is if the database runs on this host.)
# connection = sqlite://
# NOTE: In deployment the [database] section and its connection attribute may
# be set in the corresponding core plugin '.ini' file. However, it is suggested
@ -465,15 +465,59 @@ admin_password = %SERVICE_PASSWORD%
# example of non-default provider:
# service_provider=FIREWALL:name2:firewall_driver_path
# --- Reference implementations ---
service_provider=LOADBALANCER:Haproxy:neutron.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default
service_provider=VPN:openswan:neutron.services.vpn.service_drivers.ipsec.IPsecVPNDriver:default
service_provider=LOADBALANCER:Haproxy:tacker.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default
#service_provider=VPN:openswan:tacker.services.vpn.service_drivers.ipsec.IPsecVPNDriver:default
# In order to activate Radware's lbaas driver you need to uncomment the next line.
# If you want to keep the HA Proxy as the default lbaas driver, remove the attribute default from the line below.
# Otherwise comment the HA Proxy line
# service_provider = LOADBALANCER:Radware:neutron.services.loadbalancer.drivers.radware.driver.LoadBalancerDriver:default
# service_provider = LOADBALANCER:Radware:tacker.services.loadbalancer.drivers.radware.driver.LoadBalancerDriver:default
# uncomment the following line to make the 'netscaler' LBaaS provider available.
# service_provider=LOADBALANCER:NetScaler:neutron.services.loadbalancer.drivers.netscaler.netscaler_driver.NetScalerPluginDriver
# service_provider=LOADBALANCER:NetScaler:tacker.services.loadbalancer.drivers.netscaler.netscaler_driver.NetScalerPluginDriver
# Uncomment the following line (and comment out the OpenSwan VPN line) to enable Cisco's VPN driver.
# service_provider=VPN:cisco:neutron.services.vpn.service_drivers.cisco_ipsec.CiscoCsrIPsecVPNDriver:default
# service_provider=VPN:cisco:tacker.services.vpn.service_drivers.cisco_ipsec.CiscoCsrIPsecVPNDriver:default
# Uncomment the line below to use Embrane heleos as Load Balancer service provider.
# service_provider=LOADBALANCER:Embrane:neutron.services.loadbalancer.drivers.embrane.driver.EmbraneLbaas:default
# service_provider=LOADBALANCER:Embrane:tacker.services.loadbalancer.drivers.embrane.driver.EmbraneLbaas:default
# Uncomment the following line to enable the nova VM based lbaas driver
# service_provider = LOADBALANCER:HostingDevice:tacker.services.loadbalancer.drivers.hosting_device.plugin_driver.HostingDevicePluginDriver:default
[servicevm]
# Specify drivers for hosting device
# example: device_driver = noop
# example: device_driver = nova
device_driver = nova
# Specify drivers for mgmt
# example: mgmt_driver = noop
mgmt_driver = noop
[servicevm_nova]
# parameters for novaclient to talk to nova
#project_id =
#auth_url =
#user_name =
#api_key =
#ca_file =
#insecure =
project_id = admin
auth_url = http://198.175.107.121:5000/v2.0
user_name = admin
api_key = admin-password
[servicevm_agent]
# VM agent requires that an interface driver be set. Choose the one that best
# matches your plugin.
# interface_driver =
# Example of interface_driver option for OVS based plugins (OVS, Ryu, NEC)
# that supports L3 agent
# interface_driver = tacker.agent.linux.interface.OVSInterfaceDriver
# Use veth for an OVS interface or not.
# Support kernels with limited namespace support
# (e.g. RHEL 6.5) so long as ovs_use_veth is set to True.
# ovs_use_veth = False
# Allow overlapping IP (Must have kernel build with CONFIG_NET_NS=y and
# iproute2 package that supports namespaces).
# use_namespaces = True

View File

@ -1,14 +0,0 @@
[DEFAULT]
# VPN-Agent configuration file
# Note that vpn-agent inherits from l3-agent, so l3-agent configuration options can also be used
[vpnagent]
# VPN device drivers which the vpn agent will use.
# If you want to use multiple drivers, define this option multiple times.
# vpn_device_driver=neutron.services.vpn.device_drivers.ipsec.OpenSwanDriver
# vpn_device_driver=neutron.services.vpn.device_drivers.cisco_ipsec.CiscoCsrIPsecDriver
# vpn_device_driver=another_driver
[ipsec]
# Status check interval
# ipsec_status_check_interval=60