# neutron.conf -- main configuration file for the OpenStack Neutron server.
# Read by oslo.config; full-line '#' comments only. Commented-out options show
# their default values.
[DEFAULT]
# Print more verbose output (set logging level to INFO instead of default WARNING level).
# verbose = False

# =========Start Global Config Option for Distributed L3 Router===============
# Setting the "router_distributed" flag to "True" will default to the creation
# of distributed tenant routers. The admin can override this flag by specifying
# the type of the router on the create request (admin-only attribute). Default
# value is "False" to support legacy mode (centralized) routers.
#
# router_distributed = False
#
# ===========End Global Config Option for Distributed L3 Router===============

# Print debugging output (set logging level to DEBUG instead of default WARNING level).
# debug = False

# Where to store Neutron state files. This directory must be writable by the
# user executing the agent.
# state_path = /var/lib/neutron

# Where to store lock files
lock_path = $state_path/lock

# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s
# log_date_format = %Y-%m-%d %H:%M:%S

# Log routing summary (which option sends output where):
# use_syslog -> syslog
# log_file and log_dir -> log_dir/log_file
# (not log_file) and log_dir -> log_dir/{binary_name}.log
# use_stderr -> stderr
# (not use_stderr) and (not log_file) -> stdout
# publish_errors -> notification system

# use_syslog = False
# syslog_log_facility = LOG_USER

# use_stderr = True
# log_file =
# log_dir =

# publish_errors = False

# Address to bind the API server to
# bind_host = 0.0.0.0

# Port to bind the API server to
# bind_port = 9696

# Path to the extensions. Note that this can be a colon-separated list of
# paths. For example:
# api_extensions_path = extensions:/path/to/more/extensions:/even/more/extensions
# The __path__ of neutron.extensions is appended to this, so if your
# extensions are in there you don't need to specify them here
# api_extensions_path =

# (StrOpt) Neutron core plugin entrypoint to be loaded from the
# neutron.core_plugins namespace. See setup.cfg for the entrypoint names of the
# plugins included in the neutron source distribution. For compatibility with
# previous versions, the class name of a plugin can be specified instead of its
# entrypoint name.
#
# core_plugin =
# Example: core_plugin = ml2

# (ListOpt) List of service plugin entrypoints to be loaded from the
# neutron.service_plugins namespace. See setup.cfg for the entrypoint names of
# the plugins included in the neutron source distribution. For compatibility
# with previous versions, the class name of a plugin can be specified instead
# of its entrypoint name.
#
# service_plugins =
# Example: service_plugins = router,firewall,lbaas,vpnaas,metering

# Paste configuration file
# api_paste_config = api-paste.ini

# The strategy to be used for auth.
# Supported values are 'keystone'(default), 'noauth'.
# auth_strategy = keystone

# Base MAC address. The first 3 octets will remain unchanged. If the
# 4th octet is not 00, it will also be used. The others will be
# randomly generated.
# 3 octet
# base_mac = fa:16:3e:00:00:00
# 4 octet
# base_mac = fa:16:3e:4f:00:00

# DVR Base MAC address. The first 3 octets will remain unchanged. If the
# 4th octet is not 00, it will also be used. The others will be randomly
# generated. The 'dvr_base_mac' *must* be different from 'base_mac' to
# avoid mixing them up with MAC's allocated for tenant ports.
# A 4 octet example would be dvr_base_mac = fa:16:3f:4f:00:00
# The default is 3 octet
# dvr_base_mac = fa:16:3f:00:00:00

# Maximum amount of retries to generate a unique MAC address
# mac_generation_retries = 16

# DHCP Lease duration (in seconds). Use -1 to
# tell dnsmasq to use infinite lease times.
# dhcp_lease_duration = 86400

# Allow sending resource operation notification to DHCP agent
# dhcp_agent_notification = True

# Enable or disable bulk create/update/delete operations
# allow_bulk = True
# Enable or disable pagination
# allow_pagination = False
# Enable or disable sorting
# allow_sorting = False
# Enable or disable overlapping IPs for subnets
# Attention: the following parameter MUST be set to False if Neutron is
# being used in conjunction with nova security groups
# allow_overlapping_ips = False
# Ensure that configured gateway is on subnet
# force_gateway_on_subnet = False

# Default maximum number of items returned in a single response,
# value == infinite and value < 0 means no max limit, and value must
# be greater than 0. If the number of items requested is greater than
# pagination_max_limit, server will just return pagination_max_limit
# of number of items.
# pagination_max_limit = -1

# Maximum number of DNS nameservers per subnet
# max_dns_nameservers = 5

# Maximum number of host routes per subnet
# max_subnet_host_routes = 20

# Maximum number of fixed ips per port
# max_fixed_ips_per_port = 5

# Maximum number of routes per router
# max_routes = 30

# =========== items for agent management extension =============
# Seconds to regard the agent as down; should be at least twice
# report_interval, to be sure the agent is down for good
# agent_down_time = 75
# =========== end of items for agent management extension =====

# =========== items for agent scheduler extension =============
# Driver to use for scheduling network to DHCP agent
# network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.ChanceScheduler
# Driver to use for scheduling router to a default L3 agent
# router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.ChanceScheduler
# Driver to use for scheduling a loadbalancer pool to an lbaas agent
# loadbalancer_pool_scheduler_driver = neutron.services.loadbalancer.agent_scheduler.ChanceScheduler

# Allow auto scheduling networks to DHCP agent. It will schedule non-hosted
# networks to first DHCP agent which sends get_active_networks message to
# neutron server
# network_auto_schedule = True

# Allow auto scheduling routers to L3 agent. It will schedule non-hosted
# routers to first L3 agent which sends sync_routers message to neutron server
# router_auto_schedule = True

# Number of DHCP agents scheduled to host a network. This enables redundant
# DHCP agents for configured networks.
# dhcp_agents_per_network = 1

# =========== end of items for agent scheduler extension =====

# =========== WSGI parameters related to the API server ==============
# Number of separate worker processes to spawn. The default, 0, runs the
# worker thread in the current process. Greater than 0 launches that number of
# child processes as workers. The parent process manages them.
# api_workers = 0

# Number of separate RPC worker processes to spawn. The default, 0, runs the
# worker thread in the current process. Greater than 0 launches that number of
# child processes as RPC workers. The parent process manages them.
# This feature is experimental until issues are addressed and testing has been
# enabled for various plugins for compatibility.
# rpc_workers = 0

# Sets the value of TCP_KEEPIDLE in seconds to use for each server socket when
# starting API server. Not supported on OS X.
# tcp_keepidle = 600

# Number of seconds to keep retrying to listen
# retry_until_window = 30

# Number of backlog requests to configure the socket with.
# backlog = 4096

# Max header line to accommodate large tokens
# max_header_line = 16384

# Enable SSL on the API server
# use_ssl = False

# Certificate file to use when starting API server securely
# ssl_cert_file = /path/to/certfile

# Private key file to use when starting API server securely
# ssl_key_file = /path/to/keyfile

# CA certificate file to use when starting API server securely to
# verify connecting clients. This is an optional parameter only required if
# API clients need to authenticate to the API server using SSL certificates
# signed by a trusted CA
# ssl_ca_file = /path/to/cafile
# ======== end of WSGI parameters related to the API server ==========


# ======== neutron nova interactions ==========
# Send notification to nova when port status is active.
# notify_nova_on_port_status_changes = True

# Send notifications to nova when port data (fixed_ips/floatingips) change
# so nova can update its cache.
# notify_nova_on_port_data_changes = True

# URL for connection to nova (Only supports one nova region currently).
# nova_url = http://127.0.0.1:8774/v2

# Name of nova region to use. Useful if keystone manages more than one region
# nova_region_name =

# Username for connection to nova in admin context
# nova_admin_username =

# The uuid of the admin nova tenant
# nova_admin_tenant_id =

# Password for connection to nova in admin context.
# nova_admin_password =

# Authorization URL for connection to nova in admin context.
# nova_admin_auth_url =

# CA file for novaclient to verify server certificates
# nova_ca_certificates_file =

# Boolean to control ignoring SSL errors on the nova url
# nova_api_insecure = False

# Number of seconds between sending events to nova if there are any events to send
# send_events_interval = 2

# ======== end of neutron nova interactions ==========

#
# Options defined in oslo.messaging
#

# Use durable queues in amqp. (boolean value)
# Deprecated group/name - [DEFAULT]/rabbit_durable_queues
#amqp_durable_queues=false

# Auto-delete queues in amqp. (boolean value)
#amqp_auto_delete=false

# Size of RPC connection pool. (integer value)
#rpc_conn_pool_size=30

# Qpid broker hostname. (string value)
#qpid_hostname=localhost

# Qpid broker port. (integer value)
#qpid_port=5672

# Qpid HA cluster host:port pairs. (list value)
#qpid_hosts=$qpid_hostname:$qpid_port

# Username for Qpid connection. (string value)
#qpid_username=

# Password for Qpid connection. (string value)
#qpid_password=

# Space separated list of SASL mechanisms to use for auth.
# (string value)
#qpid_sasl_mechanisms=

# Seconds between connection keepalive heartbeats. (integer
# value)
#qpid_heartbeat=60

# Transport to use, either 'tcp' or 'ssl'. (string value)
#qpid_protocol=tcp

# Whether to disable the Nagle algorithm. (boolean value)
#qpid_tcp_nodelay=true

# The qpid topology version to use. Version 1 is what was
# originally used by impl_qpid. Version 2 includes some
# backwards-incompatible changes that allow broker federation
# to work. Users should update to version 2 when they are
# able to take everything down, as it requires a clean break.
# (integer value)
#qpid_topology_version=1

# SSL version to use (valid only if SSL enabled). valid values
# are TLSv1, SSLv23 and SSLv3. SSLv2 may be available on some
# distributions. (string value)
#kombu_ssl_version=

# SSL key file (valid only if SSL enabled). (string value)
#kombu_ssl_keyfile=

# SSL cert file (valid only if SSL enabled). (string value)
#kombu_ssl_certfile=

# SSL certification authority file (valid only if SSL
# enabled). (string value)
#kombu_ssl_ca_certs=

# How long to wait before reconnecting in response to an AMQP
# consumer cancel notification. (floating point value)
#kombu_reconnect_delay=1.0

# The RabbitMQ broker address where a single node is used.
# (string value)
#rabbit_host=localhost

# The RabbitMQ broker port where a single node is used.
# (integer value)
#rabbit_port=5672

# RabbitMQ HA cluster host:port pairs. (list value)
#rabbit_hosts=$rabbit_host:$rabbit_port

# Connect over SSL for RabbitMQ. (boolean value)
#rabbit_use_ssl=false

# The RabbitMQ userid. (string value)
#rabbit_userid=guest

# The RabbitMQ password. (string value)
#rabbit_password=guest

# the RabbitMQ login method (string value)
#rabbit_login_method=AMQPLAIN

# The RabbitMQ virtual host. (string value)
#rabbit_virtual_host=/

# How frequently to retry connecting with RabbitMQ. (integer
# value)
#rabbit_retry_interval=1

# How long to backoff for between retries when connecting to
# RabbitMQ. (integer value)
#rabbit_retry_backoff=2

# Maximum number of RabbitMQ connection retries. Default is 0
# (infinite retry count). (integer value)
#rabbit_max_retries=0

# Use HA queues in RabbitMQ (x-ha-policy: all). If you change
# this option, you must wipe the RabbitMQ database. (boolean
# value)
#rabbit_ha_queues=false

# If passed, use a fake RabbitMQ provider. (boolean value)
#fake_rabbit=false

# ZeroMQ bind address. Should be a wildcard (*), an ethernet
# interface, or IP. The "host" option should point or resolve
# to this address. (string value)
#rpc_zmq_bind_address=*

# MatchMaker driver. (string value)
#rpc_zmq_matchmaker=oslo.messaging._drivers.matchmaker.MatchMakerLocalhost

# ZeroMQ receiver listening port. (integer value)
#rpc_zmq_port=9501

# Number of ZeroMQ contexts, defaults to 1. (integer value)
#rpc_zmq_contexts=1

# Maximum number of ingress messages to locally buffer per
# topic. Default is unlimited. (integer value)
#rpc_zmq_topic_backlog=<None>

# Directory for holding IPC sockets. (string value)
#rpc_zmq_ipc_dir=/var/run/openstack

# Name of this node. Must be a valid hostname, FQDN, or IP
# address. Must match "host" option, if running Nova. (string
# value)
#rpc_zmq_host=oslo

# Seconds to wait before a cast expires (TTL). Only supported
# by impl_zmq. (integer value)
#rpc_cast_timeout=30

# Heartbeat frequency. (integer value)
#matchmaker_heartbeat_freq=300

# Heartbeat time-to-live. (integer value)
#matchmaker_heartbeat_ttl=600

# Size of RPC greenthread pool. (integer value)
#rpc_thread_pool_size=64

# Driver or drivers to handle sending notifications. (multi
# valued)
#notification_driver=

# AMQP topic used for OpenStack notifications. (list value)
# Deprecated group/name - [rpc_notifier2]/topics
#notification_topics=notifications

# Seconds to wait for a response from a call. (integer value)
#rpc_response_timeout=60

# A URL representing the messaging driver to use and its full
# configuration. If not set, we fall back to the rpc_backend
# option and driver specific configuration. (string value)
#transport_url=<None>

# The messaging driver to use, defaults to rabbit. Other
# drivers include qpid and zmq. (string value)
#rpc_backend=rabbit

# The default exchange under which topics are scoped. May be
# overridden by an exchange name specified in the
# transport_url option. (string value)
#control_exchange=openstack

[matchmaker_redis]

#
# Options defined in oslo.messaging
#

# Host to locate redis. (string value)
#host=127.0.0.1

# Use this port to connect to redis host. (integer value)
#port=6379

# Password for Redis server (optional). (string value)
#password=<None>

[matchmaker_ring]

#
# Options defined in oslo.messaging
#

# Matchmaker ring file (JSON). (string value)
# Deprecated group/name - [DEFAULT]/matchmaker_ringfile
#ringfile=/etc/oslo/matchmaker_ring.json

[quotas]
# Default driver to use for quota checks
# quota_driver = neutron.db.quota_db.DbQuotaDriver

# Resource name(s) that are supported in quota features
# quota_items = network,subnet,port

# Default number of resource allowed per tenant. A negative value means
# unlimited.
# default_quota = -1

# Number of networks allowed per tenant. A negative value means unlimited.
# quota_network = 10

# Number of subnets allowed per tenant. A negative value means unlimited.
# quota_subnet = 10

# Number of ports allowed per tenant. A negative value means unlimited.
# quota_port = 50

# Number of security groups allowed per tenant. A negative value means
# unlimited.
# quota_security_group = 10

# Number of security group rules allowed per tenant. A negative value means
# unlimited.
# quota_security_group_rule = 100

# Number of vips allowed per tenant. A negative value means unlimited.
# quota_vip = 10

# Number of pools allowed per tenant. A negative value means unlimited.
# quota_pool = 10

# Number of pool members allowed per tenant. A negative value means unlimited.
# The default is unlimited because a member is not a real resource consumer
# on OpenStack. However, on back-end, a member is a resource consumer
# and that is the reason why quota is possible.
# quota_member = -1

# Number of health monitors allowed per tenant. A negative value means
# unlimited.
# The default is unlimited because a health monitor is not a real resource
# consumer on OpenStack. However, on back-end, a member is a resource consumer
# and that is the reason why quota is possible.
# quota_health_monitor = -1

# Number of routers allowed per tenant. A negative value means unlimited.
# quota_router = 10

# Number of floating IPs allowed per tenant. A negative value means unlimited.
# quota_floatingip = 50

[agent]
# Use "sudo neutron-rootwrap /etc/neutron/rootwrap.conf" to use the real
# root filter facility.
# Change to "sudo" to skip the filtering and just run the command directly
# root_helper = sudo

# =========== items for agent management extension =============
# seconds between nodes reporting state to server; should be less than
# agent_down_time, best if it is half or less than agent_down_time
# report_interval = 30

# =========== end of items for agent management extension =====

[keystone_authtoken]
# Keystone endpoint and admin credentials used to validate API tokens.
# The %SERVICE_*% placeholders MUST be replaced with real values at
# deployment time (typically by the installer or config management).
auth_host = 127.0.0.1
auth_port = 35357
auth_protocol = http
admin_tenant_name = %SERVICE_TENANT_NAME%
admin_user = %SERVICE_USER%
admin_password = %SERVICE_PASSWORD%

[database]
# This line MUST be changed to actually run the plugin.
# Example:
# connection = mysql://root:pass@127.0.0.1:3306/neutron
# Replace 127.0.0.1 above with the IP address of the database used by the
# main neutron server. (Leave it as is if the database runs on this host.)
# connection = sqlite://
# NOTE: In deployment the [database] section and its connection attribute may
# be set in the corresponding core plugin '.ini' file. However, it is suggested
# to put the [database] section and its connection attribute in this
# configuration file.

# Database engine for which script will be generated when using offline
# migration
# engine =

# The SQLAlchemy connection string used to connect to the slave database
# slave_connection =

# Database reconnection retry times - in event connectivity is lost
# set to -1 implies an infinite retry count
# max_retries = 10

# Database reconnection interval in seconds - if the initial connection to the
# database fails
# retry_interval = 10

# Minimum number of SQL connections to keep open in a pool
# min_pool_size = 1

# Maximum number of SQL connections to keep open in a pool
# max_pool_size = 10

# Timeout in seconds before idle sql connections are reaped
# idle_timeout = 3600

# If set, use this value for max_overflow with sqlalchemy
# max_overflow = 20

# Verbosity of SQL debugging information. 0=None, 100=Everything
# connection_debug = 0

# Add python stack traces to SQL as comment strings
# connection_trace = False

# If set, use this value for pool_timeout with sqlalchemy
# pool_timeout = 10

[service_providers]
# Specify service providers (drivers) for advanced services like loadbalancer, VPN, Firewall.
# Must be in form:
# service_provider=<service_type>:<name>:<driver>[:default]
# List of allowed service types includes LOADBALANCER, FIREWALL, VPN
# Combination of <service type> and <name> must be unique; <driver> must also be unique
# This is multiline option, example for default provider:
# service_provider=LOADBALANCER:name:lbaas_plugin_driver_path:default
# example of non-default provider:
# service_provider=FIREWALL:name2:firewall_driver_path
# --- Reference implementations ---
service_provider=LOADBALANCER:Haproxy:neutron.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default
service_provider=VPN:openswan:neutron.services.vpn.service_drivers.ipsec.IPsecVPNDriver:default
# In order to activate Radware's lbaas driver you need to uncomment the next line.
# If you want to keep the HA Proxy as the default lbaas driver, remove the attribute default from the line below.
# Otherwise comment the HA Proxy line
# service_provider = LOADBALANCER:Radware:neutron.services.loadbalancer.drivers.radware.driver.LoadBalancerDriver:default
# uncomment the following line to make the 'netscaler' LBaaS provider available.
# service_provider=LOADBALANCER:NetScaler:neutron.services.loadbalancer.drivers.netscaler.netscaler_driver.NetScalerPluginDriver
# Uncomment the following line (and comment out the OpenSwan VPN line) to enable Cisco's VPN driver.
# service_provider=VPN:cisco:neutron.services.vpn.service_drivers.cisco_ipsec.CiscoCsrIPsecVPNDriver:default
# Uncomment the line below to use Embrane heleos as Load Balancer service provider.
# service_provider=LOADBALANCER:Embrane:neutron.services.loadbalancer.drivers.embrane.driver.EmbraneLbaas:default
# Uncomment the following line to test the LBaaS v2 API _WITHOUT_ a real backend
# service_provider = LOADBALANCER:LoggingNoop:neutron.services.loadbalancer.drivers.logging_noop.driver.LoggingNoopLoadBalancerDriver:default