octavia/etc/octavia.conf

[DEFAULT]
# Print debugging output (set logging level to DEBUG instead of the default WARNING level).
# debug = False
# bind_host = 127.0.0.1
# bind_port = 9876
# api_handler = queue_producer
#
# How authentication should be handled (keystone, noauth)
# auth_strategy = noauth
#
# Plugin options are hot_plug_plugin (Hot-pluggable controller plugin)
#
# octavia_plugins = hot_plug_plugin
# Hostname to be used by the host machine for services running on it.
# The default value is the hostname of the host machine.
# host =
# AMQP Transport URL
# For Single Host, specify one full transport URL:
# transport_url = rabbit://<user>:<pass>@127.0.0.1:5672/<vhost>
# For HA, specify queue nodes in cluster, comma delimited:
# transport_url = rabbit://<user>:<pass>@server01,<user>:<pass>@server02/<vhost>
# transport_url =
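#
# Example (hypothetical production values; the hostname, credentials, and
# vhost below are placeholders, not defaults):
# debug = False
# auth_strategy = keystone
# transport_url = rabbit://octavia:secret@rabbit-host:5672/octavia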
[database]
# This line MUST be changed to actually run the plugin.
# Example:
# connection = mysql+pymysql://root:pass@127.0.0.1:3306/octavia
# Replace 127.0.0.1 above with the IP address of the database used by the
# main octavia server. (Leave it as is if the database runs on this host.)
# connection = mysql+pymysql://
# NOTE: In deployment the [database] section and its connection attribute may
# be set in the corresponding core plugin '.ini' file. However, it is suggested
# to put the [database] section and its connection attribute in this
# configuration file.
[health_manager]
# bind_ip = 127.0.0.1
# bind_port = 5555
# controller_ip_port_list example: 127.0.0.1:5555, 192.168.0.1:5555
# controller_ip_port_list =
# failover_threads = 10
# status_update_threads = 50
# heartbeat_interval = 10
# heartbeat_key =
# heartbeat_timeout = 60
# health_check_interval = 3
# sock_rlimit = 0
# EventStreamer options are
# queue_event_streamer,
# noop_event_streamer
# event_streamer_driver = noop_event_streamer
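#
# Example (hypothetical two-controller deployment; the IPs and key are
# placeholders):
# bind_ip = 192.168.0.10
# controller_ip_port_list = 192.168.0.10:5555, 192.168.0.11:5555
# heartbeat_key = insecure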
[keystone_authtoken]
# This group of config options is imported from keystone middleware, so the
# option names should match the names declared in the middleware.
# auth_uri = https://localhost:5000/v3
# admin_user = octavia
# admin_password = password
# admin_tenant_name = service
# insecure = False
# cafile =
[certificates]
# Certificate Generator options are local_cert_generator
# anchor_cert_generator
# cert_generator = local_cert_generator
# For local certificate signing (development only):
# ca_certificate = /etc/ssl/certs/ssl-cert-snakeoil.pem
# ca_private_key = /etc/ssl/private/ssl-cert-snakeoil.key
# ca_private_key_passphrase =
# signing_digest = sha256
# storage_path = /var/lib/octavia/certificates/
# For TLS management
# Certificate Manager options are local_cert_manager
# barbican_cert_manager
# cert_manager = barbican_cert_manager
# For Barbican authentication (if using any Barbican based cert class)
# barbican_auth = barbican_acl_auth
#
# Region in Identity service catalog to use for communication with the Barbican service.
# region_name =
#
# Endpoint type to use for communication with the Barbican service.
# endpoint_type = publicURL
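#
# Example (hypothetical development-only setup using the local signer and
# manager; the snakeoil paths are the defaults above and are not suitable
# for production):
# cert_generator = local_cert_generator
# cert_manager = local_cert_manager
# ca_certificate = /etc/ssl/certs/ssl-cert-snakeoil.pem
# ca_private_key = /etc/ssl/private/ssl-cert-snakeoil.key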
[anchor]
# Use OpenStack anchor to sign the amphora REST API certificates
# url = http://localhost:9999/v1/sign/default
# username =
# password =
[networking]
# Network to communicate with amphora
# lb_network_name =
# The maximum attempts to retry an action with the networking service.
# max_retries = 15
# Seconds to wait before retrying an action with the networking service.
# retry_interval = 1
# The maximum time to wait, in seconds, for a port to detach from an amphora
# port_detach_timeout = 300
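#
# Example (hypothetical; the network name is a placeholder for the management
# network the amphorae are booted on):
# lb_network_name = lb-mgmt-net
# max_retries = 15
# retry_interval = 1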
[haproxy_amphora]
# base_path = /var/lib/octavia
# base_cert_dir = /var/lib/octavia/certs
# Absolute path to a custom HAProxy template file
# haproxy_template =
# connection_max_retries = 300
# connection_retry_interval = 5
# build_rate_limit = -1
# build_active_retries = 300
# build_retry_interval = 5
# This setting is deprecated. It is now automatically discovered.
# user_group =
# Maximum number of entries that can fit in the stick table.
# The size supports "k", "m", "g" suffixes.
# haproxy_stick_size = 10k
# REST Driver specific
# bind_host = 0.0.0.0
# bind_port = 9443
#
# This setting is only needed when IPv6 link-local addresses (fe80::/64) are
# used for communication between Octavia and its Amphorae; if IPv4 or other
# IPv6 addresses are used, it can be ignored.
# lb_network_interface = o-hm0
#
# haproxy_cmd = /usr/sbin/haproxy
# respawn_count = 2
# respawn_interval = 2
# client_cert = /etc/octavia/certs/client.pem
# server_ca = /etc/octavia/certs/server_ca.pem
#
# This setting is deprecated. It is now automatically discovered.
# use_upstart = True
#
# rest_request_conn_timeout = 10
# rest_request_read_timeout = 60
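#
# Example (hypothetical tuning; throttles amphora builds to two at a time
# instead of the unlimited default of -1):
# build_rate_limit = 2
# build_retry_interval = 5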
[controller_worker]
# workers = 1
# amp_active_retries = 10
# amp_active_wait_sec = 10
# Glance parameters to extract the image ID to use for amphorae. Only one of
# these parameters is needed. Using tags is the recommended way to refer to images.
# amp_image_id =
# amp_image_tag =
# Optional owner ID used to restrict glance images to one owner ID.
# This is a recommended security setting.
# amp_image_owner_id =
# Nova parameters to use when booting amphora
# amp_flavor_id =
# amp_ssh_key_name =
# amp_ssh_access_allowed = True
# Networks to attach to the Amphorae. Examples:
# - One primary network
# - - amp_boot_network_list = 22222222-3333-4444-5555-666666666666
# - Multiple networks
# - - amp_boot_network_list = 11111111-2222-3333-4444-555555555555, 22222222-3333-4444-5555-666666666666
# - All networks defined in the list will be attached to each amphora
# amp_boot_network_list =
# amp_secgroup_list =
# client_ca = /etc/octavia/certs/ca_01.pem
# Amphora driver options are amphora_noop_driver,
# amphora_haproxy_rest_driver
#
# amphora_driver = amphora_noop_driver
#
# Compute driver options are compute_noop_driver
# compute_nova_driver
#
# compute_driver = compute_noop_driver
#
# Network driver options are network_noop_driver
# allowed_address_pairs_driver
#
# network_driver = network_noop_driver
#
# Load balancer topology options are SINGLE, ACTIVE_STANDBY
# loadbalancer_topology = SINGLE
# user_data_config_drive = False
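#
# Example (hypothetical active/standby deployment; the image tag, flavor ID,
# network UUID, and security group name are placeholders):
# amp_image_tag = amphora
# amp_flavor_id = 10
# amp_boot_network_list = 22222222-3333-4444-5555-666666666666
# amp_secgroup_list = lb-mgmt-sec-grp
# amphora_driver = amphora_haproxy_rest_driver
# compute_driver = compute_nova_driver
# network_driver = allowed_address_pairs_driver
# loadbalancer_topology = ACTIVE_STANDBY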
[task_flow]
# engine = serial
# max_workers = 5
[oslo_messaging]
# Queue Consumer Thread Pool Size
# rpc_thread_pool_size = 2
# Topic (i.e. Queue) Name
# topic = octavia_prov
# Topic for octavia's events sent to a queue
# event_stream_topic = neutron_lbaas_event
[house_keeping]
# Interval in seconds to initiate spare amphora checks
# spare_check_interval = 30
# spare_amphora_pool_size = 0
# Cleanup interval for Deleted amphora
# cleanup_interval = 30
# Amphora expiry age in seconds. Default is 1 week
# amphora_expiry_age = 604800
# Load balancer expiry age in seconds. Default is 1 week
# load_balancer_expiry_age = 604800
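#
# Example (hypothetical; keeps two spare amphorae booted and ready, and
# expires deleted amphora records after one day instead of one week):
# spare_amphora_pool_size = 2
# amphora_expiry_age = 86400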
[amphora_agent]
# agent_server_ca = /etc/octavia/certs/client_ca.pem
# agent_server_cert = /etc/octavia/certs/server.pem
# Defaults for agent_server_network_dir when not specified here are:
# Ubuntu: /etc/netns/amphora-haproxy/network/interfaces.d/
# CentOS/Fedora/RHEL: /etc/netns/amphora-haproxy/sysconfig/network-scripts/
#
# agent_server_network_dir =
# agent_server_network_file =
# agent_request_read_timeout = 120
[keepalived_vrrp]
# Amphora Role/Priority advertisement interval in seconds
# vrrp_advert_int = 1
# Service health check interval and success/fail count
# vrrp_check_interval = 5
# vrrp_fail_count = 2
# vrrp_success_count = 2
# Amphora MASTER gratuitous ARP refresh settings
# vrrp_garp_refresh_interval = 5
# vrrp_garp_refresh_count = 2
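#
# Example (hypothetical faster failover; the BACKUP amphora takes over after
# roughly vrrp_check_interval * vrrp_fail_count = 2 * 2 = 4 seconds of
# failed health checks):
# vrrp_advert_int = 1
# vrrp_check_interval = 2
# vrrp_fail_count = 2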
[service_auth]
# memcached_servers =
# signing_dir =
# cafile = /opt/stack/data/ca-bundle.pem
# project_domain_name = Default
# project_name = admin
# user_domain_name = Default
# password = password
# username = admin
# auth_type = password
# auth_url = http://localhost:5555/
[nova]
# The name of the nova service in the keystone catalog
# service_name =
# Custom nova endpoint if override is necessary
# endpoint =
# Region in Identity service catalog to use for communication with the
# OpenStack services.
# region_name =
# Endpoint type in Identity service catalog to use for communication with
# the OpenStack services.
# endpoint_type = publicURL
# CA certificates file to verify nova connections when TLS is enabled
# ca_certificates_file =
# Disable certificate validation on SSL connections
# insecure = False
# If non-zero, generate a random name of the length provided for each amphora,
# in the format "a[A-Z0-9]*".
# Otherwise, the default name format will be used: "amphora-{UUID}".
# random_amphora_name_length = 0
#
# Availability zone to use for creating Amphorae
# availability_zone =
# Enable anti-affinity in nova
# enable_anti_affinity = False
# Set the anti-affinity policy to what is suitable.
# Nova supports: anti-affinity and soft-anti-affinity
# anti_affinity_policy = anti-affinity
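#
# Example (hypothetical; spreads the MASTER and BACKUP amphorae of an
# ACTIVE_STANDBY load balancer across different compute hosts):
# enable_anti_affinity = True
# anti_affinity_policy = anti-affinity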
[glance]
# The name of the glance service in the keystone catalog
# service_name =
# Custom glance endpoint if override is necessary
# endpoint =
# Region in Identity service catalog to use for communication with the
# OpenStack services.
# region_name =
# Endpoint type in Identity service catalog to use for communication with
# the OpenStack services.
# endpoint_type = publicURL
# CA certificates file to verify glance connections when TLS is enabled
# insecure = False
# ca_certificates_file =
[neutron]
# The name of the neutron service in the keystone catalog
# service_name =
# Custom neutron endpoint if override is necessary
# endpoint =
# Region in Identity service catalog to use for communication with the
# OpenStack services.
# region_name =
# Endpoint type in Identity service catalog to use for communication with
# the OpenStack services.
# endpoint_type = publicURL
# CA certificates file to verify neutron connections when TLS is enabled
# insecure = False
# ca_certificates_file =
[quotas]
# default_load_balancer_quota = -1
# default_listener_quota = -1
# default_member_quota = -1
# default_pool_quota = -1
# default_health_monitor_quota = -1
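#
# Example (hypothetical per-project limits; -1 means unlimited):
# default_load_balancer_quota = 10
# default_listener_quota = 20
# default_member_quota = 100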