[DEFAULT]
# Print debugging output (set logging level to DEBUG instead of default WARNING level).
# debug = False

# Plugin options are hot_plug_plugin (Hot-pluggable controller plugin)
# octavia_plugins = hot_plug_plugin

# Hostname to be used by the host machine for services running on it.
# The default value is the hostname of the host machine.
# host =

# AMQP Transport URL
# For Single Host, specify one full transport URL:
# transport_url = rabbit://<user>:<pass>@127.0.0.1:5672/
# For HA, specify queue nodes in cluster, comma delimited:
# transport_url = rabbit://<user>:<pass>@server01,<user>:<pass>@server02/
# transport_url =

[api_settings]
# bind_host = 127.0.0.1
# bind_port = 9876

# api_handler = queue_producer

# How should authentication be handled (keystone, noauth)
# auth_strategy = keystone

# allow_pagination = True
# allow_sorting = True
# pagination_max_limit = 1000
# Base URI for the API for use in pagination links.
# This will be autodetected from the request if not overridden here.
# Example:
#   api_base_uri = http://localhost:9876
# api_base_uri =

# Enable/disable exposing API endpoints. By default, both v1 and v2 are enabled.
# api_v1_enabled = True
# api_v2_enabled = True

# Enable/disable ability for users to create TLS Terminated listeners
# allow_tls_terminated_listeners = True

[database]
# This line MUST be changed to actually run the plugin.
# Example:
# connection = mysql+pymysql://root:pass@127.0.0.1:3306/octavia
# Replace 127.0.0.1 above with the IP address of the database used by the
# main octavia server. (Leave it as is if the database runs on this host.)
# connection = mysql+pymysql://

# NOTE: In deployment the [database] section and its connection attribute may
# be set in the corresponding core plugin '.ini' file. However, it is suggested
# to put the [database] section and its connection attribute in this
# configuration file.

[health_manager]
# bind_ip = 127.0.0.1
# bind_port = 5555
# controller_ip_port_list example: 127.0.0.1:5555, 127.0.0.1:5555
# controller_ip_port_list =
# failover_threads = 10
# status_update_threads will default to the number of processors on the host
# status_update_threads =
# heartbeat_interval = 10
# heartbeat_key =
# heartbeat_timeout = 60
# health_check_interval = 3
# sock_rlimit = 0

# Health/StatsUpdate options are
#                          *_db
#                          *_logger
# health_update_driver = health_db
# stats_update_driver = stats_db

# EventStreamer options are
#                          queue_event_streamer,
#                          noop_event_streamer
# event_streamer_driver = noop_event_streamer

# Enable provisioning status sync with neutron db
# sync_provisioning_status = False
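
# Example (illustrative only): in a deployment with two controller hosts, the
# health manager options above might be filled in roughly as follows. The
# addresses and the key are placeholders, not recommended values:
#
#     bind_ip = 192.0.2.10
#     controller_ip_port_list = 192.0.2.10:5555, 192.0.2.11:5555
#     heartbeat_key = insecure-example-key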

[keystone_authtoken]
# This group of config options is imported from keystone middleware, so the
# option names should match the names declared in the middleware.
# The auth_uri is the public endpoint and is returned in headers on a 401
# auth_uri = https://localhost:5000/v3
# The auth_url is the admin endpoint actually used for validating tokens
# auth_url = https://localhost:35357/v3
# username = octavia
# password = password
# project_name = service
# Domain names must be set, these are *not* default but work for most clouds
# project_domain_name = Default
# user_domain_name = Default
# insecure = False
# cafile =

[certificates]
# Certificate Generator options are local_cert_generator
#                                   anchor_cert_generator
# cert_generator = local_cert_generator

# For local certificate signing (development only):
# ca_certificate = /etc/ssl/certs/ssl-cert-snakeoil.pem
# ca_private_key = /etc/ssl/private/ssl-cert-snakeoil.key
# ca_private_key_passphrase =
# signing_digest = sha256
# storage_path = /var/lib/octavia/certificates/

# For TLS management
# Certificate Manager options are local_cert_manager
#                                 barbican_cert_manager
#                                 castellan_cert_manager
# cert_manager = barbican_cert_manager

# For Barbican authentication (if using any Barbican based cert class)
# barbican_auth = barbican_acl_auth
#
# Region in Identity service catalog to use for communication with the Barbican service.
# region_name =
#
# Endpoint type to use for communication with the Barbican service.
# endpoint_type = publicURL

[anchor]
# Use OpenStack anchor to sign the amphora REST API certificates
# url = http://localhost:9999/v1/sign/default
# username =
# password =

[networking]
# The maximum attempts to retry an action with the networking service.
# max_retries = 15
# Seconds to wait before retrying an action with the networking service.
# retry_interval = 1
# The maximum time to wait, in seconds, for a port to detach from an amphora
# port_detach_timeout = 300
# Allow/disallow specific network object types when creating VIPs.
# allow_vip_network_id = True
# allow_vip_subnet_id = True
# allow_vip_port_id = True
# List of network_ids that are valid for VIP creation.
# If this field is empty, no validation is performed.
# valid_vip_networks =

[haproxy_amphora]
# base_path = /var/lib/octavia
# base_cert_dir = /var/lib/octavia/certs
# Absolute path to a custom HAProxy template file
# haproxy_template =
# connection_max_retries = 300
# connection_retry_interval = 5
# build_rate_limit = -1
# build_active_retries = 300
# build_retry_interval = 5

# This setting is deprecated. It is now automatically discovered.
# user_group =

# Maximum number of entries that can fit in the stick table.
# The size supports "k", "m", "g" suffixes.
# haproxy_stick_size = 10k

# REST Driver specific
# bind_host = 0.0.0.0
# bind_port = 9443
#
# This setting is only needed when IPv6 link-local addresses (fe80::/64) are
# used for communication between Octavia and its Amphora; if IPv4 or other
# IPv6 addresses are used it can be ignored.
# lb_network_interface = o-hm0
#
# haproxy_cmd = /usr/sbin/haproxy
# respawn_count = 2
# respawn_interval = 2
# client_cert = /etc/octavia/certs/client.pem
# server_ca = /etc/octavia/certs/server_ca.pem
#
# This setting is deprecated. It is now automatically discovered.
# use_upstart = True
#
# rest_request_conn_timeout = 10
# rest_request_read_timeout = 60
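
# Note: connection_max_retries and connection_retry_interval together bound how
# long the controller keeps trying to reach a booting amphora's REST API (with
# the defaults above, roughly 300 * 5 = 1500 seconds, i.e. 25 minutes).
# Example (illustrative only) of shortening that window for a small test
# environment; the values are placeholders, not recommended settings:
#
#     connection_max_retries = 120
#     connection_retry_interval = 2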

[controller_worker]
# workers = 1
# amp_active_retries = 10
# amp_active_wait_sec = 10

# Glance parameters to extract the image ID to use for the amphora. Only one of
# the parameters is needed. Using tags is the recommended way to refer to images.
# amp_image_id =
# amp_image_tag =
# Optional owner ID used to restrict glance images to one owner ID.
# This is a recommended security setting.
# amp_image_owner_id =

# Nova parameters to use when booting amphora
# amp_flavor_id =
# Upload the ssh key as the service_auth user described elsewhere in this config.
# Leaving this variable blank will install no ssh key on the amphora.
# amp_ssh_key_name =

# Networks to attach to the Amphorae. Examples:
#  - One primary network
#    amp_boot_network_list = 22222222-3333-4444-5555-666666666666
#  - Multiple networks
#    amp_boot_network_list = 11111111-2222-3333-4444-555555555555, 22222222-3333-4444-5555-666666666666
#  - All networks defined in the list will be attached to each amphora
# amp_boot_network_list =

# amp_secgroup_list =
# client_ca = /etc/octavia/certs/ca_01.pem

# Amphora driver options are amphora_noop_driver,
#                            amphora_haproxy_rest_driver
#
# amphora_driver = amphora_noop_driver
#
# Compute driver options are compute_noop_driver
#                            compute_nova_driver
#
# compute_driver = compute_noop_driver
#
# Network driver options are network_noop_driver
#                            allowed_address_pairs_driver
#
# network_driver = network_noop_driver
#
# Distributor driver options are distributor_noop_driver
#                                single_VIP_amphora
#
# distributor_driver = distributor_noop_driver
#
# Load balancer topology options are SINGLE, ACTIVE_STANDBY
# loadbalancer_topology = SINGLE
# user_data_config_drive = False
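
# Example (illustrative only): a non-noop deployment typically enables the real
# drivers and points the worker at resources created in the cloud. The image
# tag, flavor, network ID and security group below are placeholders:
#
#     amp_image_tag = amphora
#     amp_flavor_id = 10
#     amp_boot_network_list = 22222222-3333-4444-5555-666666666666
#     amp_secgroup_list = lb-mgmt-sec-grp
#     amphora_driver = amphora_haproxy_rest_driver
#     compute_driver = compute_nova_driver
#     network_driver = allowed_address_pairs_driver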

[task_flow]
# engine = serial
# max_workers = 5
#
# This setting prevents the controller worker from reverting taskflow flows.
# This will leave resources in an inconsistent state and should only be used
# for debugging purposes.
# disable_revert = False

[oslo_messaging]
# Queue Consumer Thread Pool Size
# rpc_thread_pool_size = 2

# Topic (i.e. Queue) Name
# topic = octavia_prov

# Topic for octavia's events sent to a queue
# event_stream_topic = neutron_lbaas_event

# Transport URL to use for the neutron-lbaas synchronization event stream
# when neutron and octavia have separate queues.
# For Single Host, specify one full transport URL:
# event_stream_transport_url = rabbit://<user>:<pass>@127.0.0.1:5672/
# For HA, specify queue nodes in cluster, comma delimited:
# event_stream_transport_url = rabbit://<user>:<pass>@server01,<user>:<pass>@server02/
# event_stream_transport_url =

[house_keeping]
# Interval in seconds to initiate spare amphora checks
# spare_check_interval = 30
# spare_amphora_pool_size = 0

# Cleanup interval for deleted amphorae
# cleanup_interval = 30
# Amphora expiry age in seconds. Default is 1 week
# amphora_expiry_age = 604800

# Load balancer expiry age in seconds. Default is 1 week
# load_balancer_expiry_age = 604800

[amphora_agent]
# agent_server_ca = /etc/octavia/certs/client_ca.pem
# agent_server_cert = /etc/octavia/certs/server.pem
# Defaults for agent_server_network_dir when not specified here are:
#   Ubuntu: /etc/netns/amphora-haproxy/network/interfaces.d/
#   CentOS/Fedora/RHEL: /etc/netns/amphora-haproxy/sysconfig/network-scripts/
#
# agent_server_network_dir =
# agent_server_network_file =
# agent_request_read_timeout = 120

[keepalived_vrrp]
# Amphora Role/Priority advertisement interval in seconds
# vrrp_advert_int = 1

# Service health check interval and success/fail count
# vrrp_check_interval = 5
# vrrp_fail_count = 2
# vrrp_success_count = 2

# Amphora MASTER gratuitous ARP refresh settings
# vrrp_garp_refresh_interval = 5
# vrrp_garp_refresh_count = 2

[service_auth]
# memcached_servers =
# cafile = /opt/stack/data/ca-bundle.pem
# project_domain_name = Default
# project_name = admin
# user_domain_name = Default
# password = password
# username = admin
# auth_type = password
# auth_url = http://localhost:5555/

[nova]
# The name of the nova service in the keystone catalog
# service_name =
# Custom nova endpoint if override is necessary
# endpoint =

# Region in Identity service catalog to use for communication with the
# OpenStack services.
# region_name =

# Endpoint type in Identity service catalog to use for communication with
# the OpenStack services.
# endpoint_type = publicURL

# CA certificates file to verify nova connections when TLS is enabled
# ca_certificates_file =

# Disable certificate validation on SSL connections
# insecure = False

# If non-zero, generate a random name of the length provided for each amphora,
# in the format "a[A-Z0-9]*".
# Otherwise, the default name format will be used: "amphora-{UUID}".
# random_amphora_name_length = 0
#
# Availability zone to use for creating Amphorae
# availability_zone =

# Enable anti-affinity in nova
# enable_anti_affinity = False
# Set the anti-affinity policy to what is suitable.
# Nova supports: anti-affinity and soft-anti-affinity
# anti_affinity_policy = anti-affinity

[glance]
# The name of the glance service in the keystone catalog
# service_name =
# Custom glance endpoint if override is necessary
# endpoint =

# Region in Identity service catalog to use for communication with the
# OpenStack services.
# region_name =

# Endpoint type in Identity service catalog to use for communication with
# the OpenStack services.
# endpoint_type = publicURL

# CA certificates file to verify glance connections when TLS is enabled
# insecure = False
# ca_certificates_file =

[neutron]
# The name of the neutron service in the keystone catalog
# service_name =
# Custom neutron endpoint if override is necessary
# endpoint =

# Region in Identity service catalog to use for communication with the
# OpenStack services.
# region_name =

# Endpoint type in Identity service catalog to use for communication with
# the OpenStack services.
# endpoint_type = publicURL

# CA certificates file to verify neutron connections when TLS is enabled
# insecure = False
# ca_certificates_file =

[quotas]
# default_load_balancer_quota = -1
# default_listener_quota = -1
# default_member_quota = -1
# default_pool_quota = -1
# default_health_monitor_quota = -1
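
# Example (illustrative only): the quota options above default to -1
# (unlimited). A deployment that wants to cap per-project resources might set
# them roughly as below; the numbers are placeholders, not recommended values:
#
#     default_load_balancer_quota = 10
#     default_listener_quota = 20
#     default_pool_quota = 20
#     default_member_quota = 100
#     default_health_monitor_quota = 20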