7d904743ef
This patch migrates the service from regular syslog files to journald. We also disable uwsgi logging, since it duplicates requests that are already logged by the service itself. Change-Id: If1eda4d803661a0b924941aecd1867302391a5f4

---
# Copyright 2014, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Enable/Disable barbican configurations
nova_barbican_enabled: False
# Enable/Disable designate configurations
nova_designate_enabled: False
# Notification topics for designate.
nova_notifications_designate: notifications_designate
# Enable/Disable ceilometer configurations
nova_ceilometer_enabled: False
# Enable/Disable nova versioned notification
nova_versioned_notification_enabled: False

## Verbosity Options
debug: False

# Set the host which will execute the shade modules
# for the service setup. The host must already have
# clouds.yaml properly configured.
nova_service_setup_host: "{{ openstack_service_setup_host | default('localhost') }}"
nova_service_setup_host_python_interpreter: "{{ openstack_service_setup_host_python_interpreter | default((nova_service_setup_host == 'localhost') | ternary(ansible_playbook_python, ansible_python['executable'])) }}"
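
# For example, a deployer could point the service setup at the first utility
# host instead of localhost (an illustrative override; the group name must
# exist in your inventory and the target must already have clouds.yaml):
# nova_service_setup_host: "{{ groups['utility_all'][0] }}"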

# Set the host which will run compute initialization tasks, such as checking
# that a compute node is up and running cell discovery.
nova_conductor_setup_host: "{{ groups[nova_services['nova-conductor']['group']][0] }}"

# Set the package install state for distribution and pip packages
# Options are 'present' and 'latest'
nova_package_state: "latest"
nova_pip_package_state: "latest"
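
# For example, deployers who do not want packages upgraded when the role is
# re-run can pin both states (illustrative override, not a default change):
# nova_package_state: "present"
# nova_pip_package_state: "present"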

# Set installation method.
nova_install_method: "source"

nova_git_repo: https://git.openstack.org/openstack/nova
nova_git_install_branch: master

nova_lxd_git_repo: https://git.openstack.org/openstack/nova-lxd
nova_lxd_git_install_branch: master

nova_upper_constraints_url: "{{ requirements_git_url | default('https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt?h=' ~ requirements_git_install_branch | default('master')) }}"
nova_git_constraints:
  - "git+{{ nova_git_repo }}@{{ nova_git_install_branch }}#egg=nova"
  - "git+{{ nova_lxd_git_repo }}@{{ nova_lxd_git_install_branch }}#egg=nova-lxd"
  - "--constraint {{ nova_upper_constraints_url }}"
nova_pip_install_args: "{{ pip_install_options | default('') }}"

# Name of the virtual env to deploy into
nova_venv_tag: "{{ venv_tag | default('untagged') }}"
nova_bin: "{{ _nova_bin }}"

nova_fatal_deprecations: False

## Nova user information
nova_system_user_name: nova
nova_system_group_name: nova
nova_system_shell: /bin/bash
nova_system_comment: nova system user
nova_system_home_folder: "/var/lib/{{ nova_system_user_name }}"
nova_libvirt_save_path: "{{ nova_system_home_folder }}/save"

nova_lock_path: "/var/lock/nova"

## Manually specified nova UID/GID
# Deployers can specify a UID for the nova user as well as the GID for the
# nova group if needed. This is commonly used in environments where shared
# storage is used, such as NFS or GlusterFS, and nova UID/GID values must be
# in sync between multiple servers.
#
# WARNING: Changing these values on an existing deployment can lead to
# failures, errors, and instability.
#
# nova_system_user_uid = <UID>
# nova_system_group_gid = <GID>

## Database info
nova_db_setup_host: "{{ ('galera_all' in groups) | ternary(groups['galera_all'][0], 'localhost') }}"
nova_galera_address: "{{ galera_address | default('127.0.0.1') }}"
nova_galera_user: nova
nova_galera_database: nova
nova_db_max_overflow: 10
nova_db_max_pool_size: 120
nova_db_pool_timeout: 30
# Toggle whether nova connects via an encrypted connection
nova_galera_use_ssl: "{{ galera_use_ssl | default(False) }}"
# The path where to store the database server CA certificate
nova_galera_ssl_ca_cert: "{{ galera_ssl_ca_cert | default('/etc/ssl/certs/galera-ca.pem') }}"

## DB API
nova_api_galera_address: "{{ nova_galera_address }}"
nova_api_galera_user: nova_api
nova_api_galera_database: nova_api
nova_api_db_max_overflow: 10
nova_api_db_max_pool_size: 120
nova_api_db_pool_timeout: 30

## DB Cells
nova_cell0_database: "nova_cell0"
nova_cell1_name: "cell1"

## Oslo Messaging

# RPC
nova_oslomsg_rpc_host_group: "{{ oslomsg_rpc_host_group | default('rabbitmq_all') }}"
nova_oslomsg_rpc_setup_host: "{{ (nova_oslomsg_rpc_host_group in groups) | ternary(groups[nova_oslomsg_rpc_host_group][0], 'localhost') }}"
nova_oslomsg_rpc_transport: "{{ oslomsg_rpc_transport | default('rabbit') }}"
nova_oslomsg_rpc_servers: "{{ oslomsg_rpc_servers | default('127.0.0.1') }}"
nova_oslomsg_rpc_port: "{{ oslomsg_rpc_port | default('5672') }}"
nova_oslomsg_rpc_use_ssl: "{{ oslomsg_rpc_use_ssl | default(False) }}"
nova_oslomsg_rpc_userid: nova
nova_oslomsg_rpc_vhost: /nova

# Notify
nova_oslomsg_notify_host_group: "{{ oslomsg_notify_host_group | default('rabbitmq_all') }}"
nova_oslomsg_notify_setup_host: "{{ (nova_oslomsg_notify_host_group in groups) | ternary(groups[nova_oslomsg_notify_host_group][0], 'localhost') }}"
nova_oslomsg_notify_transport: "{{ oslomsg_notify_transport | default('rabbit') }}"
nova_oslomsg_notify_servers: "{{ oslomsg_notify_servers | default('127.0.0.1') }}"
nova_oslomsg_notify_port: "{{ oslomsg_notify_port | default('5672') }}"
nova_oslomsg_notify_use_ssl: "{{ oslomsg_notify_use_ssl | default(False) }}"
nova_oslomsg_notify_userid: "{{ nova_oslomsg_rpc_userid }}"
nova_oslomsg_notify_password: "{{ nova_oslomsg_rpc_password }}"
nova_oslomsg_notify_vhost: "{{ nova_oslomsg_rpc_vhost }}"

## Qdrouterd info
# TODO(ansmith): Change structure when more backends will be supported
nova_oslomsg_amqp1_enabled: "{{ nova_oslomsg_rpc_transport == 'amqp' }}"

## RabbitMQ info

## Configuration for RPC communications
nova_rpc_thread_pool_size: 64
nova_rpc_conn_pool_size: 30
nova_rpc_response_timeout: 60

## Nova virtualization Types
# The nova_virt_types dictionary contains global overrides used for
# specific compute types. Every variable inside of this dictionary
# will become an ansible fact. This gives the user the option to set
# or customize things based on their needs without having to redefine
# this entire data structure. Every supported compute type will
# have its specific variable requirements set under its short name.
nova_virt_types:
  ironic:
    nova_compute_driver: ironic.IronicDriver
    nova_reserved_host_memory_mb: 0
    nova_scheduler_tracks_instance_changes: False
  kvm:
    nova_compute_driver: libvirt.LibvirtDriver
    nova_reserved_host_memory_mb: 2048
    nova_scheduler_tracks_instance_changes: True
  lxd:
    nova_compute_driver: lxd.LXDDriver
    nova_reserved_host_memory_mb: 2048
    nova_scheduler_tracks_instance_changes: True
  qemu:
    nova_compute_driver: libvirt.LibvirtDriver
    nova_reserved_host_memory_mb: 2048
    nova_scheduler_tracks_instance_changes: True
    nova_cpu_mode: "none"
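
# Because each key above becomes an ansible fact, a single value can be
# customized without redefining the whole dictionary. For example, to change
# only the reserved memory for the selected virt type (illustrative override):
# nova_reserved_host_memory_mb: 4096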

# If this is not set, then the playbook will try to guess it.
#nova_virt_type: kvm

# Enable Kernel Shared Memory (KSM)
nova_compute_ksm_enabled: False

# If set, nova_virt_type must be one of these:
nova_supported_virt_types:
  - qemu
  - kvm
  - lxd
  - ironic

## Nova Auth
nova_service_region: RegionOne
nova_service_project_name: "service"
nova_service_project_domain_id: default
nova_service_user_domain_id: default
nova_service_user_name: "nova"
nova_service_role_name: "admin"

## Keystone authentication middleware
nova_keystone_auth_plugin: password

## Nova enabled apis
nova_enabled_apis: "osapi_compute,metadata"

## Nova v2.1
nova_service_name: nova
nova_service_type: compute
nova_service_proto: http
nova_service_publicuri_proto: "{{ openstack_service_publicuri_proto | default(nova_service_proto) }}"
nova_service_adminuri_proto: "{{ openstack_service_adminuri_proto | default(nova_service_proto) }}"
nova_service_internaluri_proto: "{{ openstack_service_internaluri_proto | default(nova_service_proto) }}"
nova_service_bind_address: 0.0.0.0
nova_service_port: 8774
nova_service_description: "Nova Compute Service"
nova_service_publicuri: "{{ nova_service_publicuri_proto }}://{{ external_lb_vip_address }}:{{ nova_service_port }}"
nova_service_publicurl: "{{ nova_service_publicuri }}/v2.1"
nova_service_adminuri: "{{ nova_service_adminuri_proto }}://{{ internal_lb_vip_address }}:{{ nova_service_port }}"
nova_service_adminurl: "{{ nova_service_adminuri }}/v2.1"
nova_service_internaluri: "{{ nova_service_internaluri_proto }}://{{ internal_lb_vip_address }}:{{ nova_service_port }}"
nova_service_internalurl: "{{ nova_service_internaluri }}/v2.1"

## Nova cinder
nova_cross_az_attach: True

## Nova spice
nova_spice_html5proxy_base_proto: "{{ openstack_service_publicuri_proto | default('http') }}"
nova_spice_html5proxy_base_port: 6082
nova_spice_html5proxy_base_uri: "{{ nova_spice_html5proxy_base_proto }}://{{ external_lb_vip_address }}:{{ nova_spice_html5proxy_base_port }}"
nova_spice_html5proxy_base_url: "{{ nova_spice_html5proxy_base_uri }}/spice_auto.html"
nova_spice_console_agent_enabled: "{{ ansible_architecture != 'aarch64' }}"
nova_spicehtml5_git_repo: https://gitlab.freedesktop.org/spice/spice-html5.git
nova_spicehtml5_git_install_branch: master

## Nova novnc
nova_novncproxy_proto: "{{ openstack_service_publicuri_proto | default('http') }}"
nova_novncproxy_port: 6080
nova_novncproxy_base_uri: "{{ nova_novncproxy_proto }}://{{ external_lb_vip_address }}:{{ nova_novncproxy_port }}"
nova_novncproxy_base_url: "{{ nova_novncproxy_base_uri }}/vnc_lite.html"
nova_novncproxy_vncserver_proxyclient_address: "{{ ansible_host }}"
nova_novncproxy_vncserver_listen: "{{ ansible_host }}"
nova_novncproxy_agent_enabled: True
nova_novncproxy_git_repo: https://github.com/novnc/noVNC
nova_novncproxy_git_install_branch: master

## Nova serialconsole
nova_serialconsoleproxy_proto: "ws"
nova_serialconsoleproxy_port: 6083
nova_serialconsoleproxy_port_range: 10000:20000
nova_serialconsoleproxy_base_uri: "{{ nova_serialconsoleproxy_proto }}://{{ external_lb_vip_address }}:{{ nova_serialconsoleproxy_port }}"
nova_serialconsoleproxy_base_url: "{{ nova_serialconsoleproxy_base_uri }}"
nova_serialconsoleproxy_serialconsole_proxyserver_proxyclient_address: "{{ ansible_host }}"
nova_serialconsoleproxy_enabled: True

## Nova metadata
nova_metadata_proxy_enabled: "{{ nova_network_services[nova_network_type]['metadata_proxy_enabled'] | bool }}"
nova_metadata_bind_address: 0.0.0.0
nova_metadata_port: 8775

## Nova compute
nova_enable_instance_password: True
nova_force_config_drive: False
nova_nested_virt_enabled: False

# Uwsgi settings
nova_wsgi_processes_max: 16
nova_wsgi_processes: "{{ [[ansible_processor_vcpus|default(1), 1] | max * 2, nova_wsgi_processes_max] | min }}"
nova_wsgi_buffer_size: 65535
nova_wsgi_threads: 1
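
# As a worked example of the formula above: a host with 4 vCPUs yields
# min(max(4, 1) * 2, 16) = 8 uwsgi processes; the nova_wsgi_processes_max cap
# only takes effect on hosts with more than 8 vCPUs.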

## Nova libvirt
# Warning: If nova_libvirt_inject_key or nova_libvirt_inject_password are enabled for Ubuntu compute hosts
# then the kernel will be made readable by the nova user (a requirement for libguestfs).
# See Launchpad bugs 1507915 (Nova), 759725 (Ubuntu), and http://libguestfs.org/guestfs-faq.1.html
nova_libvirt_inject_key: False
# Inject partition options:
# -2 => disable, -1 => inspect (libguestfs only), 0 => not partitioned, >0 => partition number
nova_libvirt_inject_partition: -2
nova_libvirt_inject_password: False
nova_libvirt_disk_cachemodes: '{{ nova_libvirt_images_rbd_pool is defined | ternary("network=writeback", "") }}'
nova_libvirt_hw_disk_discard: '{{ nova_libvirt_images_rbd_pool is defined | ternary("unmap", "ignore") }}'

## Nova console
nova_console_agent_enabled: True
# Set the console type. Presently the only options are ["spice", "novnc", "serialconsole"].
nova_console_type: "{{ (ansible_architecture == 'aarch64') | ternary('serialconsole', 'novnc') }}"
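
# To force a particular console instead of the architecture-based default,
# set the type directly, e.g.:
# nova_console_type: spice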

# Nova console ssl info, presently only used by the novnc console type
nova_console_ssl_dir: "/etc/nova/ssl"
nova_console_ssl_cert: "{{ nova_console_ssl_dir }}/nova-console.pem"
nova_console_ssl_key: "{{ nova_console_ssl_dir }}/nova-console.key"

# Set to true when terminating SSL/TLS at a load balancer
nova_external_ssl: false

# External SSL forwarding proto
nova_secure_proxy_ssl_header: HTTP_X_FORWARDED_PROTO

## Nova global config
nova_max_age: 0
nova_remove_unused_resized_minimum_age_seconds: 3600
nova_image_cache_manager_interval: 0
nova_resume_guests_state_on_host_boot: False
# nova_default_schedule_zone: nova

# Comma-separated list of Glance API servers
nova_glance_api_servers: "{{ (glance_service_internalurl | default('http://localhost')) | urlsplit('scheme') ~ '://' ~ (glance_service_internalurl | default('http://localhost')) | urlsplit('netloc') }}"
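
# As a worked example of the expression above: if glance_service_internalurl
# were "https://172.29.236.9:9292/v2", the scheme and netloc recombine to
# "https://172.29.236.9:9292" (any path component is intentionally dropped).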

nova_network_type: linuxbridge

nova_network_services:
  linuxbridge:
    use_forwarded_for: False
    metadata_proxy_enabled: True
  nuage:
    use_forwarded_for: True
    metadata_proxy_enabled: True
    ovs_bridge: alubr0
  calico:
    use_forwarded_for: True
    metadata_proxy_enabled: False

# Nova quota
nova_quota_cores: 20
nova_quota_injected_file_content_bytes: 10240
nova_quota_injected_file_path_length: 255
nova_quota_injected_files: 5
nova_quota_instances: 10
nova_quota_key_pairs: 100
nova_quota_metadata_items: 128
nova_quota_ram: 51200
nova_quota_server_group_members: 10
nova_quota_server_groups: 10

# Nova Scheduler
nova_cpu_allocation_ratio: 2.0
nova_disk_allocation_ratio: 1.0
nova_max_instances_per_host: 50
nova_max_io_ops_per_host: 10
nova_ram_allocation_ratio: 1.0
nova_ram_weight_multiplier: 5.0
nova_reserved_host_disk_mb: 2048

nova_scheduler_driver_task_period: 60
nova_scheduler_host_subset_size: 10
nova_scheduler_max_attempts: 5
nova_scheduler_default_filters: >-
  RetryFilter,
  AvailabilityZoneFilter,
  AggregateRamFilter,
  ComputeFilter,
  AggregateCoreFilter,
  AggregateDiskFilter,
  AggregateNumInstancesFilter,
  AggregateIoOpsFilter,
  ComputeCapabilitiesFilter,
  ImagePropertiesFilter,
  ServerGroupAntiAffinityFilter,
  ServerGroupAffinityFilter,
  NUMATopologyFilter
nova_scheduler_driver: filter_scheduler
nova_scheduler_available_filters: nova.scheduler.filters.all_filters
nova_scheduler_weight_classes: nova.scheduler.weights.all_weighers

# This should be tuned depending on the number of compute hosts present in the
# deployment. Large clouds may wish to disable automatic discovery by setting
# this value to -1.
nova_discover_hosts_in_cells_interval: "{{ 300 if groups['nova_compute'] | length > 10 else 60 }}"
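
# For example, to disable periodic discovery entirely and run cell discovery
# manually instead:
# nova_discover_hosts_in_cells_interval: -1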

# If you want to regenerate the nova user's SSH keys on each run, set this var to True.
# Otherwise keys will be generated on the first run and not regenerated on subsequent runs.
nova_recreate_keys: False

# Define nfs information to enable nfs shares as mounted directories for
# nova. The ``nova_nfs_client`` value is a list of dictionaries that must
# be filled out completely to enable the persistent NFS mounts.
#
# Example of the expected dict structure:
# nova_nfs_client:
#   - server: "127.0.0.1"                     ## Hostname or IP address of NFS Server
#     remote_path: "/instances"               ## Remote path from the NFS server's export
#     local_path: "/var/lib/nova/instances"   ## Local path on machine
#     type: "nfs"                             ## This can be nfs or nfs4
#     options: "_netdev,auto"                 ## Mount options
#     config_overrides: "{}"                  ## Override dictionary for unit file
nova_nfs_client: []

# Nova Ceph rbd
# Enable and define nova_libvirt_images_rbd_pool to use rbd as the nova backend
#nova_libvirt_images_rbd_pool: vms
nova_ceph_client: '{{ cinder_ceph_client }}'
# TODO(odyssey4me) - the uuid should be removed, there should be no defaults for secrets
nova_ceph_client_uuid: 517a4663-3927-44bc-9ea7-4a90e1cd4c66

# Enabled upstream if RBD is in use on cinder backends, which requires that
# ceph be deployed on the nova compute hosts to enable volume-backed instances.
nova_cinder_rbd_inuse: False

## General Nova configuration
# If ``nova_osapi_compute_workers`` is unset the system will use half the number of available VCPUs to
# compute the number of API workers to use.
# nova_osapi_compute_workers: 16

# If ``nova_conductor_workers`` is unset the system will use half the number of available VCPUs to
# compute the number of conductor workers to use.
# nova_conductor_workers: 16

# If ``nova_metadata_workers`` is unset the system will use half the number of available VCPUs to
# compute the number of metadata workers to use.
# nova_metadata_workers: 16

# If ``nova_scheduler_workers`` is unset the system will use half the number of available VCPUs to
# compute the number of scheduler workers to use.
# nova_scheduler_workers: 16

## Cap the maximum number of threads / workers when a user value is unspecified.
nova_api_threads_max: 16
nova_api_threads: "{{ [[ansible_processor_vcpus|default(2) // 2, 1] | max, nova_api_threads_max] | min }}"
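
# Worked example of the thread calculation above: with 8 vCPUs the result is
# min(max(8 // 2, 1), 16) = 4 threads; larger hosts are capped at
# nova_api_threads_max (16).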

## Policy vars
# Provide a list of access controls to update the default policy.json with. These changes will be merged
# with the access controls in the default policy.json. E.g.
#nova_policy_overrides:
#  "compute:create": ""
#  "compute:create:attach_network": ""

nova_service_in_ldap: false

## libvirtd config options
nova_libvirtd_listen_tls: 1
nova_libvirtd_listen_tcp: 0
nova_libvirtd_auth_tcp: sasl
nova_libvirtd_debug_log_filters: "3:remote 4:event 3:json 3:rpc"

nova_api_metadata_init_overrides: {}
nova_api_os_compute_init_overrides: {}
nova_compute_init_overrides: {}
nova_conductor_init_overrides: {}
nova_novncproxy_init_overrides: {}
nova_scheduler_init_overrides: {}
nova_spicehtml5proxy_init_overrides: {}
nova_serialproxy_init_overrides: {}
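
# Each of these dictionaries is merged into the corresponding systemd unit as
# "{Section: {Key: Value}}" overrides, the same structure used by the
# Install/Alias override on nova-api-os-compute below. Purely illustrative:
# nova_compute_init_overrides:
#   Service:
#     LimitNOFILE: 65536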

## Service Name-Group Mapping
nova_services:
  nova-api-metadata:
    group: nova_api_metadata
    service_name: nova-api-metadata
    init_config_overrides: "{{ nova_api_metadata_init_overrides }}"
    start_order: 5
    execstarts: "{{ nova_uwsgi_bin }}/uwsgi --autoload --ini /etc/uwsgi/nova-api-metadata.ini"
    execreloads: "{{ nova_uwsgi_bin }}/uwsgi --reload /var/run/nova-api-metadata/nova-api-metadata.pid"
    wsgi_app: True
    wsgi_overrides: "{{ nova_api_metadata_uwsgi_ini_overrides }}"
    uwsgi_bind_address: "{{ nova_metadata_bind_address }}"
    uwsgi_port: "{{ nova_metadata_port }}"
    wsgi_name: nova-metadata-wsgi
  nova-api-os-compute:
    group: nova_api_os_compute
    service_name: nova-api-os-compute
    init_config_overrides: "{{ {'Install': {'Alias': 'nova-api.service'}} | combine(nova_api_os_compute_init_overrides) }}"
    start_order: 4
    execstarts: "{{ nova_uwsgi_bin }}/uwsgi --autoload --ini /etc/uwsgi/nova-api-os-compute.ini"
    execreloads: "{{ nova_uwsgi_bin }}/uwsgi --reload /var/run/nova-api-os-compute/nova-api-os-compute.pid"
    wsgi_app: True
    wsgi_overrides: "{{ nova_api_os_compute_uwsgi_ini_overrides }}"
    uwsgi_bind_address: "{{ nova_service_bind_address }}"
    uwsgi_port: "{{ nova_service_port }}"
    wsgi_name: nova-api-wsgi
  nova-compute:
    group: nova_compute
    service_name: nova-compute
    init_config_overrides: "{{ nova_compute_init_overrides }}"
    start_order: 6
    execstarts: "{{ nova_bin }}/nova-compute"
    execreloads: "/bin/kill -HUP $MAINPID"
    after_targets:
      - libvirtd.service
      - syslog.target
      - network.target
  nova-conductor:
    group: nova_conductor
    service_name: nova-conductor
    init_config_overrides: "{{ nova_conductor_init_overrides }}"
    start_order: 2
    execstarts: "{{ nova_bin }}/nova-conductor"
    execreloads: "/bin/kill -HUP $MAINPID"
  nova-novncproxy:
    group: nova_console
    service_name: nova-novncproxy
    init_config_overrides: "{{ nova_novncproxy_init_overrides }}"
    condition: "{{ nova_console_type == 'novnc' }}"
    start_order: 5
    execstarts: "{{ nova_bin }}/nova-novncproxy"
  nova-scheduler:
    group: nova_scheduler
    service_name: nova-scheduler
    init_config_overrides: "{{ nova_scheduler_init_overrides }}"
    start_order: 3
    execstarts: "{{ nova_bin }}/nova-scheduler"
    execreloads: "/bin/kill -HUP $MAINPID"
  nova-spicehtml5proxy:
    group: nova_console
    service_name: nova-spicehtml5proxy
    init_config_overrides: "{{ {'Install': {'Alias': 'nova-spiceproxy.service'}} | combine(nova_spicehtml5proxy_init_overrides) }}"
    condition: "{{ nova_console_type == 'spice' }}"
    start_order: 5
    execstarts: "{{ nova_bin }}/nova-spicehtml5proxy"
  nova-serialconsole-proxy:
    group: nova_console
    service_name: nova-serialproxy
    init_config_overrides: "{{ nova_serialproxy_init_overrides }}"
    condition: "{{ nova_console_type == 'serialconsole' }}"
    start_order: 5
    execstarts: "{{ nova_bin }}/nova-serialproxy"

nova_novnc_pip_packages:
  - websockify

nova_compute_ironic_pip_packages:
  - python-ironicclient

# Common pip packages
nova_pip_packages:
  - cryptography
  - keystonemiddleware
  - nova
  - osprofiler
  - PyMySQL
  - python-keystoneclient
  - python-memcached
  - python-novaclient
  - python-openstackclient
  - systemd-python
  - uWSGI

# Specific pip packages provided by the user
nova_user_pip_packages: []

nova_optional_oslomsg_amqp1_pip_packages:
  - oslo.messaging[amqp1]

nova_compute_lxd_pip_packages:
  - pylxd
  - nova-lxd
  - pyOpenSSL

nova_qemu_user: libvirt-qemu
nova_qemu_group: kvm

# Define the following variable to drop a replacement
# file for /etc/libvirt/qemu.conf
#qemu_conf_dict: {}

# This variable is used by the repo_build process to determine
# which host group to check for members of before building the
# pip packages required by this role. The value is picked up
# by the py_pkgs lookup.
nova_role_project_group: nova_all

## Tunable overrides
nova_nova_conf_overrides: {}
nova_rootwrap_conf_overrides: {}
nova_api_paste_ini_overrides: {}
nova_policy_overrides: {}
nova_vendor_data_overrides: {}
nova_api_metadata_uwsgi_ini_overrides: {}
nova_api_os_compute_uwsgi_ini_overrides: {}
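
# A purely illustrative nova.conf override, assuming the usual
# OpenStack-Ansible "{section: {option: value}}" layout for *_overrides
# dictionaries (the option shown is an example, not a default of this role):
# nova_nova_conf_overrides:
#   libvirt:
#     live_migration_permit_auto_converge: True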

lxd_bind_address: 0.0.0.0
lxd_bind_port: 8443

# Supported backends are 'zfs', 'dir' and 'btrfs' for now.
# To use btrfs, a btrfs filesystem must be mounted in /var/lib/lxd
# during compute node configuration.
lxd_storage_backend: dir
# This needs to be set in the user_secrets.yml file.
#lxd_trust_password:

# The name of the underlying storage pool created by, or consumed by,
# lxd init. As an example this would be the name of a ZFS pool.
#lxd_init_storage_pool: pool

# Nova requires the LXD storage pool name to be specified
lxd_storage_pool: default

# This variable should be used with lxd when using a
# storage backend that utilizes a storage pool, if you
# want to use a specific block device instead of a loop device.
#lxd_storage_create_device: "/dev/sdb"

# PCI device passthrough to nova
# For SR-IOV please use:
# nova_pci_passthrough_whitelist: '{ "physical_network": "<ml2 network name>", "devname": "<physical nic>" }'
# Example:
# nova_pci_passthrough_whitelist: '{ "physical_network": "physnet1", "devname": "p1p1" }'
nova_pci_passthrough_whitelist: {}
# PCI alias
# Example:
# nova_pci_alias:
#   - '{ "name": "card-alias1", "product_id": "XXXX", "vendor_id": "XXXX" }'
#   - '{ "name": "card-alias2", "product_id": "XXXY", "vendor_id": "XXXY" }'
nova_pci_alias: []