fuel-ccp-nova/service/files/nova.conf.j2

# nova.conf
[DEFAULT]
debug = {{ nova.debug }}
state_path = /var/lib/nova
{% if nova.tls.enabled %}
osapi_compute_listen = 127.0.0.1
metadata_listen = 127.0.0.1
{% else %}
osapi_compute_listen = {{ network_topology["private"]["address"] }}
metadata_listen = {{ network_topology["private"]["address"] }}
{% endif %}
osapi_compute_listen_port = {{ nova.api_port.cont }}
metadata_listen_port = {{ nova.metadata.port.cont }}
use_neutron = true
firewall_driver = nova.virt.firewall.NoopFirewallDriver
scheduler_max_attempts = 10
{% if neutron.plugin_agent == "openvswitch" %}
linuxnet_interface_driver = nova.network.linux_net.LinuxOVSInterfaceDriver
{% elif neutron.plugin_agent == "linuxbridge" %}
linuxnet_interface_driver = nova.network.linux_net.BridgeInterfaceDriver
{% endif %}
allow_resize_to_same_host = true
compute_driver = libvirt.LibvirtDriver
# ironic requires a different host manager; this does not affect regular instances
scheduler_host_manager = ironic_host_manager
force_config_drive = True
# vfat format doesn't require sending the config drive over scp (unlike the
# default iso format)
config_drive_format = vfat
# allocation ratio params
cpu_allocation_ratio = {{ nova.allocation_ratio.cpu }}
disk_allocation_ratio = {{ nova.allocation_ratio.disk }}
ram_allocation_ratio = {{ nova.allocation_ratio.ram }}
# scheduler filter params
scheduler_default_filters = {{ nova.scheduler.enabled_filters | join(",") }}
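{# A hypothetical rendering of the line above, assuming enabled_filters lists a few of
   the standard nova scheduler filters (example values only):
   scheduler_default_filters = RetryFilter,AvailabilityZoneFilter,RamFilter,ComputeFilter #}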
# Though my_ip is not used directly, lots of other variables use $my_ip
my_ip = {{ network_topology["private"]["address"] }}
{% if ingress.enabled %}
secure_proxy_ssl_header = HTTP_X_FORWARDED_PROTO
{% endif %}
{% if nova.sriov.enabled %}
{% if nova.sriov.pci_alias %}
pci_alias = [
{%- for alias in nova.sriov.pci_alias -%}
{%- if not loop.first %},{% endif -%}
{"name": "{{ alias.name }}", "product_id": "{{ alias.product_id }}", vendor_id": "{{ alias.vendor_id }}"}
{%- endfor %}]
{% endif %}
pci_passthrough_whitelist = [
{%- for pci_dev in nova.sriov.pci_passthrough_whitelist -%}
{%- if not loop.first %},{% endif -%}
{"devname": "{{ pci_dev.devname }}", "physical_network": "{{ pci_dev.physical_network }}"}
{%- endfor %}]
{% endif %}
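{# A hypothetical rendering of the SR-IOV options above, assuming a single alias and a
   single whitelisted device in the CCP config (all values are examples only):
   pci_alias = [{"name": "a1", "product_id": "1520", "vendor_id": "8086"}]
   pci_passthrough_whitelist = [{"devname": "ens2f0", "physical_network": "physnet1"}]
   The loops only emit a comma between items, so each option renders as a single-line
   JSON-style list. #}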
{% if nova.console == 'novnc' %}
[vnc]
{% if nova.tls.enabled %}
novncproxy_host = 127.0.0.1
{% else %}
novncproxy_host = {{ network_topology["private"]["address"] }}
{% endif %}
novncproxy_port = {{ nova.novncproxy_port.cont }}
vncserver_listen = {{ network_topology["private"]["address"] }}
vncserver_proxyclient_address = {{ network_topology["private"]["address"] }}
novncproxy_base_url = {{ address('nova-novncproxy', nova.novncproxy_port, external=True, with_scheme=True) }}/vnc_auto.html
{% elif nova.console == 'spice' %}
[vnc]
# We have to turn off vnc to use spice
enabled = false
[spice]
server_listen = {{ network_topology["private"]["address"] }}
server_proxyclient_address = {{ nova.spicehtml5proxy.host }}
html5proxy_base_url = http://{{ address('nova-html5proxy') }}:{{ nova.spicehtml5proxy.port.cont }}/spice_auto.html
html5proxy_host = {{ nova.spicehtml5proxy.host }}
html5proxy_port = {{ nova.spicehtml5proxy.port.cont }}
{% endif %}
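{# A hypothetical rendering of the novnc branch, assuming address() resolves the
   external nova-novncproxy endpoint to http://novnc.example.com:6080 (example only):
   novncproxy_base_url = http://novnc.example.com:6080/vnc_auto.html
   The listen/proxyclient addresses stay on the node's private network address. #}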
{% if role_name == "nova-compute-ironic" %}
[ironic]
auth_type = password
auth_url = {{ address("keystone", keystone.public_port, with_scheme=True) }}
project_name = {{ service_account.project }}
username = {{ ironic.username }}
password = {{ ironic.password }}
project_domain_name = {{ service_account.domain }}
user_domain_name = {{ service_account.domain }}
#(TODO) remove these parameters when mitaka support is dropped
#(TODO) remember to update this once discoverd is replaced by inspector
admin_username = {{ ironic.username }}
admin_password = {{ ironic.password }}
admin_url = {{ address("keystone", keystone.public_port, with_scheme=True) }}/v2
admin_tenant_name = {{ service_account.project }}
api_endpoint = {{ address('ironic-api', ironic.api_port, with_scheme=True) }}/v1
{% endif %}
[oslo_concurrency]
lock_path = /var/lib/nova/tmp
{% if glance.tls.enabled %}
[ssl]
ca_file = /opt/ccp/etc/tls/ca.pem
{% endif %}
[glance]
{% if glance.tls.enabled %}
protocol = https
{% endif %}
api_servers = {{ address('glance-api', glance.api_port, with_scheme=True) }}
# We need to retry N times, where N = number of glance-api daemons running
# FIXME
num_retries = 1
[cinder]
catalog_info = volumev2:cinder:internalURL
[neutron]
{% if neutron.tls.enabled %}
protocol = https
cafile = /opt/ccp/etc/tls/ca.pem
{% endif %}
url = {{ address('neutron-server', neutron.server_port, with_scheme=True) }}
auth_strategy = keystone
metadata_proxy_shared_secret = {{ nova.metadata.secret }}
service_metadata_proxy = true
auth_url = {{ address('keystone', keystone.admin_port, with_scheme=True) }}
auth_type = password
project_domain_name = {{ service_account.domain }}
user_domain_name = {{ service_account.domain }}
project_name = {{ service_account.project }}
username = {{ neutron.username }}
password = {{ neutron.password }}
[database]
connection = mysql+pymysql://{{ nova.db.username }}:{{ nova.db.password }}@{{ address("database") }}/{{ nova.db.name }}{% if db.tls.enabled %}?ssl_ca=/opt/ccp/etc/tls/ca.pem{% endif %}
max_pool_size = 50
max_overflow = 1000
max_retries = -1
[api_database]
connection = mysql+pymysql://{{ nova.db.username }}:{{ nova.db.password }}@{{ address("database") }}/{{ nova.db.api_name }}{% if db.tls.enabled %}?ssl_ca=/opt/ccp/etc/tls/ca.pem{% endif %}
max_retries = -1
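{# With hypothetical values (user "nova", password "secret", db host "mariadb",
   databases "nova" and "nova_api") and db.tls.enabled, the two connection strings
   render roughly as:
   connection = mysql+pymysql://nova:secret@mariadb/nova?ssl_ca=/opt/ccp/etc/tls/ca.pem
   connection = mysql+pymysql://nova:secret@mariadb/nova_api?ssl_ca=/opt/ccp/etc/tls/ca.pem #}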
[cache]
backend = oslo_cache.memcache_pool
enabled = true
# Here we need to pass an array of memcached daemons; for now we just use DNS
# FIXME
memcache_servers = {{ address('memcached', memcached.port) }}
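{# Today this renders a single DNS name, e.g. memcache_servers = memcached:11211
   (hypothetical host/port). The FIXME above is about listing every memcached
   instance explicitly instead, e.g. memcached-0:11211,memcached-1:11211. #}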
{{ keystone_authtoken.keystone_authtoken(nova.username, nova.password) }}
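{# The keystone_authtoken macro above is assumed to expand into the standard
   [keystone_authtoken] section, filled with the nova service credentials passed
   as arguments; it is defined in the shared CCP templates, not in this file. #}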
[libvirt]
virt_type = {{ nova.virt_type }}
{% if nova.libvirt.tls.enabled %}
# TLS config:
# 1. NOTE: nova will use the default connection_uri to connect to libvirt,
# e.g. qemu:///, which assumes nova-compute and libvirtd run on the same host.
#
# 2. We use %s in live_migration_uri as a workaround for TLS config with
# wildcard PKI certificates, because they are issued for hostnames, not IPs.
# We also need to pass the domain name so the FQDN (not just the hostname) is
# used when initiating the TLS connection and TLS can match the server
# certificate to the FQDN.
# FIXME
live_migration_uri = "qemu+tls://%s.{{ cluster_domain }}/system"
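{# Hypothetical example: with cluster_domain = "ccp.svc.cluster.local" this renders
   live_migration_uri = "qemu+tls://%s.ccp.svc.cluster.local/system"
   and nova substitutes %s with the destination host's name at migration time, e.g.
   qemu+tls://node-2.ccp.svc.cluster.local/system, so TLS can match the server
   certificate against the FQDN. #}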
{% else %}
# non-TLS config:
connection_uri = "qemu+tcp://{{ network_topology["private"]["address"] }}/system"
live_migration_inbound_addr = "{{ network_topology["private"]["address"] }}"
{% endif %}
{% if nova.ceph.enable %}
images_type = rbd
images_rbd_pool = {{ nova.ceph.pool_name }}
images_rbd_ceph_conf = /etc/ceph/ceph.conf
rbd_user = {{ cinder.ceph.username }}
rbd_secret_uuid = {{ cinder.ceph.rbd_secret_uuid }}
disk_cachemodes="network=writeback"
hw_disk_discard = unmap
{% endif %}
[upgrade_levels]
compute = auto
[wsgi]
api_paste_config = /etc/nova/api-paste.ini
{% if nova.tls.enabled %}
[oslo_middleware]
enable_proxy_headers_parsing = true
{% endif %}
[oslo_messaging_notifications]
{% if searchlight is defined and searchlight.services.nova %}
driver = {{ searchlight.notification_driver }}
notify_on_state_change = vm_and_task_state
{% endif %}
{% if placement.enabled %}
[placement]
auth_url = {{ address("keystone", keystone.admin_port, with_scheme=True) }}/v3
auth_type = password
project_domain_name = {{ service_account.domain }}
user_domain_name = {{ service_account.domain }}
project_name = {{ service_account.project }}
username = {{ placement.account.username }}
password = {{ placement.account.password }}
memcached_servers = {{ address("memcached", memcached.port) }}
os_region_name = RegionOne
{% if keystone.tls.enabled %}
cafile = /opt/ccp/etc/tls/ca.pem
{% endif %}
{% endif %}
{# messaging macros templates #}
{{ oslo_messaging[messaging.backend.notifications]('notifications_config') }}
{{ oslo_messaging[messaging.backend.rpc]('rpc_config') }}
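{# These macros are assumed to expand into the oslo.messaging configuration for the
   notification and RPC backends selected by messaging.backend.*; they are defined in
   the shared CCP messaging templates, not in this file. #}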