openstack-ansible/tests/roles/bootstrap-host/templates/user_variables.aio.yml.j2

Commit 4603188934 by Markos Chandras, 2018-07-20 08:14:32 +01:00
Add support for using distribution packages for OpenStack services

Add new 'aio_distro_basekit' jobs to test the minimal basekit deployment
using distribution packages for the OpenStack services.

We can skip all repo-* related playbooks and roles since we are no longer
building pip packages for the OpenStack services. Finally, we can
populate the utility container using the distribution packages for the
OpenStack client instead of the wheel packages.

Change-Id: Ia8c394123b5588fff8c4acbe1532ed5a6dc7e8ec
Depends-On: https://review.openstack.org/#/c/583161/
Depends-On: https://review.openstack.org/#/c/567530/
Depends-On: https://review.openstack.org/#/c/580455/
Implements: blueprint openstack-distribution-packages

---
# Copyright 2014, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

## General options
debug: True
## Installation method for OpenStack services
install_method: "{{ lookup('env','INSTALL_METHOD') | default('source', true) }}"
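# A minimal usage sketch (the shell lines are illustrative; only the
# INSTALL_METHOD variable itself is read by the lookup above):
#   export INSTALL_METHOD=distro   # distribution packages for the services
#   export INSTALL_METHOD=source   # pip/wheel based deployment (the default)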
## Tempest settings
tempest_public_subnet_cidr: 172.29.248.0/22
tempest_public_subnet_allocation_pools: "172.29.249.110-172.29.249.200"
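# Sanity note: 172.29.248.0/22 covers 172.29.248.0-172.29.251.255, so the
# allocation pool above (172.29.249.110-172.29.249.200, 91 addresses) sits
# entirely inside the public subnet, as tempest requires.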
## Galera settings
galera_innodb_buffer_pool_size: 16M
galera_innodb_log_buffer_size: 4M
galera_wsrep_provider_options:
  - { option: "gcache.size", value: "4M" }
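# For reference: each option/value pair above is folded into the Galera
# wsrep_provider_options string by the galera_server role, so this entry
# would surface in my.cnf roughly as (illustrative rendering):
#   wsrep_provider_options = "gcache.size=4M"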
## Neutron settings
neutron_metadata_checksum_fix: True
### Set workers for all services to optimise memory usage
## Repo
repo_nginx_threads: 2
## Keystone
keystone_httpd_mpm_start_servers: 2
keystone_httpd_mpm_min_spare_threads: 1
keystone_httpd_mpm_max_spare_threads: 2
keystone_httpd_mpm_thread_limit: 2
keystone_httpd_mpm_thread_child: 1
keystone_wsgi_threads: 1
keystone_wsgi_processes_max: 2
## Barbican
barbican_wsgi_processes: 2
barbican_wsgi_threads: 1
## Cinder
cinder_wsgi_processes_max: 2
cinder_wsgi_threads: 1
cinder_wsgi_buffer_size: 16384
cinder_osapi_volume_workers_max: 2
## Glance
glance_api_threads_max: 2
glance_api_threads: 1
glance_api_workers: 1
glance_registry_workers: 1
## Nova
nova_wsgi_threads: 1
nova_wsgi_processes_max: 2
nova_wsgi_processes: 2
nova_wsgi_buffer_size: 16384
nova_api_threads_max: 2
nova_api_threads: 1
nova_osapi_compute_workers: 1
nova_conductor_workers: 1
nova_metadata_workers: 1
## Neutron
neutron_rpc_workers: 1
neutron_metadata_workers: 1
neutron_api_workers: 1
neutron_api_threads_max: 2
neutron_api_threads: 2
neutron_num_sync_threads: 1
## Heat
heat_api_workers: 1
heat_api_threads_max: 2
heat_api_threads: 1
heat_wsgi_threads: 1
heat_wsgi_processes_max: 2
heat_wsgi_processes: 1
heat_wsgi_buffer_size: 16384
## Horizon
horizon_wsgi_processes: 1
horizon_wsgi_threads: 1
horizon_wsgi_threads_max: 2
## Ceilometer
ceilometer_notification_workers_max: 2
ceilometer_notification_workers: 1
## AODH
aodh_wsgi_threads: 1
aodh_wsgi_processes_max: 2
aodh_wsgi_processes: 1
## Gnocchi
gnocchi_wsgi_threads: 1
gnocchi_wsgi_processes_max: 2
gnocchi_wsgi_processes: 1
## Swift
swift_account_server_replicator_workers: 1
swift_server_replicator_workers: 1
swift_object_replicator_workers: 1
swift_account_server_workers: 1
swift_container_server_workers: 1
swift_object_server_workers: 1
swift_proxy_server_workers_max: 2
swift_proxy_server_workers_not_capped: 1
swift_proxy_server_workers_capped: 1
swift_proxy_server_workers: 1
## Ironic
ironic_wsgi_threads: 1
ironic_wsgi_processes_max: 2
ironic_wsgi_processes: 1
## Trove
trove_api_workers_max: 2
trove_api_workers: 1
trove_conductor_workers_max: 2
trove_conductor_workers: 1
trove_wsgi_threads: 1
trove_wsgi_processes_max: 2
trove_wsgi_processes: 1
## Sahara
sahara_api_workers_max: 2
sahara_api_workers: 1
# NOTE: hpcloud-b4's eth0 uses 10.0.3.0/24, which overlaps with the
# lxc_net_address default
# TODO: We'll need to implement a mechanism to determine valid lxc_net_address
# value which will not overlap with an IP already assigned to the host.
lxc_net_address: 10.255.255.1
lxc_net_netmask: 255.255.255.0
lxc_net_dhcp_range: 10.255.255.2,10.255.255.253
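# Sanity note: the three values above describe a single consistent /24; the
# bridge takes 10.255.255.1 and the DHCP range hands out .2 through .253,
# leaving the gateway address (and .254) out of the pool. Any automated
# address selection per the TODO above should preserve this layout.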
{% if repo_build_pip_extra_indexes is defined and repo_build_pip_extra_indexes|length > 0 %}
## Wheel mirrors for the repo_build to use
repo_build_pip_extra_indexes:
{{ repo_build_pip_extra_indexes | to_nice_yaml }}
{% endif %}
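# When the variable is defined, to_nice_yaml expands it into a YAML list, so
# the block above renders roughly as (mirror URL is illustrative):
# repo_build_pip_extra_indexes:
# - 'http://mirror.example.org/pypi/simple'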
{% if _lxc_mirror is defined and _lxc_mirror.stdout_lines is defined %}
## images.linuxcontainers.org reverse proxy
lxc_image_cache_server_mirrors:
- "http://{{ _lxc_mirror.stdout_lines[0] }}"
{% endif %}
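# Rendered example, assuming the mirror lookup returned the (illustrative)
# hostname 'mirror01.example.org' on its first line of output:
# lxc_image_cache_server_mirrors:
#   - "http://mirror01.example.org"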
{% if cache_timeout is defined %}
## Package cache timeout
cache_timeout: {{ cache_timeout }}
{% endif %}
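# e.g. a bootstrap-supplied value of 3600 (illustrative) renders simply as
# 'cache_timeout: 3600'.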
# The container backing store is templated from the bootstrap-host settings
# and defaults to 'machinectl' to speed up the AIO build time. Options are:
# [machinectl, overlayfs, btrfs, zfs, dir, lvm]
lxc_container_backing_store: "{{ lxc_container_backing_store }}"
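# To try a different store, the value can be overridden at bootstrap time; a
# hedged example, assuming this branch's bootstrap scripts pass
# BOOTSTRAP_OPTS through as extra Ansible variables:
#   export BOOTSTRAP_OPTS="lxc_container_backing_store=dir"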
## Always setup tempest, the resources for it, then execute tests
tempest_install: yes
tempest_run: yes
{% if nodepool_dir.stat.exists %}
# Disable chronyd in OpenStack CI
security_rhel7_enable_chrony: no
{% endif %}
# For testing purposes in public clouds, we need to ignore these
# services when trying to do a reload of nova services.
nova_service_negate:
- "nova-agent.service"
- "nova-resetnetwork.service"
{% if _pypi_wheel_mirror is defined and _pypi_wheel_mirror.stdout_lines is defined %}
repo_nginx_pypi_upstream: "{{ _pypi_wheel_mirror.stdout_lines[0] | netloc }}"
repo_build_pip_extra_indexes:
- "{{ _pypi_wheel_mirror.stdout_lines[1] }}"
{% endif %}
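# Rendered example, assuming the mirror lookup returned two (illustrative)
# URLs: 'http://mirror.example.org/pypi/simple' followed by
# 'http://mirror.example.org/wheels'. The netloc filter keeps only the
# host[:port] part of the first URL, so the block above would render as:
# repo_nginx_pypi_upstream: "mirror.example.org"
# repo_build_pip_extra_indexes:
#   - "http://mirror.example.org/wheels"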