Convert existing roles into galaxy roles
This change implements the blueprint to convert all roles and plays into
a more generic setup, following upstream Ansible best practices.

Items Changed:
* All tasks have tags.
* All roles use namespaced variables.
* All redundant tasks within a given play and role have been removed.
* All of the repetitive plays have been removed in favor of a simpler
  approach. This duplicates some code within the roles, but it ensures
  that each role only ever runs within its own scope.
* All roles have been built using Ansible Galaxy syntax; see the layout
  sketch after this list.
* The `*requirement.txt` files have been reformatted to follow upstream
  OpenStack practices.
* The dynamically generated inventory is now more organized; this should
  assist anyone who wants or needs to dive into the JSON blob that is
  created. Within the inventory, a "properties" field is used for items
  that customize containers (an illustrative sketch follows this list).
* The environment map has been modified to support additional host groups
  to enable the separation of infrastructure pieces. While the old
  infra_hosts group will still work, this change allows groups to be
  divided into separate chunks; e.g. deployment of a Swift-only stack.
* The LXC logic now exists within the plays.
* All password/token variables have been extracted from
  etc/openstack_deploy/user_variables.yml into the separate file
  etc/openstack_deploy/user_secrets.yml in order to allow separate
  security settings on that file (see the example after this list).
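
For reference, the standard Ansible Galaxy role layout these roles now
follow (the role name shown is illustrative):

    os_neutron/                # illustrative role name
        defaults/main.yml      # namespaced default variables
        handlers/main.yml
        meta/main.yml          # galaxy metadata and role dependencies
        tasks/main.yml         # tagged tasks
        templates/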
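
An illustrative fragment of the generated inventory JSON (all key names
other than "properties" are hypothetical):

    "neutron_server_container-a1b2c3": {
        "properties": {
            # hypothetical container customization items
            "service_name": "neutron"
        }
    }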
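
For example, a secret such as neutron_container_mysql_password (referenced
by the neutron templates) would now live in the secrets file:

    # etc/openstack_deploy/user_secrets.yml
    # illustrative value only
    neutron_container_mysql_password: "secrete"

This allows, e.g., a stricter file mode on user_secrets.yml alone.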

Items Excised:
* All of the roles have had the LXC logic removed from within them, which
  should allow the roles to be consumed outside of the
  `os-ansible-deployment` reference architecture (a sketch of the
  resulting pattern follows).
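
A minimal sketch of the resulting pattern, with container creation handled
by a play rather than by the service role (group and role names are
illustrative):

    - name: Create service containers
      hosts: neutron_all
      roles:
        - { role: "lxc_container_create", tags: [ "lxc-create" ] }

    - name: Install neutron services
      hosts: neutron_all
      roles:
        - { role: "os_neutron", tags: [ "os-neutron" ] }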

Note:
* The directory rpc_deployment still exists and presently points at plays
  containing a deprecation warning instructing the user to move to the
  standard playbooks directory.
* While all of the Rackspace-specific components and variables have been
  removed or refactored, the repository still relies on an upstream mirror
  of OpenStack-built Python files and container images. This upstream
  mirror is hosted by Rackspace at "http://rpc-repo.rackspace.com", though
  it is not locked or tied to Rackspace-specific installations. This
  repository contains all of the code needed to create and/or clone your
  own mirror.

DocImpact
Co-Authored-By: Jesse Pretorius <jesse.pretorius@rackspace.co.uk>
Closes-Bug: #1403676
Implements: blueprint galaxy-roles
Change-Id: I03df3328b7655f0cc9e43ba83b02623d038d214e

# {{ ansible_managed }}
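{# Derive worker counts from the host CPU count: API workers default to
   half the vCPUs and RPC workers to half of the API workers, with a
   floor of 1 in both cases. #}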
{% set _api_threads = ansible_processor_vcpus|default(2) // 2 %}
{% set api_threads = _api_threads if _api_threads > 0 else 1 %}
{% set _rpc_threads = api_threads // 2 %}
{% set rpc_threads = _rpc_threads if _rpc_threads > 0 else 1 %}
[DEFAULT]
verbose = {{ verbose }}
debug = {{ debug }}
use_syslog = False
log_file = /var/log/neutron/neutron.log
auth_strategy = keystone
lock_path = /var/lock/neutron
network_device_mtu = {{ neutron_network_device_mtu }}
allow_overlapping_ips = True
## Drivers
network_scheduler_driver = {{ neutron_driver_network_scheduler }}
router_scheduler_driver = {{ neutron_driver_router_scheduler }}
loadbalancer_pool_scheduler_driver = {{ neutron_driver_loadbalancer_pool_scheduler }}
dhcp_driver = {{ neutron_driver_dhcp }}
notification_driver = {{ neutron_driver_notification }}
## Schedulers
router_distributed = False
network_auto_schedule = True
router_auto_schedule = True
## Agents
agent_down_time = {{ neutron_agent_down_time }}
## API
bind_port = 9696
bind_host = 0.0.0.0
## Workers
api_workers = {{ neutron_api_workers | default(api_threads) }}
rpc_workers = {{ neutron_rpc_workers | default(rpc_threads) }}
## Plugins
core_plugin = {{ neutron_plugin_core }}
service_plugins = {{ neutron_plugin_loaded_base }}
## MAC Address
base_mac = fa:16:3e:00:00:00
mac_generation_retries = 16
## DHCP
dhcp_agent_notification = True
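{# Schedule DHCP for each network on every host in the neutron_agent group #}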
dhcp_agents_per_network = {{ groups['neutron_agent'] | length }}
dhcp_delete_namespaces = True
dhcp_lease_duration = 86400
## RabbitMQ
rabbit_port = {{ rabbitmq_port }}
rabbit_userid = {{ rabbitmq_userid }}
rabbit_password = {{ rabbitmq_password }}
rabbit_hosts = {{ rabbitmq_servers }}
## Notifications
notify_nova_on_port_status_changes = True
notify_nova_on_port_data_changes = True
send_events_interval = 2
## Nova
nova_url = {{ nova_service_adminurl|replace('/$(tenant_id)s', '') }}
nova_admin_username = {{ nova_service_user_name }}
nova_admin_tenant_id = {{ nova_admin_tenant_id }}
nova_admin_password = {{ nova_service_password }}
nova_admin_auth_url = {{ keystone_service_internalurl }}
[quotas]
quota_driver = {{ neutron_driver_quota }}
quota_items = network,subnet,port
[agent]
polling_interval = {{ neutron_agent_polling_interval|default(5) }}
report_interval = {{ neutron_report_interval|int }}
root_helper = sudo /usr/local/bin/neutron-rootwrap /etc/neutron/rootwrap.conf
[keystone_authtoken]
signing_dir = /var/cache/neutron
identity_uri = {{ keystone_service_internaluri }}
auth_uri = {{ keystone_service_internalurl }}
admin_tenant_name = {{ neutron_service_tenant_name }}
admin_user = {{ neutron_service_user_name }}
admin_password = {{ neutron_service_password }}
memcached_servers = {{ memcached_servers }}
token_cache_time = 300
revocation_cache_time = 60
# if your memcached server is shared, use these settings to avoid cache poisoning
memcache_security_strategy = ENCRYPT
memcache_secret_key = {{ memcached_encryption_key }}
# if your keystone deployment uses PKI and you value security over
# performance, set check_revocations_for_cached to True
check_revocations_for_cached = False
[database]
connection = mysql://{{ neutron_galera_user }}:{{ neutron_container_mysql_password }}@{{ galera_address }}/{{ neutron_galera_database }}?charset=utf8
[service_providers]
service_provider = LOADBALANCER:Haproxy:neutron.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default
service_provider = VPN:openswan:neutron.services.vpn.service_drivers.ipsec.IPsecVPNDriver:default