Add Ansible playbooks and modified cobbler snippets

Change-Id: If759d0fa469d3d40ab92e3359c6f9dfdbebe7214
Xicheng Chang 2015-04-04 12:06:36 -07:00
parent ddb0e1af4f
commit 587250bfae
107 changed files with 9765 additions and 21 deletions

View File

@@ -0,0 +1,32 @@
---
- hosts: controller
sudo: True
roles:
- common
- database
- mq
- keystone
- nova-controller
- neutron-controller
- dashboard
- cinder-controller
- glance
- hosts: network
sudo: True
roles:
- common
- neutron-network
- hosts: storage
sudo: True
roles:
- common
- cinder-volume
- hosts: compute
sudo: True
roles:
- common
- nova-compute
- neutron-compute

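Neither the inventory nor the playbook file name is shown in this view; a minimal sketch of how this layout could be driven, assuming the playbook above is saved as single-controller.yml and the hosts carry the addresses from the group variables that appear later in this commit:

# Hypothetical inventory matching the four groups used above
cat > hosts-single <<'EOF'
[controller]
10.1.0.11
[network]
10.1.0.12
[compute]
10.1.0.13
[storage]
10.1.0.14
EOF
ansible-playbook -i hosts-single single-controller.yml
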
View File

@@ -0,0 +1,8 @@
---
- hosts: all
remote_user: vagrant
sudo: True
roles:
- common
- nova-compute
- neutron-compute

View File

@@ -0,0 +1,14 @@
---
- hosts: controller
remote_user: root
sudo: True
roles:
- common
- database
- mq
- keystone
- nova-controller
- neutron-controller
- dashboard
- cinder-controller
- glance

View File

@@ -0,0 +1,53 @@
controller_host: 10.1.0.11
network_host: 10.1.0.12
compute_host: 10.1.0.13
storage_host: 10.1.0.14
odl_controller: 10.1.0.15
DEBUG: False
VERBOSE: False
NTP_SERVER_LOCAL: controller
DB_HOST: "{{ controller_host }}"
MQ_BROKER: rabbitmq
OPENSTACK_REPO: cloudarchive-juno.list
ADMIN_TOKEN: admin
CEILOMETER_TOKEN: c095d479023a0fd58a54
RABBIT_PASS: guest
KEYSTONE_DBPASS: keystone_db_secret
DEMO_PASS: demo_secret
ADMIN_PASS: admin_secret
GLANCE_DBPASS: glance_db_secret
GLANCE_PASS: glance_secret
NOVA_DBPASS: nova_db_secret
NOVA_PASS: nova_secret
DASH_DBPASS: dash_db_secret
CINDER_DBPASS: cinder_db_secret
CINDER_PASS: cinder_secret
NEUTRON_DBPASS: neutron_db_secret
NEUTRON_PASS: neutron_secret
NEUTRON_TYPE_DRIVERS: ['flat', 'gre', 'vxlan']
NEUTRON_TENANT_NETWORK_TYPES: ['vxlan']
#NEUTRON_MECHANISM_DRIVERS: ['opendaylight']
NEUTRON_MECHANISM_DRIVERS: ['openvswitch']
NEUTRON_TUNNEL_TYPES: ['vxlan']
METADATA_SECRET: metadata_secret
INSTANCE_TUNNELS_INTERFACE_IP_ADDRESS: 10.1.1.21
INTERFACE_NAME: eth2
EXTERNAL_NETWORK_CIDR: 203.0.113.0/24
EXTERNAL_NETWORK_GATEWAY: 203.0.113.1
FLOATING_IP_START: 203.0.113.101
FLOATING_IP_END: 203.0.113.200
build_in_image: http://cdn.download.cirros-cloud.net/0.3.3/cirros-0.3.3-x86_64-disk.img
build_in_image_name: cirros-0.3.3-x86_64-disk.img
physical_device: /dev/sdb
internal_interface: ansible_eth1
internal_ip: "{{ hostvars[inventory_hostname][internal_interface]['ipv4']['address'] }}"
odl_username: admin
odl_password: admin
odl_api_port: 8080

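The internal_ip entry is resolved from gathered facts at run time; a quick way to inspect the structure that the hostvars lookup reads (a sketch, assuming the inventory from the earlier example):

# Show the ansible_eth1 fact dict, including ipv4.address, for every host
ansible all -i hosts-single -m setup -a 'filter=ansible_eth1'
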
View File

@@ -0,0 +1,67 @@
---
- hosts: database
sudo: True
roles:
- common
- database
- hosts: messaging
sudo: True
roles:
- common
- mq
- hosts: identity
sudo: True
roles:
- common
- keystone
- hosts: compute-controller
sudo: True
roles:
- common
- nova-controller
- hosts: network-server
sudo: True
roles:
- common
- neutron-controller
- hosts: storage-controller
sudo: True
roles:
- common
- cinder-controller
- hosts: image
sudo: True
roles:
- common
- glance
- hosts: dashboard
sudo: True
roles:
- common
- dashboard
- hosts: network-worker
sudo: True
roles:
- common
- neutron-network
- hosts: storage-volume
sudo: True
roles:
- common
- cinder-volume
- hosts: compute-worker
sudo: True
roles:
- common
- nova-compute
- neutron-compute

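A sketch of an inventory for this multi-node split, assuming the playbook is saved as multinodes.yml; the host-to-group mapping mirrors the hosts template that appears later in this commit:

cat > hosts-multi <<'EOF'
[compute-controller]
host-136
[database]
host-136
[messaging]
host-136
[identity]
host-136
[dashboard]
host-136
[network-server]
host-138
[storage-controller]
host-138
[image]
host-138
[compute-worker]
host-137
[storage-volume]
host-139
[network-worker]
host-139
EOF
ansible-playbook -i hosts-multi multinodes.yml
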
View File

@@ -0,0 +1,7 @@
---
- hosts: all
remote_user: vagrant
sudo: True
roles:
- common
- neutron-network

View File

@@ -0,0 +1,6 @@
---
- name: restart cinder-scheduler
service: name=cinder-scheduler state=restarted
- name: restart cinder-api
service: name=cinder-api state=restarted

View File

@@ -0,0 +1,29 @@
---
- name: install cinder packages
apt: name={{ item }} state=present force=yes
with_items:
- cinder-api
- cinder-scheduler
- python-cinderclient
- name: upload cinder conf
template: src=cinder.conf dest=/etc/cinder/cinder.conf
notify:
- restart cinder-scheduler
- restart cinder-api
- name: sync cinder db
shell: su -s /bin/sh -c "cinder-manage db sync" cinder
notify:
- restart cinder-scheduler
- restart cinder-api
- meta: flush_handlers
- name: upload cinder keystone registration script
template: src=cinder_init.sh dest=/opt/cinder_init.sh mode=0744
- name: run cinder registration script
shell: /opt/cinder_init.sh && touch cinder_init_complete
args:
creates: cinder_init_complete

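Once the handlers have flushed and the registration script has run, the services can be smoke-tested (a sketch: cinder service-list assumes admin OS_* variables are exported, and the address follows the single-controller group variables):

# Both cinder-api and cinder-scheduler should appear with state "up"
cinder service-list
# The API answers a version document on its listen address
curl -s http://10.1.0.11:8776/ | head -n 1
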
View File

@@ -0,0 +1,71 @@
#############
# OpenStack #
#############
[composite:osapi_volume]
use = call:cinder.api:root_app_factory
/: apiversions
/v1: openstack_volume_api_v1
/v2: openstack_volume_api_v2
[composite:openstack_volume_api_v1]
use = call:cinder.api.middleware.auth:pipeline_factory
noauth = request_id faultwrap sizelimit osprofiler noauth apiv1
keystone = request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv1
keystone_nolimit = request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv1
[composite:openstack_volume_api_v2]
use = call:cinder.api.middleware.auth:pipeline_factory
noauth = request_id faultwrap sizelimit osprofiler noauth apiv2
keystone = request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv2
keystone_nolimit = request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv2
[filter:request_id]
paste.filter_factory = cinder.openstack.common.middleware.request_id:RequestIdMiddleware.factory
[filter:faultwrap]
paste.filter_factory = cinder.api.middleware.fault:FaultWrapper.factory
[filter:osprofiler]
paste.filter_factory = osprofiler.web:WsgiMiddleware.factory
hmac_keys = SECRET_KEY
enabled = yes
[filter:noauth]
paste.filter_factory = cinder.api.middleware.auth:NoAuthMiddleware.factory
[filter:sizelimit]
paste.filter_factory = cinder.api.middleware.sizelimit:RequestBodySizeLimiter.factory
[app:apiv1]
paste.app_factory = cinder.api.v1.router:APIRouter.factory
[app:apiv2]
paste.app_factory = cinder.api.v2.router:APIRouter.factory
[pipeline:apiversions]
pipeline = faultwrap osvolumeversionapp
[app:osvolumeversionapp]
paste.app_factory = cinder.api.versions:Versions.factory
[filter:authtoken]
paste.filter_factory = keystonemiddleware.auth_token:filter_factory
# auth_host = 127.0.0.1
# auth_port = 35357
# auth_protocol = http
auth_uri = http://{{ identity_host }}:5000/v2.0
identity_uri = http://{{ identity_host }}:35357
admin_tenant_name = service
admin_user = cinder
admin_password = {{ CINDER_PASS }}
##########
# Shared #
##########
[filter:keystonecontext]
paste.filter_factory = cinder.api.middleware.auth:CinderKeystoneContext.factory

View File

@@ -0,0 +1,63 @@
[DEFAULT]
rootwrap_config = /etc/cinder/rootwrap.conf
api_paste_config = /etc/cinder/api-paste.ini
iscsi_helper = tgtadm
volume_name_template = volume-%s
volume_group = cinder-volumes
verbose = {{ VERBOSE }}
debug = {{ DEBUG }}
auth_strategy = keystone
state_path = /var/lib/cinder
lock_path = /var/lock/cinder
notification_driver=cinder.openstack.common.notifier.rpc_notifier
volumes_dir = /var/lib/cinder/volumes
log_file=/var/log/cinder/cinder.log
control_exchange = cinder
rpc_backend = rabbit
rabbit_host = {{ rabbit_host }}
rabbit_port = 5672
rabbit_userid = guest
rabbit_password = {{ RABBIT_PASS }}
my_ip = {{ storage_controller_host }}
glance_host = {{ image_host }}
glance_port = 9292
api_rate_limit = False
storage_availability_zone = nova
quota_volumes = 10
quota_gigabytes=1000
quota_driver=cinder.quota.DbQuotaDriver
osapi_volume_listen = {{ storage_controller_host }}
osapi_volume_listen_port = 8776
db_backend = sqlalchemy
volume_name_template = volume-%s
snapshot_name_template = snapshot-%s
max_gigabytes=10000
volume_group=cinder-volumes
volume_clear=zero
volume_clear_size=10
iscsi_ip_address={{ storage_controller_host }}
iscsi_port=3260
iscsi_helper=tgtadm
volumes_dir=/var/lib/cinder/volumes
volume_driver=cinder.volume.drivers.lvm.LVMISCSIDriver
[keystone_authtoken]
auth_uri = http://{{ identity_host }}:5000/v2.0
identity_uri = http://{{ identity_host }}:35357
admin_tenant_name = service
admin_user = cinder
admin_password = {{ CINDER_PASS }}
[database]
connection = mysql://cinder:{{ CINDER_DBPASS }}@{{ db_host }}/cinder

View File

@@ -0,0 +1,6 @@
keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ identity_host }}:35357/v2.0 user-create --name=cinder --pass={{ CINDER_PASS }} --email=cinder@example.com
keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ identity_host }}:35357/v2.0 user-role-add --user=cinder --tenant=service --role=admin
keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ identity_host }}:35357/v2.0 service-create --name=cinder --type=volume --description="OpenStack Block Storage"
keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ identity_host }}:35357/v2.0 endpoint-create --service-id=$(keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ identity_host }}:35357/v2.0 service-list | awk '/ volume / {print $2}') --publicurl=http://{{ storage_controller_host }}:8776/v1/%\(tenant_id\)s --internalurl=http://{{ storage_controller_host }}:8776/v1/%\(tenant_id\)s --adminurl=http://{{ storage_controller_host }}:8776/v1/%\(tenant_id\)s

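The script above registers only the v1 volume endpoint; Juno also serves a v2 API, and clients increasingly expect a volumev2 catalog entry. A sketch of the matching v2 registration, following the same pattern as the v1 lines (the cinderv2 service name is the convention from the Juno install guide):

keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ identity_host }}:35357/v2.0 service-create --name=cinderv2 --type=volumev2 --description="OpenStack Block Storage v2"
keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ identity_host }}:35357/v2.0 endpoint-create --service-id=$(keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ identity_host }}:35357/v2.0 service-list | awk '/ volumev2 / {print $2}') --publicurl=http://{{ storage_controller_host }}:8776/v2/%\(tenant_id\)s --internalurl=http://{{ storage_controller_host }}:8776/v2/%\(tenant_id\)s --adminurl=http://{{ storage_controller_host }}:8776/v2/%\(tenant_id\)s
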
View File

@@ -0,0 +1 @@
physical_device: /dev/loop0

View File

@@ -0,0 +1,6 @@
---
- name: restart cinder-volume
service: name=cinder-volume state=restarted
- name: restart tgt
service: name=tgt state=restarted

View File

@@ -0,0 +1,48 @@
---
- name: install cinder-volume and lvm2 packages
apt: name={{ item }} state=present force=yes
with_items:
- cinder-volume
- lvm2
- name: check if physical device exists
stat: path={{ physical_device }}
register: st
- name: replace physical_device with /dev/loop0 when the physical device is missing
local_action: copy src=loop.yml dest=/tmp/loop.yml
when: st.stat.exists == False
- name: load loop.yml
include_vars: /tmp/loop.yml
when: st.stat.exists == False
- name: check if the cinder-volumes backing file already exists
shell: ls /mnt
register: cindervolumes
- name: get available partition size (in 1K blocks)
shell: df / | awk '$3 ~ /[0-9]+/ { print $4 }'
register: partition_size
- name: create a sparse cinder-volumes backing file if it is missing
shell: dd if=/dev/zero of=/mnt/cinder-volumes bs=1024 count=0 seek={{ partition_size.stdout }}
when: cindervolumes.stdout != 'cinder-volumes'
- name: get first lo device
shell: ls /dev/loop* | egrep 'loop[0-9]+'|sed -n 1p
register: first_lo
when: cindervolumes.stdout != 'cinder-volumes'
- name: do a losetup on /mnt/cinder-volumes
shell: losetup {{ first_lo.stdout }} /mnt/cinder-volumes
when: cindervolumes.stdout != 'cinder-volumes'
- name: create physical and group volumes
lvg: vg=cinder-volumes pvs={{ physical_device }} vg_options=--force
- name: upload cinder-volume configuration
template: src=cinder.conf dest=/etc/cinder/cinder.conf backup=yes
notify:
- restart cinder-volume
- restart tgt

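The loopback fallback in these tasks amounts to the following manual sequence (a sketch; the size comes from df's 1K-block output, converted via the dd block size):

# Create a sparse backing file sized to the free space on /
size_kb=$(df / | awk '$3 ~ /[0-9]+/ { print $4 }')
dd if=/dev/zero of=/mnt/cinder-volumes bs=1024 count=0 seek="$size_kb"
# Attach it to a loop device and build the LVM volume group on top
losetup /dev/loop0 /mnt/cinder-volumes
pvcreate /dev/loop0
vgcreate cinder-volumes /dev/loop0
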
View File

@@ -0,0 +1,62 @@
[DEFAULT]
rootwrap_config = /etc/cinder/rootwrap.conf
api_paste_config = /etc/cinder/api-paste.ini
iscsi_helper = tgtadm
volume_name_template = volume-%s
volume_group = cinder-volumes
verbose = True
auth_strategy = keystone
state_path = /var/lib/cinder
lock_path = /var/lock/cinder
notification_driver=cinder.openstack.common.notifier.rpc_notifier
volumes_dir = /var/lib/cinder/volumes
log_file=/var/log/cinder/cinder.log
control_exchange = cinder
rpc_backend = rabbit
rabbit_host = {{ rabbit_host }}
rabbit_port = 5672
rabbit_userid = guest
rabbit_password = {{ RABBIT_PASS }}
my_ip = {{ storage_controller_host }}
glance_host = {{ image_host }}
glance_port = 9292
api_rate_limit = False
storage_availability_zone = nova
quota_volumes = 10
quota_gigabytes=1000
quota_driver=cinder.quota.DbQuotaDriver
osapi_volume_listen = {{ storage_controller_host }}
osapi_volume_listen_port = 8776
db_backend = sqlalchemy
volume_name_template = volume-%s
snapshot_name_template = snapshot-%s
max_gigabytes=10000
volume_group=cinder-volumes
volume_clear=zero
volume_clear_size=10
iscsi_ip_address={{ storage_controller_host }}
iscsi_port=3260
iscsi_helper=tgtadm
volumes_dir=/var/lib/cinder/volumes
volume_driver=cinder.volume.drivers.lvm.LVMISCSIDriver
[keystone_authtoken]
auth_uri = http://{{ identity_host }}:5000/v2.0
identity_uri = http://{{ identity_host }}:35357
admin_tenant_name = service
admin_user = cinder
admin_password = {{ CINDER_PASS }}
[database]
connection = mysql://cinder:{{ CINDER_DBPASS }}@{{ db_host }}/cinder

View File

@@ -0,0 +1 @@
deb http://ubuntu-cloud.archive.canonical.com/ubuntu trusty-updates/juno main

View File

@@ -0,0 +1,4 @@
---
- name: restart ntp
command: su -s /bin/sh -c "service ntp stop; ntpd -gq; hwclock --systohc; service ntp start"
ignore_errors: True

View File

@@ -0,0 +1,33 @@
---
- name: add juno cloudarchive
apt_repository: repo='deb http://ubuntu-cloud.archive.canonical.com/ubuntu trusty-updates/juno main' state=present
- name: add juno apt key
apt_key: keyserver=keyserver.ubuntu.com id=5EDB1B62EC4926EA
- name: update packages once
apt: update_cache=yes
- name: update /etc/hosts on all hosts
template: src=hosts
dest=/etc/hosts
backup=yes
- name: install common packages
apt: name={{ item }} state=latest
with_items:
- python-pip
- python-dev
- python-mysqldb
- ntp
- name: update ntp conf
template: src=ntp.conf dest=/etc/ntp.conf backup=yes
notify:
- restart ntp
- name: update pip
pip: name={{ item }} state=latest
with_items:
- pip

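After the role has run, the cloud archive wiring can be spot-checked on any host (a quick verification, not part of the role):

# The Juno pocket should show up as an apt source and its key as trusted
grep -r trusty-updates/juno /etc/apt/sources.list.d/
apt-key list | grep -i EC4926EA
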
View File

@@ -0,0 +1,22 @@
# compute-controller
10.145.89.136 host-136
# database
10.145.89.136 host-136
# messaging
10.145.89.136 host-136
# storage-controller
10.145.89.138 host-138
# image
10.145.89.138 host-138
# identity
10.145.89.136 host-136
# network-server
10.145.89.138 host-138
# dashboard
10.145.89.136 host-136
# storage-volume
10.145.89.139 host-139
# network-worker
10.145.89.139 host-139
# compute-worker
10.145.89.137 host-137

View File

@@ -0,0 +1,56 @@
# /etc/ntp.conf, configuration for ntpd; see ntp.conf(5) for help
driftfile /var/lib/ntp/ntp.drift
# Enable this if you want statistics to be logged.
#statsdir /var/log/ntpstats/
statistics loopstats peerstats clockstats
filegen loopstats file loopstats type day enable
filegen peerstats file peerstats type day enable
filegen clockstats file clockstats type day enable
# Specify one or more NTP servers.
# Use servers from the NTP Pool Project. Approved by Ubuntu Technical Board
# on 2011-02-08 (LP: #104525). See http://www.pool.ntp.org/join.html for
# more information.
server {{ NTP_SERVER_LOCAL }}
server 0.ubuntu.pool.ntp.org
server 1.ubuntu.pool.ntp.org
server 2.ubuntu.pool.ntp.org
server 3.ubuntu.pool.ntp.org
# Use Ubuntu's ntp server as a fallback.
server ntp.ubuntu.com
# Access control configuration; see /usr/share/doc/ntp-doc/html/accopt.html for
# details. The web page <http://support.ntp.org/bin/view/Support/AccessRestrictions>
# might also be helpful.
#
# Note that "restrict" applies to both servers and clients, so a configuration
# that might be intended to block requests from certain clients could also end
# up blocking replies from your own upstream servers.
# By default, exchange time with everybody, but don't allow configuration.
restrict -4 default kod notrap nomodify nopeer noquery
restrict -6 default kod notrap nomodify nopeer noquery
# Local users may interrogate the ntp server more closely.
restrict 127.0.0.1
restrict ::1
# Clients from this (example!) subnet have unlimited access, but only if
# cryptographically authenticated.
#restrict 192.168.123.0 mask 255.255.255.0 notrust
# If you want to provide time to your local subnet, change the next line.
# (Again, the address is an example only.)
#broadcast 192.168.123.255
# If you want to listen to time broadcasts on your local subnet, de-comment the
# next lines. Please do this only if you trust everybody on the network!
#disable auth
#broadcastclient

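Once ntpd restarts with this template, peer selection can be checked (a quick verification; the controller entry should appear alongside the pool servers):

# Lines prefixed with '*' or '+' indicate selected/candidate time sources
ntpq -p
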
View File

@@ -0,0 +1,29 @@
---
- name: install dashboard packages
apt: name={{ item }} state=present force=yes
with_items:
- apache2
- memcached
- libapache2-mod-wsgi
- openstack-dashboard
- name: remove ubuntu theme
apt: name=openstack-dashboard-ubuntu-theme
state=absent
## horizon configuration is already enabled in apache2/conf-enabled
## by openstack-dashboard package deploy script.
#- name: update dashboard conf
# template: src=openstack-dashboard.conf
# dest=/etc/apache2/sites-available/openstack-dashboard.conf
# backup=yes
- name: update horizon settings
template: src=local_settings.py dest=/etc/openstack-dashboard/local_settings.py
backup=yes
- name: restart apache2
service: name=apache2 state=restarted
- name: restart memcached
service: name=memcached state=restarted

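A quick smoke test once apache2 and memcached come back (a sketch; the Ubuntu package serves Horizon under /horizon, and controller stands in for the dashboard host name):

# Expect HTTP 200 from the login page
curl -s -o /dev/null -w '%{http_code}\n' http://controller/horizon/
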
View File

@@ -0,0 +1,511 @@
import os
from django.utils.translation import ugettext_lazy as _
from openstack_dashboard import exceptions
DEBUG = True
TEMPLATE_DEBUG = DEBUG
# Required for Django 1.5.
# If horizon is running in production (DEBUG is False), set this
# with the list of host/domain names that the application can serve.
# For more information see:
# https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
#ALLOWED_HOSTS = ['horizon.example.com', ]
# Set SSL proxy settings:
# For Django 1.4+ pass this header from the proxy after terminating the SSL,
# and don't forget to strip it from the client's request.
# For more information see:
# https://docs.djangoproject.com/en/1.4/ref/settings/#secure-proxy-ssl-header
# SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTOCOL', 'https')
# If Horizon is being served through SSL, then uncomment the following two
# settings to better secure the cookies from security exploits
#CSRF_COOKIE_SECURE = True
#SESSION_COOKIE_SECURE = True
# Overrides for OpenStack API versions. Use this setting to force the
# OpenStack dashboard to use a specific API version for a given service API.
# NOTE: The version should be formatted as it appears in the URL for the
# service API. For example, The identity service APIs have inconsistent
# use of the decimal point, so valid options would be "2.0" or "3".
# OPENSTACK_API_VERSIONS = {
# "identity": 3,
# "volume": 2
# }
# Set this to True if running on multi-domain model. When this is enabled, it
# will require user to enter the Domain name in addition to username for login.
# OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = False
# Overrides the default domain used when running on single-domain model
# with Keystone V3. All entities will be created in the default domain.
# OPENSTACK_KEYSTONE_DEFAULT_DOMAIN = 'Default'
# Set Console type:
# valid options would be "AUTO", "VNC", "SPICE" or "RDP"
# CONSOLE_TYPE = "AUTO"
# Default OpenStack Dashboard configuration.
HORIZON_CONFIG = {
'dashboards': ('project', 'admin', 'settings',),
'default_dashboard': 'project',
'user_home': 'openstack_dashboard.views.get_user_home',
'ajax_queue_limit': 10,
'auto_fade_alerts': {
'delay': 3000,
'fade_duration': 1500,
'types': ['alert-success', 'alert-info']
},
'help_url': "http://docs.openstack.org",
'exceptions': {'recoverable': exceptions.RECOVERABLE,
'not_found': exceptions.NOT_FOUND,
'unauthorized': exceptions.UNAUTHORIZED},
}
# Specify a regular expression to validate user passwords.
# HORIZON_CONFIG["password_validator"] = {
# "regex": '.*',
# "help_text": _("Your password does not meet the requirements.")
# }
# Disable simplified floating IP address management for deployments with
# multiple floating IP pools or complex network requirements.
# HORIZON_CONFIG["simple_ip_management"] = False
# Turn off browser autocompletion for the login form if so desired.
# HORIZON_CONFIG["password_autocomplete"] = "off"
LOCAL_PATH = os.path.dirname(os.path.abspath(__file__))
# Set custom secret key:
# You can either set it to a specific value or you can let horizon generate a
# default secret key that is unique on this machine, i.e. regardless of the
# number of Python WSGI workers (if used behind Apache+mod_wsgi). However, there
# may be situations where you would want to set this explicitly, e.g. when
# multiple dashboard instances are distributed on different machines (usually
# behind a load-balancer). Either you have to make sure that a session gets all
# requests routed to the same dashboard instance or you set the same SECRET_KEY
# for all of them.
from horizon.utils import secret_key
SECRET_KEY = 'AJDSKLAJDKASJDKASJDKSAJDKSJAKDSA'
# We recommend you use memcached for development; otherwise after every reload
# of the django development server, you will have to login again. To use
# memcached set CACHES to something like
CACHES = {
'default': {
'BACKEND' : 'django.core.cache.backends.memcached.MemcachedCache',
'LOCATION' : '127.0.0.1:11211',
}
}
#CACHES = {
# 'default': {
# 'BACKEND' : 'django.core.cache.backends.locmem.LocMemCache'
# }
#}
# Enable the Ubuntu theme if it is present.
try:
from ubuntu_theme import *
except ImportError:
pass
# Default Ubuntu apache configuration uses /horizon as the application root.
# Configure auth redirects here accordingly.
LOGIN_URL='/horizon/auth/login/'
LOGOUT_URL='/horizon/auth/logout/'
LOGIN_REDIRECT_URL='/horizon'
# The Ubuntu package includes pre-compressed JS and compiled CSS to allow
# offline compression by default. To enable online compression, install
# the node-less package and enable the following option.
COMPRESS_OFFLINE = True
# By default, validation of the HTTP Host header is disabled. Production
# installations should have this set accordingly. For more information
# see https://docs.djangoproject.com/en/dev/ref/settings/.
ALLOWED_HOSTS = ['{{ dashboard_host }}', '0.0.0.0']
# Send email to the console by default
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
# Or send them to /dev/null
#EMAIL_BACKEND = 'django.core.mail.backends.dummy.EmailBackend'
# Configure these for your outgoing email host
# EMAIL_HOST = 'smtp.my-company.com'
# EMAIL_PORT = 25
# EMAIL_HOST_USER = 'djangomail'
# EMAIL_HOST_PASSWORD = 'top-secret!'
# For multiple regions uncomment this configuration, and add (endpoint, title).
# AVAILABLE_REGIONS = [
# ('http://cluster1.example.com:5000/v2.0', 'cluster1'),
# ('http://cluster2.example.com:5000/v2.0', 'cluster2'),
# ]
OPENSTACK_HOST = "{{ identity_host }}"
OPENSTACK_KEYSTONE_URL = "http://%s:5000/v2.0" % OPENSTACK_HOST
OPENSTACK_KEYSTONE_DEFAULT_ROLE = "_member_"
# Disable SSL certificate checks (useful for self-signed certificates):
# OPENSTACK_SSL_NO_VERIFY = True
# The CA certificate to use to verify SSL connections
# OPENSTACK_SSL_CACERT = '/path/to/cacert.pem'
# The OPENSTACK_KEYSTONE_BACKEND settings can be used to identify the
# capabilities of the auth backend for Keystone.
# If Keystone has been configured to use LDAP as the auth backend then set
# can_edit_user to False and name to 'ldap'.
#
# TODO(tres): Remove these once Keystone has an API to identify auth backend.
OPENSTACK_KEYSTONE_BACKEND = {
'name': 'native',
'can_edit_user': True,
'can_edit_group': True,
'can_edit_project': True,
'can_edit_domain': True,
'can_edit_role': True
}
#Setting this to True, will add a new "Retrieve Password" action on instance,
#allowing Admin session password retrieval/decryption.
#OPENSTACK_ENABLE_PASSWORD_RETRIEVE = False
# The Xen Hypervisor has the ability to set the mount point for volumes
# attached to instances (other Hypervisors currently do not). Setting
# can_set_mount_point to True will add the option to set the mount point
# from the UI.
OPENSTACK_HYPERVISOR_FEATURES = {
'can_set_mount_point': False,
'can_set_password': False,
}
# The OPENSTACK_NEUTRON_NETWORK settings can be used to enable optional
# services provided by neutron. Options currently available are load
# balancer service, security groups, quotas, VPN service.
OPENSTACK_NEUTRON_NETWORK = {
'enable_lb': False,
'enable_firewall': False,
'enable_quotas': True,
'enable_vpn': False,
# The profile_support option is used to detect if an external router can be
# configured via the dashboard. When using specific plugins the
# profile_support can be turned on if needed.
'profile_support': None,
#'profile_support': 'cisco',
}
# The OPENSTACK_IMAGE_BACKEND settings can be used to customize features
# in the OpenStack Dashboard related to the Image service, such as the list
# of supported image formats.
# OPENSTACK_IMAGE_BACKEND = {
# 'image_formats': [
# ('', ''),
# ('aki', _('AKI - Amazon Kernel Image')),
# ('ami', _('AMI - Amazon Machine Image')),
# ('ari', _('ARI - Amazon Ramdisk Image')),
# ('iso', _('ISO - Optical Disk Image')),
# ('qcow2', _('QCOW2 - QEMU Emulator')),
# ('raw', _('Raw')),
# ('vdi', _('VDI')),
# ('vhd', _('VHD')),
# ('vmdk', _('VMDK'))
# ]
# }
# The IMAGE_CUSTOM_PROPERTY_TITLES settings is used to customize the titles for
# image custom property attributes that appear on image detail pages.
IMAGE_CUSTOM_PROPERTY_TITLES = {
"architecture": _("Architecture"),
"kernel_id": _("Kernel ID"),
"ramdisk_id": _("Ramdisk ID"),
"image_state": _("Euca2ools state"),
"project_id": _("Project ID"),
"image_type": _("Image Type")
}
# OPENSTACK_ENDPOINT_TYPE specifies the endpoint type to use for the endpoints
# in the Keystone service catalog. Use this setting when Horizon is running
# external to the OpenStack environment. The default is 'publicURL'.
#OPENSTACK_ENDPOINT_TYPE = "publicURL"
# SECONDARY_ENDPOINT_TYPE specifies the fallback endpoint type to use in the
# case that OPENSTACK_ENDPOINT_TYPE is not present in the endpoints
# in the Keystone service catalog. Use this setting when Horizon is running
# external to the OpenStack environment. The default is None. This
# value should differ from OPENSTACK_ENDPOINT_TYPE if used.
#SECONDARY_ENDPOINT_TYPE = "publicURL"
# The number of objects (Swift containers/objects or images) to display
# on a single page before providing a paging element (a "more" link)
# to paginate results.
API_RESULT_LIMIT = 1000
API_RESULT_PAGE_SIZE = 20
# The timezone of the server. This should correspond with the timezone
# of your entire OpenStack installation, and hopefully be in UTC.
TIME_ZONE = "UTC"
# When launching an instance, the menu of available flavors is
# sorted by RAM usage, ascending. If you would like a different sort order,
# you can provide another flavor attribute as sorting key. Alternatively, you
# can provide a custom callback method to use for sorting. You can also provide
# a flag for reverse sort. For more info, see
# http://docs.python.org/2/library/functions.html#sorted
# CREATE_INSTANCE_FLAVOR_SORT = {
# 'key': 'name',
# # or
# 'key': my_awesome_callback_method,
# 'reverse': False,
# }
# The Horizon Policy Enforcement engine uses these values to load per service
# policy rule files. The content of these files should match the files the
# OpenStack services are using to determine role based access control in the
# target installation.
# Path to directory containing policy.json files
#POLICY_FILES_PATH = os.path.join(ROOT_PATH, "conf")
# Map of local copy of service policy files
#POLICY_FILES = {
# 'identity': 'keystone_policy.json',
# 'compute': 'nova_policy.json',
# 'volume': 'cinder_policy.json',
# 'image': 'glance_policy.json',
#}
# Trove user and database extension support. By default support for
# creating users and databases on database instances is turned on.
# To disable these extensions set the permission here to something
# unusable such as ["!"].
# TROVE_ADD_USER_PERMS = []
# TROVE_ADD_DATABASE_PERMS = []
LOGGING = {
'version': 1,
# When set to True this will disable all logging except
# for loggers specified in this configuration dictionary. Note that
# if nothing is specified here and disable_existing_loggers is True,
# django.db.backends will still log unless it is disabled explicitly.
'disable_existing_loggers': False,
'handlers': {
'null': {
'level': 'DEBUG',
'class': 'django.utils.log.NullHandler',
},
'console': {
# Set the level to "DEBUG" for verbose output logging.
'level': 'INFO',
'class': 'logging.StreamHandler',
},
},
'loggers': {
# Logging from django.db.backends is VERY verbose, send to null
# by default.
'django.db.backends': {
'handlers': ['null'],
'propagate': False,
},
'requests': {
'handlers': ['null'],
'propagate': False,
},
'horizon': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': False,
},
'openstack_dashboard': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': False,
},
'novaclient': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': False,
},
'cinderclient': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': False,
},
'keystoneclient': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': False,
},
'glanceclient': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': False,
},
'neutronclient': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': False,
},
'heatclient': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': False,
},
'ceilometerclient': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': False,
},
'troveclient': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': False,
},
'swiftclient': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': False,
},
'openstack_auth': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': False,
},
'nose.plugins.manager': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': False,
},
'django': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': False,
},
'iso8601': {
'handlers': ['null'],
'propagate': False,
},
}
}
# 'direction' should not be specified for all_tcp/udp/icmp.
# It is specified in the form.
SECURITY_GROUP_RULES = {
'all_tcp': {
'name': 'ALL TCP',
'ip_protocol': 'tcp',
'from_port': '1',
'to_port': '65535',
},
'all_udp': {
'name': 'ALL UDP',
'ip_protocol': 'udp',
'from_port': '1',
'to_port': '65535',
},
'all_icmp': {
'name': 'ALL ICMP',
'ip_protocol': 'icmp',
'from_port': '-1',
'to_port': '-1',
},
'ssh': {
'name': 'SSH',
'ip_protocol': 'tcp',
'from_port': '22',
'to_port': '22',
},
'smtp': {
'name': 'SMTP',
'ip_protocol': 'tcp',
'from_port': '25',
'to_port': '25',
},
'dns': {
'name': 'DNS',
'ip_protocol': 'tcp',
'from_port': '53',
'to_port': '53',
},
'http': {
'name': 'HTTP',
'ip_protocol': 'tcp',
'from_port': '80',
'to_port': '80',
},
'pop3': {
'name': 'POP3',
'ip_protocol': 'tcp',
'from_port': '110',
'to_port': '110',
},
'imap': {
'name': 'IMAP',
'ip_protocol': 'tcp',
'from_port': '143',
'to_port': '143',
},
'ldap': {
'name': 'LDAP',
'ip_protocol': 'tcp',
'from_port': '389',
'to_port': '389',
},
'https': {
'name': 'HTTPS',
'ip_protocol': 'tcp',
'from_port': '443',
'to_port': '443',
},
'smtps': {
'name': 'SMTPS',
'ip_protocol': 'tcp',
'from_port': '465',
'to_port': '465',
},
'imaps': {
'name': 'IMAPS',
'ip_protocol': 'tcp',
'from_port': '993',
'to_port': '993',
},
'pop3s': {
'name': 'POP3S',
'ip_protocol': 'tcp',
'from_port': '995',
'to_port': '995',
},
'ms_sql': {
'name': 'MS SQL',
'ip_protocol': 'tcp',
'from_port': '1433',
'to_port': '1433',
},
'mysql': {
'name': 'MYSQL',
'ip_protocol': 'tcp',
'from_port': '3306',
'to_port': '3306',
},
'rdp': {
'name': 'RDP',
'ip_protocol': 'tcp',
'from_port': '3389',
'to_port': '3389',
},
}
FLAVOR_EXTRA_KEYS = {
'flavor_keys': [
('quota:read_bytes_sec', _('Quota: Read bytes')),
('quota:write_bytes_sec', _('Quota: Write bytes')),
('quota:cpu_quota', _('Quota: CPU')),
('quota:cpu_period', _('Quota: CPU period')),
('quota:inbound_average', _('Quota: Inbound average')),
('quota:outbound_average', _('Quota: Outbound average')),
]
}

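Since this template is rendered straight over the packaged settings, a syntax slip breaks the whole dashboard; a cheap sanity check after deployment (a sketch, assuming the stock Ubuntu path):

# Fails loudly on any Python syntax error in the rendered settings
python -m py_compile /etc/openstack-dashboard/local_settings.py && echo OK
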
View File

@@ -0,0 +1,14 @@
<VirtualHost *:80>
WSGIScriptAlias / /usr/share/openstack-dashboard/openstack_dashboard/wsgi/django.wsgi
WSGIDaemonProcess horizon user=www-data group=www-data processes=3 threads=10
Alias /static /usr/share/openstack-dashboard/openstack_dashboard/static/
<Directory /usr/share/openstack-dashboard/openstack_dashboard/wsgi>
Order allow,deny
Allow from all
</Directory>
</VirtualHost>

View File

@@ -0,0 +1,131 @@
#
# The MySQL database server configuration file.
#
# You can copy this to one of:
# - "/etc/mysql/my.cnf" to set global options,
# - "~/.my.cnf" to set user-specific options.
#
# One can use all long options that the program supports.
# Run program with --help to get a list of available options and with
# --print-defaults to see which it would actually understand and use.
#
# For explanations see
# http://dev.mysql.com/doc/mysql/en/server-system-variables.html
# This will be passed to all mysql clients
# It has been reported that passwords should be enclosed with ticks/quotes
# especially if they contain "#" chars...
# Remember to edit /etc/mysql/debian.cnf when changing the socket location.
[client]
port = 3306
socket = /var/run/mysqld/mysqld.sock
# Here are entries for some specific programs
# The following values assume you have at least 32M ram
# This was formerly known as [safe_mysqld]. Both versions are currently parsed.
[mysqld_safe]
socket = /var/run/mysqld/mysqld.sock
nice = 0
[mysqld]
#
# * Basic Settings
#
user = mysql
pid-file = /var/run/mysqld/mysqld.pid
socket = /var/run/mysqld/mysqld.sock
port = 3306
basedir = /usr
datadir = /var/lib/mysql
tmpdir = /tmp
lc-messages-dir = /usr/share/mysql
skip-external-locking
#
# Instead of skip-networking the default is now to listen only on
# localhost which is more compatible and is not less secure.
bind-address = 0.0.0.0
#
# * Fine Tuning
#
key_buffer = 16M
max_allowed_packet = 16M
thread_stack = 192K
thread_cache_size = 8
# This replaces the startup script and checks MyISAM tables if needed
# the first time they are touched
myisam-recover = BACKUP
#max_connections = 100
#table_cache = 64
#thread_concurrency = 10
#
# * Query Cache Configuration
#
query_cache_limit = 1M
query_cache_size = 16M
#
# * Logging and Replication
#
# Both locations get rotated by the cronjob.
# Be aware that this log type is a performance killer.
# As of 5.1 you can enable the log at runtime!
#general_log_file = /var/log/mysql/mysql.log
#general_log = 1
#
# Error log - should be very few entries.
#
log_error = /var/log/mysql/error.log
#
# Here you can see queries with especially long duration
#log_slow_queries = /var/log/mysql/mysql-slow.log
#long_query_time = 2
#log-queries-not-using-indexes
#
# The following can be used as easy to replay backup logs or for replication.
# note: if you are setting up a replication slave, see README.Debian about
# other settings you may need to change.
#server-id = 1
#log_bin = /var/log/mysql/mysql-bin.log
expire_logs_days = 10
max_binlog_size = 100M
#binlog_do_db = include_database_name
#binlog_ignore_db = include_database_name
#
# * InnoDB
#
# InnoDB is enabled by default with a 10MB datafile in /var/lib/mysql/.
# Read the manual for more InnoDB related options. There are many!
#
# * Security Features
#
# Read the manual, too, if you want chroot!
# chroot = /var/lib/mysql/
#
# For generating SSL certificates I recommend the OpenSSL GUI "tinyca".
#
# ssl-ca=/etc/mysql/cacert.pem
# ssl-cert=/etc/mysql/server-cert.pem
# ssl-key=/etc/mysql/server-key.pem
default-storage-engine = innodb
innodb_file_per_table
collation-server = utf8_general_ci
init-connect = 'SET NAMES utf8'
character-set-server = utf8
[mysqldump]
quick
quote-names
max_allowed_packet = 16M
[mysql]
#no-auto-rehash # faster start of mysql but no tab completion
[isamchk]
key_buffer = 16M
#
# * IMPORTANT: Additional settings that can override those from this file!
# The files must end with '.cnf', otherwise they'll be ignored.
#
!includedir /etc/mysql/conf.d/

View File

@@ -0,0 +1,3 @@
---
- name: restart mysql
service: name=mysql state=restarted

View File

@@ -0,0 +1,123 @@
---
- name: install mysql client and server packages
apt: name={{ item }} state=present
with_items:
- python-mysqldb
- mysql-server
- name: update mysql my.cnf
copy: src=my.cnf
dest=/etc/mysql/my.cnf
backup=yes
notify:
- restart mysql
- name: manually restart mysql server before creating databases
service: name=mysql
state=restarted
- name: create keystone database
mysql_db: name=keystone
state=present
- name: create keystone local user
mysql_user: name=keystone
password={{ KEYSTONE_DBPASS }}
priv=keystone.*:ALL
state=present
- name: create keystone remote user
mysql_user: host=%
name=keystone
password={{ KEYSTONE_DBPASS }}
priv=keystone.*:ALL
state=present
# glance
- name: create glance database
mysql_db: name=glance
state=present
- name: create glance local user
mysql_user: name=glance
password={{ GLANCE_DBPASS }}
priv=glance.*:ALL
state=present
- name: create glance remote user
mysql_user: host=%
name=glance
password={{ GLANCE_DBPASS }}
priv=glance.*:ALL
state=present
# neutron
- name: create neutron database
mysql_db: name=neutron
state=present
- name: create neutron local user
mysql_user: name=neutron
password={{ NEUTRON_DBPASS }}
priv=neutron.*:ALL
state=present
- name: create neutron remote user
mysql_user: host=%
name=neutron
password={{ NEUTRON_DBPASS }}
priv=neutron.*:ALL
state=present
- name: create ovs_neutron database
mysql_db: name=ovs_neutron
state=present
- name: grant access to ovs_neutron
mysql_user: name=neutron
password={{ NEUTRON_DBPASS }}
priv=neutron.*:ALL/ovs_neutron.*:ALL
state=present
- name: grant remote access to ovs_neutron
mysql_user: host=%
name=neutron
password={{ NEUTRON_DBPASS }}
priv=neutron.*:ALL/ovs_neutron.*:ALL
state=present
# nova
- name: create nova database
mysql_db: name=nova
state=present
- name: create nova local user
mysql_user: name=nova
password={{ NOVA_DBPASS }}
priv=nova.*:ALL
state=present
- name: create nova remote user
mysql_user: host=%
name=nova
password={{ NOVA_DBPASS }}
priv=nova.*:ALL
state=present
# cinder
- name: create cinder database
mysql_db: name=cinder
state=present
- name: create cinder local user
mysql_user: name=cinder
password={{ CINDER_DBPASS }}
priv=cinder.*:ALL
state=present
- name: create cinder remote user
mysql_user: host=%
name=cinder
password={{ CINDER_DBPASS }}
priv=cinder.*:ALL
state=present

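A quick way to confirm the users and grants came out as intended (a sketch; run on the database host as root):

mysql -u root -e "SELECT User, Host FROM mysql.user WHERE User IN ('keystone','glance','neutron','nova','cinder');"
mysql -u root -e "SHOW GRANTS FOR 'neutron'@'%';"
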
View File

@@ -0,0 +1,6 @@
---
- name: restart glance-api
service: name=glance-api state=restarted
- name: restart glance-registry
service: name=glance-registry state=restarted

View File

@@ -0,0 +1,46 @@
---
- name: install glance packages
apt: name={{ item }} state=latest force=yes
with_items:
- glance
- python-glanceclient
- name: update glance conf
template: src={{ item }} dest=/etc/glance/{{ item }} backup=yes
with_items:
- glance-api.conf
- glance-registry.conf
notify:
- restart glance-registry
- restart glance-api
#- name: manually stop glance-api
# service: name=glance-api state=stopped
#- name: manually stop glance-registry
# service: name=glance-registry state=stopped
- name: remove default sqlite db
shell: rm /var/lib/glance/glance.sqlite || touch glance.sqlite.db.removed
- name: sync glance db
shell: su -s /bin/sh -c "glance-manage db_sync" glance
notify:
- restart glance-registry
- restart glance-api
- meta: flush_handlers
- name: place image upload script
template: src=image_upload.sh dest=/opt/image_upload.sh mode=0744
- name: download cirros image file
get_url: url={{ build_in_image }} dest=/opt/{{ build_in_image_name }}
- name: wait for port 9292 to become available
wait_for: port=9292 delay=5
- name: run image upload
shell: /opt/image_upload.sh && touch image_upload_completed
args:
creates: image_upload_completed

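The image_upload.sh template itself is not part of this hunk; a minimal sketch of what it would need to contain, assuming the Juno-era glance CLI and the admin credentials from the group variables:

#!/bin/sh
# Register the downloaded CirrOS image with Glance
export OS_TENANT_NAME=admin
export OS_USERNAME=admin
export OS_PASSWORD={{ ADMIN_PASS }}
export OS_AUTH_URL=http://{{ identity_host }}:5000/v2.0
glance image-create --name "{{ build_in_image_name }}" \
  --file /opt/{{ build_in_image_name }} \
  --disk-format qcow2 --container-format bare --is-public True
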
View File

@@ -0,0 +1,677 @@
[DEFAULT]
# Show more verbose log output (sets INFO log level output)
#verbose = False
# Show debugging output in logs (sets DEBUG log level output)
#debug = False
# Which backend scheme should Glance use by default if one is not specified
# in a request to add a new image to Glance? Known schemes are determined
# by the known_stores option below.
# Default: 'file'
# "default_store" option has been moved to [glance_store] section in
# Juno release
# List of which store classes and store class locations are
# currently known to glance at startup.
# Existing but disabled stores:
# glance.store.rbd.Store,
# glance.store.s3.Store,
# glance.store.swift.Store,
# glance.store.sheepdog.Store,
# glance.store.cinder.Store,
# glance.store.gridfs.Store,
# glance.store.vmware_datastore.Store,
#known_stores = glance.store.filesystem.Store,
# glance.store.http.Store
# Maximum image size (in bytes) that may be uploaded through the
# Glance API server. Defaults to 1 TB.
# WARNING: this value should only be increased after careful consideration
# and must be set to a value under 8 EB (9223372036854775808).
#image_size_cap = 1099511627776
# Address to bind the API server
bind_host = 0.0.0.0
# Port to bind the API server to
bind_port = 9292
# Log to this file. Make sure you do not set the same log file for both the API
# and registry servers!
#
# If `log_file` is omitted and `use_syslog` is false, then log messages are
# sent to stdout as a fallback.
log_file = /var/log/glance/api.log
# Backlog requests when creating socket
backlog = 4096
# TCP_KEEPIDLE value in seconds when creating socket.
# Not supported on OS X.
#tcp_keepidle = 600
# API to use for accessing data. Default value points to sqlalchemy
# package, it is also possible to use: glance.db.registry.api
# data_api = glance.db.sqlalchemy.api
# Number of Glance API worker processes to start.
# On machines with more than one CPU increasing this value
# may improve performance (especially if using SSL with
# compression turned on). It is typically recommended to set
# this value to the number of CPUs present on your machine.
workers = 1
# Maximum line size of message headers to be accepted.
# max_header_line may need to be increased when using large tokens
# (typically those generated by the Keystone v3 API with big service
# catalogs)
# max_header_line = 16384
# Role used to identify an authenticated user as administrator
#admin_role = admin
# Allow unauthenticated users to access the API with read-only
# privileges. This only applies when using ContextMiddleware.
#allow_anonymous_access = False
# Allow access to version 1 of glance api
#enable_v1_api = True
# Allow access to version 2 of glance api
#enable_v2_api = True
# Return the URL that references where the data is stored on
# the backend storage system. For example, if using the
# file system store a URL of 'file:///path/to/image' will
# be returned to the user in the 'direct_url' meta-data field.
# The default value is false.
#show_image_direct_url = False
# Send headers containing user and tenant information when making requests to
# the v1 glance registry. This allows the registry to function as if a user is
# authenticated without the need to authenticate a user itself using the
# auth_token middleware.
# The default value is false.
#send_identity_headers = False
# Supported values for the 'container_format' image attribute
#container_formats=ami,ari,aki,bare,ovf,ova
# Supported values for the 'disk_format' image attribute
#disk_formats=ami,ari,aki,vhd,vmdk,raw,qcow2,vdi,iso
# Directory to use for lock files. Default to a temp directory
# (string value). This setting needs to be the same for both
# glance-scrubber and glance-api.
#lock_path=<None>
# Property Protections config file
# This file contains the rules for property protections and the roles/policies
# associated with it.
# If this config value is not specified, by default, property protections
# won't be enforced.
# If a value is specified and the file is not found, then the glance-api
# service will not start.
#property_protection_file =
# Specify whether 'roles' or 'policies' are used in the
# property_protection_file.
# The default value for property_protection_rule_format is 'roles'.
#property_protection_rule_format = roles
# Specifies how long (in hours) a task is supposed to live in the tasks DB
# after succeeding or failing before getting soft-deleted.
# The default value for task_time_to_live is 48 hours.
# task_time_to_live = 48
# This value sets what strategy will be used to determine the image location
# order. Currently two strategies are packaged with Glance 'location_order'
# and 'store_type'.
#location_strategy = location_order
# ================= Syslog Options ============================
# Send logs to syslog (/dev/log) instead of to file specified
# by `log_file`
#use_syslog = False
# Facility to use. If unset defaults to LOG_USER.
#syslog_log_facility = LOG_LOCAL0
# ================= SSL Options ===============================
# Certificate file to use when starting API server securely
#cert_file = /path/to/certfile
# Private key file to use when starting API server securely
#key_file = /path/to/keyfile
# CA certificate file to use to verify connecting clients
#ca_file = /path/to/cafile
# ================= Security Options ==========================
# AES key for encrypting store 'location' metadata, including
# -- if used -- Swift or S3 credentials
# Should be set to a random string of length 16, 24 or 32 bytes
#metadata_encryption_key = <16, 24 or 32 char registry metadata key>
# ============ Registry Options ===============================
# Address to find the registry server
registry_host = 0.0.0.0
# Port the registry server is listening on
registry_port = 9191
# What protocol to use when connecting to the registry server?
# Set to https for secure HTTP communication
registry_client_protocol = http
# The path to the key file to use in SSL connections to the
# registry server, if any. Alternately, you may set the
# GLANCE_CLIENT_KEY_FILE environ variable to a filepath of the key file
#registry_client_key_file = /path/to/key/file
# The path to the cert file to use in SSL connections to the
# registry server, if any. Alternately, you may set the
# GLANCE_CLIENT_CERT_FILE environ variable to a filepath of the cert file
#registry_client_cert_file = /path/to/cert/file
# The path to the certifying authority cert file to use in SSL connections
# to the registry server, if any. Alternately, you may set the
# GLANCE_CLIENT_CA_FILE environ variable to a filepath of the CA cert file
#registry_client_ca_file = /path/to/ca/file
# When using SSL in connections to the registry server, do not require
# validation via a certifying authority. This is the registry's equivalent of
# specifying --insecure on the command line using glanceclient for the API
# Default: False
#registry_client_insecure = False
# The period of time, in seconds, that the API server will wait for a registry
# request to complete. A value of '0' implies no timeout.
# Default: 600
#registry_client_timeout = 600
# Whether to automatically create the database tables.
# Default: False
#db_auto_create = False
# Enable DEBUG log messages from sqlalchemy which prints every database
# query and response.
# Default: False
#sqlalchemy_debug = True
# Pass the user's token through for API requests to the registry.
# Default: True
#use_user_token = True
# If 'use_user_token' is not in effect then admin credentials
# can be specified. Requests to the registry on behalf of
# the API will use these credentials.
# Admin user name
#admin_user = None
# Admin password
#admin_password = None
# Admin tenant name
#admin_tenant_name = None
# Keystone endpoint
#auth_url = None
# Keystone region
#auth_region = None
# Auth strategy
#auth_strategy = keystone
# ============ Notification System Options =====================
# Notifications can be sent when images are created, updated or deleted.
# There are three methods of sending notifications, logging (via the
# log_file directive), rabbit (via a rabbitmq queue), qpid (via a Qpid
# message queue), or noop (no notifications sent, the default)
# NOTE: THIS CONFIGURATION OPTION HAS BEEN DEPRECATED IN FAVOR OF `notification_driver`
# notifier_strategy = default
# Driver or drivers to handle sending notifications
# notification_driver = noop
# Default publisher_id for outgoing notifications.
# default_publisher_id = image.localhost
# Configuration options if sending notifications via rabbitmq (these are
# the defaults)
rabbit_host = localhost
rabbit_port = 5672
rabbit_use_ssl = false
rabbit_userid = guest
rabbit_password = guest
rabbit_virtual_host = /
rabbit_notification_exchange = glance
rabbit_notification_topic = notifications
rabbit_durable_queues = False
# Configuration options if sending notifications via Qpid (these are
# the defaults)
qpid_notification_exchange = glance
qpid_notification_topic = notifications
qpid_hostname = localhost
qpid_port = 5672
qpid_username =
qpid_password =
qpid_sasl_mechanisms =
qpid_reconnect_timeout = 0
qpid_reconnect_limit = 0
qpid_reconnect_interval_min = 0
qpid_reconnect_interval_max = 0
qpid_reconnect_interval = 0
qpid_heartbeat = 5
# Set to 'ssl' to enable SSL
qpid_protocol = tcp
qpid_tcp_nodelay = True
# ============ Filesystem Store Options ========================
# Directory that the Filesystem backend store
# writes image data to
# this option has been moved to [glance_store] for Juno release
# filesystem_store_datadir = /var/lib/glance/images/
# A list of directories where image data can be stored.
# This option may be specified multiple times for specifying multiple store
# directories. Either one of filesystem_store_datadirs or
# filesystem_store_datadir option is required. A priority number may be given
# after each directory entry, separated by a ":".
# When adding an image, the highest priority directory will be selected, unless
# there is not enough space available in cases where the image size is already
# known. If no priority is given, it is assumed to be zero and the directory
# will be considered for selection last. If multiple directories have the same
# priority, then the one with the most free space available is selected.
# If same store is specified multiple times then BadStoreConfiguration
# exception will be raised.
#filesystem_store_datadirs = /var/lib/glance/images/:1
# A path to a JSON file that contains metadata describing the storage
# system. When show_multiple_locations is True the information in this
# file will be returned with any location that is contained in this
# store.
#filesystem_store_metadata_file = None
# ============ Swift Store Options =============================
# Version of the authentication service to use
# Valid versions are '2' for keystone and '1' for swauth and rackspace
swift_store_auth_version = 2
# Address where the Swift authentication service lives
# Valid schemes are 'http://' and 'https://'
# If no scheme specified, default to 'https://'
# For swauth, use something like '127.0.0.1:8080/v1.0/'
swift_store_auth_address = 127.0.0.1:5000/v2.0/
# User to authenticate against the Swift authentication service
# If you use Swift authentication service, set it to 'account':'user'
# where 'account' is a Swift storage account and 'user'
# is a user in that account
swift_store_user = jdoe:jdoe
# Auth key for the user authenticating against the
# Swift authentication service
swift_store_key = a86850deb2742ec3cb41518e26aa2d89
# Container within the account that the account should use
# for storing images in Swift
swift_store_container = glance
# Do we create the container if it does not exist?
swift_store_create_container_on_put = False
# What size, in MB, should Glance start chunking image files
# and do a large object manifest in Swift? By default, this is
# the maximum object size in Swift, which is 5GB
swift_store_large_object_size = 5120
# When doing a large object manifest, what size, in MB, should
# Glance write chunks to Swift? This amount of data is written
# to a temporary disk buffer during the process of chunking
# the image file, and the default is 200MB
swift_store_large_object_chunk_size = 200
# Whether to use ServiceNET to communicate with the Swift storage servers.
# (If you aren't RACKSPACE, leave this False!)
#
# To use ServiceNET for authentication, prefix hostname of
# `swift_store_auth_address` with 'snet-'.
# Ex. https://example.com/v1.0/ -> https://snet-example.com/v1.0/
swift_enable_snet = False
# If set to True enables multi-tenant storage mode which causes Glance images
# to be stored in tenant specific Swift accounts.
#swift_store_multi_tenant = False
# A list of swift ACL strings that will be applied as both read and
# write ACLs to the containers created by Glance in multi-tenant
# mode. This grants the specified tenants/users read and write access
# to all newly created image objects. The standard swift ACL string
# formats are allowed, including:
# <tenant_id>:<username>
# <tenant_name>:<username>
# *:<username>
# Multiple ACLs can be combined using a comma separated list, for
# example: swift_store_admin_tenants = service:glance,*:admin
#swift_store_admin_tenants =
# The region of the swift endpoint to be used for single tenant. This setting
# is only necessary if the tenant has multiple swift endpoints.
#swift_store_region =
# If set to False, disables SSL layer compression of https swift requests.
# Setting to 'False' may improve performance for images which are already
# in a compressed format, eg qcow2. If set to True, enables SSL layer
# compression (provided it is supported by the target swift proxy).
#swift_store_ssl_compression = True
# The number of times a Swift download will be retried before the
# request fails
#swift_store_retry_get_count = 0
# ============ S3 Store Options =============================
# Address where the S3 authentication service lives
# Valid schemes are 'http://' and 'https://'
# If no scheme specified, default to 'http://'
s3_store_host = 127.0.0.1:8080/v1.0/
# User to authenticate against the S3 authentication service
s3_store_access_key = <20-char AWS access key>
# Auth key for the user authenticating against the
# S3 authentication service
s3_store_secret_key = <40-char AWS secret key>
# Container within the account that the account should use
# for storing images in S3. Note that S3 has a flat namespace,
# so you need a unique bucket name for your glance images. An
# easy way to do this is append your AWS access key to "glance".
# S3 buckets in AWS *must* be lowercased, so remember to lowercase
# your AWS access key if you use it in your bucket name below!
s3_store_bucket = <lowercased 20-char aws access key>glance
# Do we create the bucket if it does not exist?
s3_store_create_bucket_on_put = False
# When sending images to S3, the data will first be written to a
# temporary buffer on disk. By default the platform's temporary directory
# will be used. If required, an alternative directory can be specified here.
#s3_store_object_buffer_dir = /path/to/dir
# When forming a bucket url, boto will either set the bucket name as the
# subdomain or as the first token of the path. Amazon's S3 service will
# accept it as the subdomain, but Swift's S3 middleware requires it be
# in the path. Set this to 'path' or 'subdomain' - defaults to 'subdomain'.
#s3_store_bucket_url_format = subdomain
# ============ RBD Store Options =============================
# Ceph configuration file path
# If using cephx authentication, this file should
# include a reference to the right keyring
# in a client.<USER> section
#rbd_store_ceph_conf = /etc/ceph/ceph.conf
# RADOS user to authenticate as (only applicable if using cephx)
# If <None>, a default will be chosen based on the client. section
# in rbd_store_ceph_conf
#rbd_store_user = <None>
# RADOS pool in which images are stored
#rbd_store_pool = images
# RADOS images will be chunked into objects of this size (in megabytes).
# For best performance, this should be a power of two
#rbd_store_chunk_size = 8
# ============ Sheepdog Store Options =============================
sheepdog_store_address = localhost
sheepdog_store_port = 7000
# Images will be chunked into objects of this size (in megabytes).
# For best performance, this should be a power of two
sheepdog_store_chunk_size = 64
# ============ Cinder Store Options ===============================
# Info to match when looking for cinder in the service catalog
# Format is : separated values of the form:
# <service_type>:<service_name>:<endpoint_type> (string value)
#cinder_catalog_info = volume:cinder:publicURL
# Override service catalog lookup with template for cinder endpoint
# e.g. http://localhost:8776/v1/%(project_id)s (string value)
#cinder_endpoint_template = <None>
# Region name of this node (string value)
#os_region_name = <None>
# Location of CA certificates file to use for cinder client requests
# (string value)
#cinder_ca_certificates_file = <None>
# Number of cinderclient retries on failed http calls (integer value)
#cinder_http_retries = 3
# Allow to perform insecure SSL requests to cinder (boolean value)
#cinder_api_insecure = False
# ============ VMware Datastore Store Options =====================
# ESX/ESXi or vCenter Server target system.
# The server value can be an IP address or a DNS name
# e.g. 127.0.0.1, 127.0.0.1:443, www.vmware-infra.com
#vmware_server_host = <None>
# Server username (string value)
#vmware_server_username = <None>
# Server password (string value)
#vmware_server_password = <None>
# Inventory path to a datacenter (string value)
# Value optional when vmware_server_ip is an ESX/ESXi host: if specified
# should be `ha-datacenter`.
#vmware_datacenter_path = <None>
# Datastore associated with the datacenter (string value)
#vmware_datastore_name = <None>
# The number of times we retry on failures
# e.g., socket error, etc (integer value)
#vmware_api_retry_count = 10
# The interval used for polling remote tasks
# invoked on VMware ESX/VC server in seconds (integer value)
#vmware_task_poll_interval = 5
# Absolute path of the folder containing the images in the datastore
# (string value)
#vmware_store_image_dir = /openstack_glance
# Allow to perform insecure SSL requests to the target system (boolean value)
#vmware_api_insecure = False
# ============ Delayed Delete Options =============================
# Turn on/off delayed delete
delayed_delete = False
# Delayed delete time in seconds
scrub_time = 43200
# Directory that the scrubber will use to remind itself of what to delete
# Make sure this is also set in glance-scrubber.conf
scrubber_datadir = /var/lib/glance/scrubber
# =============== Quota Options ==================================
# The maximum number of image members allowed per image
#image_member_quota = 128
# The maximum number of image properties allowed per image
#image_property_quota = 128
# The maximum number of tags allowed per image
#image_tag_quota = 128
# The maximum number of locations allowed per image
#image_location_quota = 10
# Set a system wide quota for every user. This value is the total number
# of bytes that a user can use across all storage systems. A value of
# 0 means unlimited.
#user_storage_quota = 0
# =============== Image Cache Options =============================
# Base directory that the Image Cache uses
image_cache_dir = /var/lib/glance/image-cache/
# =============== Manager Options =================================
# DEPRECATED. TO BE REMOVED IN THE JUNO RELEASE.
# Whether or not to enforce that all DB tables have charset utf8.
# If your database tables do not have charset utf8 you will
# need to convert before this option is removed. This option is
# only relevant if your database engine is MySQL.
#db_enforce_mysql_charset = True
# =============== Glance Store ====================================
[glance_store]
# Moved from [DEFAULT], for Juno release
default_store = file
filesystem_store_datadir = /var/lib/glance/images/
# =============== Database Options =================================
[database]
# The file name to use with SQLite (string value)
sqlite_db = /var/lib/glance/glance.sqlite
# If True, SQLite uses synchronous mode (boolean value)
#sqlite_synchronous = True
# The backend to use for db (string value)
# Deprecated group/name - [DEFAULT]/db_backend
backend = sqlalchemy
# The SQLAlchemy connection string used to connect to the
# database (string value)
# Deprecated group/name - [DEFAULT]/sql_connection
# Deprecated group/name - [DATABASE]/sql_connection
# Deprecated group/name - [sql]/connection
#connection = <None>
connection = mysql://glance:{{ GLANCE_DBPASS }}@{{ db_host }}/glance
# The SQL mode to be used for MySQL sessions. This option,
# including the default, overrides any server-set SQL mode. To
# use whatever SQL mode is set by the server configuration,
# set this to no value. Example: mysql_sql_mode= (string
# value)
#mysql_sql_mode = TRADITIONAL
# Timeout before idle sql connections are reaped (integer
# value)
# Deprecated group/name - [DEFAULT]/sql_idle_timeout
# Deprecated group/name - [DATABASE]/sql_idle_timeout
# Deprecated group/name - [sql]/idle_timeout
#idle_timeout = 3600
# Minimum number of SQL connections to keep open in a pool
# (integer value)
# Deprecated group/name - [DEFAULT]/sql_min_pool_size
# Deprecated group/name - [DATABASE]/sql_min_pool_size
#min_pool_size = 1
# Maximum number of SQL connections to keep open in a pool
# (integer value)
# Deprecated group/name - [DEFAULT]/sql_max_pool_size
# Deprecated group/name - [DATABASE]/sql_max_pool_size
#max_pool_size = <None>
# Maximum db connection retries during startup. (setting -1
# implies an infinite retry count) (integer value)
# Deprecated group/name - [DEFAULT]/sql_max_retries
# Deprecated group/name - [DATABASE]/sql_max_retries
#max_retries = 10
# Interval between retries of opening a sql connection
# (integer value)
# Deprecated group/name - [DEFAULT]/sql_retry_interval
# Deprecated group/name - [DATABASE]/reconnect_interval
#retry_interval = 10
# If set, use this value for max_overflow with sqlalchemy
# (integer value)
# Deprecated group/name - [DEFAULT]/sql_max_overflow
# Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow
#max_overflow = <None>
# Verbosity of SQL debugging information. 0=None,
# 100=Everything (integer value)
# Deprecated group/name - [DEFAULT]/sql_connection_debug
#connection_debug = 0
# Add python stack traces to SQL as comment strings (boolean
# value)
# Deprecated group/name - [DEFAULT]/sql_connection_trace
#connection_trace = False
# If set, use this value for pool_timeout with sqlalchemy
# (integer value)
# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout
#pool_timeout = <None>
# Enable the experimental use of database reconnect on
# connection lost (boolean value)
#use_db_reconnect = False
# seconds between db connection retries (integer value)
#db_retry_interval = 1
# Whether to increase interval between db connection retries,
# up to db_max_retry_interval (boolean value)
#db_inc_retry_interval = True
# max seconds between db connection retries, if
# db_inc_retry_interval is enabled (integer value)
#db_max_retry_interval = 10
# maximum db connection retries before error is raised.
# (setting -1 implies an infinite retry count) (integer value)
#db_max_retries = 20
[keystone_authtoken]
auth_uri = http://{{ identity_host }}:5000/v2.0
identity_uri = http://{{ identity_host }}:35357
admin_tenant_name = service
admin_user = glance
admin_password = {{ GLANCE_PASS }}
[paste_deploy]
# Name of the paste configuration file that defines the available pipelines
#config_file = glance-api-paste.ini
# Partial name of a pipeline in your paste configuration file with the
# service name removed. For example, if your paste section name is
# [pipeline:glance-api-keystone], you would configure the flavor below
# as 'keystone'.
flavor = keystone
[store_type_location_strategy]
# The scheme list to use to get store preference order. The scheme must be
# registered by one of the stores defined by the 'known_stores' config option.
# This option will be applied when you using 'store_type' option as image
# location strategy defined by the 'location_strategy' config option.
#store_type_preference =

View File

@ -0,0 +1,190 @@
[DEFAULT]
# Show more verbose log output (sets INFO log level output)
#verbose = False
# Show debugging output in logs (sets DEBUG log level output)
#debug = False
# Address to bind the registry server
bind_host = 0.0.0.0
# Port to bind the registry server to
bind_port = 9191
# Log to this file. Make sure you do not set the same log file for both the API
# and registry servers!
#
# If `log_file` is omitted and `use_syslog` is false, then log messages are
# sent to stdout as a fallback.
log_file = /var/log/glance/registry.log
# Backlog requests when creating socket
backlog = 4096
# TCP_KEEPIDLE value in seconds when creating socket.
# Not supported on OS X.
#tcp_keepidle = 600
# API to use for accessing data. Default value points to sqlalchemy
# package.
#data_api = glance.db.sqlalchemy.api
# Enable Registry API versions individually or simultaneously
#enable_v1_registry = True
#enable_v2_registry = True
# Limit the api to return `param_limit_max` items in a call to a container. If
# a larger `limit` query param is provided, it will be reduced to this value.
api_limit_max = 1000
# If a `limit` query param is not provided in an api request, it will
# default to `limit_param_default`
limit_param_default = 25
# Role used to identify an authenticated user as administrator
#admin_role = admin
# Whether to automatically create the database tables.
# Default: False
#db_auto_create = False
# Enable DEBUG log messages from sqlalchemy which prints every database
# query and response.
# Default: False
#sqlalchemy_debug = True
# ================= Syslog Options ============================
# Send logs to syslog (/dev/log) instead of to file specified
# by `log_file`
#use_syslog = False
# Facility to use. If unset defaults to LOG_USER.
#syslog_log_facility = LOG_LOCAL1
# ================= SSL Options ===============================
# Certificate file to use when starting registry server securely
#cert_file = /path/to/certfile
# Private key file to use when starting registry server securely
#key_file = /path/to/keyfile
# CA certificate file to use to verify connecting clients
#ca_file = /path/to/cafile
# ================= Database Options ==========================
[database]
# The file name to use with SQLite (string value)
sqlite_db = /var/lib/glance/glance.sqlite
# If True, SQLite uses synchronous mode (boolean value)
#sqlite_synchronous = True
# The backend to use for db (string value)
# Deprecated group/name - [DEFAULT]/db_backend
backend = sqlalchemy
# The SQLAlchemy connection string used to connect to the
# database (string value)
# Deprecated group/name - [DEFAULT]/sql_connection
# Deprecated group/name - [DATABASE]/sql_connection
# Deprecated group/name - [sql]/connection
#connection = <None>
connection = mysql://glance:{{ GLANCE_DBPASS }}@{{ db_host }}/glance
# The SQL mode to be used for MySQL sessions. This option,
# including the default, overrides any server-set SQL mode. To
# use whatever SQL mode is set by the server configuration,
# set this to no value. Example: mysql_sql_mode= (string
# value)
#mysql_sql_mode = TRADITIONAL
# Timeout before idle sql connections are reaped (integer
# value)
# Deprecated group/name - [DEFAULT]/sql_idle_timeout
# Deprecated group/name - [DATABASE]/sql_idle_timeout
# Deprecated group/name - [sql]/idle_timeout
#idle_timeout = 3600
# Minimum number of SQL connections to keep open in a pool
# (integer value)
# Deprecated group/name - [DEFAULT]/sql_min_pool_size
# Deprecated group/name - [DATABASE]/sql_min_pool_size
#min_pool_size = 1
# Maximum number of SQL connections to keep open in a pool
# (integer value)
# Deprecated group/name - [DEFAULT]/sql_max_pool_size
# Deprecated group/name - [DATABASE]/sql_max_pool_size
#max_pool_size = <None>
# Maximum db connection retries during startup. (setting -1
# implies an infinite retry count) (integer value)
# Deprecated group/name - [DEFAULT]/sql_max_retries
# Deprecated group/name - [DATABASE]/sql_max_retries
#max_retries = 10
# Interval between retries of opening a sql connection
# (integer value)
# Deprecated group/name - [DEFAULT]/sql_retry_interval
# Deprecated group/name - [DATABASE]/reconnect_interval
#retry_interval = 10
# If set, use this value for max_overflow with sqlalchemy
# (integer value)
# Deprecated group/name - [DEFAULT]/sql_max_overflow
# Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow
#max_overflow = <None>
# Verbosity of SQL debugging information. 0=None,
# 100=Everything (integer value)
# Deprecated group/name - [DEFAULT]/sql_connection_debug
#connection_debug = 0
# Add python stack traces to SQL as comment strings (boolean
# value)
# Deprecated group/name - [DEFAULT]/sql_connection_trace
#connection_trace = False
# If set, use this value for pool_timeout with sqlalchemy
# (integer value)
# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout
#pool_timeout = <None>
# Enable the experimental use of database reconnect on
# connection lost (boolean value)
#use_db_reconnect = False
# seconds between db connection retries (integer value)
#db_retry_interval = 1
# Whether to increase interval between db connection retries,
# up to db_max_retry_interval (boolean value)
#db_inc_retry_interval = True
# max seconds between db connection retries, if
# db_inc_retry_interval is enabled (integer value)
#db_max_retry_interval = 10
# maximum db connection retries before error is raised.
# (setting -1 implies an infinite retry count) (integer value)
#db_max_retries = 20
[keystone_authtoken]
auth_uri = http://{{ identity_host }}:5000/v2.0
identity_uri = http://{{ identity_host }}:35357
admin_tenant_name = service
admin_user = glance
admin_password = {{ GLANCE_PASS }}
[paste_deploy]
# Name of the paste configuration file that defines the available pipelines
#config_file = glance-registry-paste.ini
# Partial name of a pipeline in your paste configuration file with the
# service name removed. For example, if your paste section name is
# [pipeline:glance-registry-keystone], you would configure the flavor below
# as 'keystone'.
flavor = keystone

View File

@ -0,0 +1,2 @@
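# Wait for glance-api to finish starting before attempting the upload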
sleep 10
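# Register the downloaded build-in image with Glance; touching the sentinel
# file marks the import complete so the task is not re-run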
glance --os-username=admin --os-password={{ ADMIN_PASS }} --os-tenant-name=admin --os-auth-url=http://{{ identity_host }}:35357/v2.0 image-create --name="cirros" --disk-format=qcow2 --container-format=bare --is-public=true < /opt/{{ build_in_image_name }} && touch glance.import.completed
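# A minimal post-import check (sketch): the "cirros" image should appear in
# the Glance index once the command above completes.
#glance --os-username=admin --os-password={{ ADMIN_PASS }} --os-tenant-name=admin --os-auth-url=http://{{ identity_host }}:35357/v2.0 image-list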

View File

@ -0,0 +1,3 @@
---
- name: restart keystone
service: name=keystone state=restarted

View File

@ -0,0 +1,36 @@
---
- name: install keystone packages
apt: name=keystone state=present force=yes
- name: update keystone conf
template: src=keystone.conf dest=/etc/keystone/keystone.conf backup=yes
notify: restart keystone
- name: delete sqlite database
shell: rm /var/lib/keystone/keystone.db || echo sqlite database already removed
- name: manually stop keystone once
service: name=keystone state=stopped
- name: keystone-manage db-sync
shell: su -s /bin/sh -c "keystone-manage db_sync" keystone
- name: cron job to purge expired tokens hourly
shell: (crontab -l -u keystone 2>&1 | grep -q token_flush) || echo '@hourly /usr/bin/keystone-manage token_flush > /var/log/keystone/keystone-tokenflush.log 2>&1' >> /var/spool/cron/crontabs/keystone
- name: manually start keystone
service: name=keystone state=started
- name: place keystone init script under /opt/
template: src=keystone_init dest=/opt/keystone_init mode=0744
- name: run keystone_init
shell: /opt/keystone_init && touch keystone_init_complete || touch keystone_init_failed
args:
creates: keystone_init_complete
- name: keystone source files
template: src={{ item }} dest=/opt/{{ item }}
with_items:
- admin-openrc.sh
- demo-openrc.sh

View File

@ -0,0 +1,6 @@
# Verify the Identity Service installation
export OS_PASSWORD={{ ADMIN_PASS }}
export OS_TENANT_NAME=admin
export OS_AUTH_URL=http://{{ identity_host }}:35357/v2.0
export OS_USERNAME=admin
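# Usage sketch: source this file, then keystone CLI calls pick up the OS_*
# variables, e.g.:
#   source /opt/admin-openrc.sh
#   keystone user-list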

View File

@ -0,0 +1,5 @@
export OS_USERNAME=demo
export OS_PASSWORD={{ DEMO_PASS }}
export OS_TENANT_NAME=demo
export OS_AUTH_URL=http://{{ identity_host }}:35357/v2.0

File diff suppressed because it is too large

View File

@ -0,0 +1,43 @@
# create an administrative user
keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ identity_host }}:35357/v2.0 user-create --name=admin --pass={{ ADMIN_PASS }} --email=admin@admin.com
keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ identity_host }}:35357/v2.0 role-create --name=admin
keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ identity_host }}:35357/v2.0 tenant-create --name=admin --description="Admin Tenant"
keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ identity_host }}:35357/v2.0 user-role-add --user=admin --tenant=admin --role=admin
keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ identity_host }}:35357/v2.0 user-role-add --user=admin --role=_member_ --tenant=admin
# create a normal user
keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ identity_host }}:35357/v2.0 user-create --name=demo --pass={{ DEMO_PASS }} --email=demo@example.com
keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ identity_host }}:35357/v2.0 tenant-create --name=demo --description="Demo Tenant"
keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ identity_host }}:35357/v2.0 user-role-add --user=demo --role=_member_ --tenant=demo
# create a service tenant
keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ identity_host }}:35357/v2.0 tenant-create --name=service --description="Service Tenant"
# register keystone
keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ identity_host }}:35357/v2.0 service-create --name=keystone --type=identity --description="OpenStack Identity"
keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ identity_host }}:35357/v2.0 endpoint-create --service_id=$(keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ identity_host }}:35357/v2.0 service-list | awk '/ identity / {print $2}') --publicurl=http://{{ identity_host }}:5000/v2.0 --internalurl=http://{{ identity_host }}:5000/v2.0 --adminurl=http://{{ identity_host }}:35357/v2.0
# Create a glance user that the Image Service can use to authenticate with the Identity service
keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ identity_host }}:35357/v2.0 user-create --name=glance --pass={{ GLANCE_PASS }} --email=glance@example.com
keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ identity_host }}:35357/v2.0 user-role-add --user=glance --tenant=service --role=admin
#Register the Image Service with the Identity service so that other OpenStack services can locate it
keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ identity_host }}:35357/v2.0 service-create --name=glance --type=image --description="OpenStack Image Service"
keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ identity_host }}:35357/v2.0 endpoint-create --service-id=$(keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ identity_host }}:35357/v2.0 service-list | awk '/ image / {print $2}') --publicurl=http://{{ image_host }}:9292 --internalurl=http://{{ image_host }}:9292 --adminurl=http://{{ image_host }}:9292
#Create a nova user that Compute uses to authenticate with the Identity Service
keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ identity_host }}:35357/v2.0 user-create --name=nova --pass={{ NOVA_PASS }} --email=nova@example.com
keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ identity_host }}:35357/v2.0 user-role-add --user=nova --tenant=service --role=admin
# register Compute with the Identity Service so that other OpenStack services can locate it
keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ identity_host }}:35357/v2.0 service-create --name=nova --type=compute --description="OpenStack Compute"
keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ identity_host }}:35357/v2.0 endpoint-create --service-id=$(keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ identity_host }}:35357/v2.0 service-list | awk '/ compute / {print $2}') --publicurl=http://{{ compute_controller_host }}:8774/v2/%\(tenant_id\)s --internalurl=http://{{ compute_controller_host }}:8774/v2/%\(tenant_id\)s --adminurl=http://{{ compute_controller_host }}:8774/v2/%\(tenant_id\)s
# register neutron user, role and service
keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ identity_host }}:35357/v2.0 user-create --name neutron --pass {{ NEUTRON_PASS }} --email neutron@example.com
keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ identity_host }}:35357/v2.0 user-role-add --user neutron --tenant service --role admin
keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ identity_host }}:35357/v2.0 service-create --name neutron --type network --description "OpenStack Networking"
keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ identity_host }}:35357/v2.0 endpoint-create --service-id $(keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ identity_host }}:35357/v2.0 service-list | awk '/ network / {print $2}') --publicurl http://{{ network_server_host }}:9696 --adminurl http://{{ network_server_host }}:9696 --internalurl http://{{ network_server_host }}:9696
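# Sanity-check sketch: after this script runs, the identity, image, compute
# and networking services and their endpoints should all be registered.
#keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ identity_host }}:35357/v2.0 service-list
#keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ identity_host }}:35357/v2.0 endpoint-list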

View File

@ -0,0 +1,2 @@
---
- include: rabbitmq.yml

View File

@ -0,0 +1,8 @@
---
- name: install rabbitmq-server
apt: name=rabbitmq-server state=present
- name: start and enable rabbitmq-server
service: name=rabbitmq-server state=restarted enabled=yes
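# These tasks rely on RabbitMQ's built-in guest account, which matches
# RABBIT_PASS: guest in the group variables. A hypothetical extra task, only
# needed if RABBIT_PASS were changed from the default:
#- name: change rabbitmq guest password (hypothetical)
#  shell: rabbitmqctl change_password guest {{ RABBIT_PASS }}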

View File

@ -0,0 +1,13 @@
---
- name: restart neutron-plugin-openvswitch-agent
service: name=neutron-plugin-openvswitch-agent state=restarted
when: "'opendaylight' not in {{ NEUTRON_MECHANISM_DRIVERS }}"
- name: restart neutron-l3-agent
service: name=neutron-l3-agent state=restarted
- name: restart neutron-dhcp-agent
service: name=neutron-dhcp-agent state=restarted
- name: restart neutron-metadata-agent
service: name=neutron-metadata-agent state=restarted

View File

@ -0,0 +1,2 @@
---
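# OVS bridge mappings for provider networks; when left empty the ml2 template
# falls back to "external:br-ex".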
neutron_ovs_bridge_mappings: ""

View File

@ -0,0 +1,13 @@
---
- name: restart neutron-plugin-openvswitch-agent
service: name=neutron-plugin-openvswitch-agent state=restarted
when: "'opendaylight' not in {{ NEUTRON_MECHANISM_DRIVERS }}"
- name: restart neutron-l3-agent
service: name=neutron-l3-agent state=restarted
- name: restart neutron-dhcp-agent
service: name=neutron-dhcp-agent state=restarted
- name: restart neutron-metadata-agent
service: name=neutron-metadata-agent state=restarted

View File

@ -0,0 +1,43 @@
---
- name: activate ipv4 forwarding
sysctl: name=net.ipv4.ip_forward value=1 state=present reload=yes
- name: deactivate ipv4 rp filter
sysctl: name=net.ipv4.conf.all.rp_filter value=0 state=present reload=yes
- name: deactivate ipv4 default rp filter
sysctl: name=net.ipv4.conf.default.rp_filter value=0 state=present reload=yes
- name: install compute-related neutron packages
apt: name={{ item }} state=present force=yes
with_items:
- neutron-common
- neutron-plugin-ml2
- openvswitch-datapath-dkms
- openvswitch-switch
- name: install neutron openvswitch agent
apt: name=neutron-plugin-openvswitch-agent state=present force=yes
when: "'opendaylight' not in {{ NEUTRON_MECHANISM_DRIVERS }}"
- name: config neutron
template: src=neutron-network.conf dest=/etc/neutron/neutron.conf backup=yes
notify:
- restart neutron-plugin-openvswitch-agent
- name: config ml2 plugin
template: src=ml2_conf.ini dest=/etc/neutron/plugins/ml2/ml2_conf.ini backup=yes
notify:
- restart neutron-plugin-openvswitch-agent
- name: add br-int
openvswitch_bridge: bridge=br-int state=present
notify:
- restart neutron-plugin-openvswitch-agent
- restart nova-compute
- include: ../../neutron-network/tasks/odl.yml
when: "'opendaylight' in {{ NEUTRON_MECHANISM_DRIVERS }}"
- meta: flush_handlers

View File

@ -0,0 +1,90 @@
[DEFAULT]
# Show debugging output in log (sets DEBUG log level output)
# debug = False
verbose = True
# The DHCP agent will resync its state with Neutron to recover from any
# transient notification or rpc errors. The interval is number of
# seconds between attempts.
resync_interval = 5
# The DHCP agent requires an interface driver be set. Choose the one that best
# matches your plugin.
# interface_driver =
# Example of interface_driver option for OVS based plugins(OVS, Ryu, NEC, NVP,
# BigSwitch/Floodlight)
interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
# Name of Open vSwitch bridge to use
# ovs_integration_bridge = br-int
# Use veth for an OVS interface or not.
# Support kernels with limited namespace support
# (e.g. RHEL 6.5) so long as ovs_use_veth is set to True.
ovs_use_veth = False
# Example of interface_driver option for LinuxBridge
# interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver
# The agent can use other DHCP drivers. Dnsmasq is the simplest and requires
# no additional setup of the DHCP server.
dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
# Allow overlapping IP (Must have kernel build with CONFIG_NET_NS=y and
# iproute2 package that supports namespaces).
use_namespaces = True
# The DHCP server can assist with providing metadata support on isolated
# networks. Setting this value to True will cause the DHCP server to append
# specific host routes to the DHCP request. The metadata service will only
# be activated when the subnet does not contain any router port. The guest
# instance must be configured to request host routes via DHCP (Option 121).
enable_isolated_metadata = False
# Allows for serving metadata requests coming from a dedicated metadata
# access network whose cidr is 169.254.169.254/16 (or larger prefix), and
# is connected to a Neutron router from which the VMs send metadata
# request. In this case DHCP Option 121 will not be injected in VMs, as
# they will be able to reach 169.254.169.254 through a router.
# This option requires enable_isolated_metadata = True
enable_metadata_network = False
# Number of threads to use during sync process. Should not exceed connection
# pool size configured on server.
# num_sync_threads = 4
# Location to store DHCP server config files
# dhcp_confs = $state_path/dhcp
# Domain to use for building the hostnames
dhcp_domain = openstacklocal
# Override the default dnsmasq settings with this file
# dnsmasq_config_file =
dnsmasq_config_file = /etc/neutron/dnsmasq-neutron.conf
# Comma-separated list of DNS servers which will be used by dnsmasq
# as forwarders.
# dnsmasq_dns_servers =
# Limit number of leases to prevent a denial-of-service.
dnsmasq_lease_max = 16777216
# Location to DHCP lease relay UNIX domain socket
# dhcp_lease_relay_socket = $state_path/dhcp/lease_relay
# Location of Metadata Proxy UNIX domain socket
# metadata_proxy_socket = $state_path/metadata_proxy
# dhcp_delete_namespaces, which is false by default, can be set to True if
# namespaces can be deleted cleanly on the host running the dhcp agent.
# Do not enable this until you understand the problem with the Linux iproute
# utility mentioned in https://bugs.launchpad.net/neutron/+bug/1052535 and
# you are sure that your version of iproute does not suffer from the problem.
# If True, namespaces will be deleted when a dhcp server is disabled.
# dhcp_delete_namespaces = False
# Timeout for ovs-vsctl commands.
# If the timeout expires, ovs commands will fail with ALARMCLOCK error.
# ovs_vsctl_timeout = 10

View File

@ -0,0 +1,2 @@
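# DHCP option 26 (interface MTU): cap instance MTU at 1454 bytes to leave
# headroom for GRE/VXLAN encapsulation.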
dhcp-option-force=26,1454

View File

@ -0,0 +1,25 @@
interfaces {
restore-original-config-on-shutdown: false
interface {{ hostvars[inventory_hostname][neutron_vxlan_interface|default(internal_interface)]['device'] }} {
description: "Internal pNodes interface"
disable: false
default-system-config
}
}
protocols {
igmp {
disable: false
interface {{ hostvars[inventory_hostname][neutron_vxlan_interface|default(internal_interface)]['device'] }} {
vif {{ hostvars[inventory_hostname][neutron_vxlan_interface|default(internal_interface)]['device'] }} {
disable: false
version: 3
}
}
traceoptions {
flag all {
disable: false
}
}
}
}

View File

@ -0,0 +1,81 @@
[DEFAULT]
# Show debugging output in log (sets DEBUG log level output)
# debug = False
verbose = True
# L3 requires that an interface driver be set. Choose the one that best
# matches your plugin.
# interface_driver =
# Example of interface_driver option for OVS based plugins (OVS, Ryu, NEC)
# that supports L3 agent
# interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
# Use veth for an OVS interface or not.
# Support kernels with limited namespace support
# (e.g. RHEL 6.5) so long as ovs_use_veth is set to True.
# ovs_use_veth = False
# Example of interface_driver option for LinuxBridge
# interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver
# Allow overlapping IP (Must have kernel build with CONFIG_NET_NS=y and
# iproute2 package that supports namespaces).
use_namespaces = True
# If use_namespaces is set as False then the agent can only configure one router.
# This is done by setting the specific router_id.
# router_id =
# When external_network_bridge is set, each L3 agent can be associated
# with no more than one external network. This value should be set to the UUID
# of that external network. To allow L3 agent support multiple external
# networks, both the external_network_bridge and gateway_external_network_id
# must be left empty.
# gateway_external_network_id =
# Indicates that this L3 agent should also handle routers that do not have
# an external network gateway configured. This option should be True only
# for a single agent in a Neutron deployment, and may be False for all agents
# if all routers must have an external network gateway
handle_internal_only_routers = True
# Name of bridge used for external network traffic. This should be set to
# empty value for the linux bridge. When this parameter is set, each L3 agent
# can be associated with no more than one external network.
external_network_bridge = br-ex
# TCP Port used by Neutron metadata server
metadata_port = 9697
# Send this many gratuitous ARPs for HA setup. Set it below or equal to 0
# to disable this feature.
send_arp_for_ha = 3
# seconds between re-sync routers' data if needed
periodic_interval = 40
# seconds to start to sync routers' data after
# starting agent
periodic_fuzzy_delay = 5
# enable_metadata_proxy, which is true by default, can be set to False
# if the Nova metadata server is not available
# enable_metadata_proxy = True
# Location of Metadata Proxy UNIX domain socket
# metadata_proxy_socket = $state_path/metadata_proxy
# router_delete_namespaces, which is false by default, can be set to True if
# namespaces can be deleted cleanly on the host running the L3 agent.
# Do not enable this until you understand the problem with the Linux iproute
# utility mentioned in https://bugs.launchpad.net/neutron/+bug/1052535 and
# you are sure that your version of iproute does not suffer from the problem.
# If True, namespaces will be deleted when a router is destroyed.
# router_delete_namespaces = False
# Timeout for ovs-vsctl commands.
# If the timeout expires, ovs commands will fail with ALARMCLOCK error.
# ovs_vsctl_timeout = 10

View File

@ -0,0 +1,46 @@
[DEFAULT]
# Show debugging output in log (sets DEBUG log level output)
debug = True
# The Neutron user information for accessing the Neutron API.
auth_url = http://{{ identity_host }}:5000/v2.0
auth_region = RegionOne
# Turn off verification of the certificate for ssl
# auth_insecure = False
# Certificate Authority public key (CA cert) file for ssl
# auth_ca_cert =
admin_tenant_name = service
admin_user = neutron
admin_password = {{ NEUTRON_PASS }}
# Network service endpoint type to pull from the keystone catalog
# endpoint_type = adminURL
# IP address used by Nova metadata server
nova_metadata_ip = {{ compute_controller_host }}
# TCP Port used by Nova metadata server
nova_metadata_port = 8775
# When proxying metadata requests, Neutron signs the Instance-ID header with a
# shared secret to prevent spoofing. You may select any string for a secret,
# but it must match here and in the configuration used by the Nova Metadata
# Server. NOTE: Nova uses a different key: neutron_metadata_proxy_shared_secret
metadata_proxy_shared_secret = {{ METADATA_SECRET }}
# Location of Metadata Proxy UNIX domain socket
# metadata_proxy_socket = $state_path/metadata_proxy
# Number of separate worker processes for metadata server
# metadata_workers = 0
# Number of backlog requests to configure the metadata server socket with
# metadata_backlog = 128
# URL to connect to the cache backend.
# Example of URL using memory caching backend
# with ttl set to 5 seconds: cache_url = memory://?default_ttl=5
# default_ttl=0 parameter will cause cache entries to never expire.
# Otherwise default_ttl specifies time in seconds a cache entry is valid for.
# No cache is used in case no value is passed.
# cache_url =

View File

@ -0,0 +1,108 @@
[ml2]
# (ListOpt) List of network type driver entrypoints to be loaded from
# the neutron.ml2.type_drivers namespace.
#
# type_drivers = local,flat,vlan,gre,vxlan
# Example: type_drivers = flat,vlan,gre,vxlan
type_drivers = {{ NEUTRON_TYPE_DRIVERS |join(",") }}
# (ListOpt) Ordered list of network_types to allocate as tenant
# networks. The default value 'local' is useful for single-box testing
# but provides no connectivity between hosts.
#
# tenant_network_types = local
# Example: tenant_network_types = vlan,gre,vxlan
tenant_network_types = {{ NEUTRON_TENANT_NETWORK_TYPES |join(",") }}
# (ListOpt) Ordered list of networking mechanism driver entrypoints
# to be loaded from the neutron.ml2.mechanism_drivers namespace.
# mechanism_drivers =
# Example: mechanism_drivers = openvswitch,mlnx
# Example: mechanism_drivers = arista
# Example: mechanism_drivers = cisco,logger
# Example: mechanism_drivers = openvswitch,brocade
# Example: mechanism_drivers = linuxbridge,brocade
mechanism_drivers = {{ NEUTRON_MECHANISM_DRIVERS |join(",") }}
[ml2_type_flat]
# (ListOpt) List of physical_network names with which flat networks
# can be created. Use * to allow flat networks with arbitrary
# physical_network names.
#
flat_networks = external
# Example:flat_networks = physnet1,physnet2
# Example:flat_networks = *
[ml2_type_vlan]
# (ListOpt) List of <physical_network>[:<vlan_min>:<vlan_max>] tuples
# specifying physical_network names usable for VLAN provider and
# tenant networks, as well as ranges of VLAN tags on each
# physical_network available for allocation as tenant networks.
#
network_vlan_ranges =
# Example: network_vlan_ranges = physnet1:1000:2999,physnet2
[ml2_type_gre]
# (ListOpt) Comma-separated list of <tun_min>:<tun_max> tuples enumerating ranges of GRE tunnel IDs that are available for tenant network allocation
tunnel_id_ranges = 1:1000
[ml2_type_vxlan]
# (ListOpt) Comma-separated list of <vni_min>:<vni_max> tuples enumerating
# ranges of VXLAN VNI IDs that are available for tenant network allocation.
#
vni_ranges = 1001:4095
# (StrOpt) Multicast group for the VXLAN interface. When configured, will
# enable sending all broadcast traffic to this multicast group. When left
# unconfigured, will disable multicast VXLAN mode.
#
vxlan_group = 239.1.1.1
# Example: vxlan_group = 239.1.1.1
[securitygroup]
# Controls if neutron security group is enabled or not.
# It should be false when you use nova security group.
# enable_security_group = True
firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
enable_security_group = True
[database]
connection = mysql://neutron:{{ NEUTRON_DBPASS }}@{{ db_host }}/ovs_neutron?charset=utf8
[ovs]
local_ip = {{ internal_ip }}
{% if 'openvswitch' in NEUTRON_MECHANISM_DRIVERS %}
integration_bridge = br-int
tunnel_bridge = br-tun
tunnel_id_ranges = 1001:4095
tunnel_type = {{ NEUTRON_TUNNEL_TYPES |join(",") }}
bridge_mappings = {{ neutron_ovs_bridge_mappings | default("external:br-ex") }}
{% endif %}
[agent]
root_helper = sudo neutron-rootwrap /etc/neutron/rootwrap.conf
tunnel_types = {{ NEUTRON_TUNNEL_TYPES |join(",") }}
{% if 'vxlan' in NEUTRON_TUNNEL_TYPES %}
vxlan_udp_port = 4789
{% endif %}
l2_population = False
[odl]
{% if 'opendaylight' in NEUTRON_MECHANISM_DRIVERS %}
network_vlan_ranges = 1001:4095
tunnel_id_ranges = 1001:4095
tun_peer_patch_port = patch-int
int_peer_patch_port = patch-tun
tenant_network_type = vxlan
tunnel_bridge = br-tun
integration_bridge = br-int
controllers = {{ odl_controller }}:{{ odl_api_port }}:{{ odl_username }}:{{ odl_password }}
{% endif %}
[ml2_odl]
{% if 'opendaylight' in NEUTRON_MECHANISM_DRIVERS %}
username = {{ odl_username }}
password = {{ odl_password }}
url = http://{{ odl_controller }}:{{ odl_api_port }}/controller/nb/v2/neutron
{% endif %}

View File

@ -0,0 +1,466 @@
[DEFAULT]
# Print more verbose output (set logging level to INFO instead of default WARNING level).
verbose = {{ VERBOSE }}
# Print debugging output (set logging level to DEBUG instead of default WARNING level).
debug = {{ DEBUG }}
# Where to store Neutron state files. This directory must be writable by the
# user executing the agent.
state_path = /var/lib/neutron
# Where to store lock files
lock_path = $state_path/lock
# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s
# log_date_format = %Y-%m-%d %H:%M:%S
# use_syslog -> syslog
# log_file and log_dir -> log_dir/log_file
# (not log_file) and log_dir -> log_dir/{binary_name}.log
# use_stderr -> stderr
# (not use_stderr) and (not log_file) -> stdout
# publish_errors -> notification system
# use_syslog = False
# syslog_log_facility = LOG_USER
# use_stderr = True
# log_file =
log_dir = /var/log/neutron
# publish_errors = False
# Address to bind the API server to
bind_host = {{ network_server_host }}
# Port to bind the API server to
bind_port = 9696
# Path to the extensions. Note that this can be a colon-separated list of
# paths. For example:
# api_extensions_path = extensions:/path/to/more/extensions:/even/more/extensions
# The __path__ of neutron.extensions is appended to this, so if your
# extensions are in there you don't need to specify them here
# api_extensions_path =
# (StrOpt) Neutron core plugin entrypoint to be loaded from the
# neutron.core_plugins namespace. See setup.cfg for the entrypoint names of the
# plugins included in the neutron source distribution. For compatibility with
# previous versions, the class name of a plugin can be specified instead of its
# entrypoint name.
#
#core_plugin = neutron.plugins.ml2.plugin.Ml2Plugin
core_plugin = ml2
# Example: core_plugin = ml2
# (ListOpt) List of service plugin entrypoints to be loaded from the
# neutron.service_plugins namespace. See setup.cfg for the entrypoint names of
# the plugins included in the neutron source distribution. For compatibility
# with previous versions, the class name of a plugin can be specified instead
# of its entrypoint name.
#
# service_plugins =
# Example: service_plugins = router,firewall,lbaas,vpnaas,metering
service_plugins = router
# Paste configuration file
api_paste_config = api-paste.ini
# The strategy to be used for auth.
# Supported values are 'keystone'(default), 'noauth'.
auth_strategy = keystone
# Base MAC address. The first 3 octets will remain unchanged. If the
# 4th octet is not 00, it will also be used. The others will be
# randomly generated.
# 3 octet
# base_mac = fa:16:3e:00:00:00
# 4 octet
# base_mac = fa:16:3e:4f:00:00
# Maximum amount of retries to generate a unique MAC address
# mac_generation_retries = 16
# DHCP Lease duration (in seconds)
dhcp_lease_duration = 86400
# Allow sending resource operation notification to DHCP agent
# dhcp_agent_notification = True
# Enable or disable bulk create/update/delete operations
# allow_bulk = True
# Enable or disable pagination
# allow_pagination = False
# Enable or disable sorting
# allow_sorting = False
# Enable or disable overlapping IPs for subnets
# Attention: the following parameter MUST be set to False if Neutron is
# being used in conjunction with nova security groups
allow_overlapping_ips = True
# Ensure that configured gateway is on subnet
# force_gateway_on_subnet = False
# RPC configuration options. Defined in rpc __init__
# The messaging module to use, defaults to kombu.
# rpc_backend = neutron.openstack.common.rpc.impl_kombu
rpc_backend = rabbit
rabbit_host = {{ rabbit_host }}
rabbit_password = {{ RABBIT_PASS }}
# Size of RPC thread pool
rpc_thread_pool_size = 240
# Size of RPC connection pool
rpc_conn_pool_size = 100
# Seconds to wait for a response from call or multicall
rpc_response_timeout = 300
# Seconds to wait before a cast expires (TTL). Only supported by impl_zmq.
rpc_cast_timeout = 300
# Modules of exceptions that are permitted to be recreated
# upon receiving exception data from an rpc call.
# allowed_rpc_exception_modules = neutron.openstack.common.exception, nova.exception
# AMQP exchange to connect to if using RabbitMQ or QPID
# control_exchange = neutron
# If passed, use a fake RabbitMQ provider
# fake_rabbit = False
# Configuration options if sending notifications via kombu rpc (these are
# the defaults)
# SSL version to use (valid only if SSL enabled)
# kombu_ssl_version =
# SSL key file (valid only if SSL enabled)
# kombu_ssl_keyfile =
# SSL cert file (valid only if SSL enabled)
# kombu_ssl_certfile =
# SSL certification authority file (valid only if SSL enabled)
# kombu_ssl_ca_certs =
# Port where RabbitMQ server is running/listening
rabbit_port = 5672
# RabbitMQ single or HA cluster (host:port pairs i.e: host1:5672, host2:5672)
# rabbit_hosts is defaulted to '$rabbit_host:$rabbit_port'
# rabbit_hosts = localhost:5672
# User ID used for RabbitMQ connections
rabbit_userid = guest
# Location of a virtual RabbitMQ installation.
# rabbit_virtual_host = /
# Maximum retries with trying to connect to RabbitMQ
# (the default of 0 implies an infinite retry count)
# rabbit_max_retries = 0
# RabbitMQ connection retry interval
# rabbit_retry_interval = 1
# Use HA queues in RabbitMQ (x-ha-policy: all). You need to
# wipe RabbitMQ database when changing this option. (boolean value)
# rabbit_ha_queues = false
# QPID
# rpc_backend=neutron.openstack.common.rpc.impl_qpid
# Qpid broker hostname
# qpid_hostname = localhost
# Qpid broker port
# qpid_port = 5672
# Qpid single or HA cluster (host:port pairs i.e: host1:5672, host2:5672)
# qpid_hosts is defaulted to '$qpid_hostname:$qpid_port'
# qpid_hosts = localhost:5672
# Username for qpid connection
# qpid_username = ''
# Password for qpid connection
# qpid_password = ''
# Space separated list of SASL mechanisms to use for auth
# qpid_sasl_mechanisms = ''
# Seconds between connection keepalive heartbeats
# qpid_heartbeat = 60
# Transport to use, either 'tcp' or 'ssl'
# qpid_protocol = tcp
# Disable Nagle algorithm
# qpid_tcp_nodelay = True
# ZMQ
# rpc_backend=neutron.openstack.common.rpc.impl_zmq
# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP.
# The "host" option should point or resolve to this address.
# rpc_zmq_bind_address = *
# ============ Notification System Options =====================
# Notifications can be sent when network/subnet/port are created, updated or deleted.
# There are three methods of sending notifications: logging (via the
# log_file directive), rpc (via a message queue) and
# noop (no notifications sent, the default)
# Notification_driver can be defined multiple times
# Do nothing driver
# notification_driver = neutron.openstack.common.notifier.no_op_notifier
# Logging driver
# notification_driver = neutron.openstack.common.notifier.log_notifier
# RPC driver.
notification_driver = neutron.openstack.common.notifier.rpc_notifier
# default_notification_level is used to form actual topic name(s) or to set logging level
default_notification_level = INFO
# default_publisher_id is a part of the notification payload
# host = myhost.com
# default_publisher_id = $host
# Defined in rpc_notifier, can be comma separated values.
# The actual topic names will be %s.%(default_notification_level)s
notification_topics = notifications
# Default maximum number of items returned in a single response,
# value == infinite and value < 0 means no max limit, and value must
# be greater than 0. If the number of items requested is greater than
# pagination_max_limit, server will just return pagination_max_limit
# of number of items.
# pagination_max_limit = -1
# Maximum number of DNS nameservers per subnet
# max_dns_nameservers = 5
# Maximum number of host routes per subnet
# max_subnet_host_routes = 20
# Maximum number of fixed ips per port
# max_fixed_ips_per_port = 5
# =========== items for agent management extension =============
# Seconds to regard the agent as down; should be at least twice
# report_interval, to be sure the agent is down for good
agent_down_time = 75
# =========== end of items for agent management extension =====
# =========== items for agent scheduler extension =============
# Driver to use for scheduling network to DHCP agent
network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.ChanceScheduler
# Driver to use for scheduling router to a default L3 agent
router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.ChanceScheduler
# Driver to use for scheduling a loadbalancer pool to an lbaas agent
# loadbalancer_pool_scheduler_driver = neutron.services.loadbalancer.agent_scheduler.ChanceScheduler
# Allow auto scheduling networks to DHCP agent. It will schedule non-hosted
# networks to first DHCP agent which sends get_active_networks message to
# neutron server
# network_auto_schedule = True
# Allow auto scheduling routers to L3 agent. It will schedule non-hosted
# routers to first L3 agent which sends sync_routers message to neutron server
# router_auto_schedule = True
# Number of DHCP agents scheduled to host a network. This enables redundant
# DHCP agents for configured networks.
# dhcp_agents_per_network = 1
# =========== end of items for agent scheduler extension =====
# =========== WSGI parameters related to the API server ==============
# Number of separate worker processes to spawn. The default, 0, runs the
# worker thread in the current process. Greater than 0 launches that number of
# child processes as workers. The parent process manages them.
api_workers = 8
# Number of separate RPC worker processes to spawn. The default, 0, runs the
# worker thread in the current process. Greater than 0 launches that number of
# child processes as RPC workers. The parent process manages them.
# This feature is experimental until issues are addressed and testing has been
# enabled for various plugins for compatibility.
rpc_workers = 8
# Sets the value of TCP_KEEPIDLE in seconds to use for each server socket when
# starting API server. Not supported on OS X.
# tcp_keepidle = 600
# Number of seconds to keep retrying to listen
# retry_until_window = 30
# Number of backlog requests to configure the socket with.
# backlog = 4096
# Max header line to accommodate large tokens
# max_header_line = 16384
# Enable SSL on the API server
# use_ssl = False
# Certificate file to use when starting API server securely
# ssl_cert_file = /path/to/certfile
# Private key file to use when starting API server securely
# ssl_key_file = /path/to/keyfile
# CA certificate file to use when starting API server securely to
# verify connecting clients. This is an optional parameter only required if
# API clients need to authenticate to the API server using SSL certificates
# signed by a trusted CA
# ssl_ca_file = /path/to/cafile
# ======== end of WSGI parameters related to the API server ==========
# ======== neutron nova interactions ==========
# Send notification to nova when port status is active.
notify_nova_on_port_status_changes = True
# Send notifications to nova when port data (fixed_ips/floatingips) change
# so nova can update its cache.
notify_nova_on_port_data_changes = True
# URL for connection to nova (Only supports one nova region currently).
nova_url = http://{{ compute_controller_host }}:8774/v2
# Name of nova region to use. Useful if keystone manages more than one region
nova_region_name = RegionOne
# Username for connection to nova in admin context
nova_admin_username = nova
# The uuid of the admin nova tenant
# Password for connection to nova in admin context.
nova_admin_password = {{ NOVA_PASS }}
# Authorization URL for connection to nova in admin context.
nova_admin_auth_url = http://{{ identity_host }}:35357/v2.0
# Number of seconds between sending events to nova if there are any events to send
send_events_interval = 2
# ======== end of neutron nova interactions ==========
[quotas]
# Default driver to use for quota checks
quota_driver = neutron.db.quota_db.DbQuotaDriver
# Resource name(s) that are supported in quota features
quota_items = network,subnet,port
# Default number of resource allowed per tenant. A negative value means
# unlimited.
default_quota = -1
# Number of networks allowed per tenant. A negative value means unlimited.
quota_network = 100
# Number of subnets allowed per tenant. A negative value means unlimited.
quota_subnet = 100
# Number of ports allowed per tenant. A negative value means unlimited.
quota_port = 8000
# Number of security groups allowed per tenant. A negative value means
# unlimited.
quota_security_group = 1000
# Number of security group rules allowed per tenant. A negative value means
# unlimited.
quota_security_group_rule = 1000
# Number of vips allowed per tenant. A negative value means unlimited.
# quota_vip = 10
# Number of pools allowed per tenant. A negative value means unlimited.
# quota_pool = 10
# Number of pool members allowed per tenant. A negative value means unlimited.
# The default is unlimited because a member is not a real resource consumer
# on Openstack. However, on back-end, a member is a resource consumer
# and that is the reason why quota is possible.
# quota_member = -1
# Number of health monitors allowed per tenant. A negative value means
# unlimited.
# The default is unlimited because a health monitor is not a real resource
# consumer on Openstack. However, on back-end, a member is a resource consumer
# and that is the reason why quota is possible.
# quota_health_monitors = -1
# Number of routers allowed per tenant. A negative value means unlimited.
# quota_router = 10
# Number of floating IPs allowed per tenant. A negative value means unlimited.
# quota_floatingip = 50
[agent]
# Use "sudo neutron-rootwrap /etc/neutron/rootwrap.conf" to use the real
# root filter facility.
# Change to "sudo" to skip the filtering and just run the comand directly
root_helper = "sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf"
# =========== items for agent management extension =============
# seconds between nodes reporting state to server; should be less than
# agent_down_time, best if it is half or less than agent_down_time
report_interval = 30
# =========== end of items for agent management extension =====
[keystone_authtoken]
auth_uri = http://{{ identity_host }}:5000/v2.0
identity_uri = http://{{ identity_host }}:35357
admin_tenant_name = service
admin_user = neutron
admin_password = {{ NEUTRON_PASS }}
signing_dir = $state_path/keystone-signing
[database]
# This line MUST be changed to actually run the plugin.
# Example:
# connection = mysql://root:pass@127.0.0.1:3306/neutron
# Replace 127.0.0.1 above with the IP address of the database used by the
# main neutron server. (Leave it as is if the database runs on this host.)
# connection = sqlite:////var/lib/neutron/neutron.sqlite
#connection = mysql://neutron:{{ NEUTRON_DBPASS }}@{{ db_host }}/neutron
# The SQLAlchemy connection string used to connect to the slave database
slave_connection =
# Database reconnection retry times - in event connectivity is lost
# set to -1 implies an infinite retry count
max_retries = 10
# Database reconnection interval in seconds - if the initial connection to the
# database fails
retry_interval = 10
# Minimum number of SQL connections to keep open in a pool
min_pool_size = 1
# Maximum number of SQL connections to keep open in a pool
max_pool_size = 100
# Timeout in seconds before idle sql connections are reaped
idle_timeout = 3600
# If set, use this value for max_overflow with sqlalchemy
max_overflow = 100
# Verbosity of SQL debugging information. 0=None, 100=Everything
connection_debug = 0
# Add python stack traces to SQL as comment strings
connection_trace = False
# If set, use this value for pool_timeout with sqlalchemy
pool_timeout = 10
[service_providers]
# Specify service providers (drivers) for advanced services like loadbalancer, VPN, Firewall.
# Must be in form:
# service_provider=<service_type>:<name>:<driver>[:default]
# List of allowed service types includes LOADBALANCER, FIREWALL, VPN
# Combination of <service type> and <name> must be unique; <driver> must also be unique
# This is a multiline option, example for default provider:
# service_provider=LOADBALANCER:name:lbaas_plugin_driver_path:default
# example of non-default provider:
# service_provider=FIREWALL:name2:firewall_driver_path
# --- Reference implementations ---
service_provider=LOADBALANCER:Haproxy:neutron.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default
service_provider=VPN:openswan:neutron.services.vpn.service_drivers.ipsec.IPsecVPNDriver:default
# In order to activate Radware's lbaas driver you need to uncomment the next line.
# If you want to keep the HA Proxy as the default lbaas driver, remove the attribute default from the line below.
# Otherwise comment the HA Proxy line
# service_provider = LOADBALANCER:Radware:neutron.services.loadbalancer.drivers.radware.driver.LoadBalancerDriver:default
# uncomment the following line to make the 'netscaler' LBaaS provider available.
# service_provider=LOADBALANCER:NetScaler:neutron.services.loadbalancer.drivers.netscaler.netscaler_driver.NetScalerPluginDriver
# Uncomment the following line (and comment out the OpenSwan VPN line) to enable Cisco's VPN driver.
# service_provider=VPN:cisco:neutron.services.vpn.service_drivers.cisco_ipsec.CiscoCsrIPsecVPNDriver:default
# Uncomment the line below to use Embrane heleos as Load Balancer service provider.
# service_provider=LOADBALANCER:Embrane:neutron.services.loadbalancer.drivers.embrane.driver.EmbraneLbaas:default

View File

@ -0,0 +1,467 @@
[DEFAULT]
# Print more verbose output (set logging level to INFO instead of default WARNING level).
verbose = {{ VERBOSE }}
# Print debugging output (set logging level to DEBUG instead of default WARNING level).
debug = {{ DEBUG }}
# Where to store Neutron state files. This directory must be writable by the
# user executing the agent.
state_path = /var/lib/neutron
# Where to store lock files
lock_path = $state_path/lock
# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s
# log_date_format = %Y-%m-%d %H:%M:%S
# use_syslog -> syslog
# log_file and log_dir -> log_dir/log_file
# (not log_file) and log_dir -> log_dir/{binary_name}.log
# use_stderr -> stderr
# (not use_stderr) and (not log_file) -> stdout
# publish_errors -> notification system
# use_syslog = False
# syslog_log_facility = LOG_USER
# use_stderr = True
# log_file =
log_dir = /var/log/neutron
# publish_errors = False
# Address to bind the API server to
bind_host = {{ network_server_host }}
# Port to bind the API server to
bind_port = 9696
# Path to the extensions. Note that this can be a colon-separated list of
# paths. For example:
# api_extensions_path = extensions:/path/to/more/extensions:/even/more/extensions
# The __path__ of neutron.extensions is appended to this, so if your
# extensions are in there you don't need to specify them here
# api_extensions_path =
# (StrOpt) Neutron core plugin entrypoint to be loaded from the
# neutron.core_plugins namespace. See setup.cfg for the entrypoint names of the
# plugins included in the neutron source distribution. For compatibility with
# previous versions, the class name of a plugin can be specified instead of its
# entrypoint name.
#
#core_plugin = neutron.plugins.ml2.plugin.Ml2Plugin
core_plugin = ml2
# Example: core_plugin = ml2
# (ListOpt) List of service plugin entrypoints to be loaded from the
# neutron.service_plugins namespace. See setup.cfg for the entrypoint names of
# the plugins included in the neutron source distribution. For compatibility
# with previous versions, the class name of a plugin can be specified instead
# of its entrypoint name.
#
# service_plugins =
# Example: service_plugins = router,firewall,lbaas,vpnaas,metering
service_plugins = router
# Paste configuration file
api_paste_config = api-paste.ini
# The strategy to be used for auth.
# Supported values are 'keystone'(default), 'noauth'.
auth_strategy = keystone
# Base MAC address. The first 3 octets will remain unchanged. If the
# 4th octet is not 00, it will also be used. The others will be
# randomly generated.
# 3 octet
# base_mac = fa:16:3e:00:00:00
# 4 octet
# base_mac = fa:16:3e:4f:00:00
# Maximum amount of retries to generate a unique MAC address
# mac_generation_retries = 16
# DHCP Lease duration (in seconds)
dhcp_lease_duration = 86400
# Allow sending resource operation notification to DHCP agent
# dhcp_agent_notification = True
# Enable or disable bulk create/update/delete operations
# allow_bulk = True
# Enable or disable pagination
# allow_pagination = False
# Enable or disable sorting
# allow_sorting = False
# Enable or disable overlapping IPs for subnets
# Attention: the following parameter MUST be set to False if Neutron is
# being used in conjunction with nova security groups
allow_overlapping_ips = True
# Ensure that configured gateway is on subnet
# force_gateway_on_subnet = False
# RPC configuration options. Defined in rpc __init__
# The messaging module to use, defaults to kombu.
# rpc_backend = neutron.openstack.common.rpc.impl_kombu
rpc_backend = rabbit
rabbit_host = {{ rabbit_host }}
rabbit_password = {{ RABBIT_PASS }}
# Size of RPC thread pool
rpc_thread_pool_size = 240
# Size of RPC connection pool
rpc_conn_pool_size = 100
# Seconds to wait for a response from call or multicall
rpc_response_timeout = 300
# Seconds to wait before a cast expires (TTL). Only supported by impl_zmq.
rpc_cast_timeout = 300
# Modules of exceptions that are permitted to be recreated
# upon receiving exception data from an rpc call.
# allowed_rpc_exception_modules = neutron.openstack.common.exception, nova.exception
# AMQP exchange to connect to if using RabbitMQ or QPID
# control_exchange = neutron
# If passed, use a fake RabbitMQ provider
# fake_rabbit = False
# Configuration options if sending notifications via kombu rpc (these are
# the defaults)
# SSL version to use (valid only if SSL enabled)
# kombu_ssl_version =
# SSL key file (valid only if SSL enabled)
# kombu_ssl_keyfile =
# SSL cert file (valid only if SSL enabled)
# kombu_ssl_certfile =
# SSL certification authority file (valid only if SSL enabled)
# kombu_ssl_ca_certs =
# Port where RabbitMQ server is running/listening
rabbit_port = 5672
# RabbitMQ single or HA cluster (host:port pairs i.e: host1:5672, host2:5672)
# rabbit_hosts is defaulted to '$rabbit_host:$rabbit_port'
# rabbit_hosts = localhost:5672
# User ID used for RabbitMQ connections
rabbit_userid = guest
# Location of a virtual RabbitMQ installation.
# rabbit_virtual_host = /
# Maximum retries with trying to connect to RabbitMQ
# (the default of 0 implies an infinite retry count)
# rabbit_max_retries = 0
# RabbitMQ connection retry interval
# rabbit_retry_interval = 1
# Use HA queues in RabbitMQ (x-ha-policy: all). You need to
# wipe RabbitMQ database when changing this option. (boolean value)
# rabbit_ha_queues = false
# QPID
# rpc_backend=neutron.openstack.common.rpc.impl_qpid
# Qpid broker hostname
# qpid_hostname = localhost
# Qpid broker port
# qpid_port = 5672
# Qpid single or HA cluster (host:port pairs i.e: host1:5672, host2:5672)
# qpid_hosts is defaulted to '$qpid_hostname:$qpid_port'
# qpid_hosts = localhost:5672
# Username for qpid connection
# qpid_username = ''
# Password for qpid connection
# qpid_password = ''
# Space separated list of SASL mechanisms to use for auth
# qpid_sasl_mechanisms = ''
# Seconds between connection keepalive heartbeats
# qpid_heartbeat = 60
# Transport to use, either 'tcp' or 'ssl'
# qpid_protocol = tcp
# Disable Nagle algorithm
# qpid_tcp_nodelay = True
# ZMQ
# rpc_backend=neutron.openstack.common.rpc.impl_zmq
# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP.
# The "host" option should point or resolve to this address.
# rpc_zmq_bind_address = *
# ============ Notification System Options =====================
# Notifications can be sent when network/subnet/port are created, updated or deleted.
# There are three methods of sending notifications: logging (via the
# log_file directive), rpc (via a message queue) and
# noop (no notifications sent, the default)
# Notification_driver can be defined multiple times
# Do nothing driver
# notification_driver = neutron.openstack.common.notifier.no_op_notifier
# Logging driver
# notification_driver = neutron.openstack.common.notifier.log_notifier
# RPC driver.
notification_driver = neutron.openstack.common.notifier.rpc_notifier
# default_notification_level is used to form actual topic name(s) or to set logging level
default_notification_level = INFO
# default_publisher_id is a part of the notification payload
# host = myhost.com
# default_publisher_id = $host
# Defined in rpc_notifier, can be comma separated values.
# The actual topic names will be %s.%(default_notification_level)s
notification_topics = notifications
# Default maximum number of items returned in a single response. A value of
# 'infinite' or a negative value means no limit; any other value must be
# greater than 0. If more items are requested than pagination_max_limit, the
# server returns only pagination_max_limit items.
# pagination_max_limit = -1
# Maximum number of DNS nameservers per subnet
# max_dns_nameservers = 5
# Maximum number of host routes per subnet
# max_subnet_host_routes = 20
# Maximum number of fixed ips per port
# max_fixed_ips_per_port = 5
# =========== items for agent management extension =============
# Seconds to regard the agent as down; should be at least twice
# report_interval, to be sure the agent is down for good
agent_down_time = 75
# =========== end of items for agent management extension =====
# =========== items for agent scheduler extension =============
# Driver to use for scheduling network to DHCP agent
network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.ChanceScheduler
# Driver to use for scheduling router to a default L3 agent
router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.ChanceScheduler
# Driver to use for scheduling a loadbalancer pool to an lbaas agent
# loadbalancer_pool_scheduler_driver = neutron.services.loadbalancer.agent_scheduler.ChanceScheduler
# Allow auto scheduling networks to DHCP agent. It will schedule non-hosted
# networks to first DHCP agent which sends get_active_networks message to
# neutron server
# network_auto_schedule = True
# Allow auto scheduling routers to L3 agent. It will schedule non-hosted
# routers to first L3 agent which sends sync_routers message to neutron server
# router_auto_schedule = True
# Number of DHCP agents scheduled to host a network. This enables redundant
# DHCP agents for configured networks.
# dhcp_agents_per_network = 1
# =========== end of items for agent scheduler extension =====
# =========== WSGI parameters related to the API server ==============
# Number of separate worker processes to spawn. The default, 0, runs the
# worker thread in the current process. Greater than 0 launches that number of
# child processes as workers. The parent process manages them.
api_workers = 8
# Number of separate RPC worker processes to spawn. The default, 0, runs the
# worker thread in the current process. Greater than 0 launches that number of
# child processes as RPC workers. The parent process manages them.
# This feature is experimental until issues are addressed and testing has been
# enabled for various plugins for compatibility.
rpc_workers = 8
# Sets the value of TCP_KEEPIDLE in seconds to use for each server socket when
# starting API server. Not supported on OS X.
# tcp_keepidle = 600
# Number of seconds to keep retrying to listen
# retry_until_window = 30
# Number of backlog requests to configure the socket with.
# backlog = 4096
# Max header line to accommodate large tokens
# max_header_line = 16384
# Enable SSL on the API server
# use_ssl = False
# Certificate file to use when starting API server securely
# ssl_cert_file = /path/to/certfile
# Private key file to use when starting API server securely
# ssl_key_file = /path/to/keyfile
# CA certificate file to use when starting API server securely to
# verify connecting clients. This is an optional parameter only required if
# API clients need to authenticate to the API server using SSL certificates
# signed by a trusted CA
# ssl_ca_file = /path/to/cafile
# ======== end of WSGI parameters related to the API server ==========
# ======== neutron nova interactions ==========
# Send notification to nova when port status is active.
notify_nova_on_port_status_changes = True
# Send notifications to nova when port data (fixed_ips/floatingips) change
# so nova can update its cache.
notify_nova_on_port_data_changes = True
# URL for connection to nova (Only supports one nova region currently).
nova_url = http://{{ compute_controller_host }}:8774/v2
# Name of nova region to use. Useful if keystone manages more than one region
nova_region_name = RegionOne
# Username for connection to nova in admin context
nova_admin_username = nova
# The uuid of the admin nova tenant
nova_admin_tenant_id = {{ NOVA_ADMIN_TENANT_ID.stdout_lines[0] }}
# Password for connection to nova in admin context.
nova_admin_password = {{ NOVA_PASS }}
# Authorization URL for connection to nova in admin context.
nova_admin_auth_url = http://{{ identity_host }}:35357/v2.0
# Number of seconds between sending events to nova if there are any events to send
send_events_interval = 2
# ======== end of neutron nova interactions ==========
[quotas]
# Default driver to use for quota checks
quota_driver = neutron.db.quota_db.DbQuotaDriver
# Resource name(s) that are supported in quota features
quota_items = network,subnet,port
# Default number of resources allowed per tenant. A negative value means
# unlimited.
default_quota = -1
# Number of networks allowed per tenant. A negative value means unlimited.
quota_network = 100
# Number of subnets allowed per tenant. A negative value means unlimited.
quota_subnet = 100
# Number of ports allowed per tenant. A negative value means unlimited.
quota_port = 8000
# Number of security groups allowed per tenant. A negative value means
# unlimited.
quota_security_group = 1000
# Number of security group rules allowed per tenant. A negative value means
# unlimited.
quota_security_group_rule = 1000
# Number of vips allowed per tenant. A negative value means unlimited.
# quota_vip = 10
# Number of pools allowed per tenant. A negative value means unlimited.
# quota_pool = 10
# Number of pool members allowed per tenant. A negative value means unlimited.
# The default is unlimited because a member is not a real resource consumer
# on OpenStack. However, on the back end, a member is a resource consumer
# and that is the reason why quota is possible.
# quota_member = -1
# Number of health monitors allowed per tenant. A negative value means
# unlimited.
# The default is unlimited because a health monitor is not a real resource
# consumer on OpenStack. However, on the back end, a health monitor is a
# resource consumer, and that is the reason why quota is possible.
# quota_health_monitors = -1
# Number of routers allowed per tenant. A negative value means unlimited.
# quota_router = 10
# Number of floating IPs allowed per tenant. A negative value means unlimited.
# quota_floatingip = 50
[agent]
# Use "sudo neutron-rootwrap /etc/neutron/rootwrap.conf" to use the real
# root filter facility.
# Change to "sudo" to skip the filtering and just run the comand directly
root_helper = "sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf"
# =========== items for agent management extension =============
# seconds between nodes reporting state to server; should be less than
# agent_down_time, best if it is half or less than agent_down_time
report_interval = 30
# =========== end of items for agent management extension =====
[keystone_authtoken]
auth_uri = http://{{ identity_host }}:5000/v2.0
identity_uri = http://{{ identity_host }}:35357
admin_tenant_name = service
admin_user = neutron
admin_password = {{ NEUTRON_PASS }}
signing_dir = $state_path/keystone-signing
[database]
# This line MUST be changed to actually run the plugin.
# Example:
# connection = mysql://root:pass@127.0.0.1:3306/neutron
# Replace 127.0.0.1 above with the IP address of the database used by the
# main neutron server. (Leave it as is if the database runs on this host.)
# connection = sqlite:////var/lib/neutron/neutron.sqlite
#connection = mysql://neutron:{{ NEUTRON_DBPASS }}@{{ db_host }}/neutron
# The SQLAlchemy connection string used to connect to the slave database
slave_connection =
# Database reconnection retry times - in event connectivity is lost
# set to -1 implies an infinite retry count
max_retries = 10
# Database reconnection interval in seconds - if the initial connection to the
# database fails
retry_interval = 10
# Minimum number of SQL connections to keep open in a pool
min_pool_size = 1
# Maximum number of SQL connections to keep open in a pool
max_pool_size = 100
# Timeout in seconds before idle sql connections are reaped
idle_timeout = 3600
# If set, use this value for max_overflow with sqlalchemy
max_overflow = 100
# Verbosity of SQL debugging information. 0=None, 100=Everything
connection_debug = 0
# Add python stack traces to SQL as comment strings
connection_trace = False
# If set, use this value for pool_timeout with sqlalchemy
pool_timeout = 10
[service_providers]
# Specify service providers (drivers) for advanced services like loadbalancer, VPN, Firewall.
# Must be in form:
# service_provider=<service_type>:<name>:<driver>[:default]
# List of allowed service types includes LOADBALANCER, FIREWALL, VPN
# Combination of <service type> and <name> must be unique; <driver> must also be unique
# This is multiline option, example for default provider:
# service_provider=LOADBALANCER:name:lbaas_plugin_driver_path:default
# example of non-default provider:
# service_provider=FIREWALL:name2:firewall_driver_path
# --- Reference implementations ---
service_provider=LOADBALANCER:Haproxy:neutron.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default
service_provider=VPN:openswan:neutron.services.vpn.service_drivers.ipsec.IPsecVPNDriver:default
# In order to activate Radware's lbaas driver you need to uncomment the next line.
# If you want to keep the HA Proxy as the default lbaas driver, remove the attribute default from the line below.
# Otherwise comment the HA Proxy line
# service_provider = LOADBALANCER:Radware:neutron.services.loadbalancer.drivers.radware.driver.LoadBalancerDriver:default
# uncomment the following line to make the 'netscaler' LBaaS provider available.
# service_provider=LOADBALANCER:NetScaler:neutron.services.loadbalancer.drivers.netscaler.netscaler_driver.NetScalerPluginDriver
# Uncomment the following line (and comment out the OpenSwan VPN line) to enable Cisco's VPN driver.
# service_provider=VPN:cisco:neutron.services.vpn.service_drivers.cisco_ipsec.CiscoCsrIPsecVPNDriver:default
# Uncomment the line below to use Embrane heleos as Load Balancer service provider.
# service_provider=LOADBALANCER:Embrane:neutron.services.loadbalancer.drivers.embrane.driver.EmbraneLbaas:default

View File

@ -0,0 +1,4 @@
# neutron --os-username=admin --os-password={{ ADMIN_PASS }} --os-tenant-name=admin --os-auth-url=http://{{ identity_host }}:35357/v2.0 net-create ext-net --shared --router:external=True
# neutron --os-username=admin --os-password={{ ADMIN_PASS }} --os-tenant-name=admin --os-auth-url=http://{{ identity_host }}:35357/v2.0 subnet-create ext-net --name ext-subnet --allocation-pool start={{ FLOATING_IP_START }},end={{ FLOATING_IP_END }} --disable-dhcp --gateway {{ EXTERNAL_NETWORK_GATEWAY }} {{ EXTERNAL_NETWORK_CIDR }}
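# The two commented commands above document the external-network bootstrap:
# net-create builds the shared external network, and subnet-create attaches
# ext-subnet with the floating-IP allocation pool and gateway taken from the
# group variables.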

View File

@ -0,0 +1,68 @@
[DEFAULT]
dhcpbridge_flagfile=/etc/nova/nova.conf
dhcpbridge=/usr/bin/nova-dhcpbridge
logdir=/var/log/nova
state_path=/var/lib/nova
lock_path=/var/lock/nova
force_dhcp_release=True
iscsi_helper=tgtadm
libvirt_use_virtio_for_bridges=True
connection_type=libvirt
root_helper=sudo nova-rootwrap /etc/nova/rootwrap.conf
verbose={{ VERBOSE }}
debug={{ DEBUG }}
ec2_private_dns_show_ip=True
api_paste_config=/etc/nova/api-paste.ini
volumes_path=/var/lib/nova/volumes
enabled_apis=ec2,osapi_compute,metadata
vif_plugging_is_fatal=false
vif_plugging_timeout=0
auth_strategy = keystone
rpc_backend = rabbit
rabbit_host = {{ rabbit_host }}
rabbit_password = {{ RABBIT_PASS }}
my_ip = {{ internal_ip }}
vnc_enabled = True
vncserver_listen = 0.0.0.0
vncserver_proxyclient_address = {{ internal_ip }}
novncproxy_base_url = http://{{ compute_controller_host }}:6080/vnc_auto.html
novncproxy_host = {{ internal_ip }}
novncproxy_port = 6080
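# With vncserver_listen on 0.0.0.0 the instance consoles accept proxy
# connections on any interface; clients are sent to the noVNC proxy URL on
# the controller, which reaches each console via vncserver_proxyclient_address.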
network_api_class = nova.network.neutronv2.api.API
linuxnet_interface_driver = nova.network.linux_net.LinuxOVSInterfaceDriver
firewall_driver = nova.virt.firewall.NoopFirewallDriver
security_group_api = neutron
instance_usage_audit = True
instance_usage_audit_period = hour
notify_on_state_change = vm_and_task_state
notification_driver = nova.openstack.common.notifier.rpc_notifier
notification_driver = ceilometer.compute.nova_notifier
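# notification_driver is a multi-valued option: the RPC notifier keeps
# standard notifications on the message bus while the Ceilometer notifier
# emits the compute samples that metering relies on.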
[database]
# The SQLAlchemy connection string used to connect to the database
connection = mysql://nova:{{ NOVA_DBPASS }}@{{ db_host }}/nova
[keystone_authtoken]
auth_uri = http://{{ identity_host }}:5000/v2.0
identity_uri = http://{{ identity_host }}:35357
admin_tenant_name = service
admin_user = nova
admin_password = {{ NOVA_PASS }}
[glance]
host = {{ image_host }}
[neutron]
url = http://{{ network_server_host }}:9696
auth_strategy = keystone
admin_tenant_name = service
admin_username = neutron
admin_password = {{ NEUTRON_PASS }}
admin_auth_url = http://{{ identity_host }}:35357/v2.0

View File

@ -0,0 +1,24 @@
---
- name: restart nova-api
service: name=nova-api state=restarted
- name: restart nova-cert
service: name=nova-cert state=restarted
- name: restart nova-consoleauth
service: name=nova-consoleauth state=restarted
- name: restart nova-scheduler
service: name=nova-scheduler state=restarted
- name: restart nova-conductor
service: name=nova-conductor state=restarted
- name: restart nova-novncproxy
service: name=nova-novncproxy state=restarted
- name: remove nova-sqlite-db
shell: rm /var/lib/nova/nova.sqlite || touch nova.sqlite.db.removed
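# The '|| touch' fallback keeps the handler from failing on reruns once the
# bundled SQLite database is already gone.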
- name: restart neutron-server
service: name=neutron-server state=restarted

View File

@ -0,0 +1,46 @@
---
- name: install controller-related neutron packages
apt: name={{ item }} state=present force=yes
with_items:
- neutron-server
- neutron-plugin-ml2
- name: get tenant id to fill neutron.conf
shell: keystone --os-token={{ ADMIN_TOKEN }} --os-endpoint=http://{{ identity_host }}:35357/v2.0 tenant-get service | grep id | awk '{print $4}'
register: NOVA_ADMIN_TENANT_ID
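# The UUID captured here fills nova_admin_tenant_id in the neutron.conf
# template via NOVA_ADMIN_TENANT_ID.stdout_lines[0].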
- name: update neutron conf
template: src=neutron.conf dest=/etc/neutron/neutron.conf backup=yes
notify:
- restart neutron-server
- name: update ml2 plugin conf
template: src=ml2_conf.ini dest=/etc/neutron/plugins/ml2/ml2_conf.ini backup=yes
notify:
- restart neutron-server
- meta: flush_handlers
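# Flushing handlers here runs the queued neutron-server restart immediately,
# so the service is serving the new configuration before initialization.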
#- name: manually restart nova-api
# service: name=nova-api state=restarted
#- name: manually restart nova-scheduler
# service: name=nova-scheduler state=restarted
#- name: manually restart nova-conductor
# service: name=nova-conductor state=restarted
#- name: manually restart neutron-server
# service: name=neutron-server state=restarted
- name: place neutron_init.sh under /opt/
template: src=neutron_init.sh dest=/opt/neutron_init.sh mode=0744
- name: init neutron
shell: /opt/neutron_init.sh && touch neutron_init_complete || touch neutron_init_failed
args:
creates: neutron_init_complete
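# 'creates' makes the task idempotent: once the marker file exists, the init
# script is skipped on subsequent runs.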
- name: neutron-db-manage upgrade to Juno
shell: neutron-db-manage --config-file=/etc/neutron/neutron.conf --config-file=/etc/neutron/plugins/ml2/ml2_conf.ini upgrade head
notify:
- restart neutron-server

View File

@ -0,0 +1,90 @@
[DEFAULT]
# Show debugging output in log (sets DEBUG log level output)
# debug = False
verbose = True
# The DHCP agent will resync its state with Neutron to recover from any
# transient notification or rpc errors. The interval is number of
# seconds between attempts.
resync_interval = 5
# The DHCP agent requires an interface driver be set. Choose the one that best
# matches your plugin.
# interface_driver =
# Example of interface_driver option for OVS based plugins(OVS, Ryu, NEC, NVP,
# BigSwitch/Floodlight)
interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
# Name of Open vSwitch bridge to use
# ovs_integration_bridge = br-int
# Use veth for an OVS interface or not.
# Support kernels with limited namespace support
# (e.g. RHEL 6.5) so long as ovs_use_veth is set to True.
ovs_use_veth = False
# Example of interface_driver option for LinuxBridge
# interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver
# The agent can use other DHCP drivers. Dnsmasq is the simplest and requires
# no additional setup of the DHCP server.
dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
# Allow overlapping IP (Must have kernel build with CONFIG_NET_NS=y and
# iproute2 package that supports namespaces).
use_namespaces = True
# The DHCP server can assist with providing metadata support on isolated
# networks. Setting this value to True will cause the DHCP server to append
# specific host routes to the DHCP request. The metadata service will only
# be activated when the subnet does not contain any router port. The guest
# instance must be configured to request host routes via DHCP (Option 121).
enable_isolated_metadata = False
# Allows for serving metadata requests coming from a dedicated metadata
# access network whose cidr is 169.254.169.254/16 (or larger prefix), and
# is connected to a Neutron router from which the VMs send metadata
# request. In this case DHCP Option 121 will not be injected in VMs, as
# they will be able to reach 169.254.169.254 through a router.
# This option requires enable_isolated_metadata = True
enable_metadata_network = False
# Number of threads to use during sync process. Should not exceed connection
# pool size configured on server.
# num_sync_threads = 4
# Location to store DHCP server config files
# dhcp_confs = $state_path/dhcp
# Domain to use for building the hostnames
dhcp_domain = openstacklocal
# Override the default dnsmasq settings with this file
# dnsmasq_config_file =
dnsmasq_config_file = /etc/neutron/dnsmasq-neutron.conf
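# The override file referenced above (dnsmasq-neutron.conf, included in this
# change) forces a smaller instance MTU to leave room for tunnel encapsulation.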
# Comma-separated list of DNS servers which will be used by dnsmasq
# as forwarders.
# dnsmasq_dns_servers =
# Limit number of leases to prevent a denial-of-service.
dnsmasq_lease_max = 16777216
# Location to DHCP lease relay UNIX domain socket
# dhcp_lease_relay_socket = $state_path/dhcp/lease_relay
# Location of Metadata Proxy UNIX domain socket
# metadata_proxy_socket = $state_path/metadata_proxy
# dhcp_delete_namespaces, which is false by default, can be set to True if
# namespaces can be deleted cleanly on the host running the dhcp agent.
# Do not enable this until you understand the problem with the Linux iproute
# utility mentioned in https://bugs.launchpad.net/neutron/+bug/1052535 and
# you are sure that your version of iproute does not suffer from the problem.
# If True, namespaces will be deleted when a dhcp server is disabled.
# dhcp_delete_namespaces = False
# Timeout for ovs-vsctl commands.
# If the timeout expires, ovs commands will fail with ALARMCLOCK error.
# ovs_vsctl_timeout = 10

View File

@ -0,0 +1,2 @@
dhcp-option-force=26,1454
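# DHCP option 26 is the interface MTU; 1454 leaves headroom for GRE/VXLAN
# encapsulation overhead on a standard 1500-byte physical network.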

View File

@ -0,0 +1,25 @@
interfaces {
restore-original-config-on-shutdown: false
interface {{ hostvars[inventory_hostname][neutron_vxlan_interface|default(internal_interface)]['device'] }} {
description: "Internal pNodes interface"
disable: false
default-system-config
}
}
protocols {
igmp {
disable: false
interface {{ hostvars[inventory_hostname][neutron_vxlan_interface|default(internal_interface)]['device'] }} {
vif {{ hostvars[inventory_hostname][neutron_vxlan_interface|default(internal_interface)]['device'] }} {
disable: false
version: 3
}
}
traceoptions {
flag all {
disable: false
}
}
}
}

View File

@ -0,0 +1,81 @@
[DEFAULT]
# Show debugging output in log (sets DEBUG log level output)
# debug = False
verbose = True
# L3 requires that an interface driver be set. Choose the one that best
# matches your plugin.
# interface_driver =
# Example of interface_driver option for OVS based plugins (OVS, Ryu, NEC)
# that supports L3 agent
# interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
# Use veth for an OVS interface or not.
# Support kernels with limited namespace support
# (e.g. RHEL 6.5) so long as ovs_use_veth is set to True.
# ovs_use_veth = False
# Example of interface_driver option for LinuxBridge
# interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver
# Allow overlapping IP (Must have kernel build with CONFIG_NET_NS=y and
# iproute2 package that supports namespaces).
use_namespaces = True
# If use_namespaces is set as False then the agent can only configure one router.
# This is done by setting the specific router_id.
# router_id =
# When external_network_bridge is set, each L3 agent can be associated
# with no more than one external network. This value should be set to the UUID
# of that external network. To allow L3 agent support multiple external
# networks, both the external_network_bridge and gateway_external_network_id
# must be left empty.
# gateway_external_network_id =
# Indicates that this L3 agent should also handle routers that do not have
# an external network gateway configured. This option should be True only
# for a single agent in a Neutron deployment, and may be False for all agents
# if all routers must have an external network gateway
handle_internal_only_routers = True
# Name of bridge used for external network traffic. This should be set to
# empty value for the linux bridge. when this parameter is set, each L3 agent
# can be associated with no more than one external network.
external_network_bridge = br-ex
# TCP Port used by Neutron metadata server
metadata_port = 9697
# Send this many gratuitous ARPs for HA setup. Set it below or equal to 0
# to disable this feature.
send_arp_for_ha = 3
# seconds between re-sync routers' data if needed
periodic_interval = 40
# seconds to start to sync routers' data after
# starting agent
periodic_fuzzy_delay = 5
# enable_metadata_proxy, which is true by default, can be set to False
# if the Nova metadata server is not available
# enable_metadata_proxy = True
# Location of Metadata Proxy UNIX domain socket
# metadata_proxy_socket = $state_path/metadata_proxy
# router_delete_namespaces, which is false by default, can be set to True if
# namespaces can be deleted cleanly on the host running the L3 agent.
# Do not enable this until you understand the problem with the Linux iproute
# utility mentioned in https://bugs.launchpad.net/neutron/+bug/1052535 and
# you are sure that your version of iproute does not suffer from the problem.
# If True, namespaces will be deleted when a router is destroyed.
# router_delete_namespaces = False
# Timeout for ovs-vsctl commands.
# If the timeout expires, ovs commands will fail with ALARMCLOCK error.
# ovs_vsctl_timeout = 10

View File

@ -0,0 +1,46 @@
[DEFAULT]
# Show debugging output in log (sets DEBUG log level output)
debug = True
# The Neutron user information for accessing the Neutron API.
auth_url = http://{{ identity_host }}:5000/v2.0
auth_region = RegionOne
# Turn off verification of the certificate for ssl
# auth_insecure = False
# Certificate Authority public key (CA cert) file for ssl
# auth_ca_cert =
admin_tenant_name = service
admin_user = neutron
admin_password = {{ NEUTRON_PASS }}
# Network service endpoint type to pull from the keystone catalog
# endpoint_type = adminURL
# IP address used by Nova metadata server
nova_metadata_ip = {{ compute_controller_host }}
# TCP Port used by Nova metadata server
nova_metadata_port = 8775
# When proxying metadata requests, Neutron signs the Instance-ID header with a
# shared secret to prevent spoofing. You may select any string for a secret,
# but it must match here and in the configuration used by the Nova Metadata
# Server. NOTE: Nova uses a different key: neutron_metadata_proxy_shared_secret
metadata_proxy_shared_secret = {{ METADATA_SECRET }}
# Location of Metadata Proxy UNIX domain socket
# metadata_proxy_socket = $state_path/metadata_proxy
# Number of separate worker processes for metadata server
# metadata_workers = 0
# Number of backlog requests to configure the metadata server socket with
# metadata_backlog = 128
# URL to connect to the cache backend.
# Example of URL using memory caching backend
# with ttl set to 5 seconds: cache_url = memory://?default_ttl=5
# default_ttl=0 parameter will cause cache entries to never expire.
# Otherwise default_ttl specifies time in seconds a cache entry is valid for.
# No cache is used in case no value is passed.
# cache_url =

View File

@ -0,0 +1,108 @@
[ml2]
# (ListOpt) List of network type driver entrypoints to be loaded from
# the neutron.ml2.type_drivers namespace.
#
# type_drivers = local,flat,vlan,gre,vxlan
# Example: type_drivers = flat,vlan,gre,vxlan
type_drivers = {{ NEUTRON_TYPE_DRIVERS |join(",") }}
# (ListOpt) Ordered list of network_types to allocate as tenant
# networks. The default value 'local' is useful for single-box testing
# but provides no connectivity between hosts.
#
# tenant_network_types = local
# Example: tenant_network_types = vlan,gre,vxlan
tenant_network_types = {{ NEUTRON_TENANT_NETWORK_TYPES |join(",") }}
# (ListOpt) Ordered list of networking mechanism driver entrypoints
# to be loaded from the neutron.ml2.mechanism_drivers namespace.
# mechanism_drivers =
# Example: mechanism_drivers = openvswitch,mlnx
# Example: mechanism_drivers = arista
# Example: mechanism_drivers = cisco,logger
# Example: mechanism_drivers = openvswitch,brocade
# Example: mechanism_drivers = linuxbridge,brocade
mechanism_drivers = {{ NEUTRON_MECHANISM_DRIVERS |join(",") }}
[ml2_type_flat]
# (ListOpt) List of physical_network names with which flat networks
# can be created. Use * to allow flat networks with arbitrary
# physical_network names.
#
flat_networks = external
# Example:flat_networks = physnet1,physnet2
# Example:flat_networks = *
[ml2_type_vlan]
# (ListOpt) List of <physical_network>[:<vlan_min>:<vlan_max>] tuples
# specifying physical_network names usable for VLAN provider and
# tenant networks, as well as ranges of VLAN tags on each
# physical_network available for allocation as tenant networks.
#
network_vlan_ranges =
# Example: network_vlan_ranges = physnet1:1000:2999,physnet2
[ml2_type_gre]
# (ListOpt) Comma-separated list of <tun_min>:<tun_max> tuples enumerating ranges of GRE tunnel IDs that are available for tenant network allocation
tunnel_id_ranges = 1:1000
[ml2_type_vxlan]
# (ListOpt) Comma-separated list of <vni_min>:<vni_max> tuples enumerating
# ranges of VXLAN VNI IDs that are available for tenant network allocation.
#
vni_ranges = 1001:4095
# (StrOpt) Multicast group for the VXLAN interface. When configured, will
# enable sending all broadcast traffic to this multicast group. When left
# unconfigured, will disable multicast VXLAN mode.
#
vxlan_group = 239.1.1.1
# Example: vxlan_group = 239.1.1.1
[securitygroup]
# Controls if neutron security group is enabled or not.
# It should be false when you use nova security group.
# enable_security_group = True
firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
enable_security_group = True
[database]
connection = mysql://neutron:{{ NEUTRON_DBPASS }}@{{ db_host }}/ovs_neutron?charset=utf8
[ovs]
local_ip = {{ internal_ip }}
{% if 'openvswitch' in NEUTRON_MECHANISM_DRIVERS %}
integration_bridge = br-int
tunnel_bridge = br-tun
tunnel_id_ranges = 1001:4095
tunnel_type = {{ NEUTRON_TUNNEL_TYPES |join(",") }}
bridge_mappings = {{ neutron_ovs_bridge_mappings | default("external:br-ex") }}
{% endif %}
[agent]
root_helper = sudo neutron-rootwrap /etc/neutron/rootwrap.conf
tunnel_types = {{ NEUTRON_TUNNEL_TYPES |join(",") }}
{% if 'vxlan' in NEUTRON_TUNNEL_TYPES %}
vxlan_udp_port = 4789
{% endif %}
l2_population = False
[odl]
{% if 'opendaylight' in NEUTRON_MECHANISM_DRIVERS %}
network_vlan_ranges = 1001:4095
tunnel_id_ranges = 1001:4095
tun_peer_patch_port = patch-int
int_peer_patch_port = patch-tun
tenant_network_type = vxlan
tunnel_bridge = br-tun
integration_bridge = br-int
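# Assumed format for the next line: <host>:<port>:<user>:<password>,
# populated from the ODL values in the group variables.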
controllers = {{ odl_controller }}:{{ odl_api_port }}:{{ odl_username }}:{{ odl_password }}
{% endif %}
[ml2_odl]
{% if 'opendaylight' in NEUTRON_MECHANISM_DRIVERS %}
username = {{ odl_username }}
password = {{ odl_password }}
url = http://{{ odl_controller }}:{{ odl_api_port }}/controller/nb/v2/neutron
{% endif %}

View File

@ -0,0 +1,466 @@
[DEFAULT]
# Print more verbose output (set logging level to INFO instead of default WARNING level).
verbose = {{ VERBOSE }}
# Print debugging output (set logging level to DEBUG instead of default WARNING level).
debug = {{ DEBUG }}
# Where to store Neutron state files. This directory must be writable by the
# user executing the agent.
state_path = /var/lib/neutron
# Where to store lock files
lock_path = $state_path/lock
# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s
# log_date_format = %Y-%m-%d %H:%M:%S
# use_syslog -> syslog
# log_file and log_dir -> log_dir/log_file
# (not log_file) and log_dir -> log_dir/{binary_name}.log
# use_stderr -> stderr
# (not use_stderr) and (not log_file) -> stdout
# publish_errors -> notification system
# use_syslog = False
# syslog_log_facility = LOG_USER
# use_stderr = True
# log_file =
log_dir = /var/log/neutron
# publish_errors = False
# Address to bind the API server to
bind_host = {{ network_server_host }}
# Port to bind the API server to
bind_port = 9696
# Path to the extensions. Note that this can be a colon-separated list of
# paths. For example:
# api_extensions_path = extensions:/path/to/more/extensions:/even/more/extensions
# The __path__ of neutron.extensions is appended to this, so if your
# extensions are in there you don't need to specify them here
# api_extensions_path =
# (StrOpt) Neutron core plugin entrypoint to be loaded from the
# neutron.core_plugins namespace. See setup.cfg for the entrypoint names of the
# plugins included in the neutron source distribution. For compatibility with
# previous versions, the class name of a plugin can be specified instead of its
# entrypoint name.
#
#core_plugin = neutron.plugins.ml2.plugin.Ml2Plugin
core_plugin = ml2
# Example: core_plugin = ml2
# (ListOpt) List of service plugin entrypoints to be loaded from the
# neutron.service_plugins namespace. See setup.cfg for the entrypoint names of
# the plugins included in the neutron source distribution. For compatibility
# with previous versions, the class name of a plugin can be specified instead
# of its entrypoint name.
#
# service_plugins =
# Example: service_plugins = router,firewall,lbaas,vpnaas,metering
service_plugins = router
# Paste configuration file
api_paste_config = api-paste.ini
# The strategy to be used for auth.
# Supported values are 'keystone'(default), 'noauth'.
auth_strategy = keystone
# Base MAC address. The first 3 octets will remain unchanged. If the
# 4th octet is not 00, it will also be used. The others will be
# randomly generated.
# 3 octet
# base_mac = fa:16:3e:00:00:00
# 4 octet
# base_mac = fa:16:3e:4f:00:00
# Maximum amount of retries to generate a unique MAC address
# mac_generation_retries = 16
# DHCP Lease duration (in seconds)
dhcp_lease_duration = 86400
# Allow sending resource operation notification to DHCP agent
# dhcp_agent_notification = True
# Enable or disable bulk create/update/delete operations
# allow_bulk = True
# Enable or disable pagination
# allow_pagination = False
# Enable or disable sorting
# allow_sorting = False
# Enable or disable overlapping IPs for subnets
# Attention: the following parameter MUST be set to False if Neutron is
# being used in conjunction with nova security groups
allow_overlapping_ips = True
# Ensure that configured gateway is on subnet
# force_gateway_on_subnet = False
# RPC configuration options. Defined in rpc __init__
# The messaging module to use, defaults to kombu.
# rpc_backend = neutron.openstack.common.rpc.impl_kombu
rpc_backend = rabbit
rabbit_host = {{ rabbit_host }}
rabbit_password = {{ RABBIT_PASS }}
# Size of RPC thread pool
rpc_thread_pool_size = 240
# Size of RPC connection pool
rpc_conn_pool_size = 100
# Seconds to wait for a response from call or multicall
rpc_response_timeout = 300
# Seconds to wait before a cast expires (TTL). Only supported by impl_zmq.
rpc_cast_timeout = 300
# Modules of exceptions that are permitted to be recreated
# upon receiving exception data from an rpc call.
# allowed_rpc_exception_modules = neutron.openstack.common.exception, nova.exception
# AMQP exchange to connect to if using RabbitMQ or QPID
# control_exchange = neutron
# If passed, use a fake RabbitMQ provider
# fake_rabbit = False
# Configuration options if sending notifications via kombu rpc (these are
# the defaults)
# SSL version to use (valid only if SSL enabled)
# kombu_ssl_version =
# SSL key file (valid only if SSL enabled)
# kombu_ssl_keyfile =
# SSL cert file (valid only if SSL enabled)
# kombu_ssl_certfile =
# SSL certification authority file (valid only if SSL enabled)
# kombu_ssl_ca_certs =
# Port where RabbitMQ server is running/listening
rabbit_port = 5672
# RabbitMQ single or HA cluster (host:port pairs i.e: host1:5672, host2:5672)
# rabbit_hosts is defaulted to '$rabbit_host:$rabbit_port'
# rabbit_hosts = localhost:5672
# User ID used for RabbitMQ connections
rabbit_userid = guest
# Location of a virtual RabbitMQ installation.
# rabbit_virtual_host = /
# Maximum retries with trying to connect to RabbitMQ
# (the default of 0 implies an infinite retry count)
# rabbit_max_retries = 0
# RabbitMQ connection retry interval
# rabbit_retry_interval = 1
# Use HA queues in RabbitMQ (x-ha-policy: all). You need to
# wipe RabbitMQ database when changing this option. (boolean value)
# rabbit_ha_queues = false
# QPID
# rpc_backend=neutron.openstack.common.rpc.impl_qpid
# Qpid broker hostname
# qpid_hostname = localhost
# Qpid broker port
# qpid_port = 5672
# Qpid single or HA cluster (host:port pairs i.e: host1:5672, host2:5672)
# qpid_hosts is defaulted to '$qpid_hostname:$qpid_port'
# qpid_hosts = localhost:5672
# Username for qpid connection
# qpid_username = ''
# Password for qpid connection
# qpid_password = ''
# Space separated list of SASL mechanisms to use for auth
# qpid_sasl_mechanisms = ''
# Seconds between connection keepalive heartbeats
# qpid_heartbeat = 60
# Transport to use, either 'tcp' or 'ssl'
# qpid_protocol = tcp
# Disable Nagle algorithm
# qpid_tcp_nodelay = True
# ZMQ
# rpc_backend=neutron.openstack.common.rpc.impl_zmq
# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP.
# The "host" option should point or resolve to this address.
# rpc_zmq_bind_address = *
# ============ Notification System Options =====================
# Notifications can be sent when network/subnet/port are created, updated or deleted.
# There are three methods of sending notifications: logging (via the
# log_file directive), rpc (via a message queue) and
# noop (no notifications sent, the default)
# Notification_driver can be defined multiple times
# Do nothing driver
# notification_driver = neutron.openstack.common.notifier.no_op_notifier
# Logging driver
# notification_driver = neutron.openstack.common.notifier.log_notifier
# RPC driver.
notification_driver = neutron.openstack.common.notifier.rpc_notifier
# default_notification_level is used to form actual topic name(s) or to set logging level
default_notification_level = INFO
# default_publisher_id is a part of the notification payload
# host = myhost.com
# default_publisher_id = $host
# Defined in rpc_notifier, can be comma separated values.
# The actual topic names will be %s.%(default_notification_level)s
notification_topics = notifications
# Default maximum number of items returned in a single response. A value of
# 'infinite' or a negative value means no limit; any other value must be
# greater than 0. If more items are requested than pagination_max_limit, the
# server returns only pagination_max_limit items.
# pagination_max_limit = -1
# Maximum number of DNS nameservers per subnet
# max_dns_nameservers = 5
# Maximum number of host routes per subnet
# max_subnet_host_routes = 20
# Maximum number of fixed ips per port
# max_fixed_ips_per_port = 5
# =========== items for agent management extension =============
# Seconds to regard the agent as down; should be at least twice
# report_interval, to be sure the agent is down for good
agent_down_time = 75
# =========== end of items for agent management extension =====
# =========== items for agent scheduler extension =============
# Driver to use for scheduling network to DHCP agent
network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.ChanceScheduler
# Driver to use for scheduling router to a default L3 agent
router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.ChanceScheduler
# Driver to use for scheduling a loadbalancer pool to an lbaas agent
# loadbalancer_pool_scheduler_driver = neutron.services.loadbalancer.agent_scheduler.ChanceScheduler
# Allow auto scheduling networks to DHCP agent. It will schedule non-hosted
# networks to first DHCP agent which sends get_active_networks message to
# neutron server
# network_auto_schedule = True
# Allow auto scheduling routers to L3 agent. It will schedule non-hosted
# routers to first L3 agent which sends sync_routers message to neutron server
# router_auto_schedule = True
# Number of DHCP agents scheduled to host a network. This enables redundant
# DHCP agents for configured networks.
# dhcp_agents_per_network = 1
# =========== end of items for agent scheduler extension =====
# =========== WSGI parameters related to the API server ==============
# Number of separate worker processes to spawn. The default, 0, runs the
# worker thread in the current process. Greater than 0 launches that number of
# child processes as workers. The parent process manages them.
api_workers = 8
# Number of separate RPC worker processes to spawn. The default, 0, runs the
# worker thread in the current process. Greater than 0 launches that number of
# child processes as RPC workers. The parent process manages them.
# This feature is experimental until issues are addressed and testing has been
# enabled for various plugins for compatibility.
rpc_workers = 8
# Sets the value of TCP_KEEPIDLE in seconds to use for each server socket when
# starting API server. Not supported on OS X.
# tcp_keepidle = 600
# Number of seconds to keep retrying to listen
# retry_until_window = 30
# Number of backlog requests to configure the socket with.
# backlog = 4096
# Max header line to accommodate large tokens
# max_header_line = 16384
# Enable SSL on the API server
# use_ssl = False
# Certificate file to use when starting API server securely
# ssl_cert_file = /path/to/certfile
# Private key file to use when starting API server securely
# ssl_key_file = /path/to/keyfile
# CA certificate file to use when starting API server securely to
# verify connecting clients. This is an optional parameter only required if
# API clients need to authenticate to the API server using SSL certificates
# signed by a trusted CA
# ssl_ca_file = /path/to/cafile
# ======== end of WSGI parameters related to the API server ==========
# ======== neutron nova interactions ==========
# Send notification to nova when port status is active.
notify_nova_on_port_status_changes = True
# Send notifications to nova when port data (fixed_ips/floatingips) change
# so nova can update its cache.
notify_nova_on_port_data_changes = True
# URL for connection to nova (Only supports one nova region currently).
nova_url = http://{{ compute_controller_host }}:8774/v2
# Name of nova region to use. Useful if keystone manages more than one region
nova_region_name = RegionOne
# Username for connection to nova in admin context
nova_admin_username = nova
# The uuid of the admin nova tenant
# Password for connection to nova in admin context.
nova_admin_password = {{ NOVA_PASS }}
# Authorization URL for connection to nova in admin context.
nova_admin_auth_url = http://{{ identity_host }}:35357/v2.0
# Number of seconds between sending events to nova if there are any events to send
send_events_interval = 2
# ======== end of neutron nova interactions ==========
[quotas]
# Default driver to use for quota checks
quota_driver = neutron.db.quota_db.DbQuotaDriver
# Resource name(s) that are supported in quota features
quota_items = network,subnet,port
# Default number of resources allowed per tenant. A negative value means
# unlimited.
default_quota = -1
# Number of networks allowed per tenant. A negative value means unlimited.
quota_network = 100
# Number of subnets allowed per tenant. A negative value means unlimited.
quota_subnet = 100
# Number of ports allowed per tenant. A negative value means unlimited.
quota_port = 8000
# Number of security groups allowed per tenant. A negative value means
# unlimited.
quota_security_group = 1000
# Number of security group rules allowed per tenant. A negative value means
# unlimited.
quota_security_group_rule = 1000
# Number of vips allowed per tenant. A negative value means unlimited.
# quota_vip = 10
# Number of pools allowed per tenant. A negative value means unlimited.
# quota_pool = 10
# Number of pool members allowed per tenant. A negative value means unlimited.
# The default is unlimited because a member is not a real resource consumer
# on OpenStack. However, on the back end, a member is a resource consumer
# and that is the reason why quota is possible.
# quota_member = -1
# Number of health monitors allowed per tenant. A negative value means
# unlimited.
# The default is unlimited because a health monitor is not a real resource
# consumer on OpenStack. However, on the back end, a health monitor is a
# resource consumer, and that is the reason why quota is possible.
# quota_health_monitors = -1
# Number of routers allowed per tenant. A negative value means unlimited.
# quota_router = 10
# Number of floating IPs allowed per tenant. A negative value means unlimited.
# quota_floatingip = 50
[agent]
# Use "sudo neutron-rootwrap /etc/neutron/rootwrap.conf" to use the real
# root filter facility.
# Change to "sudo" to skip the filtering and just run the comand directly
root_helper = "sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf"
# =========== items for agent management extension =============
# seconds between nodes reporting state to server; should be less than
# agent_down_time, best if it is half or less than agent_down_time
report_interval = 30
# =========== end of items for agent management extension =====
[keystone_authtoken]
auth_uri = http://{{ identity_host }}:5000/v2.0
identity_uri = http://{{ identity_host }}:35357
admin_tenant_name = service
admin_user = neutron
admin_password = {{ NEUTRON_PASS }}
signing_dir = $state_path/keystone-signing
[database]
# This line MUST be changed to actually run the plugin.
# Example:
# connection = mysql://root:pass@127.0.0.1:3306/neutron
# Replace 127.0.0.1 above with the IP address of the database used by the
# main neutron server. (Leave it as is if the database runs on this host.)
# connection = sqlite:////var/lib/neutron/neutron.sqlite
#connection = mysql://neutron:{{ NEUTRON_DBPASS }}@{{ db_host }}/neutron
# The SQLAlchemy connection string used to connect to the slave database
slave_connection =
# Database reconnection retry times - in event connectivity is lost
# set to -1 implies an infinite retry count
max_retries = 10
# Database reconnection interval in seconds - if the initial connection to the
# database fails
retry_interval = 10
# Minimum number of SQL connections to keep open in a pool
min_pool_size = 1
# Maximum number of SQL connections to keep open in a pool
max_pool_size = 100
# Timeout in seconds before idle sql connections are reaped
idle_timeout = 3600
# If set, use this value for max_overflow with sqlalchemy
max_overflow = 100
# Verbosity of SQL debugging information. 0=None, 100=Everything
connection_debug = 0
# Add python stack traces to SQL as comment strings
connection_trace = False
# If set, use this value for pool_timeout with sqlalchemy
pool_timeout = 10
[service_providers]
# Specify service providers (drivers) for advanced services like loadbalancer, VPN, Firewall.
# Must be in form:
# service_provider=<service_type>:<name>:<driver>[:default]
# List of allowed service types includes LOADBALANCER, FIREWALL, VPN
# Combination of <service type> and <name> must be unique; <driver> must also be unique
# This is multiline option, example for default provider:
# service_provider=LOADBALANCER:name:lbaas_plugin_driver_path:default
# example of non-default provider:
# service_provider=FIREWALL:name2:firewall_driver_path
# --- Reference implementations ---
service_provider=LOADBALANCER:Haproxy:neutron.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default
service_provider=VPN:openswan:neutron.services.vpn.service_drivers.ipsec.IPsecVPNDriver:default
# In order to activate Radware's lbaas driver you need to uncomment the next line.
# If you want to keep the HA Proxy as the default lbaas driver, remove the attribute default from the line below.
# Otherwise comment the HA Proxy line
# service_provider = LOADBALANCER:Radware:neutron.services.loadbalancer.drivers.radware.driver.LoadBalancerDriver:default
# uncomment the following line to make the 'netscaler' LBaaS provider available.
# service_provider=LOADBALANCER:NetScaler:neutron.services.loadbalancer.drivers.netscaler.netscaler_driver.NetScalerPluginDriver
# Uncomment the following line (and comment out the OpenSwan VPN line) to enable Cisco's VPN driver.
# service_provider=VPN:cisco:neutron.services.vpn.service_drivers.cisco_ipsec.CiscoCsrIPsecVPNDriver:default
# Uncomment the line below to use Embrane heleos as Load Balancer service provider.
# service_provider=LOADBALANCER:Embrane:neutron.services.loadbalancer.drivers.embrane.driver.EmbraneLbaas:default

View File

@ -0,0 +1,467 @@
[DEFAULT]
# Print more verbose output (set logging level to INFO instead of default WARNING level).
verbose = {{ VERBOSE }}
# Print debugging output (set logging level to DEBUG instead of default WARNING level).
debug = {{ DEBUG }}
# Where to store Neutron state files. This directory must be writable by the
# user executing the agent.
state_path = /var/lib/neutron
# Where to store lock files
lock_path = $state_path/lock
# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s
# log_date_format = %Y-%m-%d %H:%M:%S
# use_syslog -> syslog
# log_file and log_dir -> log_dir/log_file
# (not log_file) and log_dir -> log_dir/{binary_name}.log
# use_stderr -> stderr
# (not use_stderr) and (not log_file) -> stdout
# publish_errors -> notification system
# use_syslog = False
# syslog_log_facility = LOG_USER
# use_stderr = True
# log_file =
log_dir = /var/log/neutron
# publish_errors = False
# Address to bind the API server to
bind_host = {{ network_server_host }}
# Port to bind the API server to
bind_port = 9696
# Path to the extensions. Note that this can be a colon-separated list of
# paths. For example:
# api_extensions_path = extensions:/path/to/more/extensions:/even/more/extensions
# The __path__ of neutron.extensions is appended to this, so if your
# extensions are in there you don't need to specify them here
# api_extensions_path =
# (StrOpt) Neutron core plugin entrypoint to be loaded from the
# neutron.core_plugins namespace. See setup.cfg for the entrypoint names of the
# plugins included in the neutron source distribution. For compatibility with
# previous versions, the class name of a plugin can be specified instead of its
# entrypoint name.
#
#core_plugin = neutron.plugins.ml2.plugin.Ml2Plugin
core_plugin = ml2
# Example: core_plugin = ml2
# (ListOpt) List of service plugin entrypoints to be loaded from the
# neutron.service_plugins namespace. See setup.cfg for the entrypoint names of
# the plugins included in the neutron source distribution. For compatibility
# with previous versions, the class name of a plugin can be specified instead
# of its entrypoint name.
#
# service_plugins =
# Example: service_plugins = router,firewall,lbaas,vpnaas,metering
service_plugins = router
# Paste configuration file
api_paste_config = api-paste.ini
# The strategy to be used for auth.
# Supported values are 'keystone'(default), 'noauth'.
auth_strategy = keystone
# Base MAC address. The first 3 octets will remain unchanged. If the
# 4th octet is not 00, it will also be used. The others will be
# randomly generated.
# 3 octet
# base_mac = fa:16:3e:00:00:00
# 4 octet
# base_mac = fa:16:3e:4f:00:00
# Maximum amount of retries to generate a unique MAC address
# mac_generation_retries = 16
# DHCP Lease duration (in seconds)
dhcp_lease_duration = 86400
# Allow sending resource operation notification to DHCP agent
# dhcp_agent_notification = True
# Enable or disable bulk create/update/delete operations
# allow_bulk = True
# Enable or disable pagination
# allow_pagination = False
# Enable or disable sorting
# allow_sorting = False
# Enable or disable overlapping IPs for subnets
# Attention: the following parameter MUST be set to False if Neutron is
# being used in conjunction with nova security groups
allow_overlapping_ips = True
# Ensure that configured gateway is on subnet
# force_gateway_on_subnet = False
# RPC configuration options. Defined in rpc __init__
# The messaging module to use, defaults to kombu.
# rpc_backend = neutron.openstack.common.rpc.impl_kombu
rpc_backend = rabbit
rabbit_host = {{ rabbit_host }}
rabbit_password = {{ RABBIT_PASS }}
# Size of RPC thread pool
rpc_thread_pool_size = 240
# Size of RPC connection pool
rpc_conn_pool_size = 100
# Seconds to wait for a response from call or multicall
rpc_response_timeout = 300
# Seconds to wait before a cast expires (TTL). Only supported by impl_zmq.
rpc_cast_timeout = 300
# Modules of exceptions that are permitted to be recreated
# upon receiving exception data from an rpc call.
# allowed_rpc_exception_modules = neutron.openstack.common.exception, nova.exception
# AMQP exchange to connect to if using RabbitMQ or QPID
# control_exchange = neutron
# If passed, use a fake RabbitMQ provider
# fake_rabbit = False
# Configuration options if sending notifications via kombu rpc (these are
# the defaults)
# SSL version to use (valid only if SSL enabled)
# kombu_ssl_version =
# SSL key file (valid only if SSL enabled)
# kombu_ssl_keyfile =
# SSL cert file (valid only if SSL enabled)
# kombu_ssl_certfile =
# SSL certification authority file (valid only if SSL enabled)
# kombu_ssl_ca_certs =
# Port where RabbitMQ server is running/listening
rabbit_port = 5672
# RabbitMQ single or HA cluster (host:port pairs i.e: host1:5672, host2:5672)
# rabbit_hosts is defaulted to '$rabbit_host:$rabbit_port'
# rabbit_hosts = localhost:5672
# User ID used for RabbitMQ connections
rabbit_userid = guest
# Location of a virtual RabbitMQ installation.
# rabbit_virtual_host = /
# Maximum retries with trying to connect to RabbitMQ
# (the default of 0 implies an infinite retry count)
# rabbit_max_retries = 0
# RabbitMQ connection retry interval
# rabbit_retry_interval = 1
# Use HA queues in RabbitMQ (x-ha-policy: all). You need to
# wipe RabbitMQ database when changing this option. (boolean value)
# rabbit_ha_queues = false
# QPID
# rpc_backend=neutron.openstack.common.rpc.impl_qpid
# Qpid broker hostname
# qpid_hostname = localhost
# Qpid broker port
# qpid_port = 5672
# Qpid single or HA cluster (host:port pairs i.e: host1:5672, host2:5672)
# qpid_hosts is defaulted to '$qpid_hostname:$qpid_port'
# qpid_hosts = localhost:5672
# Username for qpid connection
# qpid_username = ''
# Password for qpid connection
# qpid_password = ''
# Space separated list of SASL mechanisms to use for auth
# qpid_sasl_mechanisms = ''
# Seconds between connection keepalive heartbeats
# qpid_heartbeat = 60
# Transport to use, either 'tcp' or 'ssl'
# qpid_protocol = tcp
# Disable Nagle algorithm
# qpid_tcp_nodelay = True
# ZMQ
# rpc_backend=neutron.openstack.common.rpc.impl_zmq
# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP.
# The "host" option should point or resolve to this address.
# rpc_zmq_bind_address = *
# ============ Notification System Options =====================
# Notifications can be sent when network/subnet/port are created, updated or deleted.
# There are three methods of sending notifications: logging (via the
# log_file directive), rpc (via a message queue) and
# noop (no notifications sent, the default)
# Notification_driver can be defined multiple times
# Do nothing driver
# notification_driver = neutron.openstack.common.notifier.no_op_notifier
# Logging driver
# notification_driver = neutron.openstack.common.notifier.log_notifier
# RPC driver.
notification_driver = neutron.openstack.common.notifier.rpc_notifier
# default_notification_level is used to form actual topic name(s) or to set logging level
default_notification_level = INFO
# default_publisher_id is a part of the notification payload
# host = myhost.com
# default_publisher_id = $host
# Defined in rpc_notifier, can be comma separated values.
# The actual topic names will be %s.%(default_notification_level)s
notification_topics = notifications
# Default maximum number of items returned in a single response. A value of
# 'infinite' or a negative value means no limit; any other value must be
# greater than 0. If more items are requested than pagination_max_limit, the
# server returns only pagination_max_limit items.
# pagination_max_limit = -1
# Maximum number of DNS nameservers per subnet
# max_dns_nameservers = 5
# Maximum number of host routes per subnet
# max_subnet_host_routes = 20
# Maximum number of fixed ips per port
# max_fixed_ips_per_port = 5
# =========== items for agent management extension =============
# Seconds to regard the agent as down; should be at least twice
# report_interval, to be sure the agent is down for good
agent_down_time = 75
# =========== end of items for agent management extension =====
# =========== items for agent scheduler extension =============
# Driver to use for scheduling network to DHCP agent
network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.ChanceScheduler
# Driver to use for scheduling router to a default L3 agent
router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.ChanceScheduler
# Driver to use for scheduling a loadbalancer pool to an lbaas agent
# loadbalancer_pool_scheduler_driver = neutron.services.loadbalancer.agent_scheduler.ChanceScheduler
# Allow auto scheduling networks to DHCP agent. It will schedule non-hosted
# networks to first DHCP agent which sends get_active_networks message to
# neutron server
# network_auto_schedule = True
# Allow auto scheduling routers to L3 agent. It will schedule non-hosted
# routers to first L3 agent which sends sync_routers message to neutron server
# router_auto_schedule = True
# Number of DHCP agents scheduled to host a network. This enables redundant
# DHCP agents for configured networks.
# dhcp_agents_per_network = 1
# =========== end of items for agent scheduler extension =====
# =========== WSGI parameters related to the API server ==============
# Number of separate worker processes to spawn. The default, 0, runs the
# worker thread in the current process. Greater than 0 launches that number of
# child processes as workers. The parent process manages them.
api_workers = 8
# Number of separate RPC worker processes to spawn. The default, 0, runs the
# worker thread in the current process. Greater than 0 launches that number of
# child processes as RPC workers. The parent process manages them.
# This feature is experimental until issues are addressed and testing has been
# enabled for various plugins for compatibility.
rpc_workers = 8
# Sets the value of TCP_KEEPIDLE in seconds to use for each server socket when
# starting API server. Not supported on OS X.
# tcp_keepidle = 600
# Number of seconds to keep retrying to listen
# retry_until_window = 30
# Number of backlog requests to configure the socket with.
# backlog = 4096
# Max header line to accommodate large tokens
# max_header_line = 16384
# Enable SSL on the API server
# use_ssl = False
# Certificate file to use when starting API server securely
# ssl_cert_file = /path/to/certfile
# Private key file to use when starting API server securely
# ssl_key_file = /path/to/keyfile
# CA certificate file to use when starting API server securely to
# verify connecting clients. This is an optional parameter only required if
# API clients need to authenticate to the API server using SSL certificates
# signed by a trusted CA
# ssl_ca_file = /path/to/cafile
# ======== end of WSGI parameters related to the API server ==========
# ======== neutron nova interactions ==========
# Send notification to nova when port status is active.
notify_nova_on_port_status_changes = True
# Send notifications to nova when port data (fixed_ips/floatingips) change
# so nova can update its cache.
notify_nova_on_port_data_changes = True
# URL for connection to nova (Only supports one nova region currently).
nova_url = http://{{ compute_controller_host }}:8774/v2
# Name of nova region to use. Useful if keystone manages more than one region
nova_region_name = RegionOne
# Username for connection to nova in admin context
nova_admin_username = nova
# The uuid of the admin nova tenant
nova_admin_tenant_id = {{ NOVA_ADMIN_TENANT_ID.stdout_lines[0] }}
# Password for connection to nova in admin context.
nova_admin_password = {{ NOVA_PASS }}
# Authorization URL for connection to nova in admin context.
nova_admin_auth_url = http://{{ identity_host }}:35357/v2.0
# Number of seconds between sending events to nova if there are any events to send
send_events_interval = 2
# ======== end of neutron nova interactions ==========
[quotas]
# Default driver to use for quota checks
quota_driver = neutron.db.quota_db.DbQuotaDriver
# Resource name(s) that are supported in quota features
quota_items = network,subnet,port
# Default number of resources allowed per tenant. A negative value means
# unlimited.
default_quota = -1
# Number of networks allowed per tenant. A negative value means unlimited.
quota_network = 100
# Number of subnets allowed per tenant. A negative value means unlimited.
quota_subnet = 100
# Number of ports allowed per tenant. A negative value means unlimited.
quota_port = 8000
# Number of security groups allowed per tenant. A negative value means
# unlimited.
quota_security_group = 1000
# Number of security group rules allowed per tenant. A negative value means
# unlimited.
quota_security_group_rule = 1000
# Number of vips allowed per tenant. A negative value means unlimited.
# quota_vip = 10
# Number of pools allowed per tenant. A negative value means unlimited.
# quota_pool = 10
# Number of pool members allowed per tenant. A negative value means unlimited.
# The default is unlimited because a member is not a real resource consumer
# on OpenStack. However, on the back-end a member is a resource consumer,
# and that is the reason why quota is possible.
# quota_member = -1
# Number of health monitors allowed per tenant. A negative value means
# unlimited.
# The default is unlimited because a health monitor is not a real resource
# consumer on OpenStack. However, on the back-end a health monitor is a
# resource consumer, and that is the reason why quota is possible.
# quota_health_monitors = -1
# Number of routers allowed per tenant. A negative value means unlimited.
# quota_router = 10
# Number of floating IPs allowed per tenant. A negative value means unlimited.
# quota_floatingip = 50
[agent]
# Use "sudo neutron-rootwrap /etc/neutron/rootwrap.conf" to use the real
# root filter facility.
# Change to "sudo" to skip the filtering and just run the comand directly
root_helper = "sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf"
# =========== items for agent management extension =============
# seconds between nodes reporting state to server; should be less than
# agent_down_time, best if it is half or less than agent_down_time
report_interval = 30
# =========== end of items for agent management extension =====
[keystone_authtoken]
auth_uri = http://{{ identity_host }}:5000/v2.0
identity_uri = http://{{ identity_host }}:35357
admin_tenant_name = service
admin_user = neutron
admin_password = {{ NEUTRON_PASS }}
signing_dir = $state_path/keystone-signing
[database]
# This line MUST be changed to actually run the plugin.
# Example:
# connection = mysql://root:pass@127.0.0.1:3306/neutron
# Replace 127.0.0.1 above with the IP address of the database used by the
# main neutron server. (Leave it as is if the database runs on this host.)
# connection = sqlite:////var/lib/neutron/neutron.sqlite
#connection = mysql://neutron:{{ NEUTRON_DBPASS }}@{{ db_host }}/neutron
# The SQLAlchemy connection string used to connect to the slave database
slave_connection =
# Number of database reconnection retries in the event connectivity is lost;
# setting this to -1 implies an infinite retry count
max_retries = 10
# Database reconnection interval in seconds - if the initial connection to the
# database fails
retry_interval = 10
# Minimum number of SQL connections to keep open in a pool
min_pool_size = 1
# Maximum number of SQL connections to keep open in a pool
max_pool_size = 100
# Timeout in seconds before idle sql connections are reaped
idle_timeout = 3600
# If set, use this value for max_overflow with sqlalchemy
max_overflow = 100
# Verbosity of SQL debugging information. 0=None, 100=Everything
connection_debug = 0
# Add python stack traces to SQL as comment strings
connection_trace = False
# If set, use this value for pool_timeout with sqlalchemy
pool_timeout = 10
[service_providers]
# Specify service providers (drivers) for advanced services like loadbalancer, VPN, Firewall.
# Must be in form:
# service_provider=<service_type>:<name>:<driver>[:default]
# List of allowed service types includes LOADBALANCER, FIREWALL, VPN
# Combination of <service type> and <name> must be unique; <driver> must also be unique
# This is a multiline option; example for the default provider:
# service_provider=LOADBALANCER:name:lbaas_plugin_driver_path:default
# example of non-default provider:
# service_provider=FIREWALL:name2:firewall_driver_path
# --- Reference implementations ---
service_provider=LOADBALANCER:Haproxy:neutron.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default
service_provider=VPN:openswan:neutron.services.vpn.service_drivers.ipsec.IPsecVPNDriver:default
# In order to activate Radware's lbaas driver you need to uncomment the next line.
# If you want to keep the HA Proxy as the default lbaas driver, remove the attribute default from the line below.
# Otherwise comment the HA Proxy line
# service_provider = LOADBALANCER:Radware:neutron.services.loadbalancer.drivers.radware.driver.LoadBalancerDriver:default
# uncomment the following line to make the 'netscaler' LBaaS provider available.
# service_provider=LOADBALANCER:NetScaler:neutron.services.loadbalancer.drivers.netscaler.netscaler_driver.NetScalerPluginDriver
# Uncomment the following line (and comment out the OpenSwan VPN line) to enable Cisco's VPN driver.
# service_provider=VPN:cisco:neutron.services.vpn.service_drivers.cisco_ipsec.CiscoCsrIPsecVPNDriver:default
# Uncomment the line below to use Embrane heleos as Load Balancer service provider.
# service_provider=LOADBALANCER:Embrane:neutron.services.loadbalancer.drivers.embrane.driver.EmbraneLbaas:default

View File

@ -0,0 +1,4 @@
# neutron --os-username=admin --os-password={{ ADMIN_PASS }} --os-tenant-name=admin --os-auth-url=http://{{ identity_host }}:35357/v2.0 net-create ext-net --shared --router:external=True
# neutron --os-username=admin --os-password={{ ADMIN_PASS }} --os-tenant-name=admin --os-auth-url=http://{{ identity_host }}:35357/v2.0 subnet-create ext-net --name ext-subnet --allocation-pool start={{ FLOATING_IP_START }},end={{ FLOATING_IP_END }} --disable-dhcp --gateway {{ EXTERNAL_NETWORK_GATEWAY }} {{ EXTERNAL_NETWORK_CIDR }}
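# A possible follow-up, with "demo-router" as an illustrative name only: attach a router
# to the new external network so tenant instances can reach it.
# neutron --os-username=admin --os-password={{ ADMIN_PASS }} --os-tenant-name=admin --os-auth-url=http://{{ identity_host }}:35357/v2.0 router-create demo-router
# neutron --os-username=admin --os-password={{ ADMIN_PASS }} --os-tenant-name=admin --os-auth-url=http://{{ identity_host }}:35357/v2.0 router-gateway-set demo-router ext-net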

View File

@ -0,0 +1,68 @@
[DEFAULT]
dhcpbridge_flagfile=/etc/nova/nova.conf
dhcpbridge=/usr/bin/nova-dhcpbridge
logdir=/var/log/nova
state_path=/var/lib/nova
lock_path=/var/lock/nova
force_dhcp_release=True
iscsi_helper=tgtadm
libvirt_use_virtio_for_bridges=True
connection_type=libvirt
root_helper=sudo nova-rootwrap /etc/nova/rootwrap.conf
verbose={{ VERBOSE }}
debug={{ DEBUG }}
ec2_private_dns_show_ip=True
api_paste_config=/etc/nova/api-paste.ini
volumes_path=/var/lib/nova/volumes
enabled_apis=ec2,osapi_compute,metadata
vif_plugging_is_fatal = false
vif_plugging_timeout = 0
auth_strategy = keystone
rpc_backend = rabbit
rabbit_host = {{ rabbit_host }}
rabbit_password = {{ RABBIT_PASS }}
my_ip = {{ internal_ip }}
vnc_enabled = True
vncserver_listen = 0.0.0.0
vncserver_proxyclient_address = {{ internal_ip }}
novncproxy_base_url = http://{{ compute_controller_host }}:6080/vnc_auto.html
novncproxy_host = {{ internal_ip }}
novncproxy_port = 6080
network_api_class = nova.network.neutronv2.api.API
linuxnet_interface_driver = nova.network.linux_net.LinuxOVSInterfaceDriver
firewall_driver = nova.virt.firewall.NoopFirewallDriver
security_group_api = neutron
instance_usage_audit = True
instance_usage_audit_period = hour
notify_on_state_change = vm_and_task_state
notification_driver = nova.openstack.common.notifier.rpc_notifier
notification_driver = ceilometer.compute.nova_notifier
[database]
# The SQLAlchemy connection string used to connect to the database
connection = mysql://nova:{{ NOVA_DBPASS }}@{{ db_host }}/nova
[keystone_authtoken]
auth_uri = http://{{ identity_host }}:5000/v2.0
identity_uri = http://{{ identity_host }}:35357
admin_tenant_name = service
admin_user = nova
admin_password = {{ NOVA_PASS }}
[glance]
host = {{ image_host }}
[neutron]
url = http://{{ network_server_host }}:9696
auth_strategy = keystone
admin_tenant_name = service
admin_username = neutron
admin_password = {{ NEUTRON_PASS }}
admin_auth_url = http://{{ identity_host }}:35357/v2.0
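Once these services are configured, a quick sanity check of the compute side, reusing the admin credential pattern from the templates above, is to list service states (a manual verification step, not part of this commit):

# nova --os-username=admin --os-password={{ ADMIN_PASS }} --os-tenant-name=admin --os-auth-url=http://{{ identity_host }}:35357/v2.0 service-list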

View File

@ -0,0 +1,21 @@
---
- name: restart neutron-plugin-openvswitch-agent
service: name=neutron-plugin-openvswitch-agent state=restarted
when: "'opendaylight' not in {{ NEUTRON_MECHANISM_DRIVERS }}"
- name: restart neutron-l3-agent
service: name=neutron-l3-agent state=restarted
- name: kill dnsmasq
command: killall dnsmasq
ignore_errors: True
- name: restart neutron-dhcp-agent
service: name=neutron-dhcp-agent state=restarted
- name: restart neutron-metadata-agent
service: name=neutron-metadata-agent state=restarted
- name: restart xorp
service: name=xorp state=restarted sleep=10
ignore_errors: True

View File

@ -0,0 +1,20 @@
---
- name: Install XORP to provide IGMP router functionality
apt: pkg=xorp
- name: configure xorp
template: src=etc/xorp/config.boot dest=/etc/xorp/config.boot
notify:
- restart xorp
- name: set xorp defaults
lineinfile: dest=/etc/default/xorp regexp=^RUN= line=RUN=yes
notify:
- restart xorp
- meta: flush_handlers
- name: start and enable xorp service
service: name=xorp state=started enabled=yes
register: xorp_start
until: xorp_start|success
retries: 2
delay: 10

View File

@ -0,0 +1,92 @@
---
- name: activate ipv4 forwarding
sysctl: name=net.ipv4.ip_forward value=1 state=present reload=yes
- name: deactivate ipv4 rp filter
sysctl: name=net.ipv4.conf.all.rp_filter value=0 state=present reload=yes
- name: deactivate ipv4 default rp filter
sysctl: name=net.ipv4.conf.default.rp_filter value=0 state=present reload=yes
- name: install neutron network related packages
apt: name={{ item }} state=present force=yes
with_items:
- neutron-plugin-ml2
- openvswitch-datapath-dkms
- openvswitch-switch
- neutron-l3-agent
- neutron-dhcp-agent
- name: install neutron openvswitch agent
apt: name=neutron-plugin-openvswitch-agent state=present force=yes
when: "'openvswitch' in {{ NEUTRON_MECHANISM_DRIVERS }}"
- name: config neutron
template: src=neutron-network.conf dest=/etc/neutron/neutron.conf backup=yes
notify:
- restart neutron-plugin-openvswitch-agent
- restart neutron-l3-agent
- kill dnsmasq
- restart neutron-dhcp-agent
- restart neutron-metadata-agent
- name: config l3 agent
template: src=l3_agent.ini dest=/etc/neutron/l3_agent.ini backup=yes
notify:
- restart neutron-l3-agent
- name: config dhcp agent
template: src=dhcp_agent.ini dest=/etc/neutron/dhcp_agent.ini backup=yes
notify:
- kill dnsmasq
- restart neutron-dhcp-agent
- name: update dnsmasq-neutron.conf
template: src=dnsmasq-neutron.conf dest=/etc/neutron/dnsmasq-neutron.conf
notify:
- kill dnsmasq
- restart neutron-dhcp-agent
- name: config metadata agent
template: src=metadata_agent.ini dest=/etc/neutron/metadata_agent.ini backup=yes
notify:
- restart neutron-metadata-agent
- name: config ml2 plugin
template: src=ml2_conf.ini dest=/etc/neutron/plugins/ml2/ml2_conf.ini backup=yes
notify:
- restart neutron-plugin-openvswitch-agent
- meta: flush_handlers
- name: add br-int
openvswitch_bridge: bridge=br-int state=present
- name: add br-ex
openvswitch_bridge: bridge=br-ex state=present
when: "'openvswitch' in {{ NEUTRON_MECHANISM_DRIVERS }}"
- name: assign a port to br-ex for physical ext interface
openvswitch_port: bridge=br-ex port={{ INTERFACE_NAME }} state=present
when: "'openvswitch' in {{ NEUTRON_MECHANISM_DRIVERS }}"
- include: igmp-router.yml
when: "'vxlan' in {{ NEUTRON_TUNNEL_TYPES }}"
- name: assert kernel support for vxlan
command: modinfo -F version vxlan
when: "'vxlan' in {{ NEUTRON_TUNNEL_TYPES }}"
- name: assert iproute2 suppport for vxlan
command: ip link add type vxlan help
register: iproute_out
failed_when: iproute_out.rc == 255
when: "'vxlan' in {{ NEUTRON_TUNNEL_TYPES }}"
- include: odl.yml
when: "'opendaylight' in {{ NEUTRON_MECHANISM_DRIVERS }}"
- name: restart ovs service
service: name=openvswitch-switch state=restarted
- meta: flush_handlers
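A minimal sketch of applying this role by hand, assuming the multi-node playbook is saved as site.yml beside an inventory file named inventory (both filenames are illustrative, not from this commit):

ansible-playbook -i inventory site.yml --limit network-worker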

View File

@ -0,0 +1,13 @@
---
- name: ovs set manager
command: ovs-vsctl set-manager tcp:{{ odl_controller }}:6640
- name: get ovs uuid
shell: ovs-vsctl get Open_vSwitch . _uuid
register: ovs_uuid
- name: set bridge_mappings
command: ovs-vsctl set Open_vSwitch {{ ovs_uuid.stdout }} other_config:bridge_mappings=physnet1:{{ INTERFACE_NAME }}
- name: set local ip
command: ovs-vsctl set Open_vSwitch {{ ovs_uuid.stdout }} other_config:local_ip={{ internal_ip }}
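A quick manual verification that these settings landed, assuming ovs-vsctl is available on the network node:

ovs-vsctl get-manager
ovs-vsctl get Open_vSwitch . other_config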

View File

@ -0,0 +1,90 @@
[DEFAULT]
# Show debugging output in log (sets DEBUG log level output)
# debug = False
verbose = True
# The DHCP agent will resync its state with Neutron to recover from any
# transient notification or rpc errors. The interval is number of
# seconds between attempts.
resync_interval = 5
# The DHCP agent requires an interface driver be set. Choose the one that best
# matches your plugin.
# interface_driver =
# Example of interface_driver option for OVS based plugins(OVS, Ryu, NEC, NVP,
# BigSwitch/Floodlight)
interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
# Name of Open vSwitch bridge to use
# ovs_integration_bridge = br-int
# Use veth for an OVS interface or not.
# Support kernels with limited namespace support
# (e.g. RHEL 6.5) so long as ovs_use_veth is set to True.
ovs_use_veth = False
# Example of interface_driver option for LinuxBridge
# interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver
# The agent can use other DHCP drivers. Dnsmasq is the simplest and requires
# no additional setup of the DHCP server.
dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
# Allow overlapping IP (Must have kernel build with CONFIG_NET_NS=y and
# iproute2 package that supports namespaces).
use_namespaces = True
# The DHCP server can assist with providing metadata support on isolated
# networks. Setting this value to True will cause the DHCP server to append
# specific host routes to the DHCP request. The metadata service will only
# be activated when the subnet does not contain any router port. The guest
# instance must be configured to request host routes via DHCP (Option 121).
enable_isolated_metadata = False
# Allows for serving metadata requests coming from a dedicated metadata
# access network whose cidr is 169.254.169.254/16 (or larger prefix), and
# is connected to a Neutron router from which the VMs send metadata
# request. In this case DHCP Option 121 will not be injected in VMs, as
# they will be able to reach 169.254.169.254 through a router.
# This option requires enable_isolated_metadata = True
enable_metadata_network = False
# Number of threads to use during sync process. Should not exceed connection
# pool size configured on server.
# num_sync_threads = 4
# Location to store DHCP server config files
# dhcp_confs = $state_path/dhcp
# Domain to use for building the hostnames
dhcp_domain = openstacklocal
# Override the default dnsmasq settings with this file
# dnsmasq_config_file =
dnsmasq_config_file = /etc/neutron/dnsmasq-neutron.conf
# Comma-separated list of DNS servers which will be used by dnsmasq
# as forwarders.
# dnsmasq_dns_servers =
# Limit number of leases to prevent a denial-of-service.
dnsmasq_lease_max = 16777216
# Location to DHCP lease relay UNIX domain socket
# dhcp_lease_relay_socket = $state_path/dhcp/lease_relay
# Location of Metadata Proxy UNIX domain socket
# metadata_proxy_socket = $state_path/metadata_proxy
# dhcp_delete_namespaces, which is false by default, can be set to True if
# namespaces can be deleted cleanly on the host running the dhcp agent.
# Do not enable this until you understand the problem with the Linux iproute
# utility mentioned in https://bugs.launchpad.net/neutron/+bug/1052535 and
# you are sure that your version of iproute does not suffer from the problem.
# If True, namespaces will be deleted when a dhcp server is disabled.
# dhcp_delete_namespaces = False
# Timeout for ovs-vsctl commands.
# If the timeout expires, ovs commands will fail with ALARMCLOCK error.
# ovs_vsctl_timeout = 10

View File

@ -0,0 +1,2 @@
dhcp-option-force=26,1454
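# Option 26 is the interface-MTU DHCP option: forcing instance MTUs down to
# 1454 bytes leaves headroom for GRE/VXLAN encapsulation on a 1500-byte
# physical network (the exact value here is the template author's choice).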

View File

@ -0,0 +1,25 @@
interfaces {
restore-original-config-on-shutdown: false
interface {{ hostvars[inventory_hostname][neutron_vxlan_interface|default(internal_interface)]['device'] }} {
description: "Internal pNodes interface"
disable: false
default-system-config
}
}
protocols {
igmp {
disable: false
interface {{ hostvars[inventory_hostname][neutron_vxlan_interface|default(internal_interface)]['device'] }} {
vif {{ hostvars[inventory_hostname][neutron_vxlan_interface|default(internal_interface)]['device'] }} {
disable: false
version: 3
}
}
traceoptions {
flag all {
disable: false
}
}
}
}
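For reference, if internal_interface keeps its default of ansible_eth1 so the device fact resolves to eth1 (an assumed value for illustration), the interface block above renders roughly as:

interface eth1 {
description: "Internal pNodes interface"
disable: false
default-system-config
}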

View File

@ -0,0 +1,81 @@
[DEFAULT]
# Show debugging output in log (sets DEBUG log level output)
# debug = False
verbose = True
# L3 requires that an interface driver be set. Choose the one that best
# matches your plugin.
# interface_driver =
# Example of interface_driver option for OVS based plugins (OVS, Ryu, NEC)
# that supports L3 agent
# interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
# Use veth for an OVS interface or not.
# Support kernels with limited namespace support
# (e.g. RHEL 6.5) so long as ovs_use_veth is set to True.
# ovs_use_veth = False
# Example of interface_driver option for LinuxBridge
# interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver
# Allow overlapping IP (Must have kernel build with CONFIG_NET_NS=y and
# iproute2 package that supports namespaces).
use_namespaces = True
# If use_namespaces is set to False, the agent can configure only one router.
# This is done by setting the specific router_id.
# router_id =
# When external_network_bridge is set, each L3 agent can be associated
# with no more than one external network. This value should be set to the UUID
# of that external network. To allow L3 agent support multiple external
# networks, both the external_network_bridge and gateway_external_network_id
# must be left empty.
# gateway_external_network_id =
# Indicates that this L3 agent should also handle routers that do not have
# an external network gateway configured. This option should be True only
# for a single agent in a Neutron deployment, and may be False for all agents
# if all routers must have an external network gateway
handle_internal_only_routers = True
# Name of the bridge used for external network traffic. This should be set to
# an empty value for the Linux bridge. When this parameter is set, each L3
# agent can be associated with no more than one external network.
external_network_bridge = br-ex
# TCP Port used by Neutron metadata server
metadata_port = 9697
# Send this many gratuitous ARPs for HA setup. Set it below or equal to 0
# to disable this feature.
send_arp_for_ha = 3
# seconds between re-sync routers' data if needed
periodic_interval = 40
# seconds to start to sync routers' data after
# starting agent
periodic_fuzzy_delay = 5
# enable_metadata_proxy, which is true by default, can be set to False
# if the Nova metadata server is not available
# enable_metadata_proxy = True
# Location of Metadata Proxy UNIX domain socket
# metadata_proxy_socket = $state_path/metadata_proxy
# router_delete_namespaces, which is false by default, can be set to True if
# namespaces can be deleted cleanly on the host running the L3 agent.
# Do not enable this until you understand the problem with the Linux iproute
# utility mentioned in https://bugs.launchpad.net/neutron/+bug/1052535 and
# you are sure that your version of iproute does not suffer from the problem.
# If True, namespaces will be deleted when a router is destroyed.
# router_delete_namespaces = False
# Timeout for ovs-vsctl commands.
# If the timeout expires, ovs commands will fail with ALARMCLOCK error.
# ovs_vsctl_timeout = 10

View File

@ -0,0 +1,46 @@
[DEFAULT]
# Show debugging output in log (sets DEBUG log level output)
debug = True
# The Neutron user information for accessing the Neutron API.
auth_url = http://{{ identity_host }}:5000/v2.0
auth_region = RegionOne
# Turn off verification of the certificate for ssl
# auth_insecure = False
# Certificate Authority public key (CA cert) file for ssl
# auth_ca_cert =
admin_tenant_name = service
admin_user = neutron
admin_password = {{ NEUTRON_PASS }}
# Network service endpoint type to pull from the keystone catalog
# endpoint_type = adminURL
# IP address used by Nova metadata server
nova_metadata_ip = {{ compute_controller_host }}
# TCP Port used by Nova metadata server
nova_metadata_port = 8775
# When proxying metadata requests, Neutron signs the Instance-ID header with a
# shared secret to prevent spoofing. You may select any string for a secret,
# but it must match here and in the configuration used by the Nova Metadata
# Server. NOTE: Nova uses a different key: neutron_metadata_proxy_shared_secret
metadata_proxy_shared_secret = {{ METADATA_SECRET }}
# Location of Metadata Proxy UNIX domain socket
# metadata_proxy_socket = $state_path/metadata_proxy
# Number of separate worker processes for metadata server
# metadata_workers = 0
# Number of backlog requests to configure the metadata server socket with
# metadata_backlog = 128
# URL to connect to the cache backend.
# Example of URL using memory caching backend
# with ttl set to 5 seconds: cache_url = memory://?default_ttl=5
# default_ttl=0 parameter will cause cache entries to never expire.
# Otherwise default_ttl specifies time in seconds a cache entry is valid for.
# No cache is used in case no value is passed.
# cache_url =
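The proxy only works when nova's metadata API shares the same secret. A sketch of the matching nova.conf lines on the compute controller, using the Juno-era [neutron] option names (these lines are not part of this commit):

[neutron]
service_metadata_proxy = True
metadata_proxy_shared_secret = {{ METADATA_SECRET }}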

View File

@ -0,0 +1,108 @@
[ml2]
# (ListOpt) List of network type driver entrypoints to be loaded from
# the neutron.ml2.type_drivers namespace.
#
# type_drivers = local,flat,vlan,gre,vxlan
# Example: type_drivers = flat,vlan,gre,vxlan
type_drivers = {{ NEUTRON_TYPE_DRIVERS |join(",") }}
# (ListOpt) Ordered list of network_types to allocate as tenant
# networks. The default value 'local' is useful for single-box testing
# but provides no connectivity between hosts.
#
# tenant_network_types = local
# Example: tenant_network_types = vlan,gre,vxlan
tenant_network_types = {{ NEUTRON_TENANT_NETWORK_TYPES |join(",") }}
# (ListOpt) Ordered list of networking mechanism driver entrypoints
# to be loaded from the neutron.ml2.mechanism_drivers namespace.
# mechanism_drivers =
# Example: mechanism_drivers = openvswitch,mlnx
# Example: mechanism_drivers = arista
# Example: mechanism_drivers = cisco,logger
# Example: mechanism_drivers = openvswitch,brocade
# Example: mechanism_drivers = linuxbridge,brocade
mechanism_drivers = {{ NEUTRON_MECHANISM_DRIVERS |join(",") }}
[ml2_type_flat]
# (ListOpt) List of physical_network names with which flat networks
# can be created. Use * to allow flat networks with arbitrary
# physical_network names.
#
flat_networks = external
# Example:flat_networks = physnet1,physnet2
# Example:flat_networks = *
[ml2_type_vlan]
# (ListOpt) List of <physical_network>[:<vlan_min>:<vlan_max>] tuples
# specifying physical_network names usable for VLAN provider and
# tenant networks, as well as ranges of VLAN tags on each
# physical_network available for allocation as tenant networks.
#
network_vlan_ranges =
# Example: network_vlan_ranges = physnet1:1000:2999,physnet2
[ml2_type_gre]
# (ListOpt) Comma-separated list of <tun_min>:<tun_max> tuples enumerating ranges of GRE tunnel IDs that are available for tenant network allocation
tunnel_id_ranges = 1:1000
[ml2_type_vxlan]
# (ListOpt) Comma-separated list of <vni_min>:<vni_max> tuples enumerating
# ranges of VXLAN VNI IDs that are available for tenant network allocation.
#
vni_ranges = 1001:4095
# (StrOpt) Multicast group for the VXLAN interface. When configured, will
# enable sending all broadcast traffic to this multicast group. When left
# unconfigured, will disable multicast VXLAN mode.
#
vxlan_group = 239.1.1.1
# Example: vxlan_group = 239.1.1.1
[securitygroup]
# Controls if neutron security group is enabled or not.
# It should be false when you use nova security group.
# enable_security_group = True
firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
enable_security_group = True
[database]
connection = mysql://neutron:{{ NEUTRON_DBPASS }}@{{ db_host }}/ovs_neutron?charset=utf8
[ovs]
local_ip = {{ internal_ip }}
{% if 'openvswitch' in NEUTRON_MECHANISM_DRIVERS %}
integration_bridge = br-int
tunnel_bridge = br-tun
tunnel_id_ranges = 1001:4095
tunnel_type = {{ NEUTRON_TUNNEL_TYPES |join(",") }}
bridge_mappings = {{ neutron_ovs_bridge_mappings | default("external:br-ex") }}
{% endif %}
[agent]
root_helper = sudo neutron-rootwrap /etc/neutron/rootwrap.conf
tunnel_types = {{ NEUTRON_TUNNEL_TYPES |join(",") }}
{% if 'vxlan' in NEUTRON_TUNNEL_TYPES %}
vxlan_udp_port = 4789
{% endif %}
l2_population = False
[odl]
{% if 'opendaylight' in NEUTRON_MECHANISM_DRIVERS %}
network_vlan_ranges = 1001:4095
tunnel_id_ranges = 1001:4095
tun_peer_patch_port = patch-int
int_peer_patch_port = patch-tun
tenant_network_type = vxlan
tunnel_bridge = br-tun
integration_bridge = br-int
controllers = 10.1.0.15:8080:admin:admin
{% endif %}
[ml2_odl]
{% if 'opendaylight' in NEUTRON_MECHANISM_DRIVERS %}
username = {{ odl_username }}
password = {{ odl_password }}
url = http://{{ odl_controller }}:{{ odl_api_port }}/controller/nb/v2/neutron
{% endif %}
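Once neutron-plugin-openvswitch-agent starts against this file, the bridge and tunnel wiring can be checked by hand; a minimal verification, assuming the openvswitch mechanism driver is active:

ovs-vsctl show
ovs-vsctl list-ports br-tun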

View File

@ -0,0 +1,466 @@
[DEFAULT]
# Print more verbose output (set logging level to INFO instead of default WARNING level).
verbose = {{ VERBOSE }}
# Print debugging output (set logging level to DEBUG instead of default WARNING level).
debug = {{ DEBUG }}
# Where to store Neutron state files. This directory must be writable by the
# user executing the agent.
state_path = /var/lib/neutron
# Where to store lock files
lock_path = $state_path/lock
# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s
# log_date_format = %Y-%m-%d %H:%M:%S
# use_syslog -> syslog
# log_file and log_dir -> log_dir/log_file
# (not log_file) and log_dir -> log_dir/{binary_name}.log
# use_stderr -> stderr
# (not use_stderr) and (not log_file) -> stdout
# publish_errors -> notification system
# use_syslog = False
# syslog_log_facility = LOG_USER
# use_stderr = True
# log_file =
log_dir = /var/log/neutron
# publish_errors = False
# Address to bind the API server to
bind_host = {{ network_server_host }}
# Port to bind the API server to
bind_port = 9696
# Path to the extensions. Note that this can be a colon-separated list of
# paths. For example:
# api_extensions_path = extensions:/path/to/more/extensions:/even/more/extensions
# The __path__ of neutron.extensions is appended to this, so if your
# extensions are in there you don't need to specify them here
# api_extensions_path =
# (StrOpt) Neutron core plugin entrypoint to be loaded from the
# neutron.core_plugins namespace. See setup.cfg for the entrypoint names of the
# plugins included in the neutron source distribution. For compatibility with
# previous versions, the class name of a plugin can be specified instead of its
# entrypoint name.
#
#core_plugin = neutron.plugins.ml2.plugin.Ml2Plugin
core_plugin = ml2
# Example: core_plugin = ml2
# (ListOpt) List of service plugin entrypoints to be loaded from the
# neutron.service_plugins namespace. See setup.cfg for the entrypoint names of
# the plugins included in the neutron source distribution. For compatibility
# with previous versions, the class name of a plugin can be specified instead
# of its entrypoint name.
#
# service_plugins =
# Example: service_plugins = router,firewall,lbaas,vpnaas,metering
service_plugins = router
# Paste configuration file
api_paste_config = api-paste.ini
# The strategy to be used for auth.
# Supported values are 'keystone'(default), 'noauth'.
auth_strategy = keystone
# Base MAC address. The first 3 octets will remain unchanged. If the
# 4th octet is not 00, it will also be used. The others will be
# randomly generated.
# 3 octet
# base_mac = fa:16:3e:00:00:00
# 4 octet
# base_mac = fa:16:3e:4f:00:00
# Maximum amount of retries to generate a unique MAC address
# mac_generation_retries = 16
# DHCP Lease duration (in seconds)
dhcp_lease_duration = 86400
# Allow sending resource operation notification to DHCP agent
# dhcp_agent_notification = True
# Enable or disable bulk create/update/delete operations
# allow_bulk = True
# Enable or disable pagination
# allow_pagination = False
# Enable or disable sorting
# allow_sorting = False
# Enable or disable overlapping IPs for subnets
# Attention: the following parameter MUST be set to False if Neutron is
# being used in conjunction with nova security groups
allow_overlapping_ips = True
# Ensure that configured gateway is on subnet
# force_gateway_on_subnet = False
# RPC configuration options. Defined in rpc __init__
# The messaging module to use, defaults to kombu.
# rpc_backend = neutron.openstack.common.rpc.impl_kombu
rpc_backend = rabbit
rabbit_host = {{ rabbit_host }}
rabbit_password = {{ RABBIT_PASS }}
# Size of RPC thread pool
rpc_thread_pool_size = 240
# Size of RPC connection pool
rpc_conn_pool_size = 100
# Seconds to wait for a response from call or multicall
rpc_response_timeout = 300
# Seconds to wait before a cast expires (TTL). Only supported by impl_zmq.
rpc_cast_timeout = 300
# Modules of exceptions that are permitted to be recreated
# upon receiving exception data from an rpc call.
# allowed_rpc_exception_modules = neutron.openstack.common.exception, nova.exception
# AMQP exchange to connect to if using RabbitMQ or QPID
# control_exchange = neutron
# If passed, use a fake RabbitMQ provider
# fake_rabbit = False
# Configuration options if sending notifications via kombu rpc (these are
# the defaults)
# SSL version to use (valid only if SSL enabled)
# kombu_ssl_version =
# SSL key file (valid only if SSL enabled)
# kombu_ssl_keyfile =
# SSL cert file (valid only if SSL enabled)
# kombu_ssl_certfile =
# SSL certification authority file (valid only if SSL enabled)
# kombu_ssl_ca_certs =
# Port where RabbitMQ server is running/listening
rabbit_port = 5672
# RabbitMQ single or HA cluster (host:port pairs i.e: host1:5672, host2:5672)
# rabbit_hosts is defaulted to '$rabbit_host:$rabbit_port'
# rabbit_hosts = localhost:5672
# User ID used for RabbitMQ connections
rabbit_userid = guest
# Location of a virtual RabbitMQ installation.
# rabbit_virtual_host = /
# Maximum retries with trying to connect to RabbitMQ
# (the default of 0 implies an infinite retry count)
# rabbit_max_retries = 0
# RabbitMQ connection retry interval
# rabbit_retry_interval = 1
# Use HA queues in RabbitMQ (x-ha-policy: all). You need to
# wipe RabbitMQ database when changing this option. (boolean value)
# rabbit_ha_queues = false
# QPID
# rpc_backend=neutron.openstack.common.rpc.impl_qpid
# Qpid broker hostname
# qpid_hostname = localhost
# Qpid broker port
# qpid_port = 5672
# Qpid single or HA cluster (host:port pairs i.e: host1:5672, host2:5672)
# qpid_hosts is defaulted to '$qpid_hostname:$qpid_port'
# qpid_hosts = localhost:5672
# Username for qpid connection
# qpid_username = ''
# Password for qpid connection
# qpid_password = ''
# Space separated list of SASL mechanisms to use for auth
# qpid_sasl_mechanisms = ''
# Seconds between connection keepalive heartbeats
# qpid_heartbeat = 60
# Transport to use, either 'tcp' or 'ssl'
# qpid_protocol = tcp
# Disable Nagle algorithm
# qpid_tcp_nodelay = True
# ZMQ
# rpc_backend=neutron.openstack.common.rpc.impl_zmq
# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP.
# The "host" option should point or resolve to this address.
# rpc_zmq_bind_address = *
# ============ Notification System Options =====================
# Notifications can be sent when network/subnet/port are created, updated or deleted.
# There are three methods of sending notifications: logging (via the
# log_file directive), rpc (via a message queue) and
# noop (no notifications sent, the default)
# Notification_driver can be defined multiple times
# Do nothing driver
# notification_driver = neutron.openstack.common.notifier.no_op_notifier
# Logging driver
# notification_driver = neutron.openstack.common.notifier.log_notifier
# RPC driver.
notification_driver = neutron.openstack.common.notifier.rpc_notifier
# default_notification_level is used to form actual topic name(s) or to set logging level
default_notification_level = INFO
# default_publisher_id is a part of the notification payload
# host = myhost.com
# default_publisher_id = $host
# Defined in rpc_notifier, can be comma separated values.
# The actual topic names will be %s.%(default_notification_level)s
notification_topics = notifications
# Default maximum number of items returned in a single response. A value of
# 'infinite' or any negative value means no limit; otherwise the value must
# be greater than 0. If more items are requested than pagination_max_limit,
# the server returns only pagination_max_limit items.
# pagination_max_limit = -1
# Maximum number of DNS nameservers per subnet
# max_dns_nameservers = 5
# Maximum number of host routes per subnet
# max_subnet_host_routes = 20
# Maximum number of fixed ips per port
# max_fixed_ips_per_port = 5
# =========== items for agent management extension =============
# Seconds to regard the agent as down; should be at least twice
# report_interval, to be sure the agent is down for good
agent_down_time = 75
# =========== end of items for agent management extension =====
# =========== items for agent scheduler extension =============
# Driver to use for scheduling network to DHCP agent
network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.ChanceScheduler
# Driver to use for scheduling router to a default L3 agent
router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.ChanceScheduler
# Driver to use for scheduling a loadbalancer pool to an lbaas agent
# loadbalancer_pool_scheduler_driver = neutron.services.loadbalancer.agent_scheduler.ChanceScheduler
# Allow auto scheduling networks to DHCP agent. It will schedule non-hosted
# networks to first DHCP agent which sends get_active_networks message to
# neutron server
# network_auto_schedule = True
# Allow auto scheduling routers to L3 agent. It will schedule non-hosted
# routers to first L3 agent which sends sync_routers message to neutron server
# router_auto_schedule = True
# Number of DHCP agents scheduled to host a network. This enables redundant
# DHCP agents for configured networks.
# dhcp_agents_per_network = 1
# =========== end of items for agent scheduler extension =====
# =========== WSGI parameters related to the API server ==============
# Number of separate worker processes to spawn. The default, 0, runs the
# worker thread in the current process. Greater than 0 launches that number of
# child processes as workers. The parent process manages them.
api_workers = 8
# Number of separate RPC worker processes to spawn. The default, 0, runs the
# worker thread in the current process. Greater than 0 launches that number of
# child processes as RPC workers. The parent process manages them.
# This feature is experimental until issues are addressed and testing has been
# enabled for various plugins for compatibility.
rpc_workers = 8
# Sets the value of TCP_KEEPIDLE in seconds to use for each server socket when
# starting API server. Not supported on OS X.
# tcp_keepidle = 600
# Number of seconds to keep retrying to listen
# retry_until_window = 30
# Number of backlog requests to configure the socket with.
# backlog = 4096
# Max header line to accommodate large tokens
# max_header_line = 16384
# Enable SSL on the API server
# use_ssl = False
# Certificate file to use when starting API server securely
# ssl_cert_file = /path/to/certfile
# Private key file to use when starting API server securely
# ssl_key_file = /path/to/keyfile
# CA certificate file to use when starting API server securely to
# verify connecting clients. This is an optional parameter only required if
# API clients need to authenticate to the API server using SSL certificates
# signed by a trusted CA
# ssl_ca_file = /path/to/cafile
# ======== end of WSGI parameters related to the API server ==========
# ======== neutron nova interactions ==========
# Send notification to nova when port status is active.
notify_nova_on_port_status_changes = True
# Send notifications to nova when port data (fixed_ips/floatingips) change
# so nova can update its cache.
notify_nova_on_port_data_changes = True
# URL for connection to nova (Only supports one nova region currently).
nova_url = http://{{ compute_controller_host }}:8774/v2
# Name of nova region to use. Useful if keystone manages more than one region
nova_region_name = RegionOne
# Username for connection to nova in admin context
nova_admin_username = nova
# The uuid of the admin nova tenant
# Password for connection to nova in admin context.
nova_admin_password = {{ NOVA_PASS }}
# Authorization URL for connection to nova in admin context.
nova_admin_auth_url = http://{{ identity_host }}:35357/v2.0
# Number of seconds between sending events to nova if there are any events to send
send_events_interval = 2
# ======== end of neutron nova interactions ==========
[quotas]
# Default driver to use for quota checks
quota_driver = neutron.db.quota_db.DbQuotaDriver
# Resource name(s) that are supported in quota features
quota_items = network,subnet,port
# Default number of resources allowed per tenant. A negative value means
# unlimited.
default_quota = -1
# Number of networks allowed per tenant. A negative value means unlimited.
quota_network = 100
# Number of subnets allowed per tenant. A negative value means unlimited.
quota_subnet = 100
# Number of ports allowed per tenant. A negative value means unlimited.
quota_port = 8000
# Number of security groups allowed per tenant. A negative value means
# unlimited.
quota_security_group = 1000
# Number of security group rules allowed per tenant. A negative value means
# unlimited.
quota_security_group_rule = 1000
# Number of vips allowed per tenant. A negative value means unlimited.
# quota_vip = 10
# Number of pools allowed per tenant. A negative value means unlimited.
# quota_pool = 10
# Number of pool members allowed per tenant. A negative value means unlimited.
# The default is unlimited because a member is not a real resource consumer
# on OpenStack. However, on the back-end a member is a resource consumer,
# and that is the reason why quota is possible.
# quota_member = -1
# Number of health monitors allowed per tenant. A negative value means
# unlimited.
# The default is unlimited because a health monitor is not a real resource
# consumer on OpenStack. However, on the back-end a health monitor is a
# resource consumer, and that is the reason why quota is possible.
# quota_health_monitors = -1
# Number of routers allowed per tenant. A negative value means unlimited.
# quota_router = 10
# Number of floating IPs allowed per tenant. A negative value means unlimited.
# quota_floatingip = 50
[agent]
# Use "sudo neutron-rootwrap /etc/neutron/rootwrap.conf" to use the real
# root filter facility.
# Change to "sudo" to skip the filtering and just run the comand directly
root_helper = "sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf"
# =========== items for agent management extension =============
# seconds between nodes reporting state to server; should be less than
# agent_down_time, best if it is half or less than agent_down_time
report_interval = 30
# =========== end of items for agent management extension =====
[keystone_authtoken]
auth_uri = http://{{ identity_host }}:5000/v2.0
identity_uri = http://{{ identity_host }}:35357
admin_tenant_name = service
admin_user = neutron
admin_password = {{ NEUTRON_PASS }}
signing_dir = $state_path/keystone-signing
[database]
# This line MUST be changed to actually run the plugin.
# Example:
# connection = mysql://root:pass@127.0.0.1:3306/neutron
# Replace 127.0.0.1 above with the IP address of the database used by the
# main neutron server. (Leave it as is if the database runs on this host.)
# connection = sqlite:////var/lib/neutron/neutron.sqlite
#connection = mysql://neutron:{{ NEUTRON_DBPASS }}@{{ db_host }}/neutron
# The SQLAlchemy connection string used to connect to the slave database
slave_connection =
# Number of database reconnection retries in the event connectivity is lost;
# setting this to -1 implies an infinite retry count
max_retries = 10
# Database reconnection interval in seconds - if the initial connection to the
# database fails
retry_interval = 10
# Minimum number of SQL connections to keep open in a pool
min_pool_size = 1
# Maximum number of SQL connections to keep open in a pool
max_pool_size = 100
# Timeout in seconds before idle sql connections are reaped
idle_timeout = 3600
# If set, use this value for max_overflow with sqlalchemy
max_overflow = 100
# Verbosity of SQL debugging information. 0=None, 100=Everything
connection_debug = 0
# Add python stack traces to SQL as comment strings
connection_trace = False
# If set, use this value for pool_timeout with sqlalchemy
pool_timeout = 10
[service_providers]
# Specify service providers (drivers) for advanced services like loadbalancer, VPN, Firewall.
# Must be in form:
# service_provider=<service_type>:<name>:<driver>[:default]
# List of allowed service types includes LOADBALANCER, FIREWALL, VPN
# Combination of <service type> and <name> must be unique; <driver> must also be unique
# This is a multiline option; example for the default provider:
# service_provider=LOADBALANCER:name:lbaas_plugin_driver_path:default
# example of non-default provider:
# service_provider=FIREWALL:name2:firewall_driver_path
# --- Reference implementations ---
service_provider=LOADBALANCER:Haproxy:neutron.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default
service_provider=VPN:openswan:neutron.services.vpn.service_drivers.ipsec.IPsecVPNDriver:default
# In order to activate Radware's lbaas driver you need to uncomment the next line.
# If you want to keep the HA Proxy as the default lbaas driver, remove the attribute default from the line below.
# Otherwise comment the HA Proxy line
# service_provider = LOADBALANCER:Radware:neutron.services.loadbalancer.drivers.radware.driver.LoadBalancerDriver:default
# uncomment the following line to make the 'netscaler' LBaaS provider available.
# service_provider=LOADBALANCER:NetScaler:neutron.services.loadbalancer.drivers.netscaler.netscaler_driver.NetScalerPluginDriver
# Uncomment the following line (and comment out the OpenSwan VPN line) to enable Cisco's VPN driver.
# service_provider=VPN:cisco:neutron.services.vpn.service_drivers.cisco_ipsec.CiscoCsrIPsecVPNDriver:default
# Uncomment the line below to use Embrane heleos as Load Balancer service provider.
# service_provider=LOADBALANCER:Embrane:neutron.services.loadbalancer.drivers.embrane.driver.EmbraneLbaas:default

View File

@ -0,0 +1,467 @@
[DEFAULT]
# Print more verbose output (set logging level to INFO instead of default WARNING level).
verbose = {{ VERBOSE }}
# Print debugging output (set logging level to DEBUG instead of default WARNING level).
debug = {{ DEBUG }}
# Where to store Neutron state files. This directory must be writable by the
# user executing the agent.
state_path = /var/lib/neutron
# Where to store lock files
lock_path = $state_path/lock
# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s
# log_date_format = %Y-%m-%d %H:%M:%S
# use_syslog -> syslog
# log_file and log_dir -> log_dir/log_file
# (not log_file) and log_dir -> log_dir/{binary_name}.log
# use_stderr -> stderr
# (not use_stderr) and (not log_file) -> stdout
# publish_errors -> notification system
# use_syslog = False
# syslog_log_facility = LOG_USER
# use_stderr = True
# log_file =
log_dir = /var/log/neutron
# publish_errors = False
# Address to bind the API server to
bind_host = {{ network_server_host }}
# Port to bind the API server to
bind_port = 9696
# Path to the extensions. Note that this can be a colon-separated list of
# paths. For example:
# api_extensions_path = extensions:/path/to/more/extensions:/even/more/extensions
# The __path__ of neutron.extensions is appended to this, so if your
# extensions are in there you don't need to specify them here
# api_extensions_path =
# (StrOpt) Neutron core plugin entrypoint to be loaded from the
# neutron.core_plugins namespace. See setup.cfg for the entrypoint names of the
# plugins included in the neutron source distribution. For compatibility with
# previous versions, the class name of a plugin can be specified instead of its
# entrypoint name.
#
#core_plugin = neutron.plugins.ml2.plugin.Ml2Plugin
core_plugin = ml2
# Example: core_plugin = ml2
# (ListOpt) List of service plugin entrypoints to be loaded from the
# neutron.service_plugins namespace. See setup.cfg for the entrypoint names of
# the plugins included in the neutron source distribution. For compatibility
# with previous versions, the class name of a plugin can be specified instead
# of its entrypoint name.
#
# service_plugins =
# Example: service_plugins = router,firewall,lbaas,vpnaas,metering
service_plugins = router
# Paste configuration file
api_paste_config = api-paste.ini
# The strategy to be used for auth.
# Supported values are 'keystone'(default), 'noauth'.
auth_strategy = keystone
# Base MAC address. The first 3 octets will remain unchanged. If the
# 4th octet is not 00, it will also be used. The others will be
# randomly generated.
# 3 octet
# base_mac = fa:16:3e:00:00:00
# 4 octet
# base_mac = fa:16:3e:4f:00:00
# Maximum amount of retries to generate a unique MAC address
# mac_generation_retries = 16
# DHCP Lease duration (in seconds)
dhcp_lease_duration = 86400
# Allow sending resource operation notification to DHCP agent
# dhcp_agent_notification = True
# Enable or disable bulk create/update/delete operations
# allow_bulk = True
# Enable or disable pagination
# allow_pagination = False
# Enable or disable sorting
# allow_sorting = False
# Enable or disable overlapping IPs for subnets
# Attention: the following parameter MUST be set to False if Neutron is
# being used in conjunction with nova security groups
allow_overlapping_ips = True
# Ensure that configured gateway is on subnet
# force_gateway_on_subnet = False
# RPC configuration options. Defined in rpc __init__
# The messaging module to use, defaults to kombu.
# rpc_backend = neutron.openstack.common.rpc.impl_kombu
rpc_backend = rabbit
rabbit_host = {{ rabbit_host }}
rabbit_password = {{ RABBIT_PASS }}
# Size of RPC thread pool
rpc_thread_pool_size = 240
# Size of RPC connection pool
rpc_conn_pool_size = 100
# Seconds to wait for a response from call or multicall
rpc_response_timeout = 300
# Seconds to wait before a cast expires (TTL). Only supported by impl_zmq.
rpc_cast_timeout = 300
# Modules of exceptions that are permitted to be recreated
# upon receiving exception data from an rpc call.
# allowed_rpc_exception_modules = neutron.openstack.common.exception, nova.exception
# AMQP exchange to connect to if using RabbitMQ or QPID
# control_exchange = neutron
# If passed, use a fake RabbitMQ provider
# fake_rabbit = False
# Configuration options if sending notifications via kombu rpc (these are
# the defaults)
# SSL version to use (valid only if SSL enabled)
# kombu_ssl_version =
# SSL key file (valid only if SSL enabled)
# kombu_ssl_keyfile =
# SSL cert file (valid only if SSL enabled)
# kombu_ssl_certfile =
# SSL certification authority file (valid only if SSL enabled)
# kombu_ssl_ca_certs =
# Port where RabbitMQ server is running/listening
rabbit_port = 5672
# RabbitMQ single or HA cluster (host:port pairs i.e: host1:5672, host2:5672)
# rabbit_hosts is defaulted to '$rabbit_host:$rabbit_port'
# rabbit_hosts = localhost:5672
# User ID used for RabbitMQ connections
rabbit_userid = guest
# Location of a virtual RabbitMQ installation.
# rabbit_virtual_host = /
# Maximum retries with trying to connect to RabbitMQ
# (the default of 0 implies an infinite retry count)
# rabbit_max_retries = 0
# RabbitMQ connection retry interval
# rabbit_retry_interval = 1
# Use HA queues in RabbitMQ (x-ha-policy: all). You need to
# wipe RabbitMQ database when changing this option. (boolean value)
# rabbit_ha_queues = false
# QPID
# rpc_backend=neutron.openstack.common.rpc.impl_qpid
# Qpid broker hostname
# qpid_hostname = localhost
# Qpid broker port
# qpid_port = 5672
# Qpid single or HA cluster (host:port pairs i.e: host1:5672, host2:5672)
# qpid_hosts is defaulted to '$qpid_hostname:$qpid_port'
# qpid_hosts = localhost:5672
# Username for qpid connection
# qpid_username = ''
# Password for qpid connection
# qpid_password = ''
# Space separated list of SASL mechanisms to use for auth
# qpid_sasl_mechanisms = ''
# Seconds between connection keepalive heartbeats
# qpid_heartbeat = 60
# Transport to use, either 'tcp' or 'ssl'
# qpid_protocol = tcp
# Disable Nagle algorithm
# qpid_tcp_nodelay = True
# ZMQ
# rpc_backend=neutron.openstack.common.rpc.impl_zmq
# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP.
# The "host" option should point or resolve to this address.
# rpc_zmq_bind_address = *
# ============ Notification System Options =====================
# Notifications can be sent when network/subnet/port are created, updated or deleted.
# There are three methods of sending notifications: logging (via the
# log_file directive), rpc (via a message queue) and
# noop (no notifications sent, the default)
# Notification_driver can be defined multiple times
# Do nothing driver
# notification_driver = neutron.openstack.common.notifier.no_op_notifier
# Logging driver
# notification_driver = neutron.openstack.common.notifier.log_notifier
# RPC driver.
notification_driver = neutron.openstack.common.notifier.rpc_notifier
# default_notification_level is used to form actual topic name(s) or to set logging level
default_notification_level = INFO
# default_publisher_id is a part of the notification payload
# host = myhost.com
# default_publisher_id = $host
# Defined in rpc_notifier, can be comma separated values.
# The actual topic names will be %s.%(default_notification_level)s
notification_topics = notifications
# Default maximum number of items returned in a single response. A value of
# 'infinite' or any negative value means no limit; otherwise the value must
# be greater than 0. If more items are requested than pagination_max_limit,
# the server returns only pagination_max_limit items.
# pagination_max_limit = -1
# Maximum number of DNS nameservers per subnet
# max_dns_nameservers = 5
# Maximum number of host routes per subnet
# max_subnet_host_routes = 20
# Maximum number of fixed ips per port
# max_fixed_ips_per_port = 5
# =========== items for agent management extension =============
# Seconds to regard the agent as down; should be at least twice
# report_interval, to be sure the agent is down for good
agent_down_time = 75
# =========== end of items for agent management extension =====
# =========== items for agent scheduler extension =============
# Driver to use for scheduling network to DHCP agent
network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.ChanceScheduler
# Driver to use for scheduling router to a default L3 agent
router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.ChanceScheduler
# Driver to use for scheduling a loadbalancer pool to an lbaas agent
# loadbalancer_pool_scheduler_driver = neutron.services.loadbalancer.agent_scheduler.ChanceScheduler
# Allow auto scheduling networks to DHCP agent. It will schedule non-hosted
# networks to first DHCP agent which sends get_active_networks message to
# neutron server
# network_auto_schedule = True
# Allow auto scheduling routers to L3 agent. It will schedule non-hosted
# routers to first L3 agent which sends sync_routers message to neutron server
# router_auto_schedule = True
# Number of DHCP agents scheduled to host a network. This enables redundant
# DHCP agents for configured networks.
# dhcp_agents_per_network = 1
# =========== end of items for agent scheduler extension =====
# =========== WSGI parameters related to the API server ==============
# Number of separate worker processes to spawn. The default, 0, runs the
# worker thread in the current process. Greater than 0 launches that number of
# child processes as workers. The parent process manages them.
api_workers = 8
# Number of separate RPC worker processes to spawn. The default, 0, runs the
# worker thread in the current process. Greater than 0 launches that number of
# child processes as RPC workers. The parent process manages them.
# This feature is experimental until issues are addressed and testing has been
# enabled for various plugins for compatibility.
rpc_workers = 8
# Sets the value of TCP_KEEPIDLE in seconds to use for each server socket when
# starting API server. Not supported on OS X.
# tcp_keepidle = 600
# Number of seconds to keep retrying to listen
# retry_until_window = 30
# Number of backlog requests to configure the socket with.
# backlog = 4096
# Max header line to accommodate large tokens
# max_header_line = 16384
# Enable SSL on the API server
# use_ssl = False
# Certificate file to use when starting API server securely
# ssl_cert_file = /path/to/certfile
# Private key file to use when starting API server securely
# ssl_key_file = /path/to/keyfile
# CA certificate file to use when starting API server securely to
# verify connecting clients. This is an optional parameter only required if
# API clients need to authenticate to the API server using SSL certificates
# signed by a trusted CA
# ssl_ca_file = /path/to/cafile
# ======== end of WSGI parameters related to the API server ==========
# ======== neutron nova interactions ==========
# Send notification to nova when port status is active.
notify_nova_on_port_status_changes = True
# Send notifications to nova when port data (fixed_ips/floatingips) change
# so nova can update its cache.
notify_nova_on_port_data_changes = True
# URL for connection to nova (Only supports one nova region currently).
nova_url = http://{{ compute_controller_host }}:8774/v2
# Name of nova region to use. Useful if keystone manages more than one region
nova_region_name = RegionOne
# Username for connection to nova in admin context
nova_admin_username = nova
# The uuid of the admin nova tenant
nova_admin_tenant_id = {{ NOVA_ADMIN_TENANT_ID.stdout_lines[0] }}
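# (NOVA_ADMIN_TENANT_ID is presumably a registered Ansible task result, e.g.
# from a keystone tenant lookup; stdout_lines[0] takes the first line of that
# command's output.)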
# Password for connection to nova in admin context.
nova_admin_password = {{ NOVA_PASS }}
# Authorization URL for connection to nova in admin context.
nova_admin_auth_url = http://{{ identity_host }}:35357/v2.0
# Number of seconds between sending events to nova if there are any events to send
send_events_interval = 2
# ======== end of neutron nova interactions ==========
[quotas]
# Default driver to use for quota checks
quota_driver = neutron.db.quota_db.DbQuotaDriver
# Resource name(s) that are supported in quota features
quota_items = network,subnet,port
# Default number of resource allowed per tenant. A negative value means
# unlimited.
default_quota = -1
# Number of networks allowed per tenant. A negative value means unlimited.
quota_network = 100
# Number of subnets allowed per tenant. A negative value means unlimited.
quota_subnet = 100
# Number of ports allowed per tenant. A negative value means unlimited.
quota_port = 8000
# Number of security groups allowed per tenant. A negative value means
# unlimited.
quota_security_group = 1000
# Number of security group rules allowed per tenant. A negative value means
# unlimited.
quota_security_group_rule = 1000
# Number of vips allowed per tenant. A negative value means unlimited.
# quota_vip = 10
# Number of pools allowed per tenant. A negative value means unlimited.
# quota_pool = 10
# Number of pool members allowed per tenant. A negative value means unlimited.
# The default is unlimited because a member is not a real resource consumer
# on OpenStack. However, on the back end a member is a resource consumer,
# and that is the reason why a quota is possible.
# quota_member = -1
# Number of health monitors allowed per tenant. A negative value means
# unlimited.
# The default is unlimited because a health monitor is not a real resource
# consumer on OpenStack. However, on the back end a health monitor is a
# resource consumer, and that is the reason why a quota is possible.
# quota_health_monitors = -1
# Number of routers allowed per tenant. A negative value means unlimited.
# quota_router = 10
# Number of floating IPs allowed per tenant. A negative value means unlimited.
# quota_floatingip = 50
[agent]
# Use "sudo neutron-rootwrap /etc/neutron/rootwrap.conf" to use the real
# root filter facility.
# Change to "sudo" to skip the filtering and just run the command directly
root_helper = "sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf"
# =========== items for agent management extension =============
# seconds between nodes reporting state to server; should be less than
# agent_down_time, best if it is half or less than agent_down_time
report_interval = 30
# =========== end of items for agent management extension =====
[keystone_authtoken]
auth_uri = http://{{ identity_host }}:5000/v2.0
identity_uri = http://{{ identity_host }}:35357
admin_tenant_name = service
admin_user = neutron
admin_password = {{ NEUTRON_PASS }}
signing_dir = $state_path/keystone-signing
[database]
# This line MUST be changed to actually run the plugin.
# Example:
# connection = mysql://root:pass@127.0.0.1:3306/neutron
# Replace 127.0.0.1 above with the IP address of the database used by the
# main neutron server. (Leave it as is if the database runs on this host.)
# connection = sqlite:////var/lib/neutron/neutron.sqlite
#connection = mysql://neutron:{{ NEUTRON_DBPASS }}@{{ db_host }}/neutron
# The SQLAlchemy connection string used to connect to the slave database
slave_connection =
# Database reconnection retry times - in event connectivity is lost
# set to -1 implies an infinite retry count
max_retries = 10
# Database reconnection interval in seconds - if the initial connection to the
# database fails
retry_interval = 10
# Minimum number of SQL connections to keep open in a pool
min_pool_size = 1
# Maximum number of SQL connections to keep open in a pool
max_pool_size = 100
# Timeout in seconds before idle sql connections are reaped
idle_timeout = 3600
# If set, use this value for max_overflow with sqlalchemy
max_overflow = 100
# Verbosity of SQL debugging information. 0=None, 100=Everything
connection_debug = 0
# Add python stack traces to SQL as comment strings
connection_trace = False
# If set, use this value for pool_timeout with sqlalchemy
pool_timeout = 10
[service_providers]
# Specify service providers (drivers) for advanced services like loadbalancer, VPN, Firewall.
# Must be in form:
# service_provider=<service_type>:<name>:<driver>[:default]
# List of allowed service types includes LOADBALANCER, FIREWALL, VPN
# Combination of <service type> and <name> must be unique; <driver> must also be unique
# This is multiline option, example for default provider:
# service_provider=LOADBALANCER:name:lbaas_plugin_driver_path:default
# example of non-default provider:
# service_provider=FIREWALL:name2:firewall_driver_path
# --- Reference implementations ---
service_provider=LOADBALANCER:Haproxy:neutron.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default
service_provider=VPN:openswan:neutron.services.vpn.service_drivers.ipsec.IPsecVPNDriver:default
# In order to activate Radware's lbaas driver you need to uncomment the next line.
# If you want to keep the HA Proxy as the default lbaas driver, remove the attribute default from the line below.
# Otherwise comment the HA Proxy line
# service_provider = LOADBALANCER:Radware:neutron.services.loadbalancer.drivers.radware.driver.LoadBalancerDriver:default
# uncomment the following line to make the 'netscaler' LBaaS provider available.
# service_provider=LOADBALANCER:NetScaler:neutron.services.loadbalancer.drivers.netscaler.netscaler_driver.NetScalerPluginDriver
# Uncomment the following line (and comment out the OpenSwan VPN line) to enable Cisco's VPN driver.
# service_provider=VPN:cisco:neutron.services.vpn.service_drivers.cisco_ipsec.CiscoCsrIPsecVPNDriver:default
# Uncomment the line below to use Embrane heleos as Load Balancer service provider.
# service_provider=LOADBALANCER:Embrane:neutron.services.loadbalancer.drivers.embrane.driver.EmbraneLbaas:default

View File

@ -0,0 +1,4 @@
# neutron --os-username=admin --os-password={{ ADMIN_PASS }} --os-tenant-name=admin --os-auth-url=http://{{ identity_host }}:35357/v2.0 net-create ext-net --shared --router:external=True
# neutron --os-username=admin --os-password={{ ADMIN_PASS }} --os-tenant-name=admin --os-auth-url=http://{{ identity_host }}:35357/v2.0 subnet-create ext-net --name ext-subnet --allocation-pool start={{ FLOATING_IP_START }},end={{ FLOATING_IP_END }} --disable-dhcp --gateway {{ EXTERNAL_NETWORK_GATEWAY }} {{ EXTERNAL_NETWORK_CIDR }}
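# A quick sanity check (same credentials as above) once the commands have run:
# neutron --os-username=admin --os-password={{ ADMIN_PASS }} --os-tenant-name=admin --os-auth-url=http://{{ identity_host }}:35357/v2.0 net-list
# neutron --os-username=admin --os-password={{ ADMIN_PASS }} --os-tenant-name=admin --os-auth-url=http://{{ identity_host }}:35357/v2.0 subnet-show ext-subnet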

View File

@ -0,0 +1,68 @@
[DEFAULT]
dhcpbridge_flagfile=/etc/nova/nova.conf
dhcpbridge=/usr/bin/nova-dhcpbridge
logdir=/var/log/nova
state_path=/var/lib/nova
lock_path=/var/lock/nova
force_dhcp_release=True
iscsi_helper=tgtadm
libvirt_use_virtio_for_bridges=True
connection_type=libvirt
root_helper=sudo nova-rootwrap /etc/nova/rootwrap.conf
verbose={{ VERBOSE }}
debug={{ DEBUG }}
ec2_private_dns_show_ip=True
api_paste_config=/etc/nova/api-paste.ini
volumes_path=/var/lib/nova/volumes
enabled_apis=ec2,osapi_compute,metadata
vif_plugging_is_fatal = false
vif_plugging_timeout = 0
auth_strategy = keystone
rpc_backend = rabbit
rabbit_host = {{ rabbit_host }}
rabbit_password = {{ RABBIT_PASS }}
my_ip = {{ internal_ip }}
vnc_enabled = True
vncserver_listen = 0.0.0.0
vncserver_proxyclient_address = {{ internal_ip }}
novncproxy_base_url = http://{{ compute_controller_host }}:6080/vnc_auto.html
novncproxy_host = {{ internal_ip }}
novncproxy_port = 6080
network_api_class = nova.network.neutronv2.api.API
linuxnet_interface_driver = nova.network.linux_net.LinuxOVSInterfaceDriver
firewall_driver = nova.virt.firewall.NoopFirewallDriver
security_group_api = neutron
instance_usage_audit = True
instance_usage_audit_period = hour
notify_on_state_change = vm_and_task_state
notification_driver = nova.openstack.common.notifier.rpc_notifier
notification_driver = ceilometer.compute.nova_notifier
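# notification_driver is a multi-valued option: listing it twice registers both
# the RPC notifier and the ceilometer notifier instead of overriding the first.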
[database]
# The SQLAlchemy connection string used to connect to the database
connection = mysql://nova:{{ NOVA_DBPASS }}@{{ db_host }}/nova
[keystone_authtoken]
auth_uri = http://{{ identity_host }}:5000/v2.0
identity_uri = http://{{ identity_host }}:35357
admin_tenant_name = service
admin_user = nova
admin_password = {{ NOVA_PASS }}
[glance]
host = {{ image_host }}
[neutron]
url = http://{{ network_server_host }}:9696
auth_strategy = keystone
admin_tenant_name = service
admin_username = neutron
admin_password = {{ NEUTRON_PASS }}
admin_auth_url = http://{{ identity_host }}:35357/v2.0

View File

@ -0,0 +1,3 @@
---
- name: restart nova-compute
  service: name=nova-compute state=restarted

View File

@ -0,0 +1,16 @@
---
- name: install nova-compute related packages
  apt: name=nova-compute-kvm state=present force=yes

- name: update nova-compute conf
  template: src={{ item }} dest=/etc/nova/{{ item }}
  with_items:
    - nova.conf
    - nova-compute.conf
  notify:
    - restart nova-compute

- meta: flush_handlers

- name: remove nova sqlite db
  shell: rm /var/lib/nova/nova.sqlite || touch nova.sqlite.removed
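# Note: the "|| touch" fallback keeps the task idempotent; once the sqlite file
# is gone, reruns create a marker file instead of failing the play.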

View File

@ -0,0 +1,4 @@
[DEFAULT]
compute_driver=libvirt.LibvirtDriver
[libvirt]
virt_type=qemu

View File

@ -0,0 +1,53 @@
[DEFAULT]
dhcpbridge_flagfile=/etc/nova/nova.conf
dhcpbridge=/usr/bin/nova-dhcpbridge
logdir=/var/log/nova
state_path=/var/lib/nova
lock_path=/var/lock/nova
force_dhcp_release=True
iscsi_helper=tgtadm
libvirt_use_virtio_for_bridges=True
connection_type=libvirt
root_helper=sudo nova-rootwrap /etc/nova/rootwrap.conf
verbose=True
ec2_private_dns_show_ip=True
api_paste_config=/etc/nova/api-paste.ini
volumes_path=/var/lib/nova/volumes
enabled_apis=ec2,osapi_compute,metadata
vif_plugging_is_fatal = false
vif_plugging_timeout = 0
rpc_backend = rabbit
rabbit_host = {{ rabbit_host }}
rabbit_password = {{ RABBIT_PASS }}
my_ip = {{ internal_ip }}
vncserver_listen = {{ internal_ip }}
vncserver_proxyclient_address = {{ compute_controller_host }}
auth_strategy = keystone
network_api_class = nova.network.neutronv2.api.API
neutron_url = http://{{ network_server_host }}:9696
neutron_auth_strategy = keystone
neutron_admin_tenant_name = service
neutron_admin_username = neutron
neutron_admin_password = {{ NEUTRON_PASS }}
neutron_admin_auth_url = http://{{ identity_host }}:35357/v2.0
linuxnet_interface_driver = nova.network.linux_net.LinuxOVSInterfaceDriver
firewall_driver = nova.virt.firewall.NoopFirewallDriver
security_group_api = neutron
[database]
connection = mysql://nova:{{ NOVA_DBPASS }}@{{ DB_HOST }}/nova
[keystone_authtoken]
auth_uri = http://{{ identity_host }}:5000/v2.0
identity_uri = http://{{ identity_host }}:35357
admin_tenant_name = service
admin_user = nova
admin_password = {{ NOVA_PASS }}
[glance]
host = {{ image_host }}

View File

@ -0,0 +1,24 @@
---
- name: restart nova-api
  service: name=nova-api state=restarted

- name: restart nova-cert
  service: name=nova-cert state=restarted

- name: restart nova-consoleauth
  service: name=nova-consoleauth state=restarted

- name: restart nova-scheduler
  service: name=nova-scheduler state=restarted

- name: restart nova-conductor
  service: name=nova-conductor state=restarted

- name: restart nova-novncproxy
  service: name=nova-novncproxy state=restarted

- name: remove nova-sqlite-db
  shell: rm /var/lib/nova/nova.sqlite || touch nova.sqlite.db.removed

- name: restart neutron-server
  service: name=neutron-server state=restarted

View File

@ -0,0 +1,37 @@
---
- name: install nova related packages
  apt: name={{ item }} state=present force=yes
  with_items:
    - nova-api
    - nova-cert
    - nova-conductor
    - nova-consoleauth
    - nova-novncproxy
    - nova-scheduler
    - python-novaclient
    - python-oslo.rootwrap

- name: update nova conf
  template: src=nova.conf dest=/etc/nova/nova.conf backup=yes
  notify:
    - restart nova-api
    - restart nova-cert
    - restart nova-consoleauth
    - restart nova-scheduler
    - restart nova-conductor
    - restart nova-novncproxy
    - remove nova-sqlite-db

- name: nova db sync
  command: su -s /bin/sh -c "nova-manage db sync" nova
  notify:
    - restart nova-api
    - restart nova-cert
    - restart nova-consoleauth
    - restart nova-scheduler
    - restart nova-conductor
    - restart nova-novncproxy

- meta: flush_handlers
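# flush_handlers runs the queued service restarts here, at the end of the role,
# rather than waiting for the end of the whole play.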

View File

@ -0,0 +1,90 @@
[DEFAULT]
# Show debugging output in log (sets DEBUG log level output)
# debug = False
verbose = True
# The DHCP agent will resync its state with Neutron to recover from any
# transient notification or rpc errors. The interval is number of
# seconds between attempts.
resync_interval = 5
# The DHCP agent requires an interface driver be set. Choose the one that best
# matches your plugin.
# interface_driver =
# Example of interface_driver option for OVS based plugins(OVS, Ryu, NEC, NVP,
# BigSwitch/Floodlight)
interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
# Name of Open vSwitch bridge to use
# ovs_integration_bridge = br-int
# Use veth for an OVS interface or not.
# Support kernels with limited namespace support
# (e.g. RHEL 6.5) so long as ovs_use_veth is set to True.
ovs_use_veth = False
# Example of interface_driver option for LinuxBridge
# interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver
# The agent can use other DHCP drivers. Dnsmasq is the simplest and requires
# no additional setup of the DHCP server.
dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
# Allow overlapping IP (Must have kernel build with CONFIG_NET_NS=y and
# iproute2 package that supports namespaces).
use_namespaces = True
# The DHCP server can assist with providing metadata support on isolated
# networks. Setting this value to True will cause the DHCP server to append
# specific host routes to the DHCP request. The metadata service will only
# be activated when the subnet does not contain any router port. The guest
# instance must be configured to request host routes via DHCP (Option 121).
enable_isolated_metadata = False
# Allows for serving metadata requests coming from a dedicated metadata
# access network whose cidr is 169.254.169.254/16 (or larger prefix), and
# is connected to a Neutron router from which the VMs send metadata
# request. In this case DHCP Option 121 will not be injected in VMs, as
# they will be able to reach 169.254.169.254 through a router.
# This option requires enable_isolated_metadata = True
enable_metadata_network = False
# Number of threads to use during sync process. Should not exceed connection
# pool size configured on server.
# num_sync_threads = 4
# Location to store DHCP server config files
# dhcp_confs = $state_path/dhcp
# Domain to use for building the hostnames
dhcp_domain = openstacklocal
# Override the default dnsmasq settings with this file
# dnsmasq_config_file =
dnsmasq_config_file = /etc/neutron/dnsmasq-neutron.conf
# Comma-separated list of DNS servers which will be used by dnsmasq
# as forwarders.
# dnsmasq_dns_servers =
# Limit number of leases to prevent a denial-of-service.
dnsmasq_lease_max = 16777216
# Location to DHCP lease relay UNIX domain socket
# dhcp_lease_relay_socket = $state_path/dhcp/lease_relay
# Location of Metadata Proxy UNIX domain socket
# metadata_proxy_socket = $state_path/metadata_proxy
# dhcp_delete_namespaces, which is false by default, can be set to True if
# namespaces can be deleted cleanly on the host running the dhcp agent.
# Do not enable this until you understand the problem with the Linux iproute
# utility mentioned in https://bugs.launchpad.net/neutron/+bug/1052535 and
# you are sure that your version of iproute does not suffer from the problem.
# If True, namespaces will be deleted when a dhcp server is disabled.
# dhcp_delete_namespaces = False
# Timeout for ovs-vsctl commands.
# If the timeout expires, ovs commands will fail with ALARMCLOCK error.
# ovs_vsctl_timeout = 10

View File

@ -0,0 +1,2 @@
dhcp-option-force=26,1454
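# DHCP option 26 is the interface MTU; 1454 leaves headroom for GRE/VXLAN
# encapsulation overhead on a standard 1500-byte link.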

View File

@ -0,0 +1,25 @@
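/* Presumably this XORP config is here so the host joins the VXLAN multicast
   group (vxlan_group = 239.1.1.1 in ml2_conf.ini) via IGMPv3 on the internal
   interface. */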
interfaces {
    restore-original-config-on-shutdown: false
    interface {{ hostvars[inventory_hostname][neutron_vxlan_interface|default(internal_interface)]['device'] }} {
        description: "Internal pNodes interface"
        disable: false
        default-system-config
    }
}

protocols {
    igmp {
        disable: false
        interface {{ hostvars[inventory_hostname][neutron_vxlan_interface|default(internal_interface)]['device'] }} {
            vif {{ hostvars[inventory_hostname][neutron_vxlan_interface|default(internal_interface)]['device'] }} {
                disable: false
                version: 3
            }
        }
        traceoptions {
            flag all {
                disable: false
            }
        }
    }
}

View File

@ -0,0 +1,81 @@
[DEFAULT]
# Show debugging output in log (sets DEBUG log level output)
# debug = False
verbose = True
# L3 requires that an interface driver be set. Choose the one that best
# matches your plugin.
# interface_driver =
# Example of interface_driver option for OVS based plugins (OVS, Ryu, NEC)
# that supports L3 agent
# interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
# Use veth for an OVS interface or not.
# Support kernels with limited namespace support
# (e.g. RHEL 6.5) so long as ovs_use_veth is set to True.
# ovs_use_veth = False
# Example of interface_driver option for LinuxBridge
# interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver
# Allow overlapping IP (Must have kernel build with CONFIG_NET_NS=y and
# iproute2 package that supports namespaces).
use_namespaces = True
# If use_namespaces is set to False then the agent can only configure one router.
# This is done by setting the specific router_id.
# router_id =
# When external_network_bridge is set, each L3 agent can be associated
# with no more than one external network. This value should be set to the UUID
# of that external network. To allow L3 agent support multiple external
# networks, both the external_network_bridge and gateway_external_network_id
# must be left empty.
# gateway_external_network_id =
# Indicates that this L3 agent should also handle routers that do not have
# an external network gateway configured. This option should be True only
# for a single agent in a Neutron deployment, and may be False for all agents
# if all routers must have an external network gateway
handle_internal_only_routers = True
# Name of bridge used for external network traffic. This should be set to an
# empty value for the Linux bridge. When this parameter is set, each L3 agent
# can be associated with no more than one external network.
external_network_bridge = br-ex
# TCP Port used by Neutron metadata server
metadata_port = 9697
# Send this many gratuitous ARPs for HA setup. Set it below or equal to 0
# to disable this feature.
send_arp_for_ha = 3
# seconds between re-sync routers' data if needed
periodic_interval = 40
# seconds to start to sync routers' data after
# starting agent
periodic_fuzzy_delay = 5
# enable_metadata_proxy, which is true by default, can be set to False
# if the Nova metadata server is not available
# enable_metadata_proxy = True
# Location of Metadata Proxy UNIX domain socket
# metadata_proxy_socket = $state_path/metadata_proxy
# router_delete_namespaces, which is false by default, can be set to True if
# namespaces can be deleted cleanly on the host running the L3 agent.
# Do not enable this until you understand the problem with the Linux iproute
# utility mentioned in https://bugs.launchpad.net/neutron/+bug/1052535 and
# you are sure that your version of iproute does not suffer from the problem.
# If True, namespaces will be deleted when a router is destroyed.
# router_delete_namespaces = False
# Timeout for ovs-vsctl commands.
# If the timeout expires, ovs commands will fail with ALARMCLOCK error.
# ovs_vsctl_timeout = 10

View File

@ -0,0 +1,46 @@
[DEFAULT]
# Show debugging output in log (sets DEBUG log level output)
debug = True
# The Neutron user information for accessing the Neutron API.
auth_url = http://{{ identity_host }}:5000/v2.0
auth_region = RegionOne
# Turn off verification of the certificate for ssl
# auth_insecure = False
# Certificate Authority public key (CA cert) file for ssl
# auth_ca_cert =
admin_tenant_name = service
admin_user = neutron
admin_password = {{ NEUTRON_PASS }}
# Network service endpoint type to pull from the keystone catalog
# endpoint_type = adminURL
# IP address used by Nova metadata server
nova_metadata_ip = {{ compute_controller_host }}
# TCP Port used by Nova metadata server
nova_metadata_port = 8775
# When proxying metadata requests, Neutron signs the Instance-ID header with a
# shared secret to prevent spoofing. You may select any string for a secret,
# but it must match here and in the configuration used by the Nova Metadata
# Server. NOTE: Nova uses a different key: neutron_metadata_proxy_shared_secret
metadata_proxy_shared_secret = {{ METADATA_SECRET }}
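# (The matching nova.conf settings would be something like
# service_metadata_proxy = True and metadata_proxy_shared_secret =
# {{ METADATA_SECRET }} in nova's [neutron] section; older releases used the
# DEFAULT-level neutron_* names mentioned in the NOTE above.)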
# Location of Metadata Proxy UNIX domain socket
# metadata_proxy_socket = $state_path/metadata_proxy
# Number of separate worker processes for metadata server
# metadata_workers = 0
# Number of backlog requests to configure the metadata server socket with
# metadata_backlog = 128
# URL to connect to the cache backend.
# Example of URL using memory caching backend
# with ttl set to 5 seconds: cache_url = memory://?default_ttl=5
# default_ttl=0 parameter will cause cache entries to never expire.
# Otherwise default_ttl specifies time in seconds a cache entry is valid for.
# No cache is used in case no value is passed.
# cache_url =

View File

@ -0,0 +1,108 @@
[ml2]
# (ListOpt) List of network type driver entrypoints to be loaded from
# the neutron.ml2.type_drivers namespace.
#
# type_drivers = local,flat,vlan,gre,vxlan
# Example: type_drivers = flat,vlan,gre,vxlan
type_drivers = {{ NEUTRON_TYPE_DRIVERS |join(",") }}
# (ListOpt) Ordered list of network_types to allocate as tenant
# networks. The default value 'local' is useful for single-box testing
# but provides no connectivity between hosts.
#
# tenant_network_types = local
# Example: tenant_network_types = vlan,gre,vxlan
tenant_network_types = {{ NEUTRON_TENANT_NETWORK_TYPES |join(",") }}
# (ListOpt) Ordered list of networking mechanism driver entrypoints
# to be loaded from the neutron.ml2.mechanism_drivers namespace.
# mechanism_drivers =
# Example: mechanism_drivers = openvswitch,mlnx
# Example: mechanism_drivers = arista
# Example: mechanism_drivers = cisco,logger
# Example: mechanism_drivers = openvswitch,brocade
# Example: mechanism_drivers = linuxbridge,brocade
mechanism_drivers = {{ NEUTRON_MECHANISM_DRIVERS |join(",") }}
[ml2_type_flat]
# (ListOpt) List of physical_network names with which flat networks
# can be created. Use * to allow flat networks with arbitrary
# physical_network names.
#
flat_networks = external
# Example: flat_networks = physnet1,physnet2
# Example: flat_networks = *
[ml2_type_vlan]
# (ListOpt) List of <physical_network>[:<vlan_min>:<vlan_max>] tuples
# specifying physical_network names usable for VLAN provider and
# tenant networks, as well as ranges of VLAN tags on each
# physical_network available for allocation as tenant networks.
#
network_vlan_ranges =
# Example: network_vlan_ranges = physnet1:1000:2999,physnet2
[ml2_type_gre]
# (ListOpt) Comma-separated list of <tun_min>:<tun_max> tuples enumerating ranges of GRE tunnel IDs that are available for tenant network allocation
tunnel_id_ranges = 1:1000
[ml2_type_vxlan]
# (ListOpt) Comma-separated list of <vni_min>:<vni_max> tuples enumerating
# ranges of VXLAN VNI IDs that are available for tenant network allocation.
#
vni_ranges = 1001:4095
# (StrOpt) Multicast group for the VXLAN interface. When configured, will
# enable sending all broadcast traffic to this multicast group. When left
# unconfigured, will disable multicast VXLAN mode.
#
vxlan_group = 239.1.1.1
# Example: vxlan_group = 239.1.1.1
[securitygroup]
# Controls if neutron security group is enabled or not.
# It should be false when you use nova security group.
# enable_security_group = True
firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
enable_security_group = True
[database]
connection = mysql://neutron:{{ NEUTRON_DBPASS }}@{{ db_host }}/ovs_neutron?charset=utf8
[ovs]
local_ip = {{ internal_ip }}
{% if 'openvswitch' in NEUTRON_MECHANISM_DRIVERS %}
integration_bridge = br-int
tunnel_bridge = br-tun
tunnel_id_ranges = 1001:4095
tunnel_type = {{ NEUTRON_TUNNEL_TYPES |join(",") }}
bridge_mappings = {{ neutron_ovs_bridge_mappings | default("external:br-ex") }}
{% endif %}
[agent]
root_helper = sudo neutron-rootwrap /etc/neutron/rootwrap.conf
tunnel_types = {{ NEUTRON_TUNNEL_TYPES |join(",") }}
{% if 'vxlan' in NEUTRON_TUNNEL_TYPES %}
vxlan_udp_port = 4789
{% endif %}
l2_population = False
[odl]
{% if 'opendaylight' in NEUTRON_MECHANISM_DRIVERS %}
network_vlan_ranges = 1001:4095
tunnel_id_ranges = 1001:4095
tun_peer_patch_port = patch-int
int_peer_patch_port = patch-tun
tenant_network_type = vxlan
tunnel_bridge = br-tun
integration_bridge = br-int
controllers = 10.1.0.15:8080:admin:admin
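# (The tuple above appears to encode <ip>:<port>:<user>:<password>; it could be
# templated from odl_controller, odl_api_port, odl_username and odl_password
# instead of being hardcoded.)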
{% endif %}
[ml2_odl]
{% if 'opendaylight' in NEUTRON_MECHANISM_DRIVERS %}
username = {{ odl_username }}
password = {{ odl_password }}
url = http://{{ odl_controller }}:{{ odl_api_port }}/controller/nb/v2/neutron
{% endif %}

View File

@ -0,0 +1,466 @@
[DEFAULT]
# Print more verbose output (set logging level to INFO instead of default WARNING level).
verbose = {{ VERBOSE }}
# Print debugging output (set logging level to DEBUG instead of default WARNING level).
debug = {{ DEBUG }}
# Where to store Neutron state files. This directory must be writable by the
# user executing the agent.
state_path = /var/lib/neutron
# Where to store lock files
lock_path = $state_path/lock
# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s
# log_date_format = %Y-%m-%d %H:%M:%S
# use_syslog -> syslog
# log_file and log_dir -> log_dir/log_file
# (not log_file) and log_dir -> log_dir/{binary_name}.log
# use_stderr -> stderr
# (not use_stderr) and (not log_file) -> stdout
# publish_errors -> notification system
# use_syslog = False
# syslog_log_facility = LOG_USER
# use_stderr = True
# log_file =
log_dir = /var/log/neutron
# publish_errors = False
# Address to bind the API server to
bind_host = {{ network_server_host }}
# Port to bind the API server to
bind_port = 9696
# Path to the extensions. Note that this can be a colon-separated list of
# paths. For example:
# api_extensions_path = extensions:/path/to/more/extensions:/even/more/extensions
# The __path__ of neutron.extensions is appended to this, so if your
# extensions are in there you don't need to specify them here
# api_extensions_path =
# (StrOpt) Neutron core plugin entrypoint to be loaded from the
# neutron.core_plugins namespace. See setup.cfg for the entrypoint names of the
# plugins included in the neutron source distribution. For compatibility with
# previous versions, the class name of a plugin can be specified instead of its
# entrypoint name.
#
#core_plugin = neutron.plugins.ml2.plugin.Ml2Plugin
core_plugin = ml2
# Example: core_plugin = ml2
# (ListOpt) List of service plugin entrypoints to be loaded from the
# neutron.service_plugins namespace. See setup.cfg for the entrypoint names of
# the plugins included in the neutron source distribution. For compatibility
# with previous versions, the class name of a plugin can be specified instead
# of its entrypoint name.
#
# service_plugins =
# Example: service_plugins = router,firewall,lbaas,vpnaas,metering
service_plugins = router
# Paste configuration file
api_paste_config = api-paste.ini
# The strategy to be used for auth.
# Supported values are 'keystone'(default), 'noauth'.
auth_strategy = keystone
# Base MAC address. The first 3 octets will remain unchanged. If the
# 4th octet is not 00, it will also be used. The others will be
# randomly generated.
# 3 octet
# base_mac = fa:16:3e:00:00:00
# 4 octet
# base_mac = fa:16:3e:4f:00:00
# Maximum amount of retries to generate a unique MAC address
# mac_generation_retries = 16
# DHCP Lease duration (in seconds)
dhcp_lease_duration = 86400
# Allow sending resource operation notification to DHCP agent
# dhcp_agent_notification = True
# Enable or disable bulk create/update/delete operations
# allow_bulk = True
# Enable or disable pagination
# allow_pagination = False
# Enable or disable sorting
# allow_sorting = False
# Enable or disable overlapping IPs for subnets
# Attention: the following parameter MUST be set to False if Neutron is
# being used in conjunction with nova security groups
allow_overlapping_ips = True
# Ensure that configured gateway is on subnet
# force_gateway_on_subnet = False
# RPC configuration options. Defined in rpc __init__
# The messaging module to use, defaults to kombu.
# rpc_backend = neutron.openstack.common.rpc.impl_kombu
rpc_backend = rabbit
rabbit_host = {{ rabbit_host }}
rabbit_password = {{ RABBIT_PASS }}
# Size of RPC thread pool
rpc_thread_pool_size = 240
# Size of RPC connection pool
rpc_conn_pool_size = 100
# Seconds to wait for a response from call or multicall
rpc_response_timeout = 300
# Seconds to wait before a cast expires (TTL). Only supported by impl_zmq.
rpc_cast_timeout = 300
# Modules of exceptions that are permitted to be recreated
# upon receiving exception data from an rpc call.
# allowed_rpc_exception_modules = neutron.openstack.common.exception, nova.exception
# AMQP exchange to connect to if using RabbitMQ or QPID
# control_exchange = neutron
# If passed, use a fake RabbitMQ provider
# fake_rabbit = False
# Configuration options if sending notifications via kombu rpc (these are
# the defaults)
# SSL version to use (valid only if SSL enabled)
# kombu_ssl_version =
# SSL key file (valid only if SSL enabled)
# kombu_ssl_keyfile =
# SSL cert file (valid only if SSL enabled)
# kombu_ssl_certfile =
# SSL certification authority file (valid only if SSL enabled)
# kombu_ssl_ca_certs =
# Port where RabbitMQ server is running/listening
rabbit_port = 5672
# RabbitMQ single or HA cluster (host:port pairs i.e: host1:5672, host2:5672)
# rabbit_hosts is defaulted to '$rabbit_host:$rabbit_port'
# rabbit_hosts = localhost:5672
# User ID used for RabbitMQ connections
rabbit_userid = guest
# Location of a virtual RabbitMQ installation.
# rabbit_virtual_host = /
# Maximum retries with trying to connect to RabbitMQ
# (the default of 0 implies an infinite retry count)
# rabbit_max_retries = 0
# RabbitMQ connection retry interval
# rabbit_retry_interval = 1
# Use HA queues in RabbitMQ (x-ha-policy: all). You need to
# wipe RabbitMQ database when changing this option. (boolean value)
# rabbit_ha_queues = false
# QPID
# rpc_backend=neutron.openstack.common.rpc.impl_qpid
# Qpid broker hostname
# qpid_hostname = localhost
# Qpid broker port
# qpid_port = 5672
# Qpid single or HA cluster (host:port pairs i.e: host1:5672, host2:5672)
# qpid_hosts is defaulted to '$qpid_hostname:$qpid_port'
# qpid_hosts = localhost:5672
# Username for qpid connection
# qpid_username = ''
# Password for qpid connection
# qpid_password = ''
# Space separated list of SASL mechanisms to use for auth
# qpid_sasl_mechanisms = ''
# Seconds between connection keepalive heartbeats
# qpid_heartbeat = 60
# Transport to use, either 'tcp' or 'ssl'
# qpid_protocol = tcp
# Disable Nagle algorithm
# qpid_tcp_nodelay = True
# ZMQ
# rpc_backend=neutron.openstack.common.rpc.impl_zmq
# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP.
# The "host" option should point or resolve to this address.
# rpc_zmq_bind_address = *
# ============ Notification System Options =====================
# Notifications can be sent when network/subnet/port are created, updated or deleted.
# There are three methods of sending notifications: logging (via the
# log_file directive), rpc (via a message queue) and
# noop (no notifications sent, the default)
# Notification_driver can be defined multiple times
# Do nothing driver
# notification_driver = neutron.openstack.common.notifier.no_op_notifier
# Logging driver
# notification_driver = neutron.openstack.common.notifier.log_notifier
# RPC driver.
notification_driver = neutron.openstack.common.notifier.rpc_notifier
# default_notification_level is used to form actual topic name(s) or to set logging level
default_notification_level = INFO
# default_publisher_id is a part of the notification payload
# host = myhost.com
# default_publisher_id = $host
# Defined in rpc_notifier, can be comma separated values.
# The actual topic names will be %s.%(default_notification_level)s
notification_topics = notifications
# Default maximum number of items returned in a single response,
# value == infinite and value < 0 means no max limit, and value must
# be greater than 0. If the number of items requested is greater than
# pagination_max_limit, server will just return pagination_max_limit
# of number of items.
# pagination_max_limit = -1
# Maximum number of DNS nameservers per subnet
# max_dns_nameservers = 5
# Maximum number of host routes per subnet
# max_subnet_host_routes = 20
# Maximum number of fixed ips per port
# max_fixed_ips_per_port = 5
# =========== items for agent management extension =============
# Seconds to regard the agent as down; should be at least twice
# report_interval, to be sure the agent is down for good
agent_down_time = 75
# =========== end of items for agent management extension =====
# =========== items for agent scheduler extension =============
# Driver to use for scheduling network to DHCP agent
network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.ChanceScheduler
# Driver to use for scheduling router to a default L3 agent
router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.ChanceScheduler
# Driver to use for scheduling a loadbalancer pool to an lbaas agent
# loadbalancer_pool_scheduler_driver = neutron.services.loadbalancer.agent_scheduler.ChanceScheduler
# Allow auto scheduling networks to DHCP agent. It will schedule non-hosted
# networks to first DHCP agent which sends get_active_networks message to
# neutron server
# network_auto_schedule = True
# Allow auto scheduling routers to L3 agent. It will schedule non-hosted
# routers to first L3 agent which sends sync_routers message to neutron server
# router_auto_schedule = True
# Number of DHCP agents scheduled to host a network. This enables redundant
# DHCP agents for configured networks.
# dhcp_agents_per_network = 1
# =========== end of items for agent scheduler extension =====
# =========== WSGI parameters related to the API server ==============
# Number of separate worker processes to spawn. The default, 0, runs the
# worker thread in the current process. Greater than 0 launches that number of
# child processes as workers. The parent process manages them.
api_workers = 8
# Number of separate RPC worker processes to spawn. The default, 0, runs the
# worker thread in the current process. Greater than 0 launches that number of
# child processes as RPC workers. The parent process manages them.
# This feature is experimental until issues are addressed and testing has been
# enabled for various plugins for compatibility.
rpc_workers = 8
# Sets the value of TCP_KEEPIDLE in seconds to use for each server socket when
# starting API server. Not supported on OS X.
# tcp_keepidle = 600
# Number of seconds to keep retrying to listen
# retry_until_window = 30
# Number of backlog requests to configure the socket with.
# backlog = 4096
# Max header line to accommodate large tokens
# max_header_line = 16384
# Enable SSL on the API server
# use_ssl = False
# Certificate file to use when starting API server securely
# ssl_cert_file = /path/to/certfile
# Private key file to use when starting API server securely
# ssl_key_file = /path/to/keyfile
# CA certificate file to use when starting API server securely to
# verify connecting clients. This is an optional parameter only required if
# API clients need to authenticate to the API server using SSL certificates
# signed by a trusted CA
# ssl_ca_file = /path/to/cafile
# ======== end of WSGI parameters related to the API server ==========
# ======== neutron nova interactions ==========
# Send notification to nova when port status is active.
notify_nova_on_port_status_changes = True
# Send notifications to nova when port data (fixed_ips/floatingips) change
# so nova can update its cache.
notify_nova_on_port_data_changes = True
# URL for connection to nova (Only supports one nova region currently).
nova_url = http://{{ compute_controller_host }}:8774/v2
# Name of nova region to use. Useful if keystone manages more than one region
nova_region_name = RegionOne
# Username for connection to nova in admin context
nova_admin_username = nova
# The uuid of the admin nova tenant
nova_admin_tenant_id = {{ NOVA_ADMIN_TENANT_ID.stdout_lines[0] }}
# Password for connection to nova in admin context.
nova_admin_password = {{ NOVA_PASS }}
# Authorization URL for connection to nova in admin context.
nova_admin_auth_url = http://{{ identity_host }}:35357/v2.0
# Number of seconds between sending events to nova if there are any events to send
send_events_interval = 2
# ======== end of neutron nova interactions ==========
[quotas]
# Default driver to use for quota checks
quota_driver = neutron.db.quota_db.DbQuotaDriver
# Resource name(s) that are supported in quota features
quota_items = network,subnet,port
# Default number of resource allowed per tenant. A negative value means
# unlimited.
default_quota = -1
# Number of networks allowed per tenant. A negative value means unlimited.
quota_network = 100
# Number of subnets allowed per tenant. A negative value means unlimited.
quota_subnet = 100
# Number of ports allowed per tenant. A negative value means unlimited.
quota_port = 8000
# Number of security groups allowed per tenant. A negative value means
# unlimited.
quota_security_group = 1000
# Number of security group rules allowed per tenant. A negative value means
# unlimited.
quota_security_group_rule = 1000
# Number of vips allowed per tenant. A negative value means unlimited.
# quota_vip = 10
# Number of pools allowed per tenant. A negative value means unlimited.
# quota_pool = 10
# Number of pool members allowed per tenant. A negative value means unlimited.
# The default is unlimited because a member is not a real resource consumer
# on OpenStack. However, on the back end a member is a resource consumer,
# and that is the reason why a quota is possible.
# quota_member = -1
# Number of health monitors allowed per tenant. A negative value means
# unlimited.
# The default is unlimited because a health monitor is not a real resource
# consumer on OpenStack. However, on the back end a health monitor is a
# resource consumer, and that is the reason why a quota is possible.
# quota_health_monitors = -1
# Number of routers allowed per tenant. A negative value means unlimited.
# quota_router = 10
# Number of floating IPs allowed per tenant. A negative value means unlimited.
# quota_floatingip = 50
[agent]
# Use "sudo neutron-rootwrap /etc/neutron/rootwrap.conf" to use the real
# root filter facility.
# Change to "sudo" to skip the filtering and just run the command directly
root_helper = "sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf"
# =========== items for agent management extension =============
# seconds between nodes reporting state to server; should be less than
# agent_down_time, best if it is half or less than agent_down_time
report_interval = 30
# =========== end of items for agent management extension =====
[keystone_authtoken]
auth_uri = http://{{ identity_host }}:5000/v2.0
identity_uri = http://{{ identity_host }}:35357
admin_tenant_name = service
admin_user = neutron
admin_password = {{ NEUTRON_PASS }}
signing_dir = $state_path/keystone-signing
[database]
# This line MUST be changed to actually run the plugin.
# Example:
# connection = mysql://root:pass@127.0.0.1:3306/neutron
# Replace 127.0.0.1 above with the IP address of the database used by the
# main neutron server. (Leave it as is if the database runs on this host.)
# connection = sqlite:////var/lib/neutron/neutron.sqlite
#connection = mysql://neutron:{{ NEUTRON_DBPASS }}@{{ db_host }}/neutron
# The SQLAlchemy connection string used to connect to the slave database
slave_connection =
# Database reconnection retry times - in event connectivity is lost
# set to -1 implies an infinite retry count
max_retries = 10
# Database reconnection interval in seconds - if the initial connection to the
# database fails
retry_interval = 10
# Minimum number of SQL connections to keep open in a pool
min_pool_size = 1
# Maximum number of SQL connections to keep open in a pool
max_pool_size = 100
# Timeout in seconds before idle sql connections are reaped
idle_timeout = 3600
# If set, use this value for max_overflow with sqlalchemy
max_overflow = 100
# Verbosity of SQL debugging information. 0=None, 100=Everything
connection_debug = 0
# Add python stack traces to SQL as comment strings
connection_trace = False
# If set, use this value for pool_timeout with sqlalchemy
pool_timeout = 10
[service_providers]
# Specify service providers (drivers) for advanced services like loadbalancer, VPN, Firewall.
# Must be in form:
# service_provider=<service_type>:<name>:<driver>[:default]
# List of allowed service types includes LOADBALANCER, FIREWALL, VPN
# Combination of <service type> and <name> must be unique; <driver> must also be unique
# This is multiline option, example for default provider:
# service_provider=LOADBALANCER:name:lbaas_plugin_driver_path:default
# example of non-default provider:
# service_provider=FIREWALL:name2:firewall_driver_path
# --- Reference implementations ---
service_provider=LOADBALANCER:Haproxy:neutron.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default
service_provider=VPN:openswan:neutron.services.vpn.service_drivers.ipsec.IPsecVPNDriver:default
# In order to activate Radware's lbaas driver you need to uncomment the next line.
# If you want to keep the HA Proxy as the default lbaas driver, remove the attribute default from the line below.
# Otherwise comment the HA Proxy line
# service_provider = LOADBALANCER:Radware:neutron.services.loadbalancer.drivers.radware.driver.LoadBalancerDriver:default
# uncomment the following line to make the 'netscaler' LBaaS provider available.
# service_provider=LOADBALANCER:NetScaler:neutron.services.loadbalancer.drivers.netscaler.netscaler_driver.NetScalerPluginDriver
# Uncomment the following line (and comment out the OpenSwan VPN line) to enable Cisco's VPN driver.
# service_provider=VPN:cisco:neutron.services.vpn.service_drivers.cisco_ipsec.CiscoCsrIPsecVPNDriver:default
# Uncomment the line below to use Embrane heleos as Load Balancer service provider.
# service_provider=LOADBALANCER:Embrane:neutron.services.loadbalancer.drivers.embrane.driver.EmbraneLbaas:default

View File

@ -0,0 +1,467 @@
[DEFAULT]
# Print more verbose output (set logging level to INFO instead of default WARNING level).
verbose = {{ VERBOSE }}
# Print debugging output (set logging level to DEBUG instead of default WARNING level).
debug = {{ DEBUG }}
# Where to store Neutron state files. This directory must be writable by the
# user executing the agent.
state_path = /var/lib/neutron
# Where to store lock files
lock_path = $state_path/lock
# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s
# log_date_format = %Y-%m-%d %H:%M:%S
# use_syslog -> syslog
# log_file and log_dir -> log_dir/log_file
# (not log_file) and log_dir -> log_dir/{binary_name}.log
# use_stderr -> stderr
# (not use_stderr) and (not log_file) -> stdout
# publish_errors -> notification system
# use_syslog = False
# syslog_log_facility = LOG_USER
# use_stderr = True
# log_file =
log_dir = /var/log/neutron
# publish_errors = False
# Address to bind the API server to
bind_host = {{ network_server_host }}
# Port to bind the API server to
bind_port = 9696
# Path to the extensions. Note that this can be a colon-separated list of
# paths. For example:
# api_extensions_path = extensions:/path/to/more/extensions:/even/more/extensions
# The __path__ of neutron.extensions is appended to this, so if your
# extensions are in there you don't need to specify them here
# api_extensions_path =
# (StrOpt) Neutron core plugin entrypoint to be loaded from the
# neutron.core_plugins namespace. See setup.cfg for the entrypoint names of the
# plugins included in the neutron source distribution. For compatibility with
# previous versions, the class name of a plugin can be specified instead of its
# entrypoint name.
#
#core_plugin = neutron.plugins.ml2.plugin.Ml2Plugin
core_plugin = ml2
# Example: core_plugin = ml2
# (ListOpt) List of service plugin entrypoints to be loaded from the
# neutron.service_plugins namespace. See setup.cfg for the entrypoint names of
# the plugins included in the neutron source distribution. For compatibility
# with previous versions, the class name of a plugin can be specified instead
# of its entrypoint name.
#
# service_plugins =
# Example: service_plugins = router,firewall,lbaas,vpnaas,metering
service_plugins = router
# Paste configuration file
api_paste_config = api-paste.ini
# The strategy to be used for auth.
# Supported values are 'keystone'(default), 'noauth'.
auth_strategy = keystone
# Base MAC address. The first 3 octets will remain unchanged. If the
# 4th octet is not 00, it will also be used. The others will be
# randomly generated.
# 3 octet
# base_mac = fa:16:3e:00:00:00
# 4 octet
# base_mac = fa:16:3e:4f:00:00
# Maximum amount of retries to generate a unique MAC address
# mac_generation_retries = 16
# DHCP Lease duration (in seconds)
dhcp_lease_duration = 86400
# Allow sending resource operation notification to DHCP agent
# dhcp_agent_notification = True
# Enable or disable bulk create/update/delete operations
# allow_bulk = True
# Enable or disable pagination
# allow_pagination = False
# Enable or disable sorting
# allow_sorting = False
# Enable or disable overlapping IPs for subnets
# Attention: the following parameter MUST be set to False if Neutron is
# being used in conjunction with nova security groups
allow_overlapping_ips = True
# Ensure that configured gateway is on subnet
# force_gateway_on_subnet = False
# RPC configuration options. Defined in rpc __init__
# The messaging module to use, defaults to kombu.
# rpc_backend = neutron.openstack.common.rpc.impl_kombu
rpc_backend = rabbit
rabbit_host = {{ rabbit_host }}
rabbit_password = {{ RABBIT_PASS }}
# Size of RPC thread pool
rpc_thread_pool_size = 240
# Size of RPC connection pool
rpc_conn_pool_size = 100
# Seconds to wait for a response from call or multicall
rpc_response_timeout = 300
# Seconds to wait before a cast expires (TTL). Only supported by impl_zmq.
rpc_cast_timeout = 300
# Modules of exceptions that are permitted to be recreated
# upon receiving exception data from an rpc call.
# allowed_rpc_exception_modules = neutron.openstack.common.exception, nova.exception
# AMQP exchange to connect to if using RabbitMQ or QPID
# control_exchange = neutron
# If passed, use a fake RabbitMQ provider
# fake_rabbit = False
# Configuration options if sending notifications via kombu rpc (these are
# the defaults)
# SSL version to use (valid only if SSL enabled)
# kombu_ssl_version =
# SSL key file (valid only if SSL enabled)
# kombu_ssl_keyfile =
# SSL cert file (valid only if SSL enabled)
# kombu_ssl_certfile =
# SSL certification authority file (valid only if SSL enabled)
# kombu_ssl_ca_certs =
# Port where RabbitMQ server is running/listening
rabbit_port = 5672
# RabbitMQ single or HA cluster (host:port pairs i.e: host1:5672, host2:5672)
# rabbit_hosts is defaulted to '$rabbit_host:$rabbit_port'
# rabbit_hosts = localhost:5672
# User ID used for RabbitMQ connections
rabbit_userid = guest
# Location of a virtual RabbitMQ installation.
# rabbit_virtual_host = /
# Maximum retries with trying to connect to RabbitMQ
# (the default of 0 implies an infinite retry count)
# rabbit_max_retries = 0
# RabbitMQ connection retry interval
# rabbit_retry_interval = 1
# Use HA queues in RabbitMQ (x-ha-policy: all). You need to
# wipe RabbitMQ database when changing this option. (boolean value)
# rabbit_ha_queues = false
# QPID
# rpc_backend=neutron.openstack.common.rpc.impl_qpid
# Qpid broker hostname
# qpid_hostname = localhost
# Qpid broker port
# qpid_port = 5672
# Qpid single or HA cluster (host:port pairs i.e: host1:5672, host2:5672)
# qpid_hosts is defaulted to '$qpid_hostname:$qpid_port'
# qpid_hosts = localhost:5672
# Username for qpid connection
# qpid_username = ''
# Password for qpid connection
# qpid_password = ''
# Space separated list of SASL mechanisms to use for auth
# qpid_sasl_mechanisms = ''
# Seconds between connection keepalive heartbeats
# qpid_heartbeat = 60
# Transport to use, either 'tcp' or 'ssl'
# qpid_protocol = tcp
# Disable Nagle algorithm
# qpid_tcp_nodelay = True
# ZMQ
# rpc_backend=neutron.openstack.common.rpc.impl_zmq
# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP.
# The "host" option should point or resolve to this address.
# rpc_zmq_bind_address = *
# ============ Notification System Options =====================
# Notifications can be sent when network/subnet/port are created, updated or deleted.
# There are three methods of sending notifications: logging (via the
# log_file directive), rpc (via a message queue) and
# noop (no notifications sent, the default)
# Notification_driver can be defined multiple times
# Do nothing driver
# notification_driver = neutron.openstack.common.notifier.no_op_notifier
# Logging driver
# notification_driver = neutron.openstack.common.notifier.log_notifier
# RPC driver.
notification_driver = neutron.openstack.common.notifier.rpc_notifier
# default_notification_level is used to form actual topic name(s) or to set logging level
default_notification_level = INFO
# default_publisher_id is a part of the notification payload
# host = myhost.com
# default_publisher_id = $host
# Defined in rpc_notifier, can be comma separated values.
# The actual topic names will be %s.%(default_notification_level)s
notification_topics = notifications
# Default maximum number of items returned in a single response,
# value == infinite and value < 0 means no max limit, and value must
# be greater than 0. If the number of items requested is greater than
# pagination_max_limit, server will just return pagination_max_limit
# of number of items.
# pagination_max_limit = -1
# Maximum number of DNS nameservers per subnet
# max_dns_nameservers = 5
# Maximum number of host routes per subnet
# max_subnet_host_routes = 20
# Maximum number of fixed ips per port
# max_fixed_ips_per_port = 5
# =========== items for agent management extension =============
# Seconds to regard the agent as down; should be at least twice
# report_interval, to be sure the agent is down for good
agent_down_time = 75
# =========== end of items for agent management extension =====
# =========== items for agent scheduler extension =============
# Driver to use for scheduling network to DHCP agent
network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.ChanceScheduler
# Driver to use for scheduling router to a default L3 agent
router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.ChanceScheduler
# Driver to use for scheduling a loadbalancer pool to an lbaas agent
# loadbalancer_pool_scheduler_driver = neutron.services.loadbalancer.agent_scheduler.ChanceScheduler
# Allow auto scheduling networks to DHCP agent. It will schedule non-hosted
# networks to first DHCP agent which sends get_active_networks message to
# neutron server
# network_auto_schedule = True
# Allow auto scheduling routers to L3 agent. It will schedule non-hosted
# routers to first L3 agent which sends sync_routers message to neutron server
# router_auto_schedule = True
# Number of DHCP agents scheduled to host a network. This enables redundant
# DHCP agents for configured networks.
# dhcp_agents_per_network = 1
# =========== end of items for agent scheduler extension =====
# =========== WSGI parameters related to the API server ==============
# Number of separate worker processes to spawn. The default, 0, runs the
# worker thread in the current process. Greater than 0 launches that number of
# child processes as workers. The parent process manages them.
api_workers = 8
# Number of separate RPC worker processes to spawn. The default, 0, runs the
# worker thread in the current process. Greater than 0 launches that number of
# child processes as RPC workers. The parent process manages them.
# This feature is experimental until issues are addressed and testing has been
# enabled for various plugins for compatibility.
rpc_workers = 8
# Sets the value of TCP_KEEPIDLE in seconds to use for each server socket when
# starting API server. Not supported on OS X.
# tcp_keepidle = 600
# Number of seconds to keep retrying to listen
# retry_until_window = 30
# Number of backlog requests to configure the socket with.
# backlog = 4096
# Max header line to accommodate large tokens
# max_header_line = 16384
# Enable SSL on the API server
# use_ssl = False
# Certificate file to use when starting API server securely
# ssl_cert_file = /path/to/certfile
# Private key file to use when starting API server securely
# ssl_key_file = /path/to/keyfile
# CA certificate file to use when starting API server securely to
# verify connecting clients. This is an optional parameter only required if
# API clients need to authenticate to the API server using SSL certificates
# signed by a trusted CA
# ssl_ca_file = /path/to/cafile
# ======== end of WSGI parameters related to the API server ==========
# ======== neutron nova interactions ==========
# Send notification to nova when port status is active.
notify_nova_on_port_status_changes = True
# Send notifications to nova when port data (fixed_ips/floatingips) change
# so nova can update its cache.
notify_nova_on_port_data_changes = True
# URL for connection to nova (Only supports one nova region currently).
nova_url = http://{{ compute_controller_host }}:8774/v2
# Name of nova region to use. Useful if keystone manages more than one region
nova_region_name = RegionOne
# Username for connection to nova in admin context
nova_admin_username = nova
# The uuid of the admin nova tenant
nova_admin_tenant_id = {{ NOVA_ADMIN_TENANT_ID.stdout_lines[0] }}
# Password for connection to nova in admin context.
nova_admin_password = {{ NOVA_PASS }}
# Authorization URL for connection to nova in admin context.
nova_admin_auth_url = http://{{ identity_host }}:35357/v2.0
# Number of seconds between sending events to nova if there are any events to send
send_events_interval = 2
# ======== end of neutron nova interactions ==========
[quotas]
# Default driver to use for quota checks
quota_driver = neutron.db.quota_db.DbQuotaDriver
# Resource name(s) that are supported in quota features
quota_items = network,subnet,port
# Default number of resource allowed per tenant. A negative value means
# unlimited.
default_quota = -1
# Number of networks allowed per tenant. A negative value means unlimited.
quota_network = 100
# Number of subnets allowed per tenant. A negative value means unlimited.
quota_subnet = 100
# Number of ports allowed per tenant. A negative value means unlimited.
quota_port = 8000
# Number of security groups allowed per tenant. A negative value means
# unlimited.
quota_security_group = 1000
# Number of security group rules allowed per tenant. A negative value means
# unlimited.
quota_security_group_rule = 1000
# Number of vips allowed per tenant. A negative value means unlimited.
# quota_vip = 10
# Number of pools allowed per tenant. A negative value means unlimited.
# quota_pool = 10
# Number of pool members allowed per tenant. A negative value means unlimited.
# The default is unlimited because a member is not a real resource consumer
# on OpenStack. However, on the back end a member is a resource consumer,
# and that is the reason why a quota is possible.
# quota_member = -1
# Number of health monitors allowed per tenant. A negative value means
# unlimited.
# The default is unlimited because a health monitor is not a real resource
# consumer on OpenStack. However, on the back end a health monitor is a
# resource consumer, and that is the reason why a quota is possible.
# quota_health_monitors = -1
# Number of routers allowed per tenant. A negative value means unlimited.
# quota_router = 10
# Number of floating IPs allowed per tenant. A negative value means unlimited.
# quota_floatingip = 50
[agent]
# Use "sudo neutron-rootwrap /etc/neutron/rootwrap.conf" to use the real
# root filter facility.
# Change to "sudo" to skip the filtering and just run the comand directly
root_helper = "sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf"
# =========== items for agent management extension =============
# Seconds between nodes reporting state to the server; should be less than
# agent_down_time, ideally half of agent_down_time or less.
report_interval = 30
# =========== end of items for agent management extension =====
[keystone_authtoken]
auth_uri = http://{{ identity_host }}:5000/v2.0
identity_uri = http://{{ identity_host }}:35357
admin_tenant_name = service
admin_user = neutron
admin_password = {{ NEUTRON_PASS }}
signing_dir = $state_path/keystone-signing
[database]
# This line MUST be changed to actually run the plugin.
# Example:
# connection = mysql://root:pass@127.0.0.1:3306/neutron
# Replace 127.0.0.1 above with the IP address of the database used by the
# main neutron server. (Leave it as is if the database runs on this host.)
# connection = sqlite:////var/lib/neutron/neutron.sqlite
connection = mysql://neutron:{{ NEUTRON_DBPASS }}@{{ db_host }}/neutron
# The SQLAlchemy connection string used to connect to the slave database
slave_connection =
# Number of database reconnection retries in the event connectivity is lost;
# set to -1 for an infinite retry count
max_retries = 10
# Database reconnection interval in seconds - if the initial connection to the
# database fails
retry_interval = 10
# Minimum number of SQL connections to keep open in a pool
min_pool_size = 1
# Maximum number of SQL connections to keep open in a pool
max_pool_size = 100
# Timeout in seconds before idle sql connections are reaped
idle_timeout = 3600
# If set, use this value for max_overflow with sqlalchemy
max_overflow = 100
# Verbosity of SQL debugging information. 0=None, 100=Everything
connection_debug = 0
# Add python stack traces to SQL as comment strings
connection_trace = False
# If set, use this value for pool_timeout with sqlalchemy
pool_timeout = 10
[service_providers]
# Specify service providers (drivers) for advanced services like loadbalancer, VPN, Firewall.
# Must be in form:
# service_provider=<service_type>:<name>:<driver>[:default]
# List of allowed service types includes LOADBALANCER, FIREWALL, VPN
# Combination of <service type> and <name> must be unique; <driver> must also be unique
# This is a multiline option; example for the default provider:
# service_provider=LOADBALANCER:name:lbaas_plugin_driver_path:default
# example of non-default provider:
# service_provider=FIREWALL:name2:firewall_driver_path
# --- Reference implementations ---
service_provider=LOADBALANCER:Haproxy:neutron.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default
service_provider=VPN:openswan:neutron.services.vpn.service_drivers.ipsec.IPsecVPNDriver:default
# To activate Radware's LBaaS driver, uncomment the service_provider line below.
# To keep HAProxy as the default LBaaS driver, remove the "default" attribute
# from that line; otherwise, comment out the HAProxy line above.
# service_provider = LOADBALANCER:Radware:neutron.services.loadbalancer.drivers.radware.driver.LoadBalancerDriver:default
# Uncomment the following line to make the 'netscaler' LBaaS provider available.
# service_provider=LOADBALANCER:NetScaler:neutron.services.loadbalancer.drivers.netscaler.netscaler_driver.NetScalerPluginDriver
# Uncomment the following line (and comment out the OpenSwan VPN line) to enable Cisco's VPN driver.
# service_provider=VPN:cisco:neutron.services.vpn.service_drivers.cisco_ipsec.CiscoCsrIPsecVPNDriver:default
# Uncomment the line below to use Embrane heleos as Load Balancer service provider.
# service_provider=LOADBALANCER:Embrane:neutron.services.loadbalancer.drivers.embrane.driver.EmbraneLbaas:default
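For context, a template like the one above is normally dropped in place by Ansible's template module with a handler restart; the sketch below is an assumption about that wiring, not code from this commit (task and handler names are invented):

---
# Hypothetical task: render the neutron.conf template shown above and
# restart neutron-server when the rendered file changes.
- name: update neutron config
  template: src=neutron.conf dest=/etc/neutron/neutron.conf backup=yes
  notify:
    - restart neutron-server

# Hypothetical matching handler:
- name: restart neutron-server
  service: name=neutron-server state=restarted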

View File

@ -0,0 +1,4 @@
# neutron --os-username=admin --os-password={{ ADMIN_PASS }} --os-tenant-name=admin --os-auth-url=http://{{ identity_host }}:35357/v2.0 net-create ext-net --shared --router:external=True
# neutron --os-username=admin --os-password={{ ADMIN_PASS }} --os-tenant-name=admin --os-auth-url=http://{{ identity_host }}:35357/v2.0 subnet-create ext-net --name ext-subnet --allocation-pool start={{ FLOATING_IP_START }},end={{ FLOATING_IP_END }} --disable-dhcp --gateway {{ EXTERNAL_NETWORK_GATEWAY }} {{ EXTERNAL_NETWORK_CIDR }}
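The two commented commands above are one-time API calls that create the external network and its subnet once the stack is up; a hedged sketch of driving the first one from a playbook (module choice, task name, and run_once are assumptions, and idempotence is not handled):

---
# Hypothetical wrapper for the net-create command above; rerunning the play
# would fail once ext-net already exists.
- name: create the shared external network
  command: >
    neutron --os-username=admin --os-password={{ ADMIN_PASS }}
    --os-tenant-name=admin
    --os-auth-url=http://{{ identity_host }}:35357/v2.0
    net-create ext-net --shared --router:external=True
  run_once: True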

View File

@ -0,0 +1,68 @@
[DEFAULT]
dhcpbridge_flagfile=/etc/nova/nova.conf
dhcpbridge=/usr/bin/nova-dhcpbridge
logdir=/var/log/nova
state_path=/var/lib/nova
lock_path=/var/lock/nova
force_dhcp_release=True
iscsi_helper=tgtadm
libvirt_use_virtio_for_bridges=True
connection_type=libvirt
root_helper=sudo nova-rootwrap /etc/nova/rootwrap.conf
verbose={{ VERBOSE }}
debug={{ DEBUG }}
ec2_private_dns_show_ip=True
api_paste_config=/etc/nova/api-paste.ini
volumes_path=/var/lib/nova/volumes
enabled_apis=ec2,osapi_compute,metadata
vif_plugging_is_fatal = false
vif_plugging_timeout = 0
auth_strategy = keystone
rpc_backend = rabbit
rabbit_host = {{ rabbit_host }}
rabbit_password = {{ RABBIT_PASS }}
my_ip = {{ internal_ip }}
vnc_enabled = True
vncserver_listen = 0.0.0.0
vncserver_proxyclient_address = {{ internal_ip }}
novncproxy_base_url = http://{{ compute_controller_host }}:6080/vnc_auto.html
novncproxy_host = {{ internal_ip }}
novncproxy_port = 6080
network_api_class = nova.network.neutronv2.api.API
linuxnet_interface_driver = nova.network.linux_net.LinuxOVSInterfaceDriver
firewall_driver = nova.virt.firewall.NoopFirewallDriver
security_group_api = neutron
instance_usage_audit = True
instance_usage_audit_period = hour
notify_on_state_change = vm_and_task_state
notification_driver = nova.openstack.common.notifier.rpc_notifier
notification_driver = ceilometer.compute.nova_notifier
[database]
# The SQLAlchemy connection string used to connect to the database
connection = mysql://nova:{{ NOVA_DBPASS }}@{{ db_host }}/nova
[keystone_authtoken]
auth_uri = http://{{ identity_host }}:5000/v2.0
identity_uri = http://{{ identity_host }}:35357
admin_tenant_name = service
admin_user = nova
admin_password = {{ NOVA_PASS }}
[glance]
host = {{ image_host }}
[neutron]
url = http://{{ network_server_host }}:9696
auth_strategy = keystone
admin_tenant_name = service
admin_username = neutron
admin_password = {{ NEUTRON_PASS }}
admin_auth_url = http://{{ identity_host }}:35357/v2.0
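Once this file is rendered, a quick manual check confirms the controller can reach nova and neutron; a rough smoke test, assuming an admin openrc credentials file exists (the path is hypothetical):

# Hypothetical post-deploy smoke test on the compute controller.
source /root/openrc    # assumed admin credentials file
nova service-list      # every nova service should report state "up"
neutron agent-list     # every agent should report alive ":-)"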

View File

@ -0,0 +1,32 @@
---
- hosts: controller
sudo: True
roles:
- common
- database
- mq
- keystone
- nova-controller
- neutron-controller
- dashboard
- cinder-controller
- glance
- hosts: network
sudo: True
roles:
- common
- neutron-network
- hosts: storage
sudo: True
roles:
- common
- cinder-volume
- hosts: compute
sudo: True
roles:
- common
- nova-compute
- neutron-compute
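A playbook like the one above runs against an inventory defining the controller, network, storage, and compute groups named in its plays; the invocation below is illustrative only, since the diff view hides the actual file names:

# Hypothetical invocation; inventory and playbook file names are placeholders.
ansible-playbook -i hosts allinone.yml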

View File

@ -0,0 +1,7 @@
---
- hosts: all
remote_user: vagrant
sudo: True
roles:
- common
- cinder-volume

View File

@ -0,0 +1 @@

View File

@ -19,6 +19,11 @@
#set $proxy_url = $proxy
#end if
#if $getVar('compass_server', '') != ""
#set srv = $getVar('compass_server','')
#else
#set srv = $getVar('server','')
#end if
cat << EOF > /etc/chef/chef_client_run.sh
#!/bin/bash
@ -74,9 +79,7 @@ local3.info @$compass_server:514
local3.info @server:514
#end if
EOL
if [ -f "/var/lib/rsyslog/chef_\\${node}_log" ]; then
rm -rf /var/lib/rsyslog/chef_\\$node_log
fi
rm -rf /var/lib/rsyslog/chef_\\$node_log
service rsyslog restart
fi
if [ -f "/etc/chef/\\$node.done" ]; then
@ -90,6 +93,7 @@ EOL
else
echo "chef-client --node-name \\$node run success" >> /var/log/chef.log 2>&1
touch /etc/chef/\\$node.done
wget -O /tmp/package_state.\\$node --post-data='{"ready": true}' --header=Content-Type:application/json "http://$srv/api/clusterhosts/\\${node}/state_internal"
fi
done
if [ \\$all_nodes_success -eq 0 ]; then

View File

@ -14,8 +14,7 @@ cat << EOF > /etc/init.d/anamon.init
# Provides: anamon.init
# Default-Start: 3 5
# Default-Stop: 0 1 2 4 6
# Required-Start:
# Should-Start: $network
# Required-Start: $network
# Short-Description: Starts the cobbler anamon boot notification program
# Description: anamon runs the first time a machine is booted after
# installation.
@ -47,3 +46,44 @@ test -d /selinux && restorecon /etc/init.d/anamon.init /usr/local/sbin/anamon
chkconfig anamon.init on
#end if
## place start-up script for updating os state
#if $getVar('compass_server', '') != ""
#set srv = $getVar('compass_server','')
#else
#set srv = $getVar('server','')
#end if
cat << EOF > /etc/init.d/set_state
#raw
#!/bin/bash
## BEGIN INIT INFO
# chkconfig: 2345 99 15
# Provides: set_state
# Default-Start: 3 5
# Default-Stop: 0 1 2 4 6
# Required-Start: $network $sshd
# Short-Description: Notifies the os installation is finished
# Description: set_state runs the first time a machine is booted after
# installation.
## END INIT INFO
#
# set_state: Notify compass server the os installation is finished
#
# chkconfig: 35 99 99
#
# description: set_state runs the first time a machine is booted after
# installation.
#
#end raw
wget -O /tmp/os_state --post-data='{"ready": true}' --header=Content-Type:application/json "http://$srv/api/hosts/${hostname}/state_internal"
chkconfig set_state off
mv /etc/init.d/set_state /tmp/set_state
EOF
## adjust permissions
chmod 755 /etc/init.d/set_state
test -d /selinux && restorecon /etc/init.d/set_state
## enable the script
chkconfig set_state on
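For reference, the notification set_state sends is a plain JSON POST, the same pattern the chef snippet earlier uses for per-node state; an equivalent request expressed with curl, with placeholder values since $srv and ${hostname} are resolved at render time:

# Equivalent of the wget call above; server and host names are placeholders.
curl -X POST -H 'Content-Type: application/json' \
     -d '{"ready": true}' \
     "http://compass-server.example.com/api/hosts/host-001/state_internal"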

Some files were not shown because too many files have changed in this diff.