Merge pull request #1 from jon-proulx/master

MIT CSAIL Configs
Stephen Jahl 2015-03-13 16:50:51 -04:00
commit 12dcf62bdc
111 changed files with 10555 additions and 0 deletions

MIT_CSAIL/README.md Normal file

@@ -0,0 +1,92 @@
# MIT CSAIL OpenStack Configurations
Our cloud runs a fairly simple non-HA configuration across
approximately 75 hypervisor nodes.
This is primarily a private research cloud for
<http://www.csail.mit.edu>, though it also hosts web and other
'production' services.
We initially installed Essex on Ubuntu 12.04 and have progressively
updated. As of this writing, all nodes are Ubuntu 14.04 running Juno.
There is a single controller node which also serves as the network
node, MySQL DB server, and Cinder volume server (the actual volumes
are provided by a mix of EqualLogic SAN and Ceph RBD drivers).
# Caveat Hackor
This is *as-running* config that has evolved over six OpenStack
releases. It is not claimed to be clean or necessarily 100% best
practice. In fact, it is nearly guaranteed that some options in the
config files are deprecated, possibly removed in the current release,
or otherwise irrelevant to the actual functioning of the system.
Read at your own risk.
# Directory Layout
There's a first-level directory for each node type -- currently just
controller and compute, but we're hoping to break services out and
move to an HA controller setup, probably in the Kilo time frame.
Below that, the directory structure mimics the layout from the root
directory of that node type; for example, nova.conf on the compute
node is `compute/etc/nova/nova.conf`.
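As a sketch of that convention (the paths below illustrate the layout,
not a complete index of this repo):

```
controller/etc/neutron/neutron.conf
controller/etc/neutron/plugins/ml2/ml2_conf.ini
compute/etc/nova/nova.conf
compute/etc/neutron/plugins/ml2/ml2_conf.ini
```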
# Notes on OpenStack Projects Used
## Keystone
Uses UUID tokens with a MySQL identity backend and a memcached token
backend.
This is run through Apache mod_wsgi.
Primary access is via HTTPS on ports 5001 and 35358, though Keystone
still listens via HTTP on the standard ports (5000 and 35357); those
HTTP endpoints are still buried in some internal URLs.
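As a minimal sketch of how that token setup is expressed in a Juno-era
keystone.conf (stock Juno option paths; the values shown are
illustrative, not copied from our files):

```ini
[identity]
# SQL (MySQL) identity backend
driver = keystone.identity.backends.sql.Identity

[token]
# UUID tokens, persisted in memcached
provider = keystone.token.providers.uuid.Provider
driver = keystone.token.persistence.backends.memcache.Token

[memcache]
# assumed memcached endpoint; adjust to the actual server
servers = localhost:11211
```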
## Glance
The APIs run via the eventlet server.
Some historical images use the file backend; new images currently
default to Ceph RBD.
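A sketch of the corresponding Juno glance-api.conf settings (the pool
and user names here are assumptions for illustration):

```ini
[DEFAULT]
default_store = rbd

[glance_store]
# keep the filesystem store enabled for the historical file-backed images
stores = rbd,file
rbd_store_ceph_conf = /etc/ceph/ceph.conf
rbd_store_pool = images
rbd_store_user = glance
```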
## Nova
Libvirt/KVM using Ceph RBD for ephemeral storage.
Live migration is implemented using RSA-key-based SSH.
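A minimal sketch of the matching Juno nova.conf [libvirt] settings
(pool, user, and UUID values are placeholders, not our actual config):

```ini
[libvirt]
virt_type = kvm
# Ceph RBD-backed ephemeral disks
images_type = rbd
images_rbd_pool = vms
images_rbd_ceph_conf = /etc/ceph/ceph.conf
rbd_user = nova
rbd_secret_uuid = <LIBVIRT_SECRET_UUID>
# live migration over ssh using pre-shared RSA keys
live_migration_uri = qemu+ssh://nova@%s/system
```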
## Cinder
A single controller fronts both EqualLogic SAN storage and Ceph RBD
storage.
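In a Juno cinder.conf this kind of dual-backend setup is typically
expressed as below; treat it as a hedged sketch (backend section
names, SAN address, and pool names are placeholders):

```ini
[DEFAULT]
enabled_backends = eqlx,ceph-rbd

[eqlx]
volume_driver = cinder.volume.drivers.eqlx.DellEQLSanISCSIDriver
san_ip = <SAN_IP>
san_login = <SAN_USER>
san_password = <REDACTED>
volume_backend_name = equallogic

[ceph-rbd]
volume_driver = cinder.volume.drivers.rbd.RBDDriver
rbd_ceph_conf = /etc/ceph/ceph.conf
rbd_pool = volumes
volume_backend_name = ceph
```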
## Neutron
ML2/OVS with shared VLAN-based provider network(s) using public IPv4
addressing. Projects may create private GRE-based overlay networks.
The shared provider net has a jumbo-frame 9000 MTU inside instances.
No L3 agent -- routing is provided by core, non-OpenStack
infrastructure.
No floating IPs -- fixed IPs are public, and we provide the ability to
assign specific IPs at boot. IPAM and DNS are handled with legacy
tools outside OpenStack; for more on this, see our user-facing
documentation: <http://tig.csail.mit.edu/wiki/TIG/OpenStack#Network>
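A common way to deliver the 9000 MTU mentioned above is to have the
DHCP agent's dnsmasq push DHCP option 26 (interface MTU); the sketch
below assumes that mechanism and these file paths, which are
conventions rather than something shown in this repo:

```ini
# dhcp_agent.ini: point dnsmasq at an extra config file
[DEFAULT]
dnsmasq_config_file = /etc/neutron/dnsmasq-neutron.conf
```

```
# /etc/neutron/dnsmasq-neutron.conf
# advertise a 9000-byte MTU to instances (DHCP option 26)
dhcp-option-force=26,9000
```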
## Horizon
Mostly stock, with a small extension to allow specifying a fixed IP at
instance launch. This is a bit of a hack and only works when launching
a single instance with a single vNIC, but it covers our needs.
## Heat
Added to our world during the Icehouse time frame; it's there and it
mostly works, but it is not yet much used.


@@ -0,0 +1,30 @@
[composite:neutron]
use = egg:Paste#urlmap
/: neutronversions
/v2.0: neutronapi_v2_0
[composite:neutronapi_v2_0]
use = call:neutron.auth:pipeline_factory
noauth = request_id catch_errors extensions neutronapiapp_v2_0
keystone = request_id catch_errors authtoken keystonecontext extensions neutronapiapp_v2_0
[filter:request_id]
paste.filter_factory = neutron.openstack.common.middleware.request_id:RequestIdMiddleware.factory
[filter:catch_errors]
paste.filter_factory = neutron.openstack.common.middleware.catch_errors:CatchErrorsMiddleware.factory
[filter:keystonecontext]
paste.filter_factory = neutron.auth:NeutronKeystoneContext.factory
[filter:authtoken]
paste.filter_factory = keystonemiddleware.auth_token:filter_factory
[filter:extensions]
paste.filter_factory = neutron.api.extensions:plugin_aware_extension_middleware_factory
[app:neutronversions]
paste.app_factory = neutron.api.versions:Versions.factory
[app:neutronapiapp_v2_0]
paste.app_factory = neutron.api.v2.router:APIRouter.factory


@@ -0,0 +1,3 @@
[fwaas]
#driver = neutron.services.firewall.drivers.linux.iptables_fwaas.IptablesFwaasDriver
#enabled = True


@@ -0,0 +1,102 @@
[DEFAULT]
# Show debugging output in log (sets DEBUG log level output)
# debug = False
# L3 requires that an interface driver be set. Choose the one that best
# matches your plugin.
# interface_driver =
# Example of interface_driver option for OVS based plugins (OVS, Ryu, NEC)
# that supports L3 agent
# interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
# Use veth for an OVS interface or not.
# Support kernels with limited namespace support
# (e.g. RHEL 6.5) so long as ovs_use_veth is set to True.
# ovs_use_veth = False
# Example of interface_driver option for LinuxBridge
# interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver
# Allow overlapping IP (Must have kernel build with CONFIG_NET_NS=y and
# iproute2 package that supports namespaces).
# use_namespaces = True
# If use_namespaces is set to False then the agent can only configure one router.
# This is done by setting the specific router_id.
# router_id =
# When external_network_bridge is set, each L3 agent can be associated
# with no more than one external network. This value should be set to the UUID
# of that external network. To allow the L3 agent to support multiple external
# networks, both the external_network_bridge and gateway_external_network_id
# must be left empty.
# gateway_external_network_id =
# Indicates that this L3 agent should also handle routers that do not have
# an external network gateway configured. This option should be True only
# for a single agent in a Neutron deployment, and may be False for all agents
# if all routers must have an external network gateway
# handle_internal_only_routers = True
# Name of bridge used for external network traffic. This should be set to
# an empty value for the Linux bridge. When this parameter is set, each L3 agent
# can be associated with no more than one external network.
# external_network_bridge = br-ex
# TCP Port used by Neutron metadata server
# metadata_port = 9697
# Send this many gratuitous ARPs for HA setup. Set it below or equal to 0
# to disable this feature.
# send_arp_for_ha = 3
# seconds between re-sync routers' data if needed
# periodic_interval = 40
# seconds to start to sync routers' data after
# starting agent
# periodic_fuzzy_delay = 5
# enable_metadata_proxy, which is true by default, can be set to False
# if the Nova metadata server is not available
# enable_metadata_proxy = True
# Location of Metadata Proxy UNIX domain socket
# metadata_proxy_socket = $state_path/metadata_proxy
# router_delete_namespaces, which is false by default, can be set to True if
# namespaces can be deleted cleanly on the host running the L3 agent.
# Do not enable this until you understand the problem with the Linux iproute
# utility mentioned in https://bugs.launchpad.net/neutron/+bug/1052535 and
# you are sure that your version of iproute does not suffer from the problem.
# If True, namespaces will be deleted when a router is destroyed.
# router_delete_namespaces = False
# Timeout for ovs-vsctl commands.
# If the timeout expires, ovs commands will fail with ALARMCLOCK error.
# ovs_vsctl_timeout = 10
# The working mode for the agent. Allowed values are:
# - legacy: this preserves the existing behavior where the L3 agent is
# deployed on a centralized networking node to provide L3 services
# like DNAT, and SNAT. Use this mode if you do not want to adopt DVR.
# - dvr: this mode enables DVR functionality, and must be used for an L3
# agent that runs on a compute host.
# - dvr_snat: this enables centralized SNAT support in conjunction with
# DVR. This mode must be used for an L3 agent running on a centralized
# node (or in single-host deployments, e.g. devstack).
# agent_mode = legacy
# Location to store keepalived and all HA configurations
# ha_confs_path = $state_path/ha_confs
# VRRP authentication type AH/PASS
# ha_vrrp_auth_type = PASS
# VRRP authentication password
# ha_vrrp_auth_password =
# The advertisement interval in seconds
# ha_vrrp_advert_int = 2


@@ -0,0 +1,494 @@
[DEFAULT]
# Print more verbose output (set logging level to INFO instead of default WARNING level).
# verbose = False
verbose = True
# Print debugging output (set logging level to DEBUG instead of default WARNING level).
# debug = False
debug = True
# Where to store Neutron state files. This directory must be writable by the
# user executing the agent.
state_path = /var/lib/neutron
# Where to store lock files
lock_path = $state_path/lock
# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s
# log_date_format = %Y-%m-%d %H:%M:%S
# use_syslog -> syslog
# log_file and log_dir -> log_dir/log_file
# (not log_file) and log_dir -> log_dir/{binary_name}.log
# use_stderr -> stderr
# (not use_stderr) and (not log_file) -> stdout
# publish_errors -> notification system
# use_syslog = False
use_syslog = False
# syslog_log_facility = LOG_USER
# use_stderr = True
# log_file =
# log_dir =
# publish_errors = False
# Address to bind the API server to
# bind_host = 0.0.0.0
bind_host = 0.0.0.0
# Port to bind the API server to
# bind_port = 9696
bind_port = 9696
# Path to the extensions. Note that this can be a colon-separated list of
# paths. For example:
# api_extensions_path = extensions:/path/to/more/extensions:/even/more/extensions
# The __path__ of neutron.extensions is appended to this, so if your
# extensions are in there you don't need to specify them here
# api_extensions_path =
# (StrOpt) Neutron core plugin entrypoint to be loaded from the
# neutron.core_plugins namespace. See setup.cfg for the entrypoint names of the
# plugins included in the neutron source distribution. For compatibility with
# previous versions, the class name of a plugin can be specified instead of its
# entrypoint name.
#
core_plugin = ml2
# Example: core_plugin = ml2
# (ListOpt) List of service plugin entrypoints to be loaded from the
# neutron.service_plugins namespace. See setup.cfg for the entrypoint names of
# the plugins included in the neutron source distribution. For compatibility
# with previous versions, the class name of a plugin can be specified instead
# of its entrypoint name.
#
# service_plugins =
# Example: service_plugins = router,firewall,lbaas,vpnaas,metering
# Paste configuration file
# api_paste_config = api-paste.ini
# The strategy to be used for auth.
# Supported values are 'keystone'(default), 'noauth'.
# auth_strategy = keystone
auth_strategy = keystone
# Base MAC address. The first 3 octets will remain unchanged. If the
# 4th octet is not 00, it will also be used. The others will be
# randomly generated.
# 3 octet
# base_mac = fa:16:3e:00:00:00
base_mac = fa:16:3e:00:00:00
# 4 octet
# base_mac = fa:16:3e:4f:00:00
# Maximum amount of retries to generate a unique MAC address
# mac_generation_retries = 16
mac_generation_retries = 16
# DHCP Lease duration (in seconds)
# dhcp_lease_duration = 86400
dhcp_lease_duration = 86400
# Allow sending resource operation notification to DHCP agent
# dhcp_agent_notification = True
dhcp_agent_notification = True
# Enable or disable bulk create/update/delete operations
# allow_bulk = True
allow_bulk = True
# Enable or disable pagination
# allow_pagination = False
allow_pagination = False
# Enable or disable sorting
# allow_sorting = False
allow_sorting = False
# Enable or disable overlapping IPs for subnets
# Attention: the following parameter MUST be set to False if Neutron is
# being used in conjunction with nova security groups
# allow_overlapping_ips = False
allow_overlapping_ips = False
# Ensure that configured gateway is on subnet
# force_gateway_on_subnet = False
# RPC configuration options. Defined in rpc __init__
# The messaging module to use, defaults to kombu.
# rpc_backend = neutron.openstack.common.rpc.impl_kombu
rpc_backend = neutron.openstack.common.rpc.impl_kombu
# Size of RPC thread pool
# rpc_thread_pool_size = 64
# Size of RPC connection pool
# rpc_conn_pool_size = 30
# Seconds to wait for a response from call or multicall
# rpc_response_timeout = 60
# Seconds to wait before a cast expires (TTL). Only supported by impl_zmq.
# rpc_cast_timeout = 30
# Modules of exceptions that are permitted to be recreated
# upon receiving exception data from an rpc call.
# allowed_rpc_exception_modules = neutron.openstack.common.exception, nova.exception
# AMQP exchange to connect to if using RabbitMQ or QPID
# control_exchange = neutron
control_exchange = neutron
# If passed, use a fake RabbitMQ provider
# fake_rabbit = False
# Configuration options if sending notifications via kombu rpc (these are
# the defaults)
# SSL version to use (valid only if SSL enabled)
# kombu_ssl_version =
# SSL key file (valid only if SSL enabled)
# kombu_ssl_keyfile =
# SSL cert file (valid only if SSL enabled)
# kombu_ssl_certfile =
# SSL certification authority file (valid only if SSL enabled)
# kombu_ssl_ca_certs =
# IP address of the RabbitMQ installation
# rabbit_host = localhost
rabbit_host = <CONTROLLER>.csail.mit.edu
# Password of the RabbitMQ server
# rabbit_password = guest
rabbit_password = <REDACTED>
# Port where RabbitMQ server is running/listening
# rabbit_port = 5672
rabbit_port = 5672
# RabbitMQ single or HA cluster (host:port pairs, e.g.: host1:5672, host2:5672)
# rabbit_hosts is defaulted to '$rabbit_host:$rabbit_port'
# rabbit_hosts = localhost:5672
rabbit_hosts = <CONTROLLER>.csail.mit.edu:5672
# User ID used for RabbitMQ connections
# rabbit_userid = guest
rabbit_userid = REDACTED
# Location of a virtual RabbitMQ installation.
# rabbit_virtual_host = /
rabbit_virtual_host = /
# Maximum retries with trying to connect to RabbitMQ
# (the default of 0 implies an infinite retry count)
# rabbit_max_retries = 0
# RabbitMQ connection retry interval
# rabbit_retry_interval = 1
# Use HA queues in RabbitMQ (x-ha-policy: all). You need to
# wipe RabbitMQ database when changing this option. (boolean value)
# rabbit_ha_queues = false
rabbit_ha_queues = False
# QPID
# rpc_backend=neutron.openstack.common.rpc.impl_qpid
# Qpid broker hostname
# qpid_hostname = localhost
# Qpid broker port
# qpid_port = 5672
# Qpid single or HA cluster (host:port pairs, e.g.: host1:5672, host2:5672)
# qpid_hosts is defaulted to '$qpid_hostname:$qpid_port'
# qpid_hosts = localhost:5672
# Username for qpid connection
# qpid_username = ''
# Password for qpid connection
# qpid_password = ''
# Space separated list of SASL mechanisms to use for auth
# qpid_sasl_mechanisms = ''
# Seconds between connection keepalive heartbeats
# qpid_heartbeat = 60
# Transport to use, either 'tcp' or 'ssl'
# qpid_protocol = tcp
# Disable Nagle algorithm
# qpid_tcp_nodelay = True
# ZMQ
# rpc_backend=neutron.openstack.common.rpc.impl_zmq
# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP.
# The "host" option should point or resolve to this address.
# rpc_zmq_bind_address = *
# ============ Notification System Options =====================
# Notifications can be sent when network/subnet/port are created, updated or deleted.
# There are three methods of sending notifications: logging (via the
# log_file directive), rpc (via a message queue) and
# noop (no notifications sent, the default)
# Notification_driver can be defined multiple times
# Do nothing driver
# notification_driver = neutron.openstack.common.notifier.no_op_notifier
# Logging driver
# notification_driver = neutron.openstack.common.notifier.log_notifier
# RPC driver.
notification_driver = neutron.openstack.common.notifier.rpc_notifier
# default_notification_level is used to form actual topic name(s) or to set logging level
# default_notification_level = INFO
# default_publisher_id is a part of the notification payload
# host = myhost.com
# default_publisher_id = $host
# Defined in rpc_notifier, can be comma separated values.
# The actual topic names will be %s.%(default_notification_level)s
# notification_topics = notifications
# Default maximum number of items returned in a single response,
# a value of 'infinite' or any negative value means no max limit;
# otherwise the value must be greater than 0. If the number of items
# requested is greater than pagination_max_limit, the server will just
# return pagination_max_limit items.
# pagination_max_limit = -1
# Maximum number of DNS nameservers per subnet
# max_dns_nameservers = 5
# Maximum number of host routes per subnet
# max_subnet_host_routes = 20
# Maximum number of fixed ips per port
# max_fixed_ips_per_port = 5
# =========== items for agent management extension =============
# Seconds to regard the agent as down; should be at least twice
# report_interval, to be sure the agent is down for good
# agent_down_time = 75
# =========== end of items for agent management extension =====
# =========== items for agent scheduler extension =============
# Driver to use for scheduling network to DHCP agent
# network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.ChanceScheduler
# Driver to use for scheduling router to a default L3 agent
# router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.ChanceScheduler
# Driver to use for scheduling a loadbalancer pool to an lbaas agent
# loadbalancer_pool_scheduler_driver = neutron.services.loadbalancer.agent_scheduler.ChanceScheduler
# Allow auto scheduling networks to DHCP agent. It will schedule non-hosted
# networks to the first DHCP agent which sends a get_active_networks message
# to the neutron server
# network_auto_schedule = True
# Allow auto scheduling routers to L3 agent. It will schedule non-hosted
# routers to the first L3 agent which sends a sync_routers message to the
# neutron server
# router_auto_schedule = True
# Number of DHCP agents scheduled to host a network. This enables redundant
# DHCP agents for configured networks.
# dhcp_agents_per_network = 1
dhcp_agents_per_network = 1
# =========== end of items for agent scheduler extension =====
# =========== WSGI parameters related to the API server ==============
# Number of separate worker processes to spawn. The default, 0, runs the
# worker thread in the current process. Greater than 0 launches that number of
# child processes as workers. The parent process manages them.
# api_workers = 0
# Number of separate RPC worker processes to spawn. The default, 0, runs the
# worker thread in the current process. Greater than 0 launches that number of
# child processes as RPC workers. The parent process manages them.
# This feature is experimental until issues are addressed and testing has been
# enabled for various plugins for compatibility.
# rpc_workers = 0
# Sets the value of TCP_KEEPIDLE in seconds to use for each server socket when
# starting API server. Not supported on OS X.
# tcp_keepidle = 600
# Number of seconds to keep retrying to listen
# retry_until_window = 30
# Number of backlog requests to configure the socket with.
# backlog = 4096
# Max header line to accommodate large tokens
# max_header_line = 16384
# Enable SSL on the API server
# use_ssl = False
use_ssl = False
# Certificate file to use when starting API server securely
# ssl_cert_file = /path/to/certfile
# Private key file to use when starting API server securely
# ssl_key_file = /path/to/keyfile
# CA certificate file to use when starting API server securely to
# verify connecting clients. This is an optional parameter only required if
# API clients need to authenticate to the API server using SSL certificates
# signed by a trusted CA
# ssl_ca_file = /path/to/cafile
# ======== end of WSGI parameters related to the API server ==========
# ======== neutron nova interactions ==========
# Send notification to nova when port status is active.
# notify_nova_on_port_status_changes = True
# Send notifications to nova when port data (fixed_ips/floatingips) change
# so nova can update its cache.
# notify_nova_on_port_data_changes = True
# URL for connection to nova (Only supports one nova region currently).
# nova_url = http://127.0.0.1:8774/v2
# Name of nova region to use. Useful if keystone manages more than one region
# nova_region_name =
# Username for connection to nova in admin context
# nova_admin_username =
# The uuid of the admin nova tenant
# nova_admin_tenant_id =
# Password for connection to nova in admin context.
# nova_admin_password =
# Authorization URL for connection to nova in admin context.
# nova_admin_auth_url =
# Number of seconds between sending events to nova if there are any events to send
# send_events_interval = 2
# ======== end of neutron nova interactions ==========
log_dir=/var/log/neutron
rabbit_use_ssl=False
kombu_reconnect_delay=1.0
[quotas]
# Default driver to use for quota checks
# quota_driver = neutron.db.quota_db.DbQuotaDriver
# Resource name(s) that are supported in quota features
# quota_items = network,subnet,port
# Default number of resource allowed per tenant. A negative value means
# unlimited.
# default_quota = -1
# Number of networks allowed per tenant. A negative value means unlimited.
# quota_network = 10
# Number of subnets allowed per tenant. A negative value means unlimited.
# quota_subnet = 10
# Number of ports allowed per tenant. A negative value means unlimited.
# quota_port = 50
# Number of security groups allowed per tenant. A negative value means
# unlimited.
# quota_security_group = 10
# Number of security group rules allowed per tenant. A negative value means
# unlimited.
# quota_security_group_rule = 100
# Number of vips allowed per tenant. A negative value means unlimited.
# quota_vip = 10
# Number of pools allowed per tenant. A negative value means unlimited.
# quota_pool = 10
# Number of pool members allowed per tenant. A negative value means unlimited.
# The default is unlimited because a member is not a real resource consumer
# on OpenStack. However, on the back end a member is a resource consumer,
# and that is the reason why a quota is possible.
# quota_member = -1
# Number of health monitors allowed per tenant. A negative value means
# unlimited.
# The default is unlimited because a health monitor is not a real resource
# consumer on OpenStack. However, on the back end a health monitor is a
# resource consumer, and that is the reason why a quota is possible.
# quota_health_monitors = -1
# Number of routers allowed per tenant. A negative value means unlimited.
# quota_router = 10
# Number of floating IPs allowed per tenant. A negative value means unlimited.
# quota_floatingip = 50
[agent]
# Use "sudo neutron-rootwrap /etc/neutron/rootwrap.conf" to use the real
# root filter facility.
# Change to "sudo" to skip the filtering and just run the comand directly
root_helper = sudo neutron-rootwrap /etc/neutron/rootwrap.conf
# =========== items for agent management extension =============
# seconds between nodes reporting state to server; should be less than
# agent_down_time, best if it is half or less than agent_down_time
# report_interval = 30
report_interval = 30
# =========== end of items for agent management extension =====
[keystone_authtoken]
auth_host = 127.0.0.1
auth_port = 35357
auth_protocol = http
admin_tenant_name = %SERVICE_TENANT_NAME%
admin_user = %SERVICE_USER%
admin_password = %SERVICE_PASSWORD%
signing_dir = $state_path/keystone-signing
[database]
# This line MUST be changed to actually run the plugin.
# Example:
# connection = mysql://root:pass@127.0.0.1:3306/neutron
# Replace 127.0.0.1 above with the IP address of the database used by the
# main neutron server. (Leave it as is if the database runs on this host.)
connection = sqlite:////var/lib/neutron/neutron.sqlite
# The SQLAlchemy connection string used to connect to the slave database
# slave_connection =
# Database reconnection retry times - in event connectivity is lost
# set to -1 implies an infinite retry count
# max_retries = 10
# Database reconnection interval in seconds - if the initial connection to the
# database fails
# retry_interval = 10
# Minimum number of SQL connections to keep open in a pool
# min_pool_size = 1
# Maximum number of SQL connections to keep open in a pool
# max_pool_size = 10
# Timeout in seconds before idle sql connections are reaped
# idle_timeout = 3600
# If set, use this value for max_overflow with sqlalchemy
# max_overflow = 20
# Verbosity of SQL debugging information. 0=None, 100=Everything
# connection_debug = 0
# Add python stack traces to SQL as comment strings
# connection_trace = False
# If set, use this value for pool_timeout with sqlalchemy
# pool_timeout = 10
[service_providers]
# Specify service providers (drivers) for advanced services like loadbalancer, VPN, Firewall.
# Must be in form:
# service_provider=<service_type>:<name>:<driver>[:default]
# List of allowed service types includes LOADBALANCER, FIREWALL, VPN
# Combination of <service type> and <name> must be unique; <driver> must also be unique
# This is a multiline option; example for the default provider:
# service_provider=LOADBALANCER:name:lbaas_plugin_driver_path:default
# example of non-default provider:
# service_provider=FIREWALL:name2:firewall_driver_path
# --- Reference implementations ---
service_provider=LOADBALANCER:Haproxy:neutron.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default
service_provider=VPN:openswan:neutron.services.vpn.service_drivers.ipsec.IPsecVPNDriver:default
# In order to activate Radware's lbaas driver you need to uncomment the next line.
# If you want to keep the HA Proxy as the default lbaas driver, remove the attribute default from the line below.
# Otherwise comment the HA Proxy line
# service_provider = LOADBALANCER:Radware:neutron.services.loadbalancer.drivers.radware.driver.LoadBalancerDriver:default
# uncomment the following line to make the 'netscaler' LBaaS provider available.
# service_provider=LOADBALANCER:NetScaler:neutron.services.loadbalancer.drivers.netscaler.netscaler_driver.NetScalerPluginDriver
# Uncomment the following line (and comment out the OpenSwan VPN line) to enable Cisco's VPN driver.
# service_provider=VPN:cisco:neutron.services.vpn.service_drivers.cisco_ipsec.CiscoCsrIPsecVPNDriver:default
# Uncomment the line below to use Embrane heleos as Load Balancer service provider.
# service_provider=LOADBALANCER:Embrane:neutron.services.loadbalancer.drivers.embrane.driver.EmbraneLbaas:default


@@ -0,0 +1,637 @@
[DEFAULT]
# Print more verbose output (set logging level to INFO instead of default WARNING level).
# verbose = False
# =========Start Global Config Option for Distributed L3 Router===============
# Setting the "router_distributed" flag to "True" will default to the creation
# of distributed tenant routers. The admin can override this flag by specifying
# the type of the router on the create request (admin-only attribute). Default
# value is "False" to support legacy mode (centralized) routers.
#
# router_distributed = False
#
# ===========End Global Config Option for Distributed L3 Router===============
# Print debugging output (set logging level to DEBUG instead of default WARNING level).
# debug = False
# Where to store Neutron state files. This directory must be writable by the
# user executing the agent.
# state_path = /var/lib/neutron
# Where to store lock files
lock_path = $state_path/lock
# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s
# log_date_format = %Y-%m-%d %H:%M:%S
# use_syslog -> syslog
# log_file and log_dir -> log_dir/log_file
# (not log_file) and log_dir -> log_dir/{binary_name}.log
# use_stderr -> stderr
# (not use_stderr) and (not log_file) -> stdout
# publish_errors -> notification system
# use_syslog = False
# syslog_log_facility = LOG_USER
# use_stderr = True
# log_file =
# log_dir =
# publish_errors = False
# Address to bind the API server to
# bind_host = 0.0.0.0
# Port to bind the API server to
# bind_port = 9696
# Path to the extensions. Note that this can be a colon-separated list of
# paths. For example:
# api_extensions_path = extensions:/path/to/more/extensions:/even/more/extensions
# The __path__ of neutron.extensions is appended to this, so if your
# extensions are in there you don't need to specify them here
# api_extensions_path =
# (StrOpt) Neutron core plugin entrypoint to be loaded from the
# neutron.core_plugins namespace. See setup.cfg for the entrypoint names of the
# plugins included in the neutron source distribution. For compatibility with
# previous versions, the class name of a plugin can be specified instead of its
# entrypoint name.
#
core_plugin = ml2
# Example: core_plugin = ml2
# (ListOpt) List of service plugin entrypoints to be loaded from the
# neutron.service_plugins namespace. See setup.cfg for the entrypoint names of
# the plugins included in the neutron source distribution. For compatibility
# with previous versions, the class name of a plugin can be specified instead
# of its entrypoint name.
#
# service_plugins =
# Example: service_plugins = router,firewall,lbaas,vpnaas,metering
# Paste configuration file
# api_paste_config = api-paste.ini
# The strategy to be used for auth.
# Supported values are 'keystone'(default), 'noauth'.
# auth_strategy = keystone
# Base MAC address. The first 3 octets will remain unchanged. If the
# 4th octet is not 00, it will also be used. The others will be
# randomly generated.
# 3 octet
# base_mac = fa:16:3e:00:00:00
# 4 octet
# base_mac = fa:16:3e:4f:00:00
# DVR Base MAC address. The first 3 octets will remain unchanged. If the
# 4th octet is not 00, it will also be used. The others will be randomly
# generated. The 'dvr_base_mac' *must* be different from 'base_mac' to
# avoid mixing them up with MAC's allocated for tenant ports.
# A 4 octet example would be dvr_base_mac = fa:16:3f:4f:00:00
# The default is 3 octet
# dvr_base_mac = fa:16:3f:00:00:00
# Maximum amount of retries to generate a unique MAC address
# mac_generation_retries = 16
# DHCP Lease duration (in seconds). Use -1 to
# tell dnsmasq to use infinite lease times.
# dhcp_lease_duration = 86400
# Allow sending resource operation notification to DHCP agent
# dhcp_agent_notification = True
# Enable or disable bulk create/update/delete operations
# allow_bulk = True
# Enable or disable pagination
# allow_pagination = False
# Enable or disable sorting
# allow_sorting = False
# Enable or disable overlapping IPs for subnets
# Attention: the following parameter MUST be set to False if Neutron is
# being used in conjunction with nova security groups
# allow_overlapping_ips = False
# Ensure that configured gateway is on subnet. For IPv6, validate only if
# gateway is not a link local address. Deprecated, to be removed during the
# K release, at which point the check will be mandatory.
# force_gateway_on_subnet = True
# Default maximum number of items returned in a single response,
# a value of 'infinite' or any negative value means no max limit;
# otherwise the value must be greater than 0. If the number of items
# requested is greater than pagination_max_limit, the server will just
# return pagination_max_limit items.
# pagination_max_limit = -1
# Maximum number of DNS nameservers per subnet
# max_dns_nameservers = 5
# Maximum number of host routes per subnet
# max_subnet_host_routes = 20
# Maximum number of fixed ips per port
# max_fixed_ips_per_port = 5
# Maximum number of routes per router
# max_routes = 30
# =========== items for agent management extension =============
# Seconds to regard the agent as down; should be at least twice
# report_interval, to be sure the agent is down for good
# agent_down_time = 75
# =========== end of items for agent management extension =====
# =========== items for agent scheduler extension =============
# Driver to use for scheduling network to DHCP agent
# network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.ChanceScheduler
# Driver to use for scheduling router to a default L3 agent
# router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.ChanceScheduler
# Driver to use for scheduling a loadbalancer pool to an lbaas agent
# loadbalancer_pool_scheduler_driver = neutron.services.loadbalancer.agent_scheduler.ChanceScheduler
# Allow auto scheduling networks to DHCP agent. It will schedule non-hosted
# networks to the first DHCP agent which sends a get_active_networks message
# to the neutron server
# network_auto_schedule = True
# Allow auto scheduling routers to L3 agent. It will schedule non-hosted
# routers to the first L3 agent which sends a sync_routers message to the
# neutron server
# router_auto_schedule = True
# Allow automatic rescheduling of routers from dead L3 agents with
# admin_state_up set to True to alive agents.
# allow_automatic_l3agent_failover = False
# Number of DHCP agents scheduled to host a network. This enables redundant
# DHCP agents for configured networks.
# dhcp_agents_per_network = 1
# =========== end of items for agent scheduler extension =====
# =========== items for l3 extension ==============
# Enable high availability for virtual routers.
# l3_ha = False
#
# Maximum number of l3 agents which a HA router will be scheduled on. If it
# is set to 0 the router will be scheduled on every agent.
# max_l3_agents_per_router = 3
#
# Minimum number of l3 agents which a HA router will be scheduled on. The
# default value is 2.
# min_l3_agents_per_router = 2
#
# CIDR of the administrative network if HA mode is enabled
# l3_ha_net_cidr = 169.254.192.0/18
# =========== end of items for l3 extension =======
# =========== WSGI parameters related to the API server ==============
# Number of separate worker processes to spawn. The default, 0, runs the
# worker thread in the current process. Greater than 0 launches that number of
# child processes as workers. The parent process manages them.
# api_workers = 0
# Number of separate RPC worker processes to spawn. The default, 0, runs the
# worker thread in the current process. Greater than 0 launches that number of
# child processes as RPC workers. The parent process manages them.
# This feature is experimental until issues are addressed and testing has been
# enabled for various plugins for compatibility.
# rpc_workers = 0
# Sets the value of TCP_KEEPIDLE in seconds to use for each server socket when
# starting API server. Not supported on OS X.
# tcp_keepidle = 600
# Number of seconds to keep retrying to listen
# retry_until_window = 30
# Number of backlog requests to configure the socket with.
# backlog = 4096
# Max header line to accommodate large tokens
# max_header_line = 16384
# Enable SSL on the API server
# use_ssl = False
# Certificate file to use when starting API server securely
# ssl_cert_file = /path/to/certfile
# Private key file to use when starting API server securely
# ssl_key_file = /path/to/keyfile
# CA certificate file to use when starting API server securely to
# verify connecting clients. This is an optional parameter only required if
# API clients need to authenticate to the API server using SSL certificates
# signed by a trusted CA
# ssl_ca_file = /path/to/cafile
# ======== end of WSGI parameters related to the API server ==========
# ======== neutron nova interactions ==========
# Send notification to nova when port status is active.
# notify_nova_on_port_status_changes = True
# Send notifications to nova when port data (fixed_ips/floatingips) change
# so nova can update its cache.
# notify_nova_on_port_data_changes = True
# URL for connection to nova (Only supports one nova region currently).
# nova_url = http://127.0.0.1:8774/v2
# Name of nova region to use. Useful if keystone manages more than one region
# nova_region_name =
# Username for connection to nova in admin context
# nova_admin_username =
# The uuid of the admin nova tenant
# nova_admin_tenant_id =
# Password for connection to nova in admin context.
# nova_admin_password =
# Authorization URL for connection to nova in admin context.
# nova_admin_auth_url =
# CA file for novaclient to verify server certificates
# nova_ca_certificates_file =
# Boolean to control ignoring SSL errors on the nova url
# nova_api_insecure = False
# Number of seconds between sending events to nova if there are any events to send
# send_events_interval = 2
# ======== end of neutron nova interactions ==========
#
# Options defined in oslo.messaging
#
# Use durable queues in amqp. (boolean value)
# Deprecated group/name - [DEFAULT]/rabbit_durable_queues
#amqp_durable_queues=false
# Auto-delete queues in amqp. (boolean value)
#amqp_auto_delete=false
# Size of RPC connection pool. (integer value)
#rpc_conn_pool_size=30
# Qpid broker hostname. (string value)
#qpid_hostname=localhost
# Qpid broker port. (integer value)
#qpid_port=5672
# Qpid HA cluster host:port pairs. (list value)
#qpid_hosts=$qpid_hostname:$qpid_port
# Username for Qpid connection. (string value)
#qpid_username=
# Password for Qpid connection. (string value)
#qpid_password=
# Space separated list of SASL mechanisms to use for auth.
# (string value)
#qpid_sasl_mechanisms=
# Seconds between connection keepalive heartbeats. (integer
# value)
#qpid_heartbeat=60
# Transport to use, either 'tcp' or 'ssl'. (string value)
#qpid_protocol=tcp
# Whether to disable the Nagle algorithm. (boolean value)
#qpid_tcp_nodelay=true
# The qpid topology version to use. Version 1 is what was
# originally used by impl_qpid. Version 2 includes some
# backwards-incompatible changes that allow broker federation
# to work. Users should update to version 2 when they are
# able to take everything down, as it requires a clean break.
# (integer value)
#qpid_topology_version=1
# SSL version to use (valid only if SSL enabled). valid values
# are TLSv1, SSLv23 and SSLv3. SSLv2 may be available on some
# distributions. (string value)
#kombu_ssl_version=
# SSL key file (valid only if SSL enabled). (string value)
#kombu_ssl_keyfile=
# SSL cert file (valid only if SSL enabled). (string value)
#kombu_ssl_certfile=
# SSL certification authority file (valid only if SSL
# enabled). (string value)
#kombu_ssl_ca_certs=
# How long to wait before reconnecting in response to an AMQP
# consumer cancel notification. (floating point value)
#kombu_reconnect_delay=1.0
# The RabbitMQ broker address where a single node is used.
# (string value)
#rabbit_host=localhost
# The RabbitMQ broker port where a single node is used.
# (integer value)
#rabbit_port=5672
# RabbitMQ HA cluster host:port pairs. (list value)
#rabbit_hosts=$rabbit_host:$rabbit_port
# Connect over SSL for RabbitMQ. (boolean value)
#rabbit_use_ssl=false
# The RabbitMQ userid. (string value)
#rabbit_userid=guest
# The RabbitMQ password. (string value)
#rabbit_password=guest
# the RabbitMQ login method (string value)
#rabbit_login_method=AMQPLAIN
# The RabbitMQ virtual host. (string value)
#rabbit_virtual_host=/
# How frequently to retry connecting with RabbitMQ. (integer
# value)
#rabbit_retry_interval=1
# How long to backoff for between retries when connecting to
# RabbitMQ. (integer value)
#rabbit_retry_backoff=2
# Maximum number of RabbitMQ connection retries. Default is 0
# (infinite retry count). (integer value)
#rabbit_max_retries=0
# Use HA queues in RabbitMQ (x-ha-policy: all). If you change
# this option, you must wipe the RabbitMQ database. (boolean
# value)
#rabbit_ha_queues=false
# If passed, use a fake RabbitMQ provider. (boolean value)
#fake_rabbit=false
# ZeroMQ bind address. Should be a wildcard (*), an ethernet
# interface, or IP. The "host" option should point or resolve
# to this address. (string value)
#rpc_zmq_bind_address=*
# MatchMaker driver. (string value)
#rpc_zmq_matchmaker=oslo.messaging._drivers.matchmaker.MatchMakerLocalhost
# ZeroMQ receiver listening port. (integer value)
#rpc_zmq_port=9501
# Number of ZeroMQ contexts, defaults to 1. (integer value)
#rpc_zmq_contexts=1
# Maximum number of ingress messages to locally buffer per
# topic. Default is unlimited. (integer value)
#rpc_zmq_topic_backlog=<None>
# Directory for holding IPC sockets. (string value)
#rpc_zmq_ipc_dir=/var/run/openstack
# Name of this node. Must be a valid hostname, FQDN, or IP
# address. Must match "host" option, if running Nova. (string
# value)
#rpc_zmq_host=oslo
# Seconds to wait before a cast expires (TTL). Only supported
# by impl_zmq. (integer value)
#rpc_cast_timeout=30
# Heartbeat frequency. (integer value)
#matchmaker_heartbeat_freq=300
# Heartbeat time-to-live. (integer value)
#matchmaker_heartbeat_ttl=600
# Size of RPC greenthread pool. (integer value)
#rpc_thread_pool_size=64
# Driver or drivers to handle sending notifications. (multi
# valued)
#notification_driver=
# AMQP topic used for OpenStack notifications. (list value)
# Deprecated group/name - [rpc_notifier2]/topics
#notification_topics=notifications
# Seconds to wait for a response from a call. (integer value)
#rpc_response_timeout=60
# A URL representing the messaging driver to use and its full
# configuration. If not set, we fall back to the rpc_backend
# option and driver specific configuration. (string value)
#transport_url=<None>
# The messaging driver to use, defaults to rabbit. Other
# drivers include qpid and zmq. (string value)
#rpc_backend=rabbit
# The default exchange under which topics are scoped. May be
# overridden by an exchange name specified in the
# transport_url option. (string value)
#control_exchange=openstack
[matchmaker_redis]
#
# Options defined in oslo.messaging
#
# Host to locate redis. (string value)
#host=127.0.0.1
# Use this port to connect to redis host. (integer value)
#port=6379
# Password for Redis server (optional). (string value)
#password=<None>
[matchmaker_ring]
#
# Options defined in oslo.messaging
#
# Matchmaker ring file (JSON). (string value)
# Deprecated group/name - [DEFAULT]/matchmaker_ringfile
#ringfile=/etc/oslo/matchmaker_ring.json
[quotas]
# Default driver to use for quota checks
# quota_driver = neutron.db.quota_db.DbQuotaDriver
# Resource name(s) that are supported in quota features
# quota_items = network,subnet,port
# Default number of resource allowed per tenant. A negative value means
# unlimited.
# default_quota = -1
# Number of networks allowed per tenant. A negative value means unlimited.
# quota_network = 10
# Number of subnets allowed per tenant. A negative value means unlimited.
# quota_subnet = 10
# Number of ports allowed per tenant. A negative value means unlimited.
# quota_port = 50
# Number of security groups allowed per tenant. A negative value means
# unlimited.
# quota_security_group = 10
# Number of security group rules allowed per tenant. A negative value means
# unlimited.
# quota_security_group_rule = 100
# Number of vips allowed per tenant. A negative value means unlimited.
# quota_vip = 10
# Number of pools allowed per tenant. A negative value means unlimited.
# quota_pool = 10
# Number of pool members allowed per tenant. A negative value means unlimited.
# The default is unlimited because a member is not a real resource consumer
# on OpenStack. However, on the back end a member is a resource consumer,
# and that is the reason why a quota is possible.
# quota_member = -1
# Number of health monitors allowed per tenant. A negative value means
# unlimited.
# The default is unlimited because a health monitor is not a real resource
# consumer on OpenStack. However, on the back end a health monitor is a
# resource consumer, and that is the reason why a quota is possible.
# quota_health_monitor = -1
# Number of routers allowed per tenant. A negative value means unlimited.
# quota_router = 10
# Number of floating IPs allowed per tenant. A negative value means unlimited.
# quota_floatingip = 50
# Number of firewalls allowed per tenant. A negative value means unlimited.
# quota_firewall = 1
# Number of firewall policies allowed per tenant. A negative value means
# unlimited.
# quota_firewall_policy = 1
# Number of firewall rules allowed per tenant. A negative value means
# unlimited.
# quota_firewall_rule = 100
[agent]
# Use "sudo neutron-rootwrap /etc/neutron/rootwrap.conf" to use the real
# root filter facility.
# Change to "sudo" to skip the filtering and just run the comand directly
root_helper = sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf
# =========== items for agent management extension =============
# seconds between nodes reporting state to server; should be less than
# agent_down_time, best if it is half or less than agent_down_time
# report_interval = 30
# =========== end of items for agent management extension =====
[keystone_authtoken]
auth_host = 127.0.0.1
auth_port = 35357
auth_protocol = http
admin_tenant_name = %SERVICE_TENANT_NAME%
admin_user = %SERVICE_USER%
admin_password = %SERVICE_PASSWORD%
[database]
# This line MUST be changed to actually run the plugin.
# Example:
# connection = mysql://root:pass@127.0.0.1:3306/neutron
# Replace 127.0.0.1 above with the IP address of the database used by the
# main neutron server. (Leave it as is if the database runs on this host.)
connection = sqlite:////var/lib/neutron/neutron.sqlite
# NOTE: In deployment the [database] section and its connection attribute may
# be set in the corresponding core plugin '.ini' file. However, it is suggested
# to put the [database] section and its connection attribute in this
# configuration file.
# Database engine for which script will be generated when using offline
# migration
# engine =
# The SQLAlchemy connection string used to connect to the slave database
# slave_connection =
# Database reconnection retry times - in event connectivity is lost
# set to -1 implies an infinite retry count
# max_retries = 10
# Database reconnection interval in seconds - if the initial connection to the
# database fails
# retry_interval = 10
# Minimum number of SQL connections to keep open in a pool
# min_pool_size = 1
# Maximum number of SQL connections to keep open in a pool
# max_pool_size = 10
# Timeout in seconds before idle sql connections are reaped
# idle_timeout = 3600
# If set, use this value for max_overflow with sqlalchemy
# max_overflow = 20
# Verbosity of SQL debugging information. 0=None, 100=Everything
# connection_debug = 0
# Add python stack traces to SQL as comment strings
# connection_trace = False
# If set, use this value for pool_timeout with sqlalchemy
# pool_timeout = 10
[service_providers]
# Specify service providers (drivers) for advanced services like loadbalancer, VPN, Firewall.
# Must be in form:
# service_provider=<service_type>:<name>:<driver>[:default]
# List of allowed service types includes LOADBALANCER, FIREWALL, VPN
# Combination of <service type> and <name> must be unique; <driver> must also be unique
# This is a multiline option; example for the default provider:
# service_provider=LOADBALANCER:name:lbaas_plugin_driver_path:default
# example of non-default provider:
# service_provider=FIREWALL:name2:firewall_driver_path
# --- Reference implementations ---
service_provider=LOADBALANCER:Haproxy:neutron.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default
service_provider=VPN:openswan:neutron.services.vpn.service_drivers.ipsec.IPsecVPNDriver:default
# In order to activate Radware's lbaas driver you need to uncomment the next line.
# If you want to keep the HA Proxy as the default lbaas driver, remove the attribute default from the line below.
# Otherwise comment the HA Proxy line
# service_provider = LOADBALANCER:Radware:neutron.services.loadbalancer.drivers.radware.driver.LoadBalancerDriver:default
# uncomment the following line to make the 'netscaler' LBaaS provider available.
# service_provider=LOADBALANCER:NetScaler:neutron.services.loadbalancer.drivers.netscaler.netscaler_driver.NetScalerPluginDriver
# Uncomment the following line (and comment out the OpenSwan VPN line) to enable Cisco's VPN driver.
# service_provider=VPN:cisco:neutron.services.vpn.service_drivers.cisco_ipsec.CiscoCsrIPsecVPNDriver:default
# Uncomment the line below to use Embrane heleos as Load Balancer service provider.
# service_provider=LOADBALANCER:Embrane:neutron.services.loadbalancer.drivers.embrane.driver.EmbraneLbaas:default
# Uncomment the line below to use the A10 Networks LBaaS driver. Requires 'pip install a10-neutron-lbaas'.
#service_provider = LOADBALANCER:A10Networks:neutron.services.loadbalancer.drivers.a10networks.driver_v1.ThunderDriver:default
# Uncomment the following line to test the LBaaS v2 API _WITHOUT_ a real backend
# service_provider = LOADBALANCER:LoggingNoop:neutron.services.loadbalancer.drivers.logging_noop.driver.LoggingNoopLoadBalancerDriver:default


@@ -0,0 +1,83 @@
[ml2]
# (ListOpt) List of network type driver entrypoints to be loaded from
# the neutron.ml2.type_drivers namespace.
#
# type_drivers = local,flat,vlan,gre,vxlan
type_drivers = gre,vlan
# Example: type_drivers = flat,vlan,gre,vxlan
# (ListOpt) Ordered list of network_types to allocate as tenant
# networks. The default value 'local' is useful for single-box testing
# but provides no connectivity between hosts.
#
# tenant_network_types = local
tenant_network_types = gre
# Example: tenant_network_types = vlan,gre,vxlan
# (ListOpt) Ordered list of networking mechanism driver entrypoints
# to be loaded from the neutron.ml2.mechanism_drivers namespace.
# mechanism_drivers =
# Example: mechanism_drivers = openvswitch,mlnx
# Example: mechanism_drivers = arista
# Example: mechanism_drivers = cisco,logger
# Example: mechanism_drivers = openvswitch,brocade
# Example: mechanism_drivers = linuxbridge,brocade
mechanism_drivers=openvswitch
[ml2_type_flat]
# (ListOpt) List of physical_network names with which flat networks
# can be created. Use * to allow flat networks with arbitrary
# physical_network names.
#
# flat_networks =
# Example:flat_networks = physnet1,physnet2
# Example:flat_networks = *
[ml2_type_vlan]
# (ListOpt) List of <physical_network>[:<vlan_min>:<vlan_max>] tuples
# specifying physical_network names usable for VLAN provider and
# tenant networks, as well as ranges of VLAN tags on each
# physical_network available for allocation as tenant networks.
#
# network_vlan_ranges =
# Example: network_vlan_ranges = physnet1:1000:2999,physnet2
network_vlan_ranges=trunk:2112:2114
[ml2_type_gre]
# (ListOpt) Comma-separated list of <tun_min>:<tun_max> tuples enumerating ranges of GRE tunnel IDs that are available for tenant network allocation
# tunnel_id_ranges =
tunnel_id_ranges=1:1000
[ml2_type_vxlan]
# (ListOpt) Comma-separated list of <vni_min>:<vni_max> tuples enumerating
# ranges of VXLAN VNI IDs that are available for tenant network allocation.
#
# vni_ranges =
# (StrOpt) Multicast group for the VXLAN interface. When configured, will
# enable sending all broadcast traffic to this multicast group. When left
# unconfigured, will disable multicast VXLAN mode.
#
# vxlan_group =
# Example: vxlan_group = 239.1.1.1
[securitygroup]
# Controls if neutron security group is enabled or not.
# It should be false when you use nova security group.
# enable_security_group = True
enable_security_group = True
firewall_driver=neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
[agent]
tunnel_types=gre
l2_population=False
polling_interval=30
veth_mtu=9134
[ovs]
enable_tunneling=True
tenant_network_type=gre
bridge_mappings=trunk:eth1-br
local_ip=<LOCAL_IP>
network_vlan_ranges=trunk:2112:2114
tunnel_id_ranges=1:1000


@@ -0,0 +1,71 @@
[ml2]
# (ListOpt) List of network type driver entrypoints to be loaded from
# the neutron.ml2.type_drivers namespace.
#
# type_drivers = local,flat,vlan,gre,vxlan
# Example: type_drivers = flat,vlan,gre,vxlan
# (ListOpt) Ordered list of network_types to allocate as tenant
# networks. The default value 'local' is useful for single-box testing
# but provides no connectivity between hosts.
#
# tenant_network_types = local
# Example: tenant_network_types = vlan,gre,vxlan
# (ListOpt) Ordered list of networking mechanism driver entrypoints
# to be loaded from the neutron.ml2.mechanism_drivers namespace.
# mechanism_drivers =
# Example: mechanism_drivers = openvswitch,mlnx
# Example: mechanism_drivers = arista
# Example: mechanism_drivers = cisco,logger
# Example: mechanism_drivers = openvswitch,brocade
# Example: mechanism_drivers = linuxbridge,brocade
# (ListOpt) Ordered list of extension driver entrypoints
# to be loaded from the neutron.ml2.extension_drivers namespace.
# extension_drivers =
# Example: extension_drivers = anewextensiondriver
[ml2_type_flat]
# (ListOpt) List of physical_network names with which flat networks
# can be created. Use * to allow flat networks with arbitrary
# physical_network names.
#
# flat_networks =
# Example:flat_networks = physnet1,physnet2
# Example:flat_networks = *
[ml2_type_vlan]
# (ListOpt) List of <physical_network>[:<vlan_min>:<vlan_max>] tuples
# specifying physical_network names usable for VLAN provider and
# tenant networks, as well as ranges of VLAN tags on each
# physical_network available for allocation as tenant networks.
#
# network_vlan_ranges =
# Example: network_vlan_ranges = physnet1:1000:2999,physnet2
[ml2_type_gre]
# (ListOpt) Comma-separated list of <tun_min>:<tun_max> tuples enumerating ranges of GRE tunnel IDs that are available for tenant network allocation
# tunnel_id_ranges =
[ml2_type_vxlan]
# (ListOpt) Comma-separated list of <vni_min>:<vni_max> tuples enumerating
# ranges of VXLAN VNI IDs that are available for tenant network allocation.
#
# vni_ranges =
# (StrOpt) Multicast group for the VXLAN interface. When configured, will
# enable sending all broadcast traffic to this multicast group. When left
# unconfigured, will disable multicast VXLAN mode.
#
# vxlan_group =
# Example: vxlan_group = 239.1.1.1
[securitygroup]
# Controls if neutron security group is enabled or not.
# It should be false when you use nova security group.
# enable_security_group = True
# Use ipset to speed-up the iptables security groups. Enabling ipset support
# requires that ipset is installed on L2 agent node.
# enable_ipset = True


@@ -0,0 +1,100 @@
# Defines configuration options specific for Arista ML2 Mechanism driver
[ml2_arista]
# (StrOpt) EOS IP address. This is a required field. If not set, all
# communications to Arista EOS will fail.
#
# eapi_host =
# Example: eapi_host = 192.168.0.1
#
# (StrOpt) EOS command API username. This is a required field.
# If not set, all communications to Arista EOS will fail.
#
# eapi_username =
# Example: arista_eapi_username = admin
#
# (StrOpt) EOS command API password. This is a required field.
# If not set, all communications to Arista EOS will fail.
#
# eapi_password =
# Example: eapi_password = my_password
#
# (StrOpt) Defines if hostnames are sent to Arista EOS as FQDNs
# ("node1.domain.com") or as short names ("node1"). This is
# optional. If not set, a value of "True" is assumed.
#
# use_fqdn =
# Example: use_fqdn = True
#
# (IntOpt) Sync interval in seconds between Neutron plugin and EOS.
# This field defines how often the synchronization is performed.
# This is an optional field. If not set, a value of 180 seconds
# is assumed.
#
# sync_interval =
# Example: sync_interval = 60
#
# (StrOpt) Defines Region Name that is assigned to this OpenStack Controller.
# This is useful when multiple OpenStack/Neutron controllers are
# managing the same Arista HW clusters. Note that this name must
# match the region name registered (or known) to the keystone
# service. Authentication with Keystone is performed by EOS.
# This is optional. If not set, a value of "RegionOne" is assumed.
#
# region_name =
# Example: region_name = RegionOne
[l3_arista]
# (StrOpt) Primary host IP address. This is a required field. If not set, all
# communications to Arista EOS will fail. This is the host where
# primary router is created.
#
# primary_l3_host =
# Example: primary_l3_host = 192.168.10.10
#
# (StrOpt) Primary host username. This is a required field.
# If not set, all communications to Arista EOS will fail.
#
# primary_l3_host_username =
# Example: arista_primary_l3_username = admin
#
# (StrOpt) Primary host password. This is a required field.
# If not set, all communications to Arista EOS will fail.
#
# primary_l3_host_password =
# Example: primary_l3_password = my_password
#
# (StrOpt) IP address of the second Arista switch paired as
# MLAG (Multi-chassis Link Aggregation) with the first.
# This is an optional field; however, if the mlag_config flag is set,
# then it is a required field. If not set, all
# communications to Arista EOS will fail. If mlag_config is set
# to False, then this field is ignored.
#
# secondary_l3_host =
# Example: secondary_l3_host = 192.168.10.20
#
# (BoolOpt) Defines if Arista switches are configured in MLAG mode
# If yes, all L3 configuration is pushed to both switches
# automatically. If this flag is set, ensure that secondary_l3_host
# is set to the second switch's IP.
# This flag is Optional. If not set, a value of "False" is assumed.
#
# mlag_config =
# Example: mlag_config = True
#
# (BoolOpt) Defines if the router is created in the default VRF or
# a specific VRF. This is optional.
# If not set, a value of "False" is assumed.
#
# Example: use_vrf = True
#
# (IntOpt) Sync interval in seconds between Neutron plugin and EOS.
# This field defines how often the synchronization is performed.
# This is an optional field. If not set, a value of 180 seconds
# is assumed.
#
# l3_sync_interval =
# Example: l3_sync_interval = 60
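#
# A minimal sketch of an MLAG-paired setup (placeholder addresses and
# credentials; secondary_l3_host is only consulted when mlag_config = True):
#
# primary_l3_host = 192.168.10.10
# primary_l3_host_username = admin
# primary_l3_host_password = my_password
# secondary_l3_host = 192.168.10.20
# mlag_config = True
# use_vrf = False
# l3_sync_interval = 60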


@ -0,0 +1,15 @@
[ml2_brocade]
# username = <mgmt admin username>
# password = <mgmt admin password>
# address = <switch mgmt ip address>
# ostype = NOS
# osversion = autodetect | n.n.n
# physical_networks = physnet1,physnet2
#
# Example:
# username = admin
# password = password
# address = 10.24.84.38
# ostype = NOS
# osversion = 4.1.1
# physical_networks = physnet1,physnet2


@ -0,0 +1,118 @@
[ml2_cisco]
# (StrOpt) A short prefix to prepend to the VLAN number when creating a
# VLAN interface. For example, if an interface is being created for
# VLAN 2001 it will be named 'q-2001' using the default prefix.
#
# vlan_name_prefix = q-
# Example: vlan_name_prefix = vnet-
# (BoolOpt) A flag to enable round robin scheduling of routers for SVI.
# svi_round_robin = False
#
# (StrOpt) The name of the physical_network managed via the Cisco Nexus Switch.
# This string value must be present in the ml2_conf.ini network_vlan_ranges
# variable.
#
# managed_physical_network =
# Example: managed_physical_network = physnet1
# Cisco Nexus Switch configurations.
# Each switch to be managed by OpenStack Neutron must be configured here.
#
# Cisco Nexus Switch Format.
# [ml2_mech_cisco_nexus:<IP address of switch>]
# <hostname>=<intf_type:port> (1)
# ssh_port=<ssh port> (2)
# username=<credential username> (3)
# password=<credential password> (4)
#
# (1) For each host connected to a port on the switch, specify the hostname
# and the Nexus physical port (interface) it is connected to.
# Valid intf_type's are 'ethernet' and 'port-channel'.
# The default setting for <intf_type:> is 'ethernet' and need not be
# added to this setting.
# (2) The TCP port for connecting via SSH to manage the switch. This is
# port number 22 unless the switch has been configured otherwise.
# (3) The username for logging into the switch to manage it.
# (4) The password for logging into the switch to manage it.
#
# Example:
# [ml2_mech_cisco_nexus:1.1.1.1]
# compute1=1/1
# compute2=ethernet:1/2
# compute3=port-channel:1
# ssh_port=22
# username=admin
# password=mySecretPassword
[ml2_cisco_apic]
# Hostname:port list of APIC controllers
# apic_hosts = 1.1.1.1:80, 1.1.1.2:8080, 1.1.1.3:80
# Username for the APIC controller
# apic_username = user
# Password for the APIC controller
# apic_password = password
# Whether to use SSL for connecting to the APIC controller
# apic_use_ssl = True
# How to map names to APIC: use_uuid or use_name
# apic_name_mapping = use_name
# Names for APIC objects used by Neutron
# Note: When deploying multiple clouds against one APIC,
# these names must be unique between the clouds.
# apic_vmm_domain = openstack
# apic_vlan_ns_name = openstack_ns
# apic_node_profile = openstack_profile
# apic_entity_profile = openstack_entity
# apic_function_profile = openstack_function
# apic_app_profile_name = openstack_app
# Agent timers for State reporting and topology discovery
# apic_sync_interval = 30
# apic_agent_report_interval = 30
# apic_agent_poll_interval = 2
# Specify your network topology.
# This section indicates how your compute nodes are connected to the fabric's
# switches and ports. The format is as follows:
#
# [apic_switch:<switch_id_from_the_apic>]
# <compute_host>,<compute_host> = <switchport_the_host(s)_are_connected_to>
#
# You can have multiple sections, one for each switch in your fabric that is
# participating in OpenStack, e.g.
#
# [apic_switch:17]
# ubuntu,ubuntu1 = 1/10
# ubuntu2,ubuntu3 = 1/11
#
# [apic_switch:18]
# ubuntu5,ubuntu6 = 1/1
# ubuntu7,ubuntu8 = 1/2
# Describe external connectivity.
# In this section you can specify the external network configuration in order
# for the plugin to be able to teach the fabric how to route the internal
# traffic to the outside world. The external connectivity configuration
# format is as follows:
#
# [apic_external_network:<externalNetworkName>]
# switch = <switch_id_from_the_apic>
# port = <switchport_the_external_router_is_connected_to>
# encap = <encapsulation>
# cidr_exposed = <cidr_exposed_to_the_external_router>
# gateway_ip = <ip_of_the_external_gateway>
#
# An example follows:
# [apic_external_network:network_ext]
# switch=203
# port=1/34
# encap=vlan-100
# cidr_exposed=10.10.40.2/16
# gateway_ip=10.10.40.1


@ -0,0 +1,52 @@
# Defines configuration options for the FSL SDN OS Mechanism Driver
# Cloud Resource Discovery (CRD) authorization credentials
[ml2_fslsdn]
#(StrOpt) User name for authentication to CRD.
# e.g.: user12
#
# crd_user_name =
#(StrOpt) Password for authentication to CRD.
# e.g.: secret
#
# crd_password =
#(StrOpt) Tenant name for CRD service.
# e.g.: service
#
# crd_tenant_name =
#(StrOpt) CRD auth URL.
# e.g.: http://127.0.0.1:5000/v2.0/
#
# crd_auth_url =
#(StrOpt) URL for connecting to CRD Service.
# e.g.: http://127.0.0.1:9797
#
# crd_url=
#(IntOpt) Timeout value for connecting to CRD service
# in seconds, e.g.: 30
#
# crd_url_timeout=
#(StrOpt) Region name for connecting to CRD in
# admin context, e.g.: RegionOne
#
# crd_region_name=
#(BoolOpt)If set, ignore any SSL validation issues (boolean value)
# e.g.: False
#
# crd_api_insecure=
#(StrOpt)Authorization strategy for connecting to CRD in admin
# context, e.g.: keystone
#
# crd_auth_strategy=
#(StrOpt)Location of CA certificates file to use for CRD client
# requests.
#
# crd_ca_certificates_file=
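#
# A minimal sketch of this section using the e.g. values above
# (placeholder endpoints and credentials):
#
# crd_user_name = user12
# crd_password = secret
# crd_tenant_name = service
# crd_auth_url = http://127.0.0.1:5000/v2.0/
# crd_url = http://127.0.0.1:9797
# crd_url_timeout = 30
# crd_region_name = RegionOne
# crd_api_insecure = False
# crd_auth_strategy = keystone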


@ -0,0 +1,4 @@
[eswitch]
# (StrOpt) Type of network interface to allocate for a VM:
# mlnx_direct or hostdev, according to libvirt terminology
# vnic_type = mlnx_direct


@ -0,0 +1,28 @@
# Defines configuration options specific to the Tail-f NCS Mechanism Driver
[ml2_ncs]
# (StrOpt) Tail-f NCS HTTP endpoint for REST access to the OpenStack
# subtree.
# If this is not set then no HTTP requests will be made.
#
# url =
# Example: url = http://ncs/api/running/services/openstack
# (StrOpt) Username for HTTP basic authentication to NCS.
# This is an optional parameter. If unspecified then no authentication is used.
#
# username =
# Example: username = admin
# (StrOpt) Password for HTTP basic authentication to NCS.
# This is an optional parameter. If unspecified then no authentication is used.
#
# password =
# Example: password = admin
# (IntOpt) Timeout in seconds to wait for NCS HTTP request completion.
# This is an optional parameter; the default value is 10 seconds.
#
# timeout =
# Example: timeout = 15
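#
# A minimal sketch of this section (placeholder endpoint and credentials;
# omit username/password to disable basic authentication):
#
# url = http://ncs/api/running/services/openstack
# username = admin
# password = admin
# timeout = 15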


@ -0,0 +1,30 @@
# Configuration for the OpenDaylight MechanismDriver
[ml2_odl]
# (StrOpt) OpenDaylight REST URL
# If this is not set then no HTTP requests will be made.
#
# url =
# Example: url = http://192.168.56.1:8080/controller/nb/v2/neutron
# (StrOpt) Username for HTTP basic authentication to ODL.
#
# username =
# Example: username = admin
# (StrOpt) Password for HTTP basic authentication to ODL.
#
# password =
# Example: password = admin
# (IntOpt) Timeout in seconds to wait for ODL HTTP request completion.
# This is an optional parameter; the default value is 10 seconds.
#
# timeout = 10
# Example: timeout = 15
# (IntOpt) Tomcat session timeout in minutes.
# This is an optional parameter; the default value is 30 minutes.
#
# session_timeout = 30
# Example: session_timeout = 60
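#
# A minimal sketch of this section (placeholder controller URL and
# credentials):
#
# url = http://192.168.56.1:8080/controller/nb/v2/neutron
# username = admin
# password = admin
# timeout = 10
# session_timeout = 30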


@ -0,0 +1,13 @@
# Defines configuration options specific to the OpenFlow Agent Mechanism Driver
[ovs]
# Please refer to the Open vSwitch agent configuration options.
[agent]
# (IntOpt) Number of seconds to retry acquiring an Open vSwitch datapath.
# This is an optional parameter; the default value is 60 seconds.
#
# get_datapath_retry_times =
# Example: get_datapath_retry_times = 30
# Other than the above, please refer to the Open vSwitch agent configuration options.


@ -0,0 +1,31 @@
# Defines configuration options for SRIOV NIC Switch MechanismDriver
# and Agent
[ml2_sriov]
# (ListOpt) Comma-separated list of
# supported Vendor PCI Devices, in format vendor_id:product_id
#
# supported_pci_vendor_devs = 15b3:1004, 8086:10c9
# Example: supported_pci_vendor_devs = 15b3:1004
#
# (BoolOpt) Requires running SRIOV neutron agent for port binding
# agent_required = True
[sriov_nic]
# (ListOpt) Comma-separated list of <physical_network>:<network_device>
# tuples mapping physical network names to the agent's node-specific
# physical network device interfaces of SR-IOV physical function to be used
# for VLAN networks. All physical networks listed in network_vlan_ranges on
# the server should have mappings to appropriate interfaces on each agent.
#
# physical_device_mappings =
# Example: physical_device_mappings = physnet1:eth1
#
# (ListOpt) Comma-separated list of <network_device>:<vfs_to_exclude>
# tuples, mapping network_device to the agent's node-specific list of virtual
# functions that should not be used for virtual networking.
# vfs_to_exclude is a semicolon-separated list of virtual
# functions to exclude from network_device. The network_device in the
# mapping should appear in the physical_device_mappings list.
# exclude_devices =
# Example: exclude_devices = eth1:0000:07:00.2; 0000:07:00.3
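#
# A minimal sketch pairing both sections (placeholder interface name; the
# vendor/product IDs are the defaults shown above):
#
# [ml2_sriov]
# supported_pci_vendor_devs = 15b3:1004, 8086:10c9
# agent_required = True
#
# [sriov_nic]
# physical_device_mappings = physnet1:eth1
# exclude_devices = eth1:0000:07:00.2; 0000:07:00.3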


@ -0,0 +1,138 @@
{
"context_is_admin": "role:admin",
"admin_or_owner": "rule:context_is_admin or tenant_id:%(tenant_id)s",
"admin_or_network_owner": "rule:context_is_admin or tenant_id:%(network:tenant_id)s",
"admin_only": "rule:context_is_admin",
"regular_user": "",
"shared": "field:networks:shared=True",
"shared_firewalls": "field:firewalls:shared=True",
"external": "field:networks:router:external=True",
"default": "rule:admin_or_owner",
"create_subnet": "rule:admin_or_network_owner",
"get_subnet": "rule:admin_or_owner or rule:shared",
"update_subnet": "rule:admin_or_network_owner",
"delete_subnet": "rule:admin_or_network_owner",
"create_network": "",
"get_network": "rule:admin_or_owner or rule:shared or rule:external",
"get_network:router:external": "rule:regular_user",
"get_network:segments": "rule:admin_only",
"get_network:provider:network_type": "rule:admin_only",
"get_network:provider:physical_network": "rule:admin_only",
"get_network:provider:segmentation_id": "rule:admin_only",
"get_network:queue_id": "rule:admin_only",
"create_network:shared": "rule:admin_only",
"create_network:router:external": "rule:admin_only",
"create_network:segments": "rule:admin_only",
"create_network:provider:network_type": "rule:admin_only",
"create_network:provider:physical_network": "rule:admin_only",
"create_network:provider:segmentation_id": "rule:admin_only",
"update_network": "rule:admin_or_owner",
"update_network:segments": "rule:admin_only",
"update_network:shared": "rule:admin_only",
"update_network:provider:network_type": "rule:admin_only",
"update_network:provider:physical_network": "rule:admin_only",
"update_network:provider:segmentation_id": "rule:admin_only",
"update_network:router:external": "rule:admin_only",
"delete_network": "rule:admin_or_owner",
"create_port": "",
"create_port:mac_address": "rule:admin_or_network_owner",
"create_port:fixed_ips": "rule:admin_or_network_owner",
"create_port:port_security_enabled": "rule:admin_or_network_owner",
"create_port:binding:host_id": "rule:admin_only",
"create_port:binding:profile": "rule:admin_only",
"create_port:mac_learning_enabled": "rule:admin_or_network_owner",
"get_port": "rule:admin_or_owner",
"get_port:queue_id": "rule:admin_only",
"get_port:binding:vif_type": "rule:admin_only",
"get_port:binding:vif_details": "rule:admin_only",
"get_port:binding:host_id": "rule:admin_only",
"get_port:binding:profile": "rule:admin_only",
"update_port": "rule:admin_or_owner",
"update_port:fixed_ips": "rule:admin_or_network_owner",
"update_port:port_security_enabled": "rule:admin_or_network_owner",
"update_port:binding:host_id": "rule:admin_only",
"update_port:binding:profile": "rule:admin_only",
"update_port:mac_learning_enabled": "rule:admin_or_network_owner",
"delete_port": "rule:admin_or_owner",
"get_router:ha": "rule:admin_only",
"create_router": "rule:regular_user",
"create_router:external_gateway_info:enable_snat": "rule:admin_only",
"create_router:distributed": "rule:admin_only",
"create_router:ha": "rule:admin_only",
"get_router": "rule:admin_or_owner",
"get_router:distributed": "rule:admin_only",
"update_router:external_gateway_info:enable_snat": "rule:admin_only",
"update_router:distributed": "rule:admin_only",
"update_router:ha": "rule:admin_only",
"delete_router": "rule:admin_or_owner",
"add_router_interface": "rule:admin_or_owner",
"remove_router_interface": "rule:admin_or_owner",
"create_firewall": "",
"get_firewall": "rule:admin_or_owner",
"create_firewall:shared": "rule:admin_only",
"get_firewall:shared": "rule:admin_only",
"update_firewall": "rule:admin_or_owner",
"update_firewall:shared": "rule:admin_only",
"delete_firewall": "rule:admin_or_owner",
"create_firewall_policy": "",
"get_firewall_policy": "rule:admin_or_owner or rule:shared_firewalls",
"create_firewall_policy:shared": "rule:admin_or_owner",
"update_firewall_policy": "rule:admin_or_owner",
"delete_firewall_policy": "rule:admin_or_owner",
"create_firewall_rule": "",
"get_firewall_rule": "rule:admin_or_owner or rule:shared_firewalls",
"update_firewall_rule": "rule:admin_or_owner",
"delete_firewall_rule": "rule:admin_or_owner",
"create_qos_queue": "rule:admin_only",
"get_qos_queue": "rule:admin_only",
"update_agent": "rule:admin_only",
"delete_agent": "rule:admin_only",
"get_agent": "rule:admin_only",
"create_dhcp-network": "rule:admin_only",
"delete_dhcp-network": "rule:admin_only",
"get_dhcp-networks": "rule:admin_only",
"create_l3-router": "rule:admin_only",
"delete_l3-router": "rule:admin_only",
"get_l3-routers": "rule:admin_only",
"get_dhcp-agents": "rule:admin_only",
"get_l3-agents": "rule:admin_only",
"get_loadbalancer-agent": "rule:admin_only",
"get_loadbalancer-pools": "rule:admin_only",
"create_floatingip": "rule:regular_user",
"update_floatingip": "rule:admin_or_owner",
"delete_floatingip": "rule:admin_or_owner",
"get_floatingip": "rule:admin_or_owner",
"create_network_profile": "rule:admin_only",
"update_network_profile": "rule:admin_only",
"delete_network_profile": "rule:admin_only",
"get_network_profiles": "",
"get_network_profile": "",
"update_policy_profiles": "rule:admin_only",
"get_policy_profiles": "",
"get_policy_profile": "",
"create_metering_label": "rule:admin_only",
"delete_metering_label": "rule:admin_only",
"get_metering_label": "rule:admin_only",
"create_metering_label_rule": "rule:admin_only",
"delete_metering_label_rule": "rule:admin_only",
"get_metering_label_rule": "rule:admin_only",
"get_service_provider": "rule:regular_user",
"get_lsn": "rule:admin_only",
"create_lsn": "rule:admin_only"
}


@ -0,0 +1,34 @@
# Configuration for neutron-rootwrap
# This file should be owned by (and only-writeable by) the root user
[DEFAULT]
# List of directories to load filter definitions from (separated by ',').
# These directories MUST all be only writeable by root !
filters_path=/etc/neutron/rootwrap.d,/usr/share/neutron/rootwrap
# List of directories to search executables in, in case filters do not
# explicitly specify a full path (separated by ',')
# If not specified, defaults to system PATH environment variable.
# These directories MUST all be only writeable by root !
exec_dirs=/sbin,/usr/sbin,/bin,/usr/bin
# Enable logging to syslog
# Default value is False
use_syslog=False
# Which syslog facility to use.
# Valid values include auth, authpriv, syslog, local0, local1...
# Default value is 'syslog'
syslog_log_facility=syslog
# Which messages to log.
# INFO means log all usage
# ERROR means only log unsuccessful attempts
syslog_log_level=ERROR
[xenapi]
# XenAPI configuration is only required by the L2 agent if it is to
# target a XenServer/XCP compute host's dom0.
xenapi_connection_url=<None>
xenapi_connection_username=root
xenapi_connection_password=<None>


@ -0,0 +1,14 @@
# neutron-rootwrap command filters for nodes on which neutron is
# expected to control network
#
# This file should be owned by (and only-writeable by) the root user
# format seems to be
# cmd-name: filter-name, raw-command, user, args
[Filters]
# This is needed because we must ping
# from inside a namespace, which requires root
ping: RegExpFilter, ping, root, ping, -w, \d+, -c, \d+, [0-9\.]+
ping6: RegExpFilter, ping6, root, ping6, -w, \d+, -c, \d+, [0-9A-Fa-f:]+


@ -0,0 +1,12 @@
# neutron-rootwrap command filters for nodes on which neutron is
# expected to control network
#
# This file should be owned by (and only-writeable by) the root user
# format seems to be
# cmd-name: filter-name, raw-command, user, args
[Filters]
# neutron/agent/linux/iptables_firewall.py
# "ipset", "-A", ...
ipset: CommandFilter, ipset, root


@ -0,0 +1,21 @@
# neutron-rootwrap command filters for nodes on which neutron is
# expected to control network
#
# This file should be owned by (and only-writeable by) the root user
# format seems to be
# cmd-name: filter-name, raw-command, user, args
[Filters]
# neutron/agent/linux/iptables_manager.py
# "iptables-save", ...
iptables-save: CommandFilter, iptables-save, root
iptables-restore: CommandFilter, iptables-restore, root
ip6tables-save: CommandFilter, ip6tables-save, root
ip6tables-restore: CommandFilter, ip6tables-restore, root
# neutron/agent/linux/iptables_manager.py
# "iptables", "-A", ...
iptables: CommandFilter, iptables, root
ip6tables: CommandFilter, ip6tables, root


@ -0,0 +1,49 @@
# neutron-rootwrap command filters for nodes on which neutron is
# expected to control network
#
# This file should be owned by (and only-writeable by) the root user
# format seems to be
# cmd-name: filter-name, raw-command, user, args
[Filters]
# arping
arping: CommandFilter, arping, root
# l3_agent
sysctl: CommandFilter, sysctl, root
route: CommandFilter, route, root
radvd: CommandFilter, radvd, root
# metadata proxy
metadata_proxy: CommandFilter, neutron-ns-metadata-proxy, root
# If installed from source (say, by devstack), the prefix will be
# /usr/local instead of /usr/bin.
metadata_proxy_local: CommandFilter, /usr/local/bin/neutron-ns-metadata-proxy, root
# RHEL invocation of the metadata proxy will report /usr/bin/python
kill_metadata: KillFilter, root, python, -9
kill_metadata7: KillFilter, root, python2.7, -9
kill_metadata6: KillFilter, root, python2.6, -9
kill_radvd_usr: KillFilter, root, /usr/sbin/radvd, -9, -HUP
kill_radvd: KillFilter, root, /sbin/radvd, -9, -HUP
# ip_lib
ip: IpFilter, ip, root
ip_exec: IpNetnsExecFilter, ip, root
# ovs_lib (if OVSInterfaceDriver is used)
ovs-vsctl: CommandFilter, ovs-vsctl, root
# iptables_manager
iptables-save: CommandFilter, iptables-save, root
iptables-restore: CommandFilter, iptables-restore, root
ip6tables-save: CommandFilter, ip6tables-save, root
ip6tables-restore: CommandFilter, ip6tables-restore, root
# Keepalived
keepalived: CommandFilter, keepalived, root
kill_keepalived: KillFilter, root, /usr/sbin/keepalived, -HUP, -15, -9
# l3 agent to delete floatingip's conntrack state
conntrack: CommandFilter, conntrack, root


@ -0,0 +1,22 @@
# neutron-rootwrap command filters for nodes on which neutron is
# expected to control network
#
# This file should be owned by (and only-writeable by) the root user
# format seems to be
# cmd-name: filter-name, raw-command, user, args
[Filters]
# openvswitch-agent
# unclear whether both variants are necessary, but I'm transliterating
# from the old mechanism
ovs-vsctl: CommandFilter, ovs-vsctl, root
ovs-ofctl: CommandFilter, ovs-ofctl, root
kill_ovsdb_client: KillFilter, root, /usr/bin/ovsdb-client, -9
ovsdb-client: CommandFilter, ovsdb-client, root
xe: CommandFilter, xe, root
# ip_lib
ip: IpFilter, ip, root
ip_exec: IpNetnsExecFilter, ip, root


@ -0,0 +1,13 @@
# neutron-rootwrap command filters for nodes on which neutron is
# expected to control network
#
# This file should be owned by (and only-writeable by) the root user
# format seems to be
# cmd-name: filter-name, raw-command, user, args
[Filters]
ip: IpFilter, ip, root
ip_exec: IpNetnsExecFilter, ip, root
openswan: CommandFilter, ipsec, root


@ -0,0 +1,14 @@
[DEFAULT]
# VPN-Agent configuration file
# Note: vpn-agent inherits from l3-agent, so l3-agent config options also apply here
[vpnagent]
# VPN device drivers the VPN agent will use.
# To use multiple drivers, define this option multiple times.
# vpn_device_driver=neutron.services.vpn.device_drivers.ipsec.OpenSwanDriver
# vpn_device_driver=neutron.services.vpn.device_drivers.cisco_ipsec.CiscoCsrIPsecDriver
# vpn_device_driver=another_driver
[ipsec]
# Status check interval
# ipsec_status_check_interval=60


@ -0,0 +1,127 @@
############
# Metadata #
############
[composite:metadata]
use = egg:Paste#urlmap
/: meta
[pipeline:meta]
pipeline = ec2faultwrap logrequest metaapp
[app:metaapp]
paste.app_factory = nova.api.metadata.handler:MetadataRequestHandler.factory
#######
# EC2 #
#######
[composite:ec2]
use = egg:Paste#urlmap
/services/Cloud: ec2cloud
[composite:ec2cloud]
use = call:nova.api.auth:pipeline_factory
noauth = ec2faultwrap logrequest ec2noauth cloudrequest validator ec2executor
keystone = ec2faultwrap logrequest ec2keystoneauth cloudrequest validator ec2executor
[filter:ec2faultwrap]
paste.filter_factory = nova.api.ec2:FaultWrapper.factory
[filter:logrequest]
paste.filter_factory = nova.api.ec2:RequestLogging.factory
[filter:ec2lockout]
paste.filter_factory = nova.api.ec2:Lockout.factory
[filter:ec2keystoneauth]
paste.filter_factory = nova.api.ec2:EC2KeystoneAuth.factory
[filter:ec2noauth]
paste.filter_factory = nova.api.ec2:NoAuth.factory
[filter:cloudrequest]
controller = nova.api.ec2.cloud.CloudController
paste.filter_factory = nova.api.ec2:Requestify.factory
[filter:authorizer]
paste.filter_factory = nova.api.ec2:Authorizer.factory
[filter:validator]
paste.filter_factory = nova.api.ec2:Validator.factory
[app:ec2executor]
paste.app_factory = nova.api.ec2:Executor.factory
#############
# OpenStack #
#############
[composite:osapi_compute]
use = call:nova.api.openstack.urlmap:urlmap_factory
/: oscomputeversions
/v1.1: openstack_compute_api_v2
/v2: openstack_compute_api_v2
/v2.1: openstack_compute_api_v21
/v3: openstack_compute_api_v3
[composite:openstack_compute_api_v2]
use = call:nova.api.auth:pipeline_factory
noauth = compute_req_id faultwrap sizelimit noauth ratelimit osapi_compute_app_v2
keystone = compute_req_id faultwrap sizelimit authtoken keystonecontext ratelimit osapi_compute_app_v2
keystone_nolimit = compute_req_id faultwrap sizelimit authtoken keystonecontext osapi_compute_app_v2
[composite:openstack_compute_api_v21]
use = call:nova.api.auth:pipeline_factory_v21
noauth = request_id faultwrap sizelimit noauth osapi_compute_app_v21
keystone = request_id faultwrap sizelimit authtoken keystonecontext osapi_compute_app_v21
[composite:openstack_compute_api_v3]
use = call:nova.api.auth:pipeline_factory_v21
noauth = request_id faultwrap sizelimit noauth_v3 osapi_compute_app_v3
keystone = request_id faultwrap sizelimit authtoken keystonecontext osapi_compute_app_v3
[filter:request_id]
paste.filter_factory = nova.openstack.common.middleware.request_id:RequestIdMiddleware.factory
[filter:compute_req_id]
paste.filter_factory = nova.api.compute_req_id:ComputeReqIdMiddleware.factory
[filter:faultwrap]
paste.filter_factory = nova.api.openstack:FaultWrapper.factory
[filter:noauth]
paste.filter_factory = nova.api.openstack.auth:NoAuthMiddleware.factory
[filter:noauth_v3]
paste.filter_factory = nova.api.openstack.auth:NoAuthMiddlewareV3.factory
[filter:ratelimit]
paste.filter_factory = nova.api.openstack.compute.limits:RateLimitingMiddleware.factory
[filter:sizelimit]
paste.filter_factory = nova.api.sizelimit:RequestBodySizeLimiter.factory
[app:osapi_compute_app_v2]
paste.app_factory = nova.api.openstack.compute:APIRouter.factory
[app:osapi_compute_app_v21]
paste.app_factory = nova.api.openstack.compute:APIRouterV21.factory
[app:osapi_compute_app_v3]
paste.app_factory = nova.api.openstack.compute:APIRouterV3.factory
[pipeline:oscomputeversions]
pipeline = faultwrap oscomputeversionapp
[app:oscomputeversionapp]
paste.app_factory = nova.api.openstack.compute.versions:Versions.factory
##########
# Shared #
##########
[filter:keystonecontext]
paste.filter_factory = nova.api.auth:NovaKeystoneContext.factory
[filter:authtoken]
paste.filter_factory = keystonemiddleware.auth_token:filter_factory


@ -0,0 +1,81 @@
[loggers]
keys = root, nova
[handlers]
keys = stderr, stdout, watchedfile, syslog, null
[formatters]
keys = context, default
[logger_root]
level = WARNING
handlers = null
[logger_nova]
level = INFO
handlers = stderr
qualname = nova
[logger_amqp]
level = WARNING
handlers = stderr
qualname = amqp
[logger_amqplib]
level = WARNING
handlers = stderr
qualname = amqplib
[logger_sqlalchemy]
level = WARNING
handlers = stderr
qualname = sqlalchemy
# "level = INFO" logs SQL queries.
# "level = DEBUG" logs SQL queries and results.
# "level = WARNING" logs neither. (Recommended for production systems.)
[logger_boto]
level = WARNING
handlers = stderr
qualname = boto
[logger_suds]
level = INFO
handlers = stderr
qualname = suds
[logger_eventletwsgi]
level = WARNING
handlers = stderr
qualname = eventlet.wsgi.server
[handler_stderr]
class = StreamHandler
args = (sys.stderr,)
formatter = context
[handler_stdout]
class = StreamHandler
args = (sys.stdout,)
formatter = context
[handler_watchedfile]
class = handlers.WatchedFileHandler
args = ('nova.log',)
formatter = context
[handler_syslog]
class = handlers.SysLogHandler
args = ('/dev/log', handlers.SysLogHandler.LOG_USER)
formatter = context
[handler_null]
class = nova.openstack.common.log.NullHandler
formatter = default
args = ()
[formatter_context]
class = nova.openstack.common.log.ContextFormatter
[formatter_default]
format = %(message)s


@ -0,0 +1,4 @@
[DEFAULT]
compute_driver=libvirt.LibvirtDriver
[libvirt]
virt_type=kvm


@ -0,0 +1,85 @@
[DEFAULT]
state_path=/var/lib/nova
lock_path=/var/lock/nova
verbose=True
network_api_class=nova.network.neutronv2.api.API
debug=False
log_dir=/var/log/nova
amqp_durable_queues=False
vncserver_proxyclient_address=<CONTROLLER_IP>
rabbit_hosts=<CONTROLLER>.csail.mit.edu:5672
notify_api_faults=False
live_migration_flag=VIR_MIGRATE_UNDEFINE_SOURCE,VIR_MIGRATE_PEER2PEER,VIR_MIGRATE_LIVE
rabbit_use_ssl=False
live_migration_bandwidth=0
notification_driver=nova.openstack.common.notifier.log_notifier
rabbit_userid=<REDACTED>
rabbit_ha_queues=False
network_device_mtu=9134
rabbit_password=<REDACTED>
report_interval=10
security_group_api=neutron
live_migration_uri=qemu+ssh://nova@%s/system?keyfile=/var/lib/nova/.ssh/id_rsa&no_tty=1&no_verify=1
rabbit_host=<CONTROLLER>.csail.mit.edu
vnc_enabled=True
rabbit_virtual_host=/
image_service=nova.image.glance.GlanceImageService
heal_instance_info_cache_interval=0
firewall_driver=nova.virt.firewall.NoopFirewallDriver
rabbit_port=5672
live_migration_retry_count=30
vif_plugging_is_fatal=True
novncproxy_base_url=http://<CONTROLLER>.csail.mit.edu:6080/vnc_auto.html
service_down_time=60
vncserver_listen=0.0.0.0
notification_topics=notifications
auth_strategy=keystone
compute_driver=libvirt.LibvirtDriver
rootwrap_config=/etc/nova/rootwrap.conf
rpc_backend=nova.openstack.common.rpc.impl_kombu
vif_plugging_timeout=300
libvirt_cpu_mode=host-passthrough
use_syslog=False
compute_monitors=ComputeDriverCPUMonitor
force_raw_images=True
vnc_keymap=en-us
force_snat_range=0.0.0.0/0
dhcp_domain=novalocal
[conductor]
use_local=False
[libvirt]
vif_driver=nova.virt.libvirt.vif.LibvirtGenericVIFDriver
cpu_mode=host-model
virt_type=kvm
images_rbd_pool=vms
rbd_secret_uuid=cf58e08b-3c51-410f-b043-619c616c6f44
images_type=rbd
images_rbd_ceph_conf=/etc/ceph/ceph.conf
rbd_user=openstack
[database]
[LIBVIRT]
volume_clear=none
[Default]
[neutron]
admin_auth_url=http://<CONTROLLER>.csail.mit.edu:35357/v2.0
extension_sync_interval=600
admin_username=neutron
admin_tenant_name=openstack
url_timeout=30
admin_password=<REDACTED>
auth_strategy=keystone
default_tenant_id=default
url=http://<CONTROLLER>.csail.mit.edu:9696
ovs_bridge=br-int
region_name=RegionOne
[glance]
api_servers=<CONTROLLER>.csail.mit.edu:9292


@ -0,0 +1,12 @@
[DEFAULT]
dhcpbridge_flagfile=/etc/nova/nova.conf
dhcpbridge=/usr/bin/nova-dhcpbridge
logdir=/var/log/nova
state_path=/var/lib/nova
lock_path=/var/lock/nova
force_dhcp_release=True
libvirt_use_virtio_for_bridges=True
verbose=True
ec2_private_dns_show_ip=True
api_paste_config=/etc/nova/api-paste.ini
enabled_apis=ec2,osapi_compute,metadata


@ -0,0 +1,348 @@
{
"context_is_admin": "role:admin",
"admin_or_owner": "is_admin:True or project_id:%(project_id)s",
"default": "rule:admin_or_owner",
"cells_scheduler_filter:TargetCellFilter": "is_admin:True",
"compute:create": "",
"compute:create:attach_network": "",
"compute:create:attach_volume": "",
"compute:create:forced_host": "is_admin:True",
"compute:get_all": "",
"compute:get_all_tenants": "",
"compute:start": "rule:admin_or_owner",
"compute:stop": "rule:admin_or_owner",
"compute:unlock_override": "rule:admin_api",
"compute:shelve": "",
"compute:shelve_offload": "",
"compute:unshelve": "",
"compute:volume_snapshot_create": "",
"compute:volume_snapshot_delete": "",
"admin_api": "is_admin:True",
"compute:v3:servers:start": "rule:admin_or_owner",
"compute:v3:servers:stop": "rule:admin_or_owner",
"compute_extension:v3:os-access-ips:discoverable": "",
"compute_extension:v3:os-access-ips": "",
"compute_extension:accounts": "rule:admin_api",
"compute_extension:admin_actions": "rule:admin_api",
"compute_extension:admin_actions:pause": "rule:admin_or_owner",
"compute_extension:admin_actions:unpause": "rule:admin_or_owner",
"compute_extension:admin_actions:suspend": "rule:admin_or_owner",
"compute_extension:admin_actions:resume": "rule:admin_or_owner",
"compute_extension:admin_actions:lock": "rule:admin_or_owner",
"compute_extension:admin_actions:unlock": "rule:admin_or_owner",
"compute_extension:admin_actions:resetNetwork": "rule:admin_api",
"compute_extension:admin_actions:injectNetworkInfo": "rule:admin_api",
"compute_extension:admin_actions:createBackup": "rule:admin_or_owner",
"compute_extension:admin_actions:migrateLive": "rule:admin_api",
"compute_extension:admin_actions:resetState": "rule:admin_api",
"compute_extension:admin_actions:migrate": "rule:admin_api",
"compute_extension:v3:os-admin-actions": "rule:admin_api",
"compute_extension:v3:os-admin-actions:discoverable": "",
"compute_extension:v3:os-admin-actions:reset_network": "rule:admin_api",
"compute_extension:v3:os-admin-actions:inject_network_info": "rule:admin_api",
"compute_extension:v3:os-admin-actions:reset_state": "rule:admin_api",
"compute_extension:v3:os-admin-password": "",
"compute_extension:v3:os-admin-password:discoverable": "",
"compute_extension:aggregates": "rule:admin_api",
"compute_extension:v3:os-aggregates:discoverable": "",
"compute_extension:v3:os-aggregates:index": "rule:admin_api",
"compute_extension:v3:os-aggregates:create": "rule:admin_api",
"compute_extension:v3:os-aggregates:show": "rule:admin_api",
"compute_extension:v3:os-aggregates:update": "rule:admin_api",
"compute_extension:v3:os-aggregates:delete": "rule:admin_api",
"compute_extension:v3:os-aggregates:add_host": "rule:admin_api",
"compute_extension:v3:os-aggregates:remove_host": "rule:admin_api",
"compute_extension:v3:os-aggregates:set_metadata": "rule:admin_api",
"compute_extension:agents": "rule:admin_api",
"compute_extension:v3:os-agents": "rule:admin_api",
"compute_extension:v3:os-agents:discoverable": "",
"compute_extension:attach_interfaces": "",
"compute_extension:v3:os-attach-interfaces": "",
"compute_extension:v3:os-attach-interfaces:discoverable": "",
"compute_extension:baremetal_nodes": "rule:admin_api",
"compute_extension:v3:os-block-device-mapping-v1:discoverable": "",
"compute_extension:cells": "rule:admin_api",
"compute_extension:cells:create": "rule:admin_api",
"compute_extension:cells:delete": "rule:admin_api",
"compute_extension:cells:update": "rule:admin_api",
"compute_extension:cells:sync_instances": "rule:admin_api",
"compute_extension:v3:os-cells": "rule:admin_api",
"compute_extension:v3:os-cells:create": "rule:admin_api",
"compute_extension:v3:os-cells:delete": "rule:admin_api",
"compute_extension:v3:os-cells:update": "rule:admin_api",
"compute_extension:v3:os-cells:sync_instances": "rule:admin_api",
"compute_extension:v3:os-cells:discoverable": "",
"compute_extension:certificates": "",
"compute_extension:v3:os-certificates:create": "",
"compute_extension:v3:os-certificates:show": "",
"compute_extension:v3:os-certificates:discoverable": "",
"compute_extension:cloudpipe": "rule:admin_api",
"compute_extension:cloudpipe_update": "rule:admin_api",
"compute_extension:console_output": "",
"compute_extension:v3:consoles:discoverable": "",
"compute_extension:v3:os-console-output:discoverable": "",
"compute_extension:v3:os-console-output": "",
"compute_extension:consoles": "",
"compute_extension:v3:os-remote-consoles": "",
"compute_extension:v3:os-remote-consoles:discoverable": "",
"compute_extension:createserverext": "",
"compute_extension:v3:os-create-backup:discoverable": "",
"compute_extension:v3:os-create-backup": "rule:admin_or_owner",
"compute_extension:deferred_delete": "",
"compute_extension:v3:os-deferred-delete": "",
"compute_extension:v3:os-deferred-delete:discoverable": "",
"compute_extension:disk_config": "",
"compute_extension:evacuate": "rule:admin_api",
"compute_extension:v3:os-evacuate": "rule:admin_api",
"compute_extension:v3:os-evacuate:discoverable": "",
"compute_extension:extended_server_attributes": "rule:admin_api",
"compute_extension:v3:os-extended-server-attributes": "rule:admin_api",
"compute_extension:v3:os-extended-server-attributes:discoverable": "",
"compute_extension:extended_status": "",
"compute_extension:v3:os-extended-status": "",
"compute_extension:v3:os-extended-status:discoverable": "",
"compute_extension:extended_availability_zone": "",
"compute_extension:v3:os-extended-availability-zone": "",
"compute_extension:v3:os-extended-availability-zone:discoverable": "",
"compute_extension:extended_ips": "",
"compute_extension:extended_ips_mac": "",
"compute_extension:extended_vif_net": "",
"compute_extension:v3:extension_info:discoverable": "",
"compute_extension:extended_volumes": "",
"compute_extension:v3:os-extended-volumes": "",
"compute_extension:v3:os-extended-volumes:swap": "",
"compute_extension:v3:os-extended-volumes:discoverable": "",
"compute_extension:v3:os-extended-volumes:attach": "",
"compute_extension:v3:os-extended-volumes:detach": "",
"compute_extension:fixed_ips": "rule:admin_api",
"compute_extension:flavor_access": "",
"compute_extension:flavor_access:addTenantAccess": "rule:admin_api",
"compute_extension:flavor_access:removeTenantAccess": "rule:admin_api",
"compute_extension:v3:os-flavor-access": "",
"compute_extension:v3:os-flavor-access:discoverable": "",
"compute_extension:v3:os-flavor-access:remove_tenant_access": "rule:admin_api",
"compute_extension:v3:os-flavor-access:add_tenant_access": "rule:admin_api",
"compute_extension:flavor_disabled": "",
"compute_extension:flavor_rxtx": "",
"compute_extension:v3:os-flavor-rxtx": "",
"compute_extension:v3:os-flavor-rxtx:discoverable": "",
"compute_extension:flavor_swap": "",
"compute_extension:flavorextradata": "",
"compute_extension:flavorextraspecs:index": "",
"compute_extension:flavorextraspecs:show": "",
"compute_extension:flavorextraspecs:create": "rule:admin_api",
"compute_extension:flavorextraspecs:update": "rule:admin_api",
"compute_extension:flavorextraspecs:delete": "rule:admin_api",
"compute_extension:v3:flavors:discoverable": "",
"compute_extension:v3:flavor-extra-specs:discoverable": "",
"compute_extension:v3:flavor-extra-specs:index": "",
"compute_extension:v3:flavor-extra-specs:show": "",
"compute_extension:v3:flavor-extra-specs:create": "rule:admin_api",
"compute_extension:v3:flavor-extra-specs:update": "rule:admin_api",
"compute_extension:v3:flavor-extra-specs:delete": "rule:admin_api",
"compute_extension:flavormanage": "rule:admin_api",
"compute_extension:v3:flavor-manage:discoverable": "",
"compute_extension:v3:flavor-manage": "rule:admin_api",
"compute_extension:floating_ip_dns": "",
"compute_extension:floating_ip_pools": "",
"compute_extension:floating_ips": "",
"compute_extension:floating_ips_bulk": "rule:admin_api",
"compute_extension:fping": "",
"compute_extension:fping:all_tenants": "rule:admin_api",
"compute_extension:hide_server_addresses": "is_admin:False",
"compute_extension:v3:os-hide-server-addresses": "is_admin:False",
"compute_extension:v3:os-hide-server-addresses:discoverable": "",
"compute_extension:hosts": "rule:admin_api",
"compute_extension:v3:os-hosts": "rule:admin_api",
"compute_extension:v3:os-hosts:discoverable": "",
"compute_extension:hypervisors": "rule:admin_api",
"compute_extension:v3:os-hypervisors": "rule:admin_api",
"compute_extension:v3:os-hypervisors:discoverable": "",
"compute_extension:image_size": "",
"compute_extension:v3:images:discoverable": "",
"compute_extension:v3:image-size": "",
"compute_extension:v3:image-size:discoverable": "",
"compute_extension:instance_actions": "",
"compute_extension:v3:os-instance-actions": "",
"compute_extension:v3:os-instance-actions:discoverable": "",
"compute_extension:instance_actions:events": "rule:admin_api",
"compute_extension:v3:os-instance-actions:events": "rule:admin_api",
"compute_extension:instance_usage_audit_log": "rule:admin_api",
"compute_extension:v3:ips:discoverable": "",
"compute_extension:keypairs": "",
"compute_extension:keypairs:index": "",
"compute_extension:keypairs:show": "",
"compute_extension:keypairs:create": "",
"compute_extension:keypairs:delete": "",
"compute_extension:v3:os-keypairs:discoverable": "",
"compute_extension:v3:os-keypairs": "",
"compute_extension:v3:os-keypairs:index": "",
"compute_extension:v3:os-keypairs:show": "",
"compute_extension:v3:os-keypairs:create": "",
"compute_extension:v3:os-keypairs:delete": "",
"compute_extension:v3:limits:discoverable": "",
"compute_extension:v3:os-lock-server:discoverable": "",
"compute_extension:v3:os-lock-server:lock": "rule:admin_or_owner",
"compute_extension:v3:os-lock-server:unlock": "rule:admin_or_owner",
"compute_extension:v3:os-migrate-server:discoverable": "",
"compute_extension:v3:os-migrate-server:migrate": "rule:admin_api",
"compute_extension:v3:os-migrate-server:migrate_live": "rule:admin_api",
"compute_extension:multinic": "",
"compute_extension:v3:os-multinic": "",
"compute_extension:v3:os-multinic:discoverable": "",
"compute_extension:networks": "rule:admin_api",
"compute_extension:networks:view": "",
"compute_extension:networks_associate": "rule:admin_api",
"compute_extension:v3:os-pause-server:discoverable": "",
"compute_extension:v3:os-pause-server:pause": "rule:admin_or_owner",
"compute_extension:v3:os-pause-server:unpause": "rule:admin_or_owner",
"compute_extension:v3:os-pci:pci_servers": "",
"compute_extension:v3:os-pci:discoverable": "",
"compute_extension:v3:os-pci:index": "rule:admin_api",
"compute_extension:v3:os-pci:detail": "rule:admin_api",
"compute_extension:v3:os-pci:show": "rule:admin_api",
"compute_extension:quotas:show": "",
"compute_extension:quotas:update": "rule:admin_api",
"compute_extension:quotas:delete": "rule:admin_api",
"compute_extension:v3:os-quota-sets:discoverable": "",
"compute_extension:v3:os-quota-sets:show": "",
"compute_extension:v3:os-quota-sets:update": "rule:admin_api",
"compute_extension:v3:os-quota-sets:delete": "rule:admin_api",
"compute_extension:v3:os-quota-sets:detail": "rule:admin_api",
"compute_extension:quota_classes": "",
"compute_extension:rescue": "",
"compute_extension:v3:os-rescue": "",
"compute_extension:v3:os-rescue:discoverable": "",
"compute_extension:v3:os-scheduler-hints:discoverable": "",
"compute_extension:security_group_default_rules": "rule:admin_api",
"compute_extension:security_groups": "",
"compute_extension:v3:os-security-groups": "",
"compute_extension:v3:os-security-groups:discoverable": "",
"compute_extension:server_diagnostics": "rule:admin_api",
"compute_extension:v3:os-server-diagnostics": "rule:admin_api",
"compute_extension:v3:os-server-diagnostics:discoverable": "",
"compute_extension:server_groups": "",
"compute_extension:server_password": "",
"compute_extension:v3:os-server-password": "",
"compute_extension:v3:os-server-password:discoverable": "",
"compute_extension:server_usage": "",
"compute_extension:v3:os-server-usage": "",
"compute_extension:v3:os-server-usage:discoverable": "",
"compute_extension:v3:os-server-groups": "",
"compute_extension:v3:os-server-groups:discoverable": "",
"compute_extension:services": "rule:admin_api",
"compute_extension:v3:os-services": "rule:admin_api",
"compute_extension:v3:os-services:discoverable": "",
"compute_extension:v3:server-metadata:discoverable": "",
"compute_extension:v3:servers:discoverable": "",
"compute_extension:shelve": "",
"compute_extension:shelveOffload": "rule:admin_api",
"compute_extension:v3:os-shelve:shelve": "",
"compute_extension:v3:os-shelve:shelve:discoverable": "",
"compute_extension:v3:os-shelve:shelve_offload": "rule:admin_api",
"compute_extension:simple_tenant_usage:show": "rule:admin_or_owner",
"compute_extension::v3:os-simple-tenant-usage:discoverable": "",
"compute_extension::v3:os-simple-tenant-usage:show": "rule:admin_or_owner",
"compute_extension::v3:os-simple-tenant-usage:list": "rule:admin_api",
"compute_extension:v3:os-suspend-server:discoverable": "",
"compute_extension:v3:os-suspend-server:suspend": "rule:admin_or_owner",
"compute_extension:v3:os-suspend-server:resume": "rule:admin_or_owner",
"compute_extension:simple_tenant_usage:list": "rule:admin_api",
"compute_extension:unshelve": "",
"compute_extension:v3:os-shelve:unshelve": "",
"compute_extension:users": "rule:admin_api",
"compute_extension:v3:os-user-data:discoverable": "",
"compute_extension:virtual_interfaces": "",
"compute_extension:virtual_storage_arrays": "",
"compute_extension:volumes": "",
"compute_extension:volume_attachments:index": "",
"compute_extension:volume_attachments:show": "",
"compute_extension:volume_attachments:create": "",
"compute_extension:volume_attachments:update": "",
"compute_extension:volume_attachments:delete": "",
"compute_extension:v3:os-volumes": "",
"compute_extension:v3:os-volumes:discoverable": "",
"compute_extension:volumetypes": "",
"compute_extension:availability_zone:list": "",
"compute_extension:v3:os-availability-zone:list": "",
"compute_extension:v3:os-availability-zone:discoverable": "",
"compute_extension:availability_zone:detail": "rule:admin_api",
"compute_extension:v3:os-availability-zone:detail": "rule:admin_api",
"compute_extension:used_limits_for_admin": "rule:admin_api",
"compute_extension:v3:os-used-limits": "rule:admin_api",
"compute_extension:v3:os-used-limits:discoverable": "",
"compute_extension:migrations:index": "rule:admin_api",
"compute_extension:v3:os-migrations:index": "rule:admin_api",
"compute_extension:v3:os-migrations:discoverable": "",
"compute_extension:os-assisted-volume-snapshots:create": "rule:admin_api",
"compute_extension:os-assisted-volume-snapshots:delete": "rule:admin_api",
"compute_extension:console_auth_tokens": "rule:admin_api",
"compute_extension:v3:os-console-auth-tokens": "rule:admin_api",
"compute_extension:os-server-external-events:create": "rule:admin_api",
"compute_extension:v3:os-server-external-events:create": "rule:admin_api",
"volume:create": "",
"volume:get_all": "",
"volume:get_volume_metadata": "",
"volume:get_snapshot": "",
"volume:get_all_snapshots": "",
"volume_extension:types_manage": "rule:admin_api",
"volume_extension:types_extra_specs": "rule:admin_api",
"volume_extension:volume_admin_actions:reset_status": "rule:admin_api",
"volume_extension:snapshot_admin_actions:reset_status": "rule:admin_api",
"volume_extension:volume_admin_actions:force_delete": "rule:admin_api",
"network:get_all": "",
"network:get": "",
"network:create": "",
"network:delete": "",
"network:associate": "",
"network:disassociate": "",
"network:get_vifs_by_instance": "",
"network:allocate_for_instance": "",
"network:deallocate_for_instance": "",
"network:validate_networks": "",
"network:get_instance_uuids_by_ip_filter": "",
"network:get_instance_id_by_floating_address": "",
"network:setup_networks_on_host": "",
"network:get_backdoor_port": "",
"network:get_floating_ip": "",
"network:get_floating_ip_pools": "",
"network:get_floating_ip_by_address": "",
"network:get_floating_ips_by_project": "",
"network:get_floating_ips_by_fixed_address": "",
"network:allocate_floating_ip": "",
"network:deallocate_floating_ip": "",
"network:associate_floating_ip": "",
"network:disassociate_floating_ip": "",
"network:release_floating_ip": "",
"network:migrate_instance_start": "",
"network:migrate_instance_finish": "",
"network:get_fixed_ip": "",
"network:get_fixed_ip_by_address": "",
"network:add_fixed_ip_to_instance": "",
"network:remove_fixed_ip_from_instance": "",
"network:add_network_to_project": "",
"network:get_instance_nw_info": "",
"network:get_dns_domains": "",
"network:add_dns_entry": "",
"network:modify_dns_entry": "",
"network:delete_dns_entry": "",
"network:get_dns_entries_by_address": "",
"network:get_dns_entries_by_name": "",
"network:create_private_dns_domain": "",
"network:create_public_dns_domain": "",
"network:delete_dns_domain": "",
"network:attach_external_network": "rule:admin_api"
}


@ -0,0 +1,27 @@
# Configuration for nova-rootwrap
# This file should be owned by (and only-writeable by) the root user
[DEFAULT]
# List of directories to load filter definitions from (separated by ',').
# These directories MUST all be only writeable by root !
filters_path=/etc/nova/rootwrap.d,/usr/share/nova/rootwrap
# List of directories to search executables in, in case filters do not
# explicitly specify a full path (separated by ',')
# If not specified, defaults to system PATH environment variable.
# These directories MUST all be only writeable by root !
exec_dirs=/sbin,/usr/sbin,/bin,/usr/bin
# Enable logging to syslog
# Default value is False
use_syslog=False
# Which syslog facility to use.
# Valid values include auth, authpriv, syslog, local0, local1...
# Default value is 'syslog'
syslog_log_facility=syslog
# Which messages to log.
# INFO means log all usage
# ERROR means only log unsuccessful attempts
syslog_log_level=ERROR


@ -0,0 +1,228 @@
# nova-rootwrap command filters for compute nodes
# This file should be owned by (and only-writeable by) the root user
[Filters]
# nova/virt/disk/mount/api.py: 'kpartx', '-a', device
# nova/virt/disk/mount/api.py: 'kpartx', '-d', device
kpartx: CommandFilter, kpartx, root
# nova/virt/xenapi/vm_utils.py: tune2fs, -O ^has_journal, part_path
# nova/virt/xenapi/vm_utils.py: tune2fs, -j, partition_path
tune2fs: CommandFilter, tune2fs, root
# nova/virt/disk/mount/api.py: 'mount', mapped_device
# nova/virt/disk/api.py: 'mount', '-o', 'bind', src, target
# nova/virt/xenapi/vm_utils.py: 'mount', '-t', 'ext2,ext3,ext4,reiserfs'..
# nova/virt/configdrive.py: 'mount', device, mountdir
# nova/virt/libvirt/volume.py: 'mount', '-t', 'sofs' ...
mount: CommandFilter, mount, root
# nova/virt/disk/mount/api.py: 'umount', mapped_device
# nova/virt/disk/api.py: 'umount' target
# nova/virt/xenapi/vm_utils.py: 'umount', dev_path
# nova/virt/configdrive.py: 'umount', mountdir
umount: CommandFilter, umount, root
# nova/virt/disk/mount/nbd.py: 'qemu-nbd', '-c', device, image
# nova/virt/disk/mount/nbd.py: 'qemu-nbd', '-d', device
qemu-nbd: CommandFilter, qemu-nbd, root
# nova/virt/disk/mount/loop.py: 'losetup', '--find', '--show', image
# nova/virt/disk/mount/loop.py: 'losetup', '--detach', device
losetup: CommandFilter, losetup, root
# nova/virt/libvirt/utils.py: 'blockdev', '--getsize64', path
# nova/virt/disk/mount/nbd.py: 'blockdev', '--flushbufs', device
blockdev: RegExpFilter, blockdev, root, blockdev, (--getsize64|--flushbufs), /dev/.*
# nova/virt/disk/vfs/localfs.py: 'tee', canonpath
tee: CommandFilter, tee, root
# nova/virt/disk/vfs/localfs.py: 'mkdir', canonpath
mkdir: CommandFilter, mkdir, root
# nova/virt/disk/vfs/localfs.py: 'chown'
# nova/virt/libvirt/connection.py: 'chown', os.getuid( console_log
# nova/virt/libvirt/connection.py: 'chown', os.getuid( console_log
# nova/virt/libvirt/connection.py: 'chown', 'root', basepath('disk')
chown: CommandFilter, chown, root
# nova/virt/disk/vfs/localfs.py: 'chmod'
chmod: CommandFilter, chmod, root
# nova/virt/libvirt/vif.py: 'ip', 'tuntap', 'add', dev, 'mode', 'tap'
# nova/virt/libvirt/vif.py: 'ip', 'link', 'set', dev, 'up'
# nova/virt/libvirt/vif.py: 'ip', 'link', 'delete', dev
# nova/network/linux_net.py: 'ip', 'addr', 'add', str(floating_ip)+'/32'i..
# nova/network/linux_net.py: 'ip', 'addr', 'del', str(floating_ip)+'/32'..
# nova/network/linux_net.py: 'ip', 'addr', 'add', '169.254.169.254/32',..
# nova/network/linux_net.py: 'ip', 'addr', 'show', 'dev', dev, 'scope',..
# nova/network/linux_net.py: 'ip', 'addr', 'del/add', ip_params, dev)
# nova/network/linux_net.py: 'ip', 'addr', 'del', params, fields[-1]
# nova/network/linux_net.py: 'ip', 'addr', 'add', params, bridge
# nova/network/linux_net.py: 'ip', '-f', 'inet6', 'addr', 'change', ..
# nova/network/linux_net.py: 'ip', 'link', 'set', 'dev', dev, 'promisc',..
# nova/network/linux_net.py: 'ip', 'link', 'add', 'link', bridge_if ...
# nova/network/linux_net.py: 'ip', 'link', 'set', interface, address,..
# nova/network/linux_net.py: 'ip', 'link', 'set', interface, 'up'
# nova/network/linux_net.py: 'ip', 'link', 'set', bridge, 'up'
# nova/network/linux_net.py: 'ip', 'addr', 'show', 'dev', interface, ..
# nova/network/linux_net.py: 'ip', 'link', 'set', dev, address, ..
# nova/network/linux_net.py: 'ip', 'link', 'set', dev, 'up'
# nova/network/linux_net.py: 'ip', 'route', 'add', ..
# nova/network/linux_net.py: 'ip', 'route', 'del', .
# nova/network/linux_net.py: 'ip', 'route', 'show', 'dev', dev
ip: CommandFilter, ip, root
# nova/virt/libvirt/vif.py: 'tunctl', '-b', '-t', dev
# nova/network/linux_net.py: 'tunctl', '-b', '-t', dev
tunctl: CommandFilter, tunctl, root
# nova/virt/libvirt/vif.py: 'ovs-vsctl', ...
# nova/virt/libvirt/vif.py: 'ovs-vsctl', 'del-port', ...
# nova/network/linux_net.py: 'ovs-vsctl', ....
ovs-vsctl: CommandFilter, ovs-vsctl, root
# nova/network/linux_net.py: 'ovs-ofctl', ....
ovs-ofctl: CommandFilter, ovs-ofctl, root
# nova/virt/libvirt/connection.py: 'dd', if=%s % virsh_output, ...
dd: CommandFilter, dd, root
# nova/virt/xenapi/volume_utils.py: 'iscsiadm', '-m', ...
iscsiadm: CommandFilter, iscsiadm, root
# nova/virt/libvirt/volume.py: 'aoe-revalidate', aoedev
# nova/virt/libvirt/volume.py: 'aoe-discover'
aoe-revalidate: CommandFilter, aoe-revalidate, root
aoe-discover: CommandFilter, aoe-discover, root
# nova/virt/xenapi/vm_utils.py: parted, --script, ...
# nova/virt/xenapi/vm_utils.py: 'parted', '--script', dev_path, ..*.
parted: CommandFilter, parted, root
# nova/virt/xenapi/vm_utils.py: 'pygrub', '-qn', dev_path
pygrub: CommandFilter, pygrub, root
# nova/virt/xenapi/vm_utils.py: fdisk %(dev_path)s
fdisk: CommandFilter, fdisk, root
# nova/virt/xenapi/vm_utils.py: e2fsck, -f, -p, partition_path
# nova/virt/disk/api.py: e2fsck, -f, -p, image
e2fsck: CommandFilter, e2fsck, root
# nova/virt/xenapi/vm_utils.py: resize2fs, partition_path
# nova/virt/disk/api.py: resize2fs, image
resize2fs: CommandFilter, resize2fs, root
# nova/network/linux_net.py: 'ip[6]tables-save' % (cmd, '-t', ...
iptables-save: CommandFilter, iptables-save, root
ip6tables-save: CommandFilter, ip6tables-save, root
# nova/network/linux_net.py: 'ip[6]tables-restore' % (cmd,)
iptables-restore: CommandFilter, iptables-restore, root
ip6tables-restore: CommandFilter, ip6tables-restore, root
# nova/network/linux_net.py: 'arping', '-U', floating_ip, '-A', '-I', ...
# nova/network/linux_net.py: 'arping', '-U', network_ref['dhcp_server'],..
arping: CommandFilter, arping, root
# nova/network/linux_net.py: 'dhcp_release', dev, address, mac_address
dhcp_release: CommandFilter, dhcp_release, root
# nova/network/linux_net.py: 'kill', '-9', pid
# nova/network/linux_net.py: 'kill', '-HUP', pid
kill_dnsmasq: KillFilter, root, /usr/sbin/dnsmasq, -9, -HUP
# nova/network/linux_net.py: 'kill', pid
kill_radvd: KillFilter, root, /usr/sbin/radvd
# nova/network/linux_net.py: dnsmasq call
dnsmasq: EnvFilter, env, root, CONFIG_FILE=, NETWORK_ID=, dnsmasq
# nova/network/linux_net.py: 'radvd', '-C', '%s' % _ra_file(dev, 'conf'..
radvd: CommandFilter, radvd, root
# nova/network/linux_net.py: 'brctl', 'addbr', bridge
# nova/network/linux_net.py: 'brctl', 'setfd', bridge, 0
# nova/network/linux_net.py: 'brctl', 'stp', bridge, 'off'
# nova/network/linux_net.py: 'brctl', 'addif', bridge, interface
brctl: CommandFilter, brctl, root
# nova/virt/libvirt/utils.py: 'mkswap'
# nova/virt/xenapi/vm_utils.py: 'mkswap'
mkswap: CommandFilter, mkswap, root
# nova/virt/xenapi/vm_utils.py: 'mkfs'
# nova/utils.py: 'mkfs', fs, path, label
mkfs: CommandFilter, mkfs, root
# nova/virt/libvirt/utils.py: 'qemu-img'
qemu-img: CommandFilter, qemu-img, root
# nova/virt/disk/vfs/localfs.py: 'readlink', '-e'
readlink: CommandFilter, readlink, root
# nova/virt/disk/api.py: 'touch', target
touch: CommandFilter, touch, root
# nova/virt/disk/api.py:
mkfs.ext3: CommandFilter, mkfs.ext3, root
mkfs.ntfs: CommandFilter, mkfs.ntfs, root
# nova/virt/libvirt/connection.py:
read_initiator: ReadFileFilter, /etc/iscsi/initiatorname.iscsi
# nova/virt/libvirt/connection.py:
lvremove: CommandFilter, lvremove, root
# nova/virt/libvirt/utils.py:
lvcreate: CommandFilter, lvcreate, root
# nova/virt/libvirt/utils.py:
lvs: CommandFilter, lvs, root
# nova/virt/libvirt/utils.py:
vgs: CommandFilter, vgs, root
# nova/virt/baremetal/volume_driver.py: 'tgtadm', '--lld', 'iscsi', ...
tgtadm: CommandFilter, tgtadm, root
# nova/utils.py:read_file_as_root: 'cat', file_path
# (called from nova/virt/disk/vfs/localfs.py:VFSLocalFS.read_file)
read_passwd: RegExpFilter, cat, root, cat, (/var|/usr)?/tmp/openstack-vfs-localfs[^/]+/etc/passwd
read_shadow: RegExpFilter, cat, root, cat, (/var|/usr)?/tmp/openstack-vfs-localfs[^/]+/etc/shadow
# nova/virt/libvirt/volume.py: 'multipath' '-R'
multipath: CommandFilter, multipath, root
# nova/virt/libvirt/utils.py:
systool: CommandFilter, systool, root
# nova/virt/libvirt/volume.py:
sginfo: CommandFilter, sginfo, root
sg_scan: CommandFilter, sg_scan, root
ln: RegExpFilter, ln, root, ln, --symbolic, --force, /dev/mapper/ip-.*-iscsi-iqn.*, /dev/disk/by-path/ip-.*-iscsi-iqn.*
# nova/volume/encryptors.py:
# nova/virt/libvirt/dmcrypt.py:
cryptsetup: CommandFilter, cryptsetup, root
# nova/virt/xenapi/vm_utils.py:
xenstore-read: CommandFilter, xenstore-read, root
# nova/virt/baremetal/tilera.py: 'rpc.mountd'
rpc.mountd: CommandFilter, rpc.mountd, root
# nova/virt/libvirt/utils.py:
rbd: CommandFilter, rbd, root
# nova/virt/libvirt/utils.py: 'shred', '-n3', '-s%d' % volume_size, path
shred: CommandFilter, shred, root
# nova/virt/libvirt/volume.py: 'cp', '/dev/stdin', delete_control..
cp: CommandFilter, cp, root
# nova/virt/xenapi/vm_utils.py:
sync: CommandFilter, sync, root


@ -0,0 +1,6 @@
<secret ephemeral='no' private='no'>
<usage type='ceph'>
<name>/etc/ceph/ceph.client.openstack.keyring secret</name>
</usage>
<uuid>REDACTED</uuid>
</secret>
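<!--
  A usage sketch (the file name below is an assumption; the UUID above is
  redacted): define the secret in libvirt, then load the Ceph key so nova
  can attach RBD volumes. rbd_secret_uuid in the [libvirt] section of
  nova.conf must match this UUID.
    virsh secret-define ceph-openstack-secret.xml
    virsh secret-set-value REDACTED_UUID "$(ceph auth get-key client.openstack)"
-->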


@ -0,0 +1 @@
<UUID REDACTED>


@ -0,0 +1,10 @@
#<VirtualHost *:80>
WSGIScriptAlias /horizon /usr/share/openstack-dashboard/openstack_dashboard/wsgi/django.wsgi
WSGIDaemonProcess horizon user=www-data group=www-data processes=3 threads=10
Alias /static /usr/share/openstack-dashboard/openstack_dashboard/static/
<Directory /usr/share/openstack-dashboard/openstack_dashboard/wsgi>
Order allow,deny
Allow from all
</Directory>
# </VirtualHost>
RedirectMatch permanent ^/$ /horizon/


@ -0,0 +1,46 @@
# ************************************
# Vhost template in module puppetlabs-apache
# Managed by Puppet
# ************************************
Listen 35358
<VirtualHost *:35358>
ServerName <CONTROLLER>.csail.mit.edu
## Vhost docroot
DocumentRoot "/usr/share/pyshared/keystone/httpd/"
## Directories, there should at least be a declaration for /usr/lib/cgi-bin/keystone
<Directory "/usr/share/pyshared/keystone/httpd/">
Options Indexes FollowSymLinks MultiViews
AllowOverride None
Require all granted
</Directory>
## Load additional static includes
## Logging
ErrorLog "/var/log/apache2/keystone_wsgi_admin_error_ssl.log"
ServerSignature Off
CustomLog "/var/log/apache2/keystone_wsgi_admin_access_ssl.log" combined
## SSL directives
SSLEngine on
SSLCACertificatePath ssl.crt/
SSLCADNRequestFile ssl.crt/csail-client.crt
SSLCARevocationPath ssl.crl/
SSLCertificateFile ssl.crt/<CONTROLLER>.csail.mit.edu.crt
SSLCertificateKeyFile ssl.key/<CONTROLLER>.csail.mit.edu.key
SSLCipherSuite HIGH,!ADH
SSLProtocol all -SSLv2
# SSLUserName SSL_CLIENT_S_DN_Email
SSLVerifyDepth 3
WSGIDaemonProcess keystone-admin-ssl group=keystone processes=16 threads=16 user=keystone
WSGIProcessGroup keystone-admin-ssl
WSGIChunkedRequest On
WSGIScriptAlias / "/usr/share/pyshared/keystone/httpd/admin"
</VirtualHost>

View File

@ -0,0 +1,49 @@
# ************************************
# Vhost template in module puppetlabs-apache
# Managed by Puppet
# ************************************
Listen 5001
<VirtualHost *:5001>
ServerName <CONTROLLER>.csail.mit.edu
## Vhost docroot
DocumentRoot "/usr/share/pyshared/keystone/httpd/"
## Directories, there should at least be a declaration for /usr/lib/cgi-bin/keystone
<Directory "/usr/share/pyshared/keystone/httpd/">
Options Indexes FollowSymLinks MultiViews
AllowOverride None
Require all granted
</Directory>
## Load additional static includes
## Logging
ErrorLog "/var/log/apache2/keystone_wsgi_main_error_ssl.log"
ServerSignature Off
CustomLog "/var/log/apache2/keystone_wsgi_main_access_ssl.log" combined
## SSL directives
SSLEngine on
SSLCACertificatePath ssl.crt/
SSLCADNRequestFile ssl.crt/csail-client.crt
SSLCARevocationPath ssl.crl/
SSLCertificateFile ssl.crt/<CONTROLLER>.csail.mit.edu.crt
SSLCertificateKeyFile ssl.key/<CONTROLLER>.csail.mit.edu.key
SSLCipherSuite HIGH,!ADH
SSLProtocol all -SSLv2
# SSLUserName SSL_CLIENT_S_DN_Email
SSLVerifyDepth 3
WSGIDaemonProcess keystone-main-ssl group=keystone processes=16 threads=16 user=keystone
WSGIProcessGroup keystone-main-ssl
WSGIScriptAlias / "/usr/share/pyshared/keystone/httpd/main"
</VirtualHost>
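
Both vhosts serve the same keystone WSGI app (admin on 35358, main on 5001). A quick hedged smoke test of the HTTPS endpoints from Python (hostname placeholder as above; the verify path should point at whatever CA bundle carries the CSAIL CA):

import requests

for port in (5001, 35358):
    r = requests.get('https://<CONTROLLER>.csail.mit.edu:%d/v2.0/' % port,
                     verify='/etc/ssl/certs/ca-certificates.crt')
    print(port, r.status_code, r.json()['version']['id'])  # expect 200, v2.0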

View File

@ -0,0 +1,64 @@
#############
# OpenStack #
#############
[composite:osapi_volume]
use = call:cinder.api:root_app_factory
/: apiversions
/v1: openstack_volume_api_v1
/v2: openstack_volume_api_v2
[composite:openstack_volume_api_v1]
use = call:cinder.api.middleware.auth:pipeline_factory
noauth = faultwrap sizelimit noauth apiv1
keystone = faultwrap sizelimit authtoken keystonecontext apiv1
keystone_nolimit = faultwrap sizelimit authtoken keystonecontext apiv1
[composite:openstack_volume_api_v2]
use = call:cinder.api.middleware.auth:pipeline_factory
noauth = faultwrap sizelimit noauth apiv2
keystone = faultwrap sizelimit authtoken keystonecontext apiv2
keystone_nolimit = faultwrap sizelimit authtoken keystonecontext apiv2
[filter:faultwrap]
paste.filter_factory = cinder.api.middleware.fault:FaultWrapper.factory
[filter:noauth]
paste.filter_factory = cinder.api.middleware.auth:NoAuthMiddleware.factory
[filter:sizelimit]
paste.filter_factory = cinder.api.middleware.sizelimit:RequestBodySizeLimiter.factory
[app:apiv1]
paste.app_factory = cinder.api.v1.router:APIRouter.factory
[app:apiv2]
paste.app_factory = cinder.api.v2.router:APIRouter.factory
[pipeline:apiversions]
pipeline = faultwrap osvolumeversionapp
[app:osvolumeversionapp]
paste.app_factory = cinder.api.versions:Versions.factory
##########
# Shared #
##########
[filter:keystonecontext]
paste.filter_factory = cinder.api.middleware.auth:CinderKeystoneContext.factory
[filter:authtoken]
paste.filter_factory = keystoneclient.middleware.auth_token:filter_factory
service_protocol = http
service_host = <CONTROLLER_IP>
service_port = 5000
auth_host = <CONTROLLER_IP>
auth_port = 35357
auth_version = v2.0
auth_protocol = http
admin_tenant_name = services
admin_user = cinder
admin_password = <REDACTED>
signing_dir = /var/lib/cinder
auth_uri=http://<CONTROLLER_IP>:5000/v2.0/
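
The pipeline_factory named in the composite sections picks one of the noauth/keystone/keystone_nolimit pipelines according to cinder's auth_strategy. A simplified sketch of that PasteDeploy dispatch (not the actual cinder code):

# Roughly what cinder.api.middleware.auth:pipeline_factory does.
# local_conf carries the 'noauth = ...' / 'keystone = ...' lines above.
def pipeline_factory(loader, global_conf, **local_conf):
    auth_strategy = 'keystone'  # cinder reads this from CONF.auth_strategy
    pipeline = local_conf[auth_strategy].split()
    app = loader.get_app(pipeline[-1])       # innermost: the API router
    for name in reversed(pipeline[:-1]):     # wrap with each filter, outermost first
        app = loader.get_filter(name)(app)
    return app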

View File

@ -0,0 +1,60 @@
#############
# OpenStack #
#############
[composite:osapi_volume]
use = call:cinder.api:root_app_factory
/: apiversions
/v1: openstack_volume_api_v1
/v2: openstack_volume_api_v2
[composite:openstack_volume_api_v1]
use = call:cinder.api.middleware.auth:pipeline_factory
noauth = request_id faultwrap sizelimit osprofiler noauth apiv1
keystone = request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv1
keystone_nolimit = request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv1
[composite:openstack_volume_api_v2]
use = call:cinder.api.middleware.auth:pipeline_factory
noauth = request_id faultwrap sizelimit osprofiler noauth apiv2
keystone = request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv2
keystone_nolimit = request_id faultwrap sizelimit osprofiler authtoken keystonecontext apiv2
[filter:request_id]
paste.filter_factory = cinder.openstack.common.middleware.request_id:RequestIdMiddleware.factory
[filter:faultwrap]
paste.filter_factory = cinder.api.middleware.fault:FaultWrapper.factory
[filter:osprofiler]
paste.filter_factory = osprofiler.web:WsgiMiddleware.factory
hmac_keys = SECRET_KEY
enabled = yes
[filter:noauth]
paste.filter_factory = cinder.api.middleware.auth:NoAuthMiddleware.factory
[filter:sizelimit]
paste.filter_factory = cinder.api.middleware.sizelimit:RequestBodySizeLimiter.factory
[app:apiv1]
paste.app_factory = cinder.api.v1.router:APIRouter.factory
[app:apiv2]
paste.app_factory = cinder.api.v2.router:APIRouter.factory
[pipeline:apiversions]
pipeline = faultwrap osvolumeversionapp
[app:osvolumeversionapp]
paste.app_factory = cinder.api.versions:Versions.factory
##########
# Shared #
##########
[filter:keystonecontext]
paste.filter_factory = cinder.api.middleware.auth:CinderKeystoneContext.factory
[filter:authtoken]
paste.filter_factory = keystonemiddleware.auth_token:filter_factory

View File

@ -0,0 +1,68 @@
[DEFAULT]
debug = True
verbose = True
rootwrap_config = /etc/cinder/rootwrap.conf
api_paste_config = /etc/cinder/api-paste.ini
iscsi_helper = tgtadm
volume_name_template = volume-%s
volume_group = nova-volumes
volume_clear = none
auth_strategy = keystone
state_path = /var/lib/cinder
volumes_dir = /var/lib/cinder/volumes
rabbit_port=5672
rabbit_virtual_host=/
rabbit_password=<REDACTED>
rabbit_userid=<REDACTED>
rabbit_host=<CONTROLLER_IP>
osapi_volume_extension = cinder.api.contrib.standard_extensions
api_paste_config=/etc/cinder/api-paste.ini
osapi_volume_listen=0.0.0.0
sql_idle_timeout=3600
scheduler_driver=cinder.scheduler.simple.SimpleScheduler
#rabbit_hosts=<CONTROLLER_IP>:5672
rabbit_ha_queues=False
rpc_backend=cinder.openstack.common.rpc.impl_kombu
use_syslog=False
control_exchange=openstack
default_volume_type=production
enabled_backends=ganymede,rbd
glance_api_version=2
[ganymede]
#EQLX bits:
volume_driver=cinder.volume.drivers.eqlx.DellEQLSanISCSIDriver
san_ip=<EQUALLOGIC_IP>
san_login=cinder
san_password=<REDACTED>
san_thin_provision=false
#san_ssh_port=22
# eqlx_group_name parameter actually represents the CLI prompt message
# without '>' ending. E.g. if prompt looks like 'group-0>', then the
# parameter must be set to 'group-0'
eqlx_group_name=ganymede
eqlx_pool=Nimbus
#Seconds to wait before sending a keepalive packet
#eqlx_ssh_keepalive_interval=1200
#Timeout for the Group Manager cli command execution
#eqlx_cli_timeout=30
#Maximum retry count for reconnection
#eqlx_cli_max_retries=5
#Seconds to sleep before the next reconnection retry
#eqlx_cli_retries_timeout=30
#Use CHAP authentication for targets
#eqlx_use_chap=false
#eqlx_chap_login=admin
#eqlx_chap_password=password
#eqlx_verbose_ssh=false
volume_backend_name=eqlx
[rbd]
volume_driver=cinder.volume.drivers.rbd.RBDDriver
rbd_pool=volumes
rbd_ceph_conf=/etc/ceph/ceph.conf
rbd_flatten_volume_from_snapshot=false
rbd_max_clone_depth=5
rbd_user=openstack
rbd_secret_uuid=<REDACTED>
volume_backend_name=rbd
[database]
connection = mysql://cinder:<REDACTED>@<CONTROLLER_IP>/cinder?charset=utf8
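
With enabled_backends=ganymede,rbd, cinder-volume runs one backend per section and the scheduler routes requests by volume type, matching a type's volume_backend_name extra spec against the values set above. A hedged sketch of creating the matching types with python-cinderclient (credentials and the 'rbd' type name are assumptions; 'production' is the default_volume_type above):

from cinderclient.v2 import client  # python-cinderclient

c = client.Client('admin', '<ADMIN_PASSWORD>', 'admin',
                  'http://<CONTROLLER_IP>:5000/v2.0')
for type_name, backend in (('production', 'eqlx'), ('rbd', 'rbd')):
    vtype = c.volume_types.create(type_name)
    vtype.set_keys({'volume_backend_name': backend})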

View File

@ -0,0 +1,11 @@
[DEFAULT]
rootwrap_config = /etc/cinder/rootwrap.conf
api_paste_config = /etc/cinder/api-paste.ini
iscsi_helper = tgtadm
volume_name_template = volume-%s
volume_group = cinder-volumes
verbose = True
auth_strategy = keystone
state_path = /var/lib/cinder
lock_path = /var/lock/cinder
volumes_dir = /var/lib/cinder/volumes

View File

@ -0,0 +1,76 @@
[loggers]
keys = root, cinder
[handlers]
keys = stderr, stdout, watchedfile, syslog, null
[formatters]
keys = legacycinder, default
[logger_root]
level = WARNING
handlers = null
[logger_cinder]
level = INFO
handlers = stderr
qualname = cinder
[logger_amqplib]
level = WARNING
handlers = stderr
qualname = amqplib
[logger_sqlalchemy]
level = WARNING
handlers = stderr
qualname = sqlalchemy
# "level = INFO" logs SQL queries.
# "level = DEBUG" logs SQL queries and results.
# "level = WARNING" logs neither. (Recommended for production systems.)
[logger_boto]
level = WARNING
handlers = stderr
qualname = boto
[logger_suds]
level = INFO
handlers = stderr
qualname = suds
[logger_eventletwsgi]
level = WARNING
handlers = stderr
qualname = eventlet.wsgi.server
[handler_stderr]
class = StreamHandler
args = (sys.stderr,)
formatter = legacycinder
[handler_stdout]
class = StreamHandler
args = (sys.stdout,)
formatter = legacycinder
[handler_watchedfile]
class = handlers.WatchedFileHandler
args = ('cinder.log',)
formatter = legacycinder
[handler_syslog]
class = handlers.SysLogHandler
args = ('/dev/log', handlers.SysLogHandler.LOG_USER)
formatter = legacycinder
[handler_null]
class = cinder.log.NullHandler
formatter = default
args = ()
[formatter_legacycinder]
class = cinder.log.LegacyCinderFormatter
[formatter_default]
format = %(message)s
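
This is stock Python logging.config syntax, so the file can be exercised outside the service too; note the legacycinder formatter and null handler classes live in the cinder package, so cinder must be importable. A minimal check:

import logging.config

# Pulls in the handlers/formatters defined above (stderr, syslog, ...).
logging.config.fileConfig('/etc/cinder/logging.conf',
                          disable_existing_loggers=False)
logging.getLogger('cinder').info('logging config loaded')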

View File

@ -0,0 +1,80 @@
{
"context_is_admin": "role:admin",
"admin_or_owner": "is_admin:True or project_id:%(project_id)s",
"default": "rule:admin_or_owner",
"admin_api": "is_admin:True",
"volume:create": "",
"volume:get_all": "",
"volume:get_volume_metadata": "",
"volume:get_volume_admin_metadata": "rule:admin_api",
"volume:delete_volume_admin_metadata": "rule:admin_api",
"volume:update_volume_admin_metadata": "rule:admin_api",
"volume:get_snapshot": "",
"volume:get_all_snapshots": "",
"volume:extend": "",
"volume:update_readonly_flag": "",
"volume:retype": "",
"volume_extension:types_manage": "rule:admin_api",
"volume_extension:types_extra_specs": "rule:admin_api",
"volume_extension:volume_type_encryption": "rule:admin_api",
"volume_extension:volume_encryption_metadata": "rule:admin_or_owner",
"volume_extension:extended_snapshot_attributes": "",
"volume_extension:volume_image_metadata": "",
"volume_extension:quotas:show": "",
"volume_extension:quotas:update": "rule:admin_api",
"volume_extension:quota_classes": "",
"volume_extension:volume_admin_actions:reset_status": "rule:admin_api",
"volume_extension:snapshot_admin_actions:reset_status": "rule:admin_api",
"volume_extension:backup_admin_actions:reset_status": "rule:admin_api",
"volume_extension:volume_admin_actions:force_delete": "rule:admin_api",
"volume_extension:volume_admin_actions:force_detach": "rule:admin_api",
"volume_extension:snapshot_admin_actions:force_delete": "rule:admin_api",
"volume_extension:volume_admin_actions:migrate_volume": "rule:admin_api",
"volume_extension:volume_admin_actions:migrate_volume_completion": "rule:admin_api",
"volume_extension:volume_host_attribute": "rule:admin_api",
"volume_extension:volume_tenant_attribute": "rule:admin_or_owner",
"volume_extension:volume_mig_status_attribute": "rule:admin_api",
"volume_extension:hosts": "rule:admin_api",
"volume_extension:services": "rule:admin_api",
"volume_extension:volume_manage": "rule:admin_api",
"volume_extension:volume_unmanage": "rule:admin_api",
"volume:services": "rule:admin_api",
"volume:create_transfer": "",
"volume:accept_transfer": "",
"volume:delete_transfer": "",
"volume:get_all_transfers": "",
"volume_extension:replication:promote": "rule:admin_api",
"volume_extension:replication:reenable": "rule:admin_api",
"backup:create" : "",
"backup:delete": "",
"backup:get": "",
"backup:get_all": "",
"backup:restore": "",
"backup:backup-import": "rule:admin_api",
"backup:backup-export": "rule:admin_api",
"snapshot_extension:snapshot_actions:update_snapshot_status": "",
"consistencygroup:create" : "group:nobody",
"consistencygroup:delete": "group:nobody",
"consistencygroup:get": "group:nobody",
"consistencygroup:get_all": "group:nobody",
"consistencygroup:create_cgsnapshot" : "",
"consistencygroup:delete_cgsnapshot": "",
"consistencygroup:get_cgsnapshot": "",
"consistencygroup:get_all_cgsnapshots": "",
"scheduler_extension:scheduler_stats:get_pools" : "rule:admin_api"
}
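
Note the "group:nobody" rules: no token carries a matching group credential, so consistency-group create/delete/get are effectively disabled here while the cgsnapshot calls stay open. A toy illustration of why such a rule always denies (greatly simplified relative to the real policy engine):

def check(rule, creds):
    # Toy evaluator: '' always passes, 'role:X' checks the roles list,
    # and other attributes (e.g. 'group') must literally match the
    # credentials -- a rough caricature of the generic policy checks.
    if rule == '':
        return True
    kind, _, match = rule.partition(':')
    if kind == 'role':
        return match in creds.get('roles', [])
    return creds.get(kind) == match

print(check('group:nobody', {'roles': ['admin']}))  # False: CG ops denied
print(check('role:admin', {'roles': ['admin']}))    # True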

View File

@ -0,0 +1,27 @@
# Configuration for cinder-rootwrap
# This file should be owned by (and only-writeable by) the root user
[DEFAULT]
# List of directories to load filter definitions from (separated by ',').
# These directories MUST all be only writeable by root !
filters_path=/etc/cinder/rootwrap.d,/usr/share/cinder/rootwrap
# List of directories to search executables in, in case filters do not
# explicitly specify a full path (separated by ',')
# If not specified, defaults to system PATH environment variable.
# These directories MUST all be only writeable by root !
exec_dirs=/sbin,/usr/sbin,/bin,/usr/bin
# Enable logging to syslog
# Default value is False
use_syslog=False
# Which syslog facility to use.
# Valid values include auth, authpriv, syslog, local0, local1...
# Default value is 'syslog'
syslog_log_facility=syslog
# Which messages to log.
# INFO means log all usage
# ERROR means only log unsuccessful attempts
syslog_log_level=ERROR
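
In practice the service invokes these filters via sudo and cinder-rootwrap; the hedged equivalent by hand (assuming the packaged sudoers entry that lets the cinder user run cinder-rootwrap as root) looks like:

import subprocess

# 'env LC_ALL=C lvs' matches the lvs EnvFilter in the volume filters below.
subprocess.check_call(['sudo', 'cinder-rootwrap', '/etc/cinder/rootwrap.conf',
                       'env', 'LC_ALL=C', 'lvs'])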

View File

@ -0,0 +1,157 @@
# cinder-rootwrap command filters for volume nodes
# This file should be owned by (and only-writeable by) the root user
[Filters]
# cinder/volume/iscsi.py: iscsi_helper '--op' ...
ietadm: CommandFilter, ietadm, root
tgtadm: CommandFilter, tgtadm, root
tgt-admin: CommandFilter, tgt-admin, root
cinder-rtstool: CommandFilter, cinder-rtstool, root
# LVM related show commands
pvs: EnvFilter, env, root, LC_ALL=C, pvs
vgs: EnvFilter, env, root, LC_ALL=C, vgs
lvs: EnvFilter, env, root, LC_ALL=C, lvs
lvdisplay: EnvFilter, env, root, LC_ALL=C, lvdisplay
# cinder/volume/driver.py: 'lvcreate', '-L', sizestr, '-n', volume_name,..
# cinder/volume/driver.py: 'lvcreate', '-L', ...
lvcreate: CommandFilter, lvcreate, root
# cinder/volume/driver.py: 'dd', 'if=%s' % srcstr, 'of=%s' % deststr,...
dd: CommandFilter, dd, root
# cinder/volume/driver.py: 'lvremove', '-f', %s/%s % ...
lvremove: CommandFilter, lvremove, root
# cinder/volume/driver.py: 'lvrename', '%(vg)s', '%(orig)s' '(new)s'...
lvrename: CommandFilter, lvrename, root
# cinder/volume/driver.py: 'lvextend', '-L' '%(new_size)s', '%(lv_name)s' ...
lvextend: CommandFilter, lvextend, root
# cinder/brick/local_dev/lvm.py: 'lvchange -a y -K <lv>'
lvchange: CommandFilter, lvchange, root
# cinder/volume/driver.py: 'iscsiadm', '-m', 'discovery', '-t',...
# cinder/volume/driver.py: 'iscsiadm', '-m', 'node', '-T', ...
iscsiadm: CommandFilter, iscsiadm, root
# cinder/volume/drivers/lvm.py: 'shred', '-n3'
# cinder/volume/drivers/lvm.py: 'shred', '-n0', '-z', '-s%dMiB'
shred: CommandFilter, shred, root
# cinder/volume/utils.py: utils.temporary_chown(path, 0)
chown: CommandFilter, chown, root
# cinder/volume/utils.py: copy_volume(..., ionice='...')
ionice_1: ChainingRegExpFilter, ionice, root, ionice, -c[0-3], -n[0-7]
ionice_2: ChainingRegExpFilter, ionice, root, ionice, -c[0-3]
# cinder/volume/utils.py: setup_blkio_cgroup()
cgcreate: CommandFilter, cgcreate, root
cgset: CommandFilter, cgset, root
cgexec: ChainingRegExpFilter, cgexec, root, cgexec, -g, blkio:\S+
# cinder/volume/driver.py
dmsetup: CommandFilter, dmsetup, root
ln: CommandFilter, ln, root
# cinder/image/image_utils.py
qemu-img: EnvFilter, env, root, LC_ALL=C, qemu-img
qemu-img_convert: CommandFilter, qemu-img, root
udevadm: CommandFilter, udevadm, root
# cinder/volume/driver.py: utils.read_file_as_root()
cat: CommandFilter, cat, root
# cinder/volume/nfs.py
stat: CommandFilter, stat, root
mount: CommandFilter, mount, root
df: CommandFilter, df, root
du: CommandFilter, du, root
truncate: CommandFilter, truncate, root
chmod: CommandFilter, chmod, root
rm: CommandFilter, rm, root
# cinder/volume/drivers/netapp/nfs.py:
netapp_nfs_find: RegExpFilter, find, root, find, ^[/]*([^/\0]+(/+)?)*$, -maxdepth, \d+, -name, img-cache.*, -amin, \+\d+
# cinder/volume/drivers/glusterfs.py
chgrp: CommandFilter, chgrp, root
umount: CommandFilter, umount, root
# cinder/volumes/drivers/hds/hds.py:
hus-cmd: CommandFilter, hus-cmd, root
hus-cmd_local: CommandFilter, /usr/local/bin/hus-cmd, root
# cinder/volumes/drivers/hds/hnas_backend.py
ssc: CommandFilter, ssc, root
# cinder/brick/initiator/connector.py:
ls: CommandFilter, ls, root
tee: CommandFilter, tee, root
multipath: CommandFilter, multipath, root
systool: CommandFilter, systool, root
# cinder/volume/drivers/block_device.py
blockdev: CommandFilter, blockdev, root
# cinder/volume/drivers/ibm/gpfs.py
mv: CommandFilter, mv, root
mmgetstate: CommandFilter, /usr/lpp/mmfs/bin/mmgetstate, root
mmclone: CommandFilter, /usr/lpp/mmfs/bin/mmclone, root
mmlsattr: CommandFilter, /usr/lpp/mmfs/bin/mmlsattr, root
mmchattr: CommandFilter, /usr/lpp/mmfs/bin/mmchattr, root
mmlsconfig: CommandFilter, /usr/lpp/mmfs/bin/mmlsconfig, root
mmlsfs: CommandFilter, /usr/lpp/mmfs/bin/mmlsfs, root
mmlspool: CommandFilter, /usr/lpp/mmfs/bin/mmlspool, root
mkfs: CommandFilter, mkfs, root
# cinder/volume/drivers/ibm/gpfs.py
# cinder/volume/drivers/ibm/ibmnas.py
find_maxdepth_inum: RegExpFilter, find, root, find, ^[/]*([^/\0]+(/+)?)*$, -maxdepth, \d+, -inum, \d+
# cinder/brick/initiator/connector.py:
aoe-revalidate: CommandFilter, aoe-revalidate, root
aoe-discover: CommandFilter, aoe-discover, root
aoe-flush: CommandFilter, aoe-flush, root
# cinder/brick/initiator/linuxscsi.py:
sg_scan: CommandFilter, sg_scan, root
# cinder/backup/services/tsm.py
dsmc: CommandFilter, /usr/bin/dsmc, root
# cinder/volume/drivers/hitachi/hbsd_horcm.py
raidqry: CommandFilter, raidqry, root
raidcom: CommandFilter, raidcom, root
pairsplit: CommandFilter, pairsplit, root
paircreate: CommandFilter, paircreate, root
pairdisplay: CommandFilter, pairdisplay, root
pairevtwait: CommandFilter, pairevtwait, root
horcmstart.sh: CommandFilter, horcmstart.sh, root
horcmshutdown.sh: CommandFilter, horcmshutdown.sh, root
horcmgr: EnvFilter, env, root, HORCMINST=, /etc/horcmgr
# cinder/volume/drivers/hitachi/hbsd_snm2.py
auman: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auman
auluref: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auluref
auhgdef: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auhgdef
aufibre1: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/aufibre1
auhgwwn: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auhgwwn
auhgmap: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auhgmap
autargetmap: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/autargetmap
aureplicationvvol: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/aureplicationvvol
auluadd: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auluadd
auludel: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auludel
auluchgsize: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auluchgsize
auchapuser: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auchapuser
autargetdef: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/autargetdef
autargetopt: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/autargetopt
autargetini: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/autargetini
auiscsi: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/auiscsi
audppool: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/audppool
aureplicationlocal: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/aureplicationlocal
aureplicationmon: EnvFilter, env, root, LANG=, STONAVM_HOME=, LD_LIBRARY_PATH=, STONAVM_RSP_PASS=, STONAVM_ACT=, /usr/stonavm/aureplicationmon

View File

@ -0,0 +1,66 @@
# Use this pipeline for no auth or image caching - DEFAULT
[pipeline:glance-api]
pipeline = versionnegotiation unauthenticated-context rootapp
# Use this pipeline for image caching and no auth
[pipeline:glance-api-caching]
pipeline = versionnegotiation unauthenticated-context cache rootapp
# Use this pipeline for caching w/ management interface but no auth
[pipeline:glance-api-cachemanagement]
pipeline = versionnegotiation unauthenticated-context cache cachemanage rootapp
# Use this pipeline for keystone auth
[pipeline:glance-api-keystone]
pipeline = versionnegotiation authtoken context rootapp
# Use this pipeline for keystone auth with image caching
[pipeline:glance-api-keystone+caching]
pipeline = versionnegotiation authtoken context cache rootapp
# Use this pipeline for keystone auth with caching and cache management
[pipeline:glance-api-keystone+cachemanagement]
pipeline = versionnegotiation authtoken context cache cachemanage rootapp
[composite:rootapp]
paste.composite_factory = glance.api:root_app_factory
/: apiversions
/v1: apiv1app
/v2: apiv2app
[app:apiversions]
paste.app_factory = glance.api.versions:create_resource
[app:apiv1app]
paste.app_factory = glance.api.v1.router:API.factory
[app:apiv2app]
paste.app_factory = glance.api.v2.router:API.factory
[filter:versionnegotiation]
paste.filter_factory = glance.api.middleware.version_negotiation:VersionNegotiationFilter.factory
[filter:cache]
paste.filter_factory = glance.api.middleware.cache:CacheFilter.factory
[filter:cachemanage]
paste.filter_factory = glance.api.middleware.cache_manage:CacheManageFilter.factory
[filter:context]
paste.filter_factory = glance.api.middleware.context:ContextMiddleware.factory
[filter:unauthenticated-context]
paste.filter_factory = glance.api.middleware.context:UnauthenticatedContextMiddleware.factory
[filter:authtoken]
paste.filter_factory = keystoneclient.middleware.auth_token:filter_factory
delay_auth_decision = true
auth_host = <CONTROLLER_IP>
auth_port = 35357
auth_protocol = http
auth_uri = http://<CONTROLLER_IP>:5000/
admin_tenant_name = services
admin_user = glance
admin_password = <REDACTED>
signing_dirname = /tmp/keystone-signing-glance-api
#auth_version = v2.0

View File

@ -0,0 +1,361 @@
[DEFAULT]
# Show more verbose log output (sets INFO log level output)
verbose = False
#verbose = True
# Show debugging output in logs (sets DEBUG log level output)
#debug = False
debug = True
# Maximum image size (in bytes) that may be uploaded through the
# Glance API server. Defaults to 1 TB.
# WARNING: this value should only be increased after careful consideration
# and must be set to a value under 8 EB (9223372036854775808).
#image_size_cap = 1099511627776
# Address to bind the API server
bind_host = 0.0.0.0
# Port to bind the API server to
bind_port = 9292
# Log to this file. Make sure you do not set the same log
# file for both the API and registry servers!
log_file = /var/log/glance/api.log
# Backlog requests when creating socket
backlog = 4096
# TCP_KEEPIDLE value in seconds when creating socket.
# Not supported on OS X.
#tcp_keepidle = 600
# Number of Glance API worker processes to start.
# On machines with more than one CPU increasing this value
# may improve performance (especially if using SSL with
# compression turned on). It is typically recommended to set
# this value to the number of CPUs present on your machine.
workers = 24
# Role used to identify an authenticated user as administrator
#admin_role = admin
# Allow unauthenticated users to access the API with read-only
# privileges. This only applies when using ContextMiddleware.
#allow_anonymous_access = False
# Allow access to version 1 of glance api
#enable_v1_api = True
# Allow access to version 2 of glance api
#enable_v2_api = True
# Return the URL that references where the data is stored on
# the backend storage system. For example, if using the
# file system store a URL of 'file:///path/to/image' will
# be returned to the user in the 'direct_url' meta-data field.
# The default value is false.
#show_image_direct_url = False
show_image_direct_url=True
# ================= Syslog Options ============================
# Send logs to syslog (/dev/log) instead of to file specified
# by `log_file`
#use_syslog = False
use_syslog = False
# Facility to use. If unset defaults to LOG_USER.
#syslog_log_facility = LOG_LOCAL0
# ================= SSL Options ===============================
# Certificate file to use when starting API server securely
#cert_file = /path/to/certfile
# Private key file to use when starting API server securely
#key_file = /path/to/keyfile
# CA certificate file to use to verify connecting clients
#ca_file = /path/to/cafile
# ================= Security Options ==========================
# AES key for encrypting store 'location' metadata, including
# -- if used -- Swift or S3 credentials
# Should be set to a random string of length 16, 24 or 32 bytes
#metadata_encryption_key = <16, 24 or 32 char registry metadata key>
# ============ Registry Options ===============================
# Address to find the registry server
registry_host = 127.0.0.1
# Port the registry server is listening on
registry_port = 9191
# What protocol to use when connecting to the registry server?
# Set to https for secure HTTP communication
registry_client_protocol = http
# The path to the key file to use in SSL connections to the
# registry server, if any. Alternately, you may set the
# GLANCE_CLIENT_KEY_FILE environ variable to a filepath of the key file
#registry_client_key_file = /path/to/key/file
# The path to the cert file to use in SSL connections to the
# registry server, if any. Alternately, you may set the
# GLANCE_CLIENT_CERT_FILE environ variable to a filepath of the cert file
#registry_client_cert_file = /path/to/cert/file
# The path to the certifying authority cert file to use in SSL connections
# to the registry server, if any. Alternately, you may set the
# GLANCE_CLIENT_CA_FILE environ variable to a filepath of the CA cert file
#registry_client_ca_file = /path/to/ca/file
# When using SSL in connections to the registry server, do not require
# validation via a certifying authority. This is the registry's equivalent of
# specifying --insecure on the command line using glanceclient for the API
# Default: False
#registry_client_insecure = False
# The period of time, in seconds, that the API server will wait for a registry
# request to complete. A value of '0' implies no timeout.
# Default: 600
#registry_client_timeout = 600
# Whether to automatically create the database tables.
# Default: False
#db_auto_create = False
# ============ Notification System Options =====================
# Notifications can be sent when images are created, updated or deleted.
# There are four methods of sending notifications: logging (via the
# log_file directive), rabbit (via a rabbitmq queue), qpid (via a Qpid
# message queue), or noop (no notifications sent, the default)
notifier_strategy = noop
# glance now uses the message queue for more than just logging;
# this defines the general rpc backend
rpc_backend=rabbit
# Configuration options if sending notifications via rabbitmq (these are
# the defaults)
rabbit_host=<CONTROLLER_IP>
rabbit_port=5672
rabbit_password=<REDACTED>
rabbit_userid=<REDACTED>
rabbit_use_ssl = false
rabbit_virtual_host = /
rabbit_notification_exchange = glance
rabbit_notification_topic = notifications
rabbit_durable_queues = False
# Configuration options if sending notifications via Qpid (these are
# the defaults)
qpid_notification_exchange = glance
qpid_notification_topic = notifications
qpid_host = localhost
qpid_port = 5672
qpid_username =
qpid_password =
qpid_sasl_mechanisms =
qpid_reconnect_timeout = 0
qpid_reconnect_limit = 0
qpid_reconnect_interval_min = 0
qpid_reconnect_interval_max = 0
qpid_reconnect_interval = 0
qpid_heartbeat = 5
# Set to 'ssl' to enable SSL
qpid_protocol = tcp
qpid_tcp_nodelay = True
# ============ Delayed Delete Options =============================
# Turn on/off delayed delete
delayed_delete = False
# Delayed delete time in seconds
scrub_time = 43200
# Directory that the scrubber will use to remind itself of what to delete
# Make sure this is also set in glance-scrubber.conf
scrubber_datadir = /var/lib/glance/scrubber
# =============== Image Cache Options =============================
# Base directory that the Image Cache uses
image_cache_dir = /var/lib/glance/image-cache/
[glance_store]
default_store = rbd
stores = glance.store.filesystem.Store,glance.store.rbd.Store
# ============ Filesystem Store Options ========================
# Directory that the Filesystem backend store
# writes image data to
filesystem_store_datadir = /var/lib/glance/images/
# ============ Swift Store Options =============================
# Version of the authentication service to use
# Valid versions are '2' for keystone and '1' for swauth and rackspace
swift_store_auth_version = 2
# Address where the Swift authentication service lives
# Valid schemes are 'http://' and 'https://'
# If no scheme specified, default to 'https://'
# For swauth, use something like '127.0.0.1:8080/v1.0/'
swift_store_auth_address = 127.0.0.1:5000/v2.0/
# User to authenticate against the Swift authentication service
# If you use Swift authentication service, set it to 'account':'user'
# where 'account' is a Swift storage account and 'user'
# is a user in that account
swift_store_user = jdoe:jdoe
# Auth key for the user authenticating against the
# Swift authentication service
swift_store_key = a86850deb2742ec3cb41518e26aa2d89
# Container within the account that the account should use
# for storing images in Swift
swift_store_container = glance
# Do we create the container if it does not exist?
swift_store_create_container_on_put = False
# What size, in MB, should Glance start chunking image files
# and do a large object manifest in Swift? By default, this is
# the maximum object size in Swift, which is 5GB
swift_store_large_object_size = 5120
# When doing a large object manifest, what size, in MB, should
# Glance write chunks to Swift? This amount of data is written
# to a temporary disk buffer during the process of chunking
# the image file, and the default is 200MB
swift_store_large_object_chunk_size = 200
# Whether to use ServiceNET to communicate with the Swift storage servers.
# (If you aren't RACKSPACE, leave this False!)
#
# To use ServiceNET for authentication, prefix hostname of
# `swift_store_auth_address` with 'snet-'.
# Ex. https://example.com/v1.0/ -> https://snet-example.com/v1.0/
swift_enable_snet = False
# If set to True enables multi-tenant storage mode which causes Glance images
# to be stored in tenant specific Swift accounts.
#swift_store_multi_tenant = False
# A list of swift ACL strings that will be applied as both read and
# write ACLs to the containers created by Glance in multi-tenant
# mode. This grants the specified tenants/users read and write access
# to all newly created image objects. The standard swift ACL string
# formats are allowed, including:
# <tenant_id>:<username>
# <tenant_name>:<username>
# *:<username>
# Multiple ACLs can be combined using a comma separated list, for
# example: swift_store_admin_tenants = service:glance,*:admin
#swift_store_admin_tenants =
# The region of the swift endpoint to be used for single tenant. This setting
# is only necessary if the tenant has multiple swift endpoints.
#swift_store_region =
# ============ S3 Store Options =============================
# Address where the S3 authentication service lives
# Valid schemes are 'http://' and 'https://'
# If no scheme specified, default to 'http://'
s3_store_host = 127.0.0.1:8080/v1.0/
# User to authenticate against the S3 authentication service
s3_store_access_key = <20-char AWS access key>
# Auth key for the user authenticating against the
# S3 authentication service
s3_store_secret_key = <40-char AWS secret key>
# Container within the account that the account should use
# for storing images in S3. Note that S3 has a flat namespace,
# so you need a unique bucket name for your glance images. An
# easy way to do this is to append your AWS access key to "glance".
# S3 buckets in AWS *must* be lowercased, so remember to lowercase
# your AWS access key if you use it in your bucket name below!
s3_store_bucket = <lowercased 20-char aws access key>glance
# Do we create the bucket if it does not exist?
s3_store_create_bucket_on_put = False
# When sending images to S3, the data will first be written to a
# temporary buffer on disk. By default the platform's temporary directory
# will be used. If required, an alternative directory can be specified here.
#s3_store_object_buffer_dir = /path/to/dir
# When forming a bucket url, boto will either set the bucket name as the
# subdomain or as the first token of the path. Amazon's S3 service will
# accept it as the subdomain, but Swift's S3 middleware requires it be
# in the path. Set this to 'path' or 'subdomain' - defaults to 'subdomain'.
#s3_store_bucket_url_format = subdomain
# ============ RBD Store Options =============================
# Ceph configuration file path
# If using cephx authentication, this file should
# include a reference to the right keyring
# in a client.<USER> section
rbd_store_ceph_conf = /etc/ceph/ceph.conf
# RADOS user to authenticate as (only applicable if using cephx)
rbd_store_user = openstack
# RADOS pool in which images are stored
rbd_store_pool = images
# Images will be chunked into objects of this size (in megabytes).
# For best performance, this should be a power of two
rbd_store_chunk_size = 8
[keystone_authtoken]
auth_host = <CONTROLLER_IP>
auth_port = 35358
auth_protocol = https
admin_tenant_name = services
admin_user = glance
admin_password = <REDACTED>
#signing_dirname = /tmp/keystone-signing-glance-registry
auth_version = v2.0
auth_uri=https://<CONTROLLER_IP>:5001/v2.0/
[paste_deploy]
# Name of the paste configuration file that defines the available pipelines
config_file = /etc/glance/glance-api-paste.ini
# Partial name of a pipeline in your paste configuration file with the
# service name removed. For example, if your paste section name is
# [pipeline:glance-api-keystone], you would configure the flavor below
# as 'keystone'.
flavor=keystone+cachemanagement
[database]
# SQLAlchemy connection string for the reference implementation
# registry server. Any valid SQLAlchemy connection string is fine.
# See: http://www.sqlalchemy.org/docs/05/reference/sqlalchemy/connections.html#sqlalchemy.create_engine
connection = mysql://glance:<REDACTED>@<CONTROLLER_IP>/glance
# Period in seconds after which SQLAlchemy should reestablish its connection
# to the database.
#
# MySQL uses a default `wait_timeout` of 8 hours, after which it will drop
# idle connections. This can result in 'MySQL Gone Away' exceptions. If you
# notice this, you can lower this value to ensure that SQLAlchemy reconnects
# before MySQL can drop the connection.
sql_idle_timeout = 3600
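
Because show_image_direct_url=True, image records expose the backing-store location (for the RBD store, typically rbd://<fsid>/images/<image_id>/snap), which is what enables copy-on-write clones from Glance. A hedged peek with python-glanceclient (endpoint and token are placeholders):

from glanceclient import Client  # python-glanceclient

glance = Client('2', endpoint='http://<CONTROLLER_IP>:9292',
                token='<KEYSTONE_TOKEN>')
img = next(iter(glance.images.list()))
print(img['direct_url'])  # present because show_image_direct_url is enabled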

View File

@ -0,0 +1,15 @@
[app:glance-pruner]
paste.app_factory = glance.common.wsgi:app_factory
glance.app_factory = glance.image_cache.pruner:Pruner
[app:glance-prefetcher]
paste.app_factory = glance.common.wsgi:app_factory
glance.app_factory = glance.image_cache.prefetcher:Prefetcher
[app:glance-cleaner]
paste.app_factory = glance.common.wsgi:app_factory
glance.app_factory = glance.image_cache.cleaner:Cleaner
[app:glance-queue-image]
paste.app_factory = glance.common.wsgi:app_factory
glance.app_factory = glance.image_cache.queue_image:Queuer

View File

@ -0,0 +1,148 @@
[DEFAULT]
# Show more verbose log output (sets INFO log level output)
#verbose = False
verbose = False
# Show debugging output in logs (sets DEBUG log level output)
#debug = False
debug = False
log_file = /var/log/glance/image-cache.log
# Send logs to syslog (/dev/log) instead of to file specified by `log_file`
#use_syslog = False
# Directory that the Image Cache writes data to
image_cache_dir = /var/lib/glance/image-cache/
# Number of seconds after which we should consider an incomplete image to be
# stalled and eligible for reaping
image_cache_stall_time = 86400
# image_cache_invalid_entry_grace_period - seconds
#
# If an exception is raised as we're writing to the cache, the cache-entry is
# deemed invalid and moved to <image_cache_datadir>/invalid so that it can be
# inspected for debugging purposes.
#
# This is the number of seconds to leave these invalid images around before
# they are eligible to be reaped.
image_cache_invalid_entry_grace_period = 3600
# Max cache size in bytes
image_cache_max_size = 10737418240
# Address to find the registry server
registry_host = 0.0.0.0
# Port the registry server is listening on
registry_port = 9191
# Auth settings if using Keystone
# auth_url = http://127.0.0.1:5000/v2.0/
auth_url = http://localhost:5000/v2.0
# admin_tenant_name = %SERVICE_TENANT_NAME%
admin_tenant_name = services
# admin_user = %SERVICE_USER%
admin_user = glance
# admin_password = %SERVICE_PASSWORD%
admin_password = <REDACTED>
# List of which store classes and store class locations are
# currently known to glance at startup.
# known_stores = glance.store.filesystem.Store,
# glance.store.http.Store,
# glance.store.rbd.Store,
# glance.store.s3.Store,
# glance.store.swift.Store,
# ============ Filesystem Store Options ========================
# Directory that the Filesystem backend store
# writes image data to
filesystem_store_datadir = /var/lib/glance/images/
# ============ Swift Store Options =============================
# Version of the authentication service to use
# Valid versions are '2' for keystone and '1' for swauth and rackspace
swift_store_auth_version = 2
# Address where the Swift authentication service lives
# Valid schemes are 'http://' and 'https://'
# If no scheme specified, default to 'https://'
# For swauth, use something like '127.0.0.1:8080/v1.0/'
swift_store_auth_address = 127.0.0.1:5000/v2.0/
# User to authenticate against the Swift authentication service
# If you use Swift authentication service, set it to 'account':'user'
# where 'account' is a Swift storage account and 'user'
# is a user in that account
swift_store_user = jdoe:jdoe
# Auth key for the user authenticating against the
# Swift authentication service
swift_store_key = a86850deb2742ec3cb41518e26aa2d89
# Container within the account that the account should use
# for storing images in Swift
swift_store_container = glance
# Do we create the container if it does not exist?
swift_store_create_container_on_put = False
# What size, in MB, should Glance start chunking image files
# and do a large object manifest in Swift? By default, this is
# the maximum object size in Swift, which is 5GB
swift_store_large_object_size = 5120
# When doing a large object manifest, what size, in MB, should
# Glance write chunks to Swift? This amount of data is written
# to a temporary disk buffer during the process of chunking
# the image file, and the default is 200MB
swift_store_large_object_chunk_size = 200
# Whether to use ServiceNET to communicate with the Swift storage servers.
# (If you aren't RACKSPACE, leave this False!)
#
# To use ServiceNET for authentication, prefix hostname of
# `swift_store_auth_address` with 'snet-'.
# Ex. https://example.com/v1.0/ -> https://snet-example.com/v1.0/
swift_enable_snet = False
# ============ S3 Store Options =============================
# Address where the S3 authentication service lives
# Valid schemes are 'http://' and 'https://'
# If no scheme specified, default to 'http://'
s3_store_host = 127.0.0.1:8080/v1.0/
# User to authenticate against the S3 authentication service
s3_store_access_key = <20-char AWS access key>
# Auth key for the user authenticating against the
# S3 authentication service
s3_store_secret_key = <40-char AWS secret key>
# Container within the account that the account should use
# for storing images in S3. Note that S3 has a flat namespace,
# so you need a unique bucket name for your glance images. An
# easy way to do this is to append your AWS access key to "glance".
# S3 buckets in AWS *must* be lowercased, so remember to lowercase
# your AWS access key if you use it in your bucket name below!
s3_store_bucket = <lowercased 20-char aws access key>glance
# Do we create the bucket if it does not exist?
s3_store_create_bucket_on_put = False
# When sending images to S3, the data will first be written to a
# temporary buffer on disk. By default the platform's temporary directory
# will be used. If required, an alternative directory can be specified here.
# s3_store_object_buffer_dir = /path/to/dir
# ================= Security Options ==========================
# AES key for encrypting store 'location' metadata, including
# -- if used -- Swift or S3 credentials
# Should be set to a random string of length 16, 24 or 32 bytes
# metadata_encryption_key = <16, 24 or 32 char registry metadata key>

View File

@ -0,0 +1,28 @@
# Use this pipeline for no auth - DEFAULT
[pipeline:glance-registry]
pipeline = unauthenticated-context registryapp
# Use this pipeline for keystone auth
[pipeline:glance-registry-keystone]
pipeline = authtoken context registryapp
[app:registryapp]
paste.app_factory = glance.registry.api.v1:API.factory
[filter:context]
paste.filter_factory = glance.api.middleware.context:ContextMiddleware.factory
[filter:unauthenticated-context]
paste.filter_factory = glance.api.middleware.context:UnauthenticatedContextMiddleware.factory
[filter:authtoken]
paste.filter_factory = keystoneclient.middleware.auth_token:filter_factory
auth_host = <CONTROLLER_IP>
auth_port = 35357
auth_protocol = http
auth_uri = http://<CONTROLLER_IP>:5000/
admin_tenant_name = services
admin_user = glance
admin_password = <REDACTED>
signing_dirname = /tmp/keystone-signing-glance-registry
#auth_version = v2.0

View File

@ -0,0 +1,98 @@
[DEFAULT]
# Show more verbose log output (sets INFO log level output)
#verbose = False
verbose = True
# Show debugging output in logs (sets DEBUG log level output)
debug = False
#debug = True
# Address to bind the registry server
bind_host = 0.0.0.0
# Port to bind the registry server to
bind_port = 9191
# Log to this file. Make sure you do not set the same log
# file for both the API and registry servers!
log_file = /var/log/glance/registry.log
# Backlog requests when creating socket
backlog = 4096
# TCP_KEEPIDLE value in seconds when creating socket.
# Not supported on OS X.
#tcp_keepidle = 600
# Limit the api to return `api_limit_max` items in a call to a container. If
# a larger `limit` query param is provided, it will be reduced to this value.
api_limit_max = 1000
# If a `limit` query param is not provided in an api request, it will
# default to `limit_param_default`
limit_param_default = 25
# Role used to identify an authenticated user as administrator
admin_role = admin
# Whether to automatically create the database tables.
# Default: False
#db_auto_create = False
# ================= Syslog Options ============================
# Send logs to syslog (/dev/log) instead of to file specified
# by `log_file`
#use_syslog = False
use_syslog = False
# Facility to use. If unset defaults to LOG_USER.
#syslog_log_facility = LOG_LOCAL1
# ================= SSL Options ===============================
# Certificate file to use when starting registry server securely
#cert_file = /path/to/certfile
# Private key file to use when starting registry server securely
#key_file = /path/to/keyfile
# CA certificate file to use to verify connecting clients
#ca_file = /path/to/cafile
[keystone_authtoken]
auth_host = <CONTROLLER_IP>
auth_port = 35358
auth_protocol = https
admin_tenant_name = services
admin_user = glance
admin_password = <REDACTED>
#signing_dirname = /tmp/keystone-signing-glance-registry
auth_version = v2.0
auth_uri=https://<CONTROLLER_IP>:5001/v2.0/
[paste_deploy]
# Name of the paste configuration file that defines the available pipelines
config_file = /etc/glance/glance-registry-paste.ini
# Partial name of a pipeline in your paste configuration file with the
# service name removed. For example, if your paste section name is
# [pipeline:glance-registry-keystone], you would configure the flavor below
# as 'keystone'.
flavor = keystone
[database]
# SQLAlchemy connection string for the reference implementation
# registry server. Any valid SQLAlchemy connection string is fine.
# See: http://www.sqlalchemy.org/docs/05/reference/sqlalchemy/connections.html#sqlalchemy.create_engine
connection = mysql://glance:<REDACTED>@<CONTROLLER_IP>/glance
# Period in seconds after which SQLAlchemy should reestablish its connection
# to the database.
#
# MySQL uses a default `wait_timeout` of 8 hours, after which it will drop
# idle connections. This can result in 'MySQL Gone Away' exceptions. If you
# notice this, you can lower this value to ensure that SQLAlchemy reconnects
# before MySQL can drop the connection.
sql_idle_timeout = 3600

View File

@ -0,0 +1,3 @@
[app:glance-scrubber]
paste.app_factory = glance.common.wsgi:app_factory
glance.app_factory = glance.store.scrubber:Scrubber

View File

@ -0,0 +1,108 @@
[DEFAULT]
# Show more verbose log output (sets INFO log level output)
#verbose = False
# Show debugging output in logs (sets DEBUG log level output)
#debug = False
# Log to this file. Make sure you do not set the same log file for both the API
# and registry servers!
#
# If `log_file` is omitted and `use_syslog` is false, then log messages are
# sent to stdout as a fallback.
log_file = /var/log/glance/scrubber.log
# Send logs to syslog (/dev/log) instead of to file specified by `log_file`
#use_syslog = False
# Should we run our own loop or rely on cron/scheduler to run us
daemon = False
# Loop time between checking for new items to schedule for delete
wakeup_time = 300
# Directory that the scrubber will use to remind itself of what to delete
# Make sure this is also set in glance-api.conf
scrubber_datadir = /var/lib/glance/scrubber
# Only one server in your deployment should be designated the cleanup host
cleanup_scrubber = False
# pending_delete items older than this time are candidates for cleanup
cleanup_scrubber_time = 86400
# Address to find the registry server for cleanups
registry_host = 0.0.0.0
# Port the registry server is listening on
registry_port = 9191
# Auth settings if using Keystone
# auth_url = http://127.0.0.1:5000/v2.0/
# admin_tenant_name = %SERVICE_TENANT_NAME%
# admin_user = %SERVICE_USER%
# admin_password = %SERVICE_PASSWORD%
# Directory to use for lock files. Default to a temp directory
# (string value). This setting needs to be the same for both
# glance-scrubber and glance-api.
#lock_path=<None>
# API to use for accessing data. Default value points to sqlalchemy
# package, it is also possible to use: glance.db.registry.api
#data_api = glance.db.sqlalchemy.api
# ================= Security Options ==========================
# AES key for encrypting store 'location' metadata, including
# -- if used -- Swift or S3 credentials
# Should be set to a random string of length 16, 24 or 32 bytes
#metadata_encryption_key = <16, 24 or 32 char registry metadata key>
# ================= Database Options ==========================
[database]
# The SQLAlchemy connection string used to connect to the
# database (string value)
#connection=sqlite:////glance/openstack/common/db/$sqlite_db
# The SQLAlchemy connection string used to connect to the
# slave database (string value)
#slave_connection=
# timeout before idle sql connections are reaped (integer
# value)
#idle_timeout=3600
# Minimum number of SQL connections to keep open in a pool
# (integer value)
#min_pool_size=1
# Maximum number of SQL connections to keep open in a pool
# (integer value)
#max_pool_size=<None>
# maximum db connection retries during startup. (setting -1
# implies an infinite retry count) (integer value)
#max_retries=10
# interval between retries of opening a sql connection
# (integer value)
#retry_interval=10
# If set, use this value for max_overflow with sqlalchemy
# (integer value)
#max_overflow=<None>
# Verbosity of SQL debugging information. 0=None,
# 100=Everything (integer value)
#connection_debug=0
# Add python stack traces to SQL as comment strings (boolean
# value)
#connection_trace=false
# If set, use this value for pool_timeout with sqlalchemy
# (integer value)
#pool_timeout=<None>

View File

@ -0,0 +1,52 @@
{
"context_is_admin": "role:admin",
"default": "",
"add_image": "",
"delete_image": "",
"get_image": "",
"get_images": "",
"modify_image": "",
"publicize_image": "role:admin",
"copy_from": "",
"download_image": "",
"upload_image": "",
"delete_image_location": "",
"get_image_location": "",
"set_image_location": "",
"add_member": "",
"delete_member": "",
"get_member": "",
"get_members": "",
"modify_member": "",
"manage_image_cache": "role:admin",
"get_task": "",
"get_tasks": "",
"add_task": "",
"modify_task": "",
"get_metadef_namespace": "",
"get_metadef_namespaces":"",
"modify_metadef_namespace":"",
"add_metadef_namespace":"",
"get_metadef_object":"",
"get_metadef_objects":"",
"modify_metadef_object":"",
"add_metadef_object":"",
"list_metadef_resource_types":"",
"get_metadef_resource_type":"",
"add_metadef_resource_type_association":"",
"get_metadef_property":"",
"get_metadef_properties":"",
"modify_metadef_property":"",
"add_metadef_property":""
}

View File

@ -0,0 +1,28 @@
{
"kernel_id": {
"type": "string",
"pattern": "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$",
"description": "ID of image stored in Glance that should be used as the kernel when booting an AMI-style image."
},
"ramdisk_id": {
"type": "string",
"pattern": "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$",
"description": "ID of image stored in Glance that should be used as the ramdisk when booting an AMI-style image."
},
"instance_uuid": {
"type": "string",
"description": "ID of instance used to create this image."
},
"architecture": {
"description": "Operating system architecture as specified in http://docs.openstack.org/trunk/openstack-compute/admin/content/adding-images.html",
"type": "string"
},
"os_distro": {
"description": "Common name of operating system distribution as specified in http://docs.openstack.org/trunk/openstack-compute/admin/content/adding-images.html",
"type": "string"
},
"os_version": {
"description": "Operating system version as specified by the distributor",
"type": "string"
}
}
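
The kernel_id/ramdisk_id pattern is just a UUID shape check; a quick illustration with Python's re module, using the pattern verbatim:

import re

UUID_PATTERN = (r'^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}'
                r'-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$')
print(bool(re.match(UUID_PATTERN, '12345678-abcd-ef01-2345-67890abcdef0')))  # True
print(bool(re.match(UUID_PATTERN, 'not-a-uuid')))                            # False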

View File

@ -0,0 +1,94 @@
# heat-api pipeline
[pipeline:heat-api]
pipeline = faultwrap ssl versionnegotiation authurl authtoken context apiv1app
# heat-api pipeline for standalone heat
# i.e. uses an alternative auth backend that authenticates users against keystone
# using username and password instead of validating token (which requires
# an admin/service token).
# To enable, in heat.conf:
# [paste_deploy]
# flavor = standalone
#
[pipeline:heat-api-standalone]
pipeline = faultwrap ssl versionnegotiation authurl authpassword context apiv1app
# heat-api pipeline for custom cloud backends
# i.e. in heat.conf:
# [paste_deploy]
# flavor = custombackend
#
[pipeline:heat-api-custombackend]
pipeline = faultwrap versionnegotiation context custombackendauth apiv1app
# heat-api-cfn pipeline
[pipeline:heat-api-cfn]
pipeline = cfnversionnegotiation ec2authtoken authtoken context apicfnv1app
# heat-api-cfn pipeline for standalone heat
# relies exclusively on authenticating with ec2 signed requests
[pipeline:heat-api-cfn-standalone]
pipeline = cfnversionnegotiation ec2authtoken context apicfnv1app
# heat-api-cloudwatch pipeline
[pipeline:heat-api-cloudwatch]
pipeline = versionnegotiation ec2authtoken authtoken context apicwapp
# heat-api-cloudwatch pipeline for standalone heat
# relies exclusively on authenticating with ec2 signed requests
[pipeline:heat-api-cloudwatch-standalone]
pipeline = versionnegotiation ec2authtoken context apicwapp
[app:apiv1app]
paste.app_factory = heat.common.wsgi:app_factory
heat.app_factory = heat.api.openstack.v1:API
[app:apicfnv1app]
paste.app_factory = heat.common.wsgi:app_factory
heat.app_factory = heat.api.cfn.v1:API
[app:apicwapp]
paste.app_factory = heat.common.wsgi:app_factory
heat.app_factory = heat.api.cloudwatch:API
[filter:versionnegotiation]
paste.filter_factory = heat.common.wsgi:filter_factory
heat.filter_factory = heat.api.openstack:version_negotiation_filter
[filter:faultwrap]
paste.filter_factory = heat.common.wsgi:filter_factory
heat.filter_factory = heat.api.openstack:faultwrap_filter
[filter:cfnversionnegotiation]
paste.filter_factory = heat.common.wsgi:filter_factory
heat.filter_factory = heat.api.cfn:version_negotiation_filter
[filter:cwversionnegotiation]
paste.filter_factory = heat.common.wsgi:filter_factory
heat.filter_factory = heat.api.cloudwatch:version_negotiation_filter
[filter:context]
paste.filter_factory = heat.common.context:ContextMiddleware_filter_factory
[filter:ec2authtoken]
paste.filter_factory = heat.api.aws.ec2token:EC2Token_filter_factory
[filter:ssl]
paste.filter_factory = heat.common.wsgi:filter_factory
heat.filter_factory = heat.api.openstack:sslmiddleware_filter
# Middleware to set auth_url header appropriately
[filter:authurl]
paste.filter_factory = heat.common.auth_url:filter_factory
# Auth middleware that validates token against keystone
[filter:authtoken]
paste.filter_factory = keystoneclient.middleware.auth_token:filter_factory
# Auth middleware that validates username/password against keystone
[filter:authpassword]
paste.filter_factory = heat.common.auth_password:filter_factory
# Auth middleware that validates against custom backend
[filter:custombackendauth]
paste.filter_factory = heat.common.custom_backend_auth:filter_factory

View File

@ -0,0 +1,9 @@
resource_registry:
# allow older templates with Quantum in them.
"OS::Quantum*": "OS::Neutron*"
# Choose your implementation of AWS::CloudWatch::Alarm
"AWS::CloudWatch::Alarm": "file:///etc/heat/templates/AWS_CloudWatch_Alarm.yaml"
#"AWS::CloudWatch::Alarm": "OS::Heat::CWLiteAlarm"
"OS::Metering::Alarm": "OS::Ceilometer::Alarm"
"AWS::RDS::DBInstance": "file:///etc/heat/templates/AWS_RDS_DBInstance.yaml"

File diff suppressed because it is too large

View File

@ -0,0 +1,70 @@
{
"context_is_admin": "role:admin",
"deny_stack_user": "not role:heat_stack_user",
"deny_everybody": "!",
"cloudformation:ListStacks": "rule:deny_stack_user",
"cloudformation:CreateStack": "rule:deny_stack_user",
"cloudformation:DescribeStacks": "rule:deny_stack_user",
"cloudformation:DeleteStack": "rule:deny_stack_user",
"cloudformation:UpdateStack": "rule:deny_stack_user",
"cloudformation:CancelUpdateStack": "rule:deny_stack_user",
"cloudformation:DescribeStackEvents": "rule:deny_stack_user",
"cloudformation:ValidateTemplate": "rule:deny_stack_user",
"cloudformation:GetTemplate": "rule:deny_stack_user",
"cloudformation:EstimateTemplateCost": "rule:deny_stack_user",
"cloudformation:DescribeStackResource": "",
"cloudformation:DescribeStackResources": "rule:deny_stack_user",
"cloudformation:ListStackResources": "rule:deny_stack_user",
"cloudwatch:DeleteAlarms": "rule:deny_stack_user",
"cloudwatch:DescribeAlarmHistory": "rule:deny_stack_user",
"cloudwatch:DescribeAlarms": "rule:deny_stack_user",
"cloudwatch:DescribeAlarmsForMetric": "rule:deny_stack_user",
"cloudwatch:DisableAlarmActions": "rule:deny_stack_user",
"cloudwatch:EnableAlarmActions": "rule:deny_stack_user",
"cloudwatch:GetMetricStatistics": "rule:deny_stack_user",
"cloudwatch:ListMetrics": "rule:deny_stack_user",
"cloudwatch:PutMetricAlarm": "rule:deny_stack_user",
"cloudwatch:PutMetricData": "",
"cloudwatch:SetAlarmState": "rule:deny_stack_user",
"actions:action": "rule:deny_stack_user",
"build_info:build_info": "rule:deny_stack_user",
"events:index": "rule:deny_stack_user",
"events:show": "rule:deny_stack_user",
"resource:index": "rule:deny_stack_user",
"resource:metadata": "",
"resource:signal": "",
"resource:show": "rule:deny_stack_user",
"stacks:abandon": "rule:deny_stack_user",
"stacks:create": "rule:deny_stack_user",
"stacks:delete": "rule:deny_stack_user",
"stacks:detail": "rule:deny_stack_user",
"stacks:generate_template": "rule:deny_stack_user",
"stacks:global_index": "rule:deny_everybody",
"stacks:index": "rule:deny_stack_user",
"stacks:list_resource_types": "rule:deny_stack_user",
"stacks:lookup": "",
"stacks:preview": "rule:deny_stack_user",
"stacks:resource_schema": "rule:deny_stack_user",
"stacks:show": "rule:deny_stack_user",
"stacks:template": "rule:deny_stack_user",
"stacks:update": "rule:deny_stack_user",
"stacks:update_patch": "rule:deny_stack_user",
"stacks:validate_template": "rule:deny_stack_user",
"stacks:snapshot": "rule:deny_stack_user",
"stacks:show_snapshot": "rule:deny_stack_user",
"stacks:delete_snapshot": "rule:deny_stack_user",
"stacks:list_snapshots": "rule:deny_stack_user",
"software_configs:create": "rule:deny_stack_user",
"software_configs:show": "rule:deny_stack_user",
"software_configs:delete": "rule:deny_stack_user",
"software_deployments:index": "rule:deny_stack_user",
"software_deployments:create": "rule:deny_stack_user",
"software_deployments:show": "rule:deny_stack_user",
"software_deployments:update": "rule:deny_stack_user",
"software_deployments:delete": "rule:deny_stack_user",
"software_deployments:metadata": ""
}
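
The policy language above is small: an empty rule always passes, "!" always denies, "role:X" tests the caller's roles, and "not"/"rule:" compose other checks. A toy evaluator covering only the checks used in this file (an illustration, not oslo.policy):

RULES = {
    "deny_stack_user": "not role:heat_stack_user",
    "deny_everybody": "!",
    "stacks:create": "rule:deny_stack_user",
    "stacks:global_index": "rule:deny_everybody",
}

def check(rule_name, creds):
    return _eval(RULES.get(rule_name, ""), creds)

def _eval(expr, creds):
    expr = expr.strip()
    if expr == "":                    # empty rule: always allowed
        return True
    if expr == "!":                   # never allowed
        return False
    if expr.startswith("not "):
        return not _eval(expr[4:], creds)
    if expr.startswith("rule:"):      # indirection through another rule
        return _eval(RULES[expr[5:]], creds)
    if expr.startswith("role:"):
        return expr[5:] in creds.get("roles", [])
    raise ValueError("unsupported check: %s" % expr)

# a heat_stack_user may signal resources but not create stacks
user = {"roles": ["heat_stack_user"]}
print(check("stacks:create", user))    # False
print(check("resource:signal", user))  # True (empty rule above)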


@ -0,0 +1,27 @@
# config for templated.Catalog, using camelCase because I don't want to do
# translations for keystone compat
catalog.RegionOne.identity.publicURL = http://localhost:$(public_port)s/v2.0
catalog.RegionOne.identity.adminURL = http://localhost:$(admin_port)s/v2.0
catalog.RegionOne.identity.internalURL = http://localhost:$(public_port)s/v2.0
catalog.RegionOne.identity.name = Identity Service
# fake compute service for now to help novaclient tests work
catalog.RegionOne.compute.publicURL = http://localhost:8774/v1.1/$(tenant_id)s
catalog.RegionOne.compute.adminURL = http://localhost:8774/v1.1/$(tenant_id)s
catalog.RegionOne.compute.internalURL = http://localhost:8774/v1.1/$(tenant_id)s
catalog.RegionOne.compute.name = Compute Service
catalog.RegionOne.volume.publicURL = http://localhost:8776/v1/$(tenant_id)s
catalog.RegionOne.volume.adminURL = http://localhost:8776/v1/$(tenant_id)s
catalog.RegionOne.volume.internalURL = http://localhost:8776/v1/$(tenant_id)s
catalog.RegionOne.volume.name = Volume Service
catalog.RegionOne.ec2.publicURL = http://localhost:8773/services/Cloud
catalog.RegionOne.ec2.adminURL = http://localhost:8773/services/Admin
catalog.RegionOne.ec2.internalURL = http://localhost:8773/services/Cloud
catalog.RegionOne.ec2.name = EC2 Service
catalog.RegionOne.image.publicURL = http://localhost:9292/v1
catalog.RegionOne.image.adminURL = http://localhost:9292/v1
catalog.RegionOne.image.internalURL = http://localhost:9292/v1
catalog.RegionOne.image.name = Image Service
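
The $(...)s placeholders are filled per request; the templated backend's convention amounts to Python %-formatting after rewriting "$(" to "%(". A minimal sketch of that substitution (assumed mechanics, with a made-up tenant id):

line = ("catalog.RegionOne.compute.publicURL = "
        "http://localhost:8774/v1.1/$(tenant_id)s")
key, _, template = line.partition(" = ")

# rewrite $( -> %( and apply normal mapping-style %-formatting
url = template.replace("$(", "%(") % {"tenant_id": "TENANT"}
print(url)  # http://localhost:8774/v1.1/TENANT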


@ -0,0 +1,121 @@
# Keystone PasteDeploy configuration file.
[filter:debug]
paste.filter_factory = keystone.common.wsgi:Debug.factory
[filter:build_auth_context]
paste.filter_factory = keystone.middleware:AuthContextMiddleware.factory
[filter:token_auth]
paste.filter_factory = keystone.middleware:TokenAuthMiddleware.factory
[filter:admin_token_auth]
paste.filter_factory = keystone.middleware:AdminTokenAuthMiddleware.factory
[filter:xml_body]
paste.filter_factory = keystone.middleware:XmlBodyMiddleware.factory
[filter:xml_body_v2]
paste.filter_factory = keystone.middleware:XmlBodyMiddlewareV2.factory
[filter:xml_body_v3]
paste.filter_factory = keystone.middleware:XmlBodyMiddlewareV3.factory
[filter:json_body]
paste.filter_factory = keystone.middleware:JsonBodyMiddleware.factory
[filter:user_crud_extension]
paste.filter_factory = keystone.contrib.user_crud:CrudExtension.factory
[filter:crud_extension]
paste.filter_factory = keystone.contrib.admin_crud:CrudExtension.factory
[filter:ec2_extension]
paste.filter_factory = keystone.contrib.ec2:Ec2Extension.factory
[filter:ec2_extension_v3]
paste.filter_factory = keystone.contrib.ec2:Ec2ExtensionV3.factory
[filter:federation_extension]
paste.filter_factory = keystone.contrib.federation.routers:FederationExtension.factory
[filter:oauth1_extension]
paste.filter_factory = keystone.contrib.oauth1.routers:OAuth1Extension.factory
[filter:s3_extension]
paste.filter_factory = keystone.contrib.s3:S3Extension.factory
[filter:endpoint_filter_extension]
paste.filter_factory = keystone.contrib.endpoint_filter.routers:EndpointFilterExtension.factory
[filter:endpoint_policy_extension]
paste.filter_factory = keystone.contrib.endpoint_policy.routers:EndpointPolicyExtension.factory
[filter:simple_cert_extension]
paste.filter_factory = keystone.contrib.simple_cert:SimpleCertExtension.factory
[filter:revoke_extension]
paste.filter_factory = keystone.contrib.revoke.routers:RevokeExtension.factory
[filter:url_normalize]
paste.filter_factory = keystone.middleware:NormalizingFilter.factory
[filter:sizelimit]
paste.filter_factory = keystone.middleware:RequestBodySizeLimiter.factory
[filter:stats_monitoring]
paste.filter_factory = keystone.contrib.stats:StatsMiddleware.factory
[filter:stats_reporting]
paste.filter_factory = keystone.contrib.stats:StatsExtension.factory
[filter:access_log]
paste.filter_factory = keystone.contrib.access:AccessLogMiddleware.factory
[app:public_service]
paste.app_factory = keystone.service:public_app_factory
[app:service_v3]
paste.app_factory = keystone.service:v3_app_factory
[app:admin_service]
paste.app_factory = keystone.service:admin_app_factory
[pipeline:public_api]
# The last item in this pipeline must be public_service or an equivalent
# application. It cannot be a filter.
pipeline = sizelimit url_normalize build_auth_context token_auth admin_token_auth xml_body_v2 json_body ec2_extension user_crud_extension public_service
[pipeline:admin_api]
# The last item in this pipeline must be admin_service or an equivalent
# application. It cannot be a filter.
pipeline = sizelimit url_normalize build_auth_context token_auth admin_token_auth xml_body_v2 json_body ec2_extension s3_extension crud_extension admin_service
[pipeline:api_v3]
# The last item in this pipeline must be service_v3 or an equivalent
# application. It cannot be a filter.
pipeline = sizelimit url_normalize build_auth_context token_auth admin_token_auth xml_body_v3 json_body ec2_extension_v3 s3_extension simple_cert_extension revoke_extension service_v3
[app:public_version_service]
paste.app_factory = keystone.service:public_version_app_factory
[app:admin_version_service]
paste.app_factory = keystone.service:admin_version_app_factory
[pipeline:public_version_api]
pipeline = sizelimit url_normalize xml_body public_version_service
[pipeline:admin_version_api]
pipeline = sizelimit url_normalize xml_body admin_version_service
[composite:main]
use = egg:Paste#urlmap
/v2.0 = public_api
/v3 = api_v3
/ = public_version_api
[composite:admin]
use = egg:Paste#urlmap
/v2.0 = admin_api
/v3 = api_v3
/ = admin_version_api
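
Each [pipeline:...] line composes WSGI middleware: every filter wraps the app to its right, so the last name is the terminal app and a request traverses the names left to right. A self-contained sketch of that wrapping (stand-in factories, not PasteDeploy itself):

def make_tagger(tag):
    """Stand-in for what a paste.filter_factory produces."""
    def filter_(app):
        def middleware(environ, start_response):
            environ.setdefault("seen", []).append(tag)
            return app(environ, start_response)
        return middleware
    return filter_

def public_service(environ, start_response):
    start_response("200 OK", [("Content-Type", "text/plain")])
    return [", ".join(environ["seen"]).encode()]

pipeline = [make_tagger("sizelimit"), make_tagger("url_normalize"),
            make_tagger("token_auth")]
app = public_service
for factory in reversed(pipeline):   # wrap right-to-left
    app = factory(app)

print(app({}, lambda status, headers: None))
# [b'sizelimit, url_normalize, token_auth'] -- left-to-right traversal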


@ -0,0 +1,385 @@
[DEFAULT]
workers = 48
# A "shared secret" between keystone and other openstack services
# admin_token = ADMIN
admin_token = <REDACTED>
# The IP address of the network interface to listen on
# bind_host = 0.0.0.0
bind_host = 0.0.0.0
# The port number which the public service listens on
# public_port = 5000
public_port = 5000
# The port number which the admin service listens on
# admin_port = 35357
admin_port = 35357
# The base endpoint URLs for keystone that are advertised to clients
# (NOTE: this does NOT affect how keystone listens for connections)
# public_endpoint = http://localhost:%(public_port)d/
# admin_endpoint = http://localhost:%(admin_port)d/
# The number of worker processes to serve the public and admin WSGI
# applications respectively.
public_workers = 24
admin_workers = 24
# The port number which the OpenStack Compute service listens on
# compute_port = 8774
compute_port = 8774
# Path to your policy definition containing identity actions
# policy_file = policy.json
# Rule to check if no matching policy definition is found
# FIXME(dolph): This should really be defined as [policy] default_rule
# policy_default_rule = admin_required
# Role for migrating membership relationships
# During a SQL upgrade, the following values will be used to create a new role
# that will replace records in the user_tenant_membership table with explicit
# role grants. After migration, the member_role_id will be used in the API
# add_user_to_project, and member_role_name will be ignored.
# member_role_id = 9fe2ff9ee4384b1894a90878d3e92bab
# member_role_name = _member_
# === Logging Options ===
# Print debugging output
# (includes plaintext request logging, potentially including passwords)
debug = False
#debug = True
# Print more verbose output
verbose = False
#verbose = True
# Name of log file to output to. If not set, logging will go to stdout.
log_file = keystone.log
# The directory to keep log files in (will be prepended to --logfile)
log_dir = /var/log/keystone
# Use syslog for logging.
# use_syslog = False
use_syslog = False
# syslog facility to receive log lines
# syslog_log_facility = LOG_USER
# If this option is specified, the logging configuration file specified is
# used and overrides any other logging options specified. Please see the
# Python logging module documentation for details on logging configuration
# files.
# log_config = logging.conf
# A logging.Formatter log message format string which may use any of the
# available logging.LogRecord attributes.
# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s
# Format string for %(asctime)s in log records.
# log_date_format = %Y-%m-%d %H:%M:%S
# onready allows you to send a notification when the process is ready to serve
# For example, to have it notify using systemd, one could set the shell command:
# onready = systemd-notify --ready
# or a module with notify() method:
# onready = keystone.common.systemd
[sql]
# The SQLAlchemy connection string used to connect to the database
# connection = sqlite:////var/lib/keystone/keystone.db
connection = mysql://keystone_admin:<REDACTED>@<CONTROLLER_IP>/keystone
# the timeout before idle sql connections are reaped
# idle_timeout = 200
idle_timeout = 200
[memcache]
servers = localhost:11211
# max_compare_and_set_retry = 16
[identity]
driver = keystone.identity.backends.sql.Identity
# This references the domain to use for all Identity API v2 requests (which are
# not aware of domains). A domain with this ID will be created for you by
# keystone-manage db_sync in migration 008. The domain referenced by this ID
# cannot be deleted on the v3 API, to prevent accidentally breaking the v2 API.
# There is nothing special about this domain, other than the fact that it must
# exist in order to maintain support for your v2 clients.
# default_domain_id = default
[trust]
driver = keystone.trust.backends.sql.Trust
# delegation and impersonation features can be optionally disabled
# enabled = True
[catalog]
# dynamic, sql-based backend (supports API/CLI-based management commands)
driver = keystone.catalog.backends.sql.Catalog
# static, file-based backend (does *NOT* support any management commands)
# driver = keystone.catalog.backends.templated.TemplatedCatalog
# template_file = default_catalog.templates
[token]
#driver = keystone.token.backends.sql.Token
driver = keystone.token.backends.memcache.Token
# Amount of time a token should remain valid (in seconds)
#expiration = 86400
# shorter expiration keeps bloat down and performance up
# 10 min pre-patch, 1 hr post-patch.
expiration = 3600
#provider=keystone.token.providers.pki.Provider
provider=keystone.token.providers.uuid.Provider
# Token specific caching toggle. This has no effect unless the global caching
# option is set to True
# caching = True
# no point in caching tokens that are only stored in memcache
caching = False
# Token specific cache time-to-live (TTL) in seconds.
# cache_time =
# Revocation-List specific cache time-to-live (TTL) in seconds.
# revocation_cache_time = 3600
[cache]
# Global cache functionality toggle.
enabled = False
# enabled = True
# Prefix for building the configuration dictionary for the cache region. This
# should not need to be changed unless there is another dogpile.cache region
# with the same configuration name
# config_prefix = cache.keystone
# Default TTL, in seconds, for any cached item in the dogpile.cache region.
# This applies to any cached method that doesn't have an explicit cache
# expiration time defined for it.
# expiration_time = 600
# Dogpile.cache backend module. It is recommended that Memcache
# (dogpile.cache.memcache) or Redis (dogpile.cache.redis) be used in production
# deployments. Small workloads (single process) like devstack can use the
# dogpile.cache.memory backend.
# backend = keystone.common.cache.noop
#backend = dogpile.cache.memcache
backend = dogpile.cache.pylibmc
# Arguments supplied to the backend module. Specify this option once per
# argument to be passed to the dogpile.cache backend.
# Example format: <argname>:<value>
# backend_argument =
backend_argument = url:127.0.0.1
# Proxy Classes to import that will affect the way the dogpile.cache backend
# functions. See the dogpile.cache documentation on changing-backend-behavior.
# Comma delimited list e.g. my.dogpile.proxy.Class, my.dogpile.proxyClass2
# proxies =
# Use a key-mangling function (sha1) to ensure fixed length cache-keys. This
# is toggle-able for debugging purposes; it is highly recommended to always
# leave this set to True.
# use_key_mangler = True
# Extra debugging from the cache backend (cache keys, get/set/delete/etc calls)
# This is only really useful if you need to see the specific cache-backend
# get/set/delete calls with the keys/values. Typically this should be left
# set to False.
# debug_cache_backend = False
[policy]
driver = keystone.policy.backends.sql.Policy
[ec2]
driver = keystone.contrib.ec2.backends.sql.Ec2
[ssl]
#enable = True
#certfile = /etc/keystone/ssl/certs/keystone.pem
#keyfile = /etc/keystone/ssl/private/keystonekey.pem
#ca_certs = /etc/keystone/ssl/certs/ca.pem
#cert_required = True
[signing]
#token_format = PKI
#certfile = /etc/keystone/ssl/certs/signing_cert.pem
#keyfile = /etc/keystone/ssl/private/signing_key.pem
#ca_certs = /etc/keystone/ssl/certs/ca.pem
#key_size = 1024
#valid_days = 3650
#ca_password = None
[ldap]
# url = ldap://localhost
# user = dc=Manager,dc=example,dc=com
# password = None
# suffix = cn=example,cn=com
# use_dumb_member = False
# allow_subtree_delete = False
# dumb_member = cn=dumb,dc=example,dc=com
# Maximum results per page; a value of zero ('0') disables paging (default)
# page_size = 0
# The LDAP dereferencing option for queries. This can be either 'never',
# 'searching', 'always', 'finding' or 'default'. The 'default' option falls
# back to using default dereferencing configured by your ldap.conf.
# alias_dereferencing = default
# The LDAP scope for queries, this can be either 'one'
# (onelevel/singleLevel) or 'sub' (subtree/wholeSubtree)
# query_scope = one
# user_tree_dn = ou=Users,dc=example,dc=com
# user_filter =
# user_objectclass = inetOrgPerson
# user_domain_id_attribute = businessCategory
# user_id_attribute = cn
# user_name_attribute = sn
# user_mail_attribute = email
# user_pass_attribute = userPassword
# user_enabled_attribute = enabled
# user_enabled_mask = 0
# user_enabled_default = True
# user_attribute_ignore = tenant_id,tenants
# user_allow_create = True
# user_allow_update = True
# user_allow_delete = True
# user_enabled_emulation = False
# user_enabled_emulation_dn =
# tenant_tree_dn = ou=Groups,dc=example,dc=com
# tenant_filter =
# tenant_objectclass = groupOfNames
# tenant_domain_id_attribute = businessCategory
# tenant_id_attribute = cn
# tenant_member_attribute = member
# tenant_name_attribute = ou
# tenant_desc_attribute = desc
# tenant_enabled_attribute = enabled
# tenant_attribute_ignore =
# tenant_allow_create = True
# tenant_allow_update = True
# tenant_allow_delete = True
# tenant_enabled_emulation = False
# tenant_enabled_emulation_dn =
# role_tree_dn = ou=Roles,dc=example,dc=com
# role_filter =
# role_objectclass = organizationalRole
# role_id_attribute = cn
# role_name_attribute = ou
# role_member_attribute = roleOccupant
# role_attribute_ignore =
# role_allow_create = True
# role_allow_update = True
# role_allow_delete = True
# group_tree_dn =
# group_filter =
# group_objectclass = groupOfNames
# group_id_attribute = cn
# group_name_attribute = ou
# group_member_attribute = member
# group_desc_attribute = desc
# group_attribute_ignore =
# group_allow_create = True
# group_allow_update = True
# group_allow_delete = True
[auth]
methods = password,token
password = keystone.auth.plugins.password.Password
token = keystone.auth.plugins.token.Token
[filter:debug]
paste.filter_factory = keystone.common.wsgi:Debug.factory
[filter:token_auth]
paste.filter_factory = keystone.middleware:TokenAuthMiddleware.factory
[filter:admin_token_auth]
paste.filter_factory = keystone.middleware:AdminTokenAuthMiddleware.factory
[filter:xml_body]
paste.filter_factory = keystone.middleware:XmlBodyMiddleware.factory
[filter:json_body]
paste.filter_factory = keystone.middleware:JsonBodyMiddleware.factory
[filter:user_crud_extension]
paste.filter_factory = keystone.contrib.user_crud:CrudExtension.factory
[filter:crud_extension]
paste.filter_factory = keystone.contrib.admin_crud:CrudExtension.factory
[filter:ec2_extension]
paste.filter_factory = keystone.contrib.ec2:Ec2Extension.factory
[filter:s3_extension]
paste.filter_factory = keystone.contrib.s3:S3Extension.factory
[filter:url_normalize]
paste.filter_factory = keystone.middleware:NormalizingFilter.factory
[filter:sizelimit]
paste.filter_factory = keystone.middleware:RequestBodySizeLimiter.factory
[filter:stats_monitoring]
paste.filter_factory = keystone.contrib.stats:StatsMiddleware.factory
[filter:stats_reporting]
paste.filter_factory = keystone.contrib.stats:StatsExtension.factory
[filter:access_log]
paste.filter_factory = keystone.contrib.access:AccessLogMiddleware.factory
[app:public_service]
paste.app_factory = keystone.service:public_app_factory
[app:service_v3]
paste.app_factory = keystone.service:v3_app_factory
[app:admin_service]
paste.app_factory = keystone.service:admin_app_factory
[pipeline:public_api]
pipeline = access_log sizelimit stats_monitoring url_normalize token_auth admin_token_auth xml_body json_body debug ec2_extension user_crud_extension public_service
[pipeline:admin_api]
pipeline = access_log sizelimit stats_monitoring url_normalize token_auth admin_token_auth xml_body json_body debug stats_reporting ec2_extension s3_extension crud_extension admin_service
[pipeline:api_v3]
pipeline = access_log sizelimit stats_monitoring url_normalize token_auth admin_token_auth xml_body json_body debug stats_reporting ec2_extension s3_extension service_v3
[app:public_version_service]
paste.app_factory = keystone.service:public_version_app_factory
[app:admin_version_service]
paste.app_factory = keystone.service:admin_version_app_factory
[pipeline:public_version_api]
pipeline = access_log sizelimit stats_monitoring url_normalize xml_body public_version_service
[pipeline:admin_version_api]
pipeline = access_log sizelimit stats_monitoring url_normalize xml_body admin_version_service
[composite:main]
use = egg:Paste#urlmap
/v2.0 = public_api
/v3 = api_v3
/ = public_version_api
[composite:admin]
use = egg:Paste#urlmap
/v2.0 = admin_api
/v3 = api_v3
/ = admin_version_api
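
For reference, the [cache] stanza above corresponds to a dogpile.cache region like the following sketch (assumes the dogpile.cache and pylibmc packages are installed; keystone does its own wiring internally):

from dogpile.cache import make_region

region = make_region().configure(
    "dogpile.cache.pylibmc",           # backend = dogpile.cache.pylibmc
    expiration_time=600,               # the expiration_time default above
    arguments={"url": ["127.0.0.1"]},  # backend_argument = url:127.0.0.1
)

@region.cache_on_arguments()
def lookup(thing):
    ...  # an expensive call, cached in memcached for 10 minutes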


@ -0,0 +1,386 @@
[DEFAULT]
workers = 48
# A "shared secret" between keystone and other openstack services
# admin_token = ADMIN
admin_token = <REDACTED>
# The IP address of the network interface to listen on
# bind_host = 0.0.0.0
bind_host = 0.0.0.0
# The port number which the public service listens on
# public_port = 5000
public_port = 5000
# The port number which the admin service listens on
# admin_port = 35357
admin_port = 35357
# The base endpoint URLs for keystone that are advertised to clients
# (NOTE: this does NOT affect how keystone listens for connections)
# public_endpoint = http://localhost:%(public_port)d/
# admin_endpoint = http://localhost:%(admin_port)d/
# The number of worker processes to serve the public and admin WSGI
# applications respectively.
public_workers = 24
admin_workers = 24
# The port number which the OpenStack Compute service listens on
# compute_port = 8774
compute_port = 8774
# Path to your policy definition containing identity actions
# policy_file = policy.json
# Rule to check if no matching policy definition is found
# FIXME(dolph): This should really be defined as [policy] default_rule
# policy_default_rule = admin_required
# Role for migrating membership relationships
# During a SQL upgrade, the following values will be used to create a new role
# that will replace records in the user_tenant_membership table with explicit
# role grants. After migration, the member_role_id will be used in the API
# add_user_to_project, and member_role_name will be ignored.
# member_role_id = 9fe2ff9ee4384b1894a90878d3e92bab
# member_role_name = _member_
# === Logging Options ===
# Print debugging output
# (includes plaintext request logging, potentially including passwords)
debug = False
#debug = True
# Print more verbose output
verbose = False
#verbose = True
# Name of log file to output to. If not set, logging will go to stdout.
log_file = keystone.log
# The directory to keep log files in (will be prepended to --logfile)
log_dir = /var/log/keystone
# Use syslog for logging.
# use_syslog = False
use_syslog = False
# syslog facility to receive log lines
# syslog_log_facility = LOG_USER
# If this option is specified, the logging configuration file specified is
# used and overrides any other logging options specified. Please see the
# Python logging module documentation for details on logging configuration
# files.
# log_config = logging.conf
# A logging.Formatter log message format string which may use any of the
# available logging.LogRecord attributes.
# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s
# Format string for %(asctime)s in log records.
# log_date_format = %Y-%m-%d %H:%M:%S
# onready allows you to send a notification when the process is ready to serve
# For example, to have it notify using systemd, one could set the shell command:
# onready = systemd-notify --ready
# or a module with notify() method:
# onready = keystone.common.systemd
[database]
# The SQLAlchemy connection string used to connect to the database
# connection = sqlite:////var/lib/keystone/keystone.db
connection = mysql://keystone_admin:<REDACTED>@localhost/keystone
[sql]
# the timeout before idle sql connections are reaped
# idle_timeout = 200
idle_timeout = 200
[memcache]
servers = localhost:11211
# max_compare_and_set_retry = 16
[identity]
driver = keystone.identity.backends.sql.Identity
# This references the domain to use for all Identity API v2 requests (which are
# not aware of domains). A domain with this ID will be created for you by
# keystone-manage db_sync in migration 008. The domain referenced by this ID
# cannot be deleted on the v3 API, to prevent accidentally breaking the v2 API.
# There is nothing special about this domain, other than the fact that it must
# exist in order to maintain support for your v2 clients.
# default_domain_id = default
[trust]
driver = keystone.trust.backends.sql.Trust
# delegation and impersonation features can be optionally disabled
# enabled = True
[catalog]
# dynamic, sql-based backend (supports API/CLI-based management commands)
driver = keystone.catalog.backends.sql.Catalog
# static, file-based backend (does *NOT* support any management commands)
# driver = keystone.catalog.backends.templated.TemplatedCatalog
# template_file = default_catalog.templates
[token]
#driver = keystone.token.backends.sql.Token
driver = keystone.token.persistence.backends.memcache.Token
# Amount of time a token should remain valid (in seconds)
#expiration = 86400
# shorter expiration keeps bloat down and performance up
# icehouse: 1hr max. juno: 2hr fine; does 10hr work...?
expiration = 36000
#provider=keystone.token.providers.pki.Provider
provider=keystone.token.providers.uuid.Provider
# Token specific caching toggle. This has no effect unless the global caching
# option is set to True
# caching = True
# no point in caching tokens that are only stored in memcache
caching = False
# Token specific cache time-to-live (TTL) in seconds.
# cache_time =
# Revocation-List specific cache time-to-live (TTL) in seconds.
# revocation_cache_time = 3600
[cache]
# Global cache functionality toggle.
enabled = False
# enabled = True
# Prefix for building the configuration dictionary for the cache region. This
# should not need to be changed unless there is another dogpile.cache region
# with the same configuration name
# config_prefix = cache.keystone
# Default TTL, in seconds, for any cached item in the dogpile.cache region.
# This applies to any cached method that doesn't have an explicit cache
# expiration time defined for it.
# expiration_time = 600
# Dogpile.cache backend module. It is recommended that Memcache
# (dogpile.cache.memcache) or Redis (dogpile.cache.redis) be used in production
# deployments. Small workloads (single process) like devstack can use the
# dogpile.cache.memory backend.
# backend = keystone.common.cache.noop
#backend = dogpile.cache.memcache
backend = dogpile.cache.pylibmc
# Arguments supplied to the backend module. Specify this option once per
# argument to be passed to the dogpile.cache backend.
# Example format: <argname>:<value>
# backend_argument =
backend_argument = url:127.0.0.1
# Proxy Classes to import that will affect the way the dogpile.cache backend
# functions. See the dogpile.cache documentation on changing-backend-behavior.
# Comma delimited list e.g. my.dogpile.proxy.Class, my.dogpile.proxyClass2
# proxies =
# Use a key-mangling function (sha1) to ensure fixed length cache-keys. This
# is toggle-able for debugging purposes; it is highly recommended to always
# leave this set to True.
# use_key_mangler = True
# Extra debugging from the cache backend (cache keys, get/set/delete/etc calls)
# This is only really useful if you need to see the specific cache-backend
# get/set/delete calls with the keys/values. Typically this should be left
# set to False.
# debug_cache_backend = False
[policy]
driver = keystone.policy.backends.sql.Policy
[ec2]
driver = keystone.contrib.ec2.backends.sql.Ec2
[ssl]
#enable = True
#certfile = /etc/keystone/ssl/certs/keystone.pem
#keyfile = /etc/keystone/ssl/private/keystonekey.pem
#ca_certs = /etc/keystone/ssl/certs/ca.pem
#cert_required = True
[signing]
#token_format = PKI
#certfile = /etc/keystone/ssl/certs/signing_cert.pem
#keyfile = /etc/keystone/ssl/private/signing_key.pem
#ca_certs = /etc/keystone/ssl/certs/ca.pem
#key_size = 1024
#valid_days = 3650
#ca_password = None
[ldap]
# url = ldap://localhost
# user = dc=Manager,dc=example,dc=com
# password = None
# suffix = cn=example,cn=com
# use_dumb_member = False
# allow_subtree_delete = False
# dumb_member = cn=dumb,dc=example,dc=com
# Maximum results per page; a value of zero ('0') disables paging (default)
# page_size = 0
# The LDAP dereferencing option for queries. This can be either 'never',
# 'searching', 'always', 'finding' or 'default'. The 'default' option falls
# back to using default dereferencing configured by your ldap.conf.
# alias_dereferencing = default
# The LDAP scope for queries, this can be either 'one'
# (onelevel/singleLevel) or 'sub' (subtree/wholeSubtree)
# query_scope = one
# user_tree_dn = ou=Users,dc=example,dc=com
# user_filter =
# user_objectclass = inetOrgPerson
# user_domain_id_attribute = businessCategory
# user_id_attribute = cn
# user_name_attribute = sn
# user_mail_attribute = email
# user_pass_attribute = userPassword
# user_enabled_attribute = enabled
# user_enabled_mask = 0
# user_enabled_default = True
# user_attribute_ignore = tenant_id,tenants
# user_allow_create = True
# user_allow_update = True
# user_allow_delete = True
# user_enabled_emulation = False
# user_enabled_emulation_dn =
# tenant_tree_dn = ou=Groups,dc=example,dc=com
# tenant_filter =
# tenant_objectclass = groupOfNames
# tenant_domain_id_attribute = businessCategory
# tenant_id_attribute = cn
# tenant_member_attribute = member
# tenant_name_attribute = ou
# tenant_desc_attribute = desc
# tenant_enabled_attribute = enabled
# tenant_attribute_ignore =
# tenant_allow_create = True
# tenant_allow_update = True
# tenant_allow_delete = True
# tenant_enabled_emulation = False
# tenant_enabled_emulation_dn =
# role_tree_dn = ou=Roles,dc=example,dc=com
# role_filter =
# role_objectclass = organizationalRole
# role_id_attribute = cn
# role_name_attribute = ou
# role_member_attribute = roleOccupant
# role_attribute_ignore =
# role_allow_create = True
# role_allow_update = True
# role_allow_delete = True
# group_tree_dn =
# group_filter =
# group_objectclass = groupOfNames
# group_id_attribute = cn
# group_name_attribute = ou
# group_member_attribute = member
# group_desc_attribute = desc
# group_attribute_ignore =
# group_allow_create = True
# group_allow_update = True
# group_allow_delete = True
[auth]
methods = password,token
password = keystone.auth.plugins.password.Password
token = keystone.auth.plugins.token.Token
[filter:debug]
paste.filter_factory = keystone.common.wsgi:Debug.factory
[filter:token_auth]
paste.filter_factory = keystone.middleware:TokenAuthMiddleware.factory
[filter:admin_token_auth]
paste.filter_factory = keystone.middleware:AdminTokenAuthMiddleware.factory
[filter:xml_body]
paste.filter_factory = keystone.middleware:XmlBodyMiddleware.factory
[filter:json_body]
paste.filter_factory = keystone.middleware:JsonBodyMiddleware.factory
[filter:user_crud_extension]
paste.filter_factory = keystone.contrib.user_crud:CrudExtension.factory
[filter:crud_extension]
paste.filter_factory = keystone.contrib.admin_crud:CrudExtension.factory
[filter:ec2_extension]
paste.filter_factory = keystone.contrib.ec2:Ec2Extension.factory
[filter:s3_extension]
paste.filter_factory = keystone.contrib.s3:S3Extension.factory
[filter:url_normalize]
paste.filter_factory = keystone.middleware:NormalizingFilter.factory
[filter:sizelimit]
paste.filter_factory = keystone.middleware:RequestBodySizeLimiter.factory
[filter:stats_monitoring]
paste.filter_factory = keystone.contrib.stats:StatsMiddleware.factory
[filter:stats_reporting]
paste.filter_factory = keystone.contrib.stats:StatsExtension.factory
[filter:access_log]
paste.filter_factory = keystone.contrib.access:AccessLogMiddleware.factory
[app:public_service]
paste.app_factory = keystone.service:public_app_factory
[app:service_v3]
paste.app_factory = keystone.service:v3_app_factory
[app:admin_service]
paste.app_factory = keystone.service:admin_app_factory
[pipeline:public_api]
pipeline = access_log sizelimit stats_monitoring url_normalize token_auth admin_token_auth xml_body json_body debug ec2_extension user_crud_extension public_service
[pipeline:admin_api]
pipeline = access_log sizelimit stats_monitoring url_normalize token_auth admin_token_auth xml_body json_body debug stats_reporting ec2_extension s3_extension crud_extension admin_service
[pipeline:api_v3]
pipeline = access_log sizelimit stats_monitoring url_normalize token_auth admin_token_auth xml_body json_body debug stats_reporting ec2_extension s3_extension service_v3
[app:public_version_service]
paste.app_factory = keystone.service:public_version_app_factory
[app:admin_version_service]
paste.app_factory = keystone.service:admin_version_app_factory
[pipeline:public_version_api]
pipeline = access_log sizelimit stats_monitoring url_normalize xml_body public_version_service
[pipeline:admin_version_api]
pipeline = access_log sizelimit stats_monitoring url_normalize xml_body admin_version_service
[composite:main]
use = egg:Paste#urlmap
/v2.0 = public_api
/v3 = api_v3
/ = public_version_api
[composite:admin]
use = egg:Paste#urlmap
/v2.0 = admin_api
/v3 = api_v3
/ = admin_version_api
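
With UUID tokens and the v2.0 API configured above, fetching a token is a single POST; a hedged sketch with python-requests (host, tenant, and credentials are placeholders, not values from this repo):

import requests

resp = requests.post(
    "http://controller:5000/v2.0/tokens",
    json={"auth": {"tenantName": "demo",
                   "passwordCredentials": {"username": "demo",
                                           "password": "secret"}}},
)
token = resp.json()["access"]["token"]
print(token["id"], token["expires"])  # opaque UUID + expiry (~10h here)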


@ -0,0 +1,89 @@
[DEFAULT]
bind_host = 0.0.0.0
public_port = 5000
admin_port = 35357
admin_token = <REDACTED>
compute_port = 3000
verbose = false
debug = false
log_file = /var/log/keystone/keystone.log
use_syslog = False
[sql]
connection = mysql://keystone_admin:<REDACTED>@127.0.0.1/keystone
idle_timeout = 300
min_pool_size = 5
max_pool_size = 10
pool_timeout = 200
[identity]
driver = keystone.identity.backends.sql.Identity
[catalog]
driver=keystone.catalog.backends.sql.Catalog
[token]
driver = keystone.token.backends.kvs.Token
expiration = 86400
[policy]
driver = keystone.policy.backends.rules.Policy
[ec2]
driver = keystone.contrib.ec2.backends.sql.Ec2
[filter:debug]
paste.filter_factory = keystone.common.wsgi:Debug.factory
[filter:token_auth]
paste.filter_factory = keystone.middleware:TokenAuthMiddleware.factory
[filter:admin_token_auth]
paste.filter_factory = keystone.middleware:AdminTokenAuthMiddleware.factory
[filter:xml_body]
paste.filter_factory = keystone.middleware:XmlBodyMiddleware.factory
[filter:json_body]
paste.filter_factory = keystone.middleware:JsonBodyMiddleware.factory
[filter:crud_extension]
paste.filter_factory = keystone.contrib.admin_crud:CrudExtension.factory
[filter:ec2_extension]
paste.filter_factory = keystone.contrib.ec2:Ec2Extension.factory
[filter:s3_extension]
paste.filter_factory = keystone.contrib.s3:S3Extension.factory
[app:public_service]
paste.app_factory = keystone.service:public_app_factory
[app:admin_service]
paste.app_factory = keystone.service:admin_app_factory
[pipeline:public_api]
pipeline = token_auth admin_token_auth xml_body json_body debug ec2_extension public_service
[pipeline:admin_api]
pipeline = token_auth admin_token_auth xml_body json_body debug ec2_extension s3_extension crud_extension admin_service
[app:public_version_service]
paste.app_factory = keystone.service:public_version_app_factory
[app:admin_version_service]
paste.app_factory = keystone.service:admin_version_app_factory
[pipeline:public_version_api]
pipeline = xml_body public_version_service
[pipeline:admin_version_api]
pipeline = xml_body admin_version_service
[composite:main]
use = egg:Paste#urlmap
/v2.0 = public_api
/ = public_version_api
[composite:admin]
use = egg:Paste#urlmap
/v2.0 = admin_api
/ = admin_version_api


@ -0,0 +1,39 @@
[loggers]
keys=root
[formatters]
keys=normal,normal_with_name,debug
[handlers]
keys=production,file,devel
[logger_root]
level=WARNING
handlers=file
[handler_production]
class=handlers.SysLogHandler
level=ERROR
formatter=normal_with_name
args=(('localhost', handlers.SYSLOG_UDP_PORT), handlers.SysLogHandler.LOG_USER)
[handler_file]
class=FileHandler
level=DEBUG
formatter=normal_with_name
args=('/var/log/keystone/keystone.log', 'a')
[handler_devel]
class=StreamHandler
level=NOTSET
formatter=debug
args=(sys.stdout,)
[formatter_normal]
format=%(asctime)s %(levelname)s %(message)s
[formatter_normal_with_name]
format=(%(name)s): %(asctime)s %(levelname)s %(message)s
[formatter_debug]
format=(%(name)s): %(asctime)s %(levelname)s %(module)s %(funcName)s %(message)s
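
These are standard Python logging config files; anything wanting the same behavior can load one with the stdlib (the path below is an assumed install location):

import logging
import logging.config

logging.config.fileConfig("/etc/keystone/logging.conf")  # assumed path

# logger_root is WARNING and handler_file is DEBUG, so this lands in
# /var/log/keystone/keystone.log via handler_file:
logging.getLogger("keystone.token").warning("token cache miss")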


@ -0,0 +1,39 @@
[loggers]
keys=root
[formatters]
keys=normal,normal_with_name,debug
[handlers]
keys=production,file,devel
[logger_root]
level=WARNING
handlers=file
[handler_production]
class=handlers.SysLogHandler
level=ERROR
formatter=normal_with_name
args=(('localhost', handlers.SYSLOG_UDP_PORT), handlers.SysLogHandler.LOG_USER)
[handler_file]
class=FileHandler
level=DEBUG
formatter=normal_with_name
args=('keystone.log', 'a')
[handler_devel]
class=StreamHandler
level=NOTSET
formatter=debug
args=(sys.stdout,)
[formatter_normal]
format=%(asctime)s %(levelname)s %(message)s
[formatter_normal_with_name]
format=(%(name)s): %(asctime)s %(levelname)s %(message)s
[formatter_debug]
format=(%(name)s): %(asctime)s %(levelname)s %(module)s %(funcName)s %(message)s


@ -0,0 +1,171 @@
{
"admin_required": "role:admin or is_admin:1",
"service_role": "role:service",
"service_or_admin": "rule:admin_required or rule:service_role",
"owner" : "user_id:%(user_id)s",
"admin_or_owner": "rule:admin_required or rule:owner",
"default": "rule:admin_required",
"identity:get_region": "",
"identity:list_regions": "",
"identity:create_region": "rule:admin_required",
"identity:update_region": "rule:admin_required",
"identity:delete_region": "rule:admin_required",
"identity:get_service": "rule:admin_required",
"identity:list_services": "rule:admin_required",
"identity:create_service": "rule:admin_required",
"identity:update_service": "rule:admin_required",
"identity:delete_service": "rule:admin_required",
"identity:get_endpoint": "rule:admin_required",
"identity:list_endpoints": "rule:admin_required",
"identity:create_endpoint": "rule:admin_required",
"identity:update_endpoint": "rule:admin_required",
"identity:delete_endpoint": "rule:admin_required",
"identity:get_domain": "rule:admin_required",
"identity:list_domains": "rule:admin_required",
"identity:create_domain": "rule:admin_required",
"identity:update_domain": "rule:admin_required",
"identity:delete_domain": "rule:admin_required",
"identity:get_project": "rule:admin_required",
"identity:list_projects": "rule:admin_required",
"identity:list_user_projects": "rule:admin_or_owner",
"identity:create_project": "rule:admin_required",
"identity:update_project": "rule:admin_required",
"identity:delete_project": "rule:admin_required",
"identity:get_user": "rule:admin_required",
"identity:list_users": "rule:admin_required",
"identity:create_user": "rule:admin_required",
"identity:update_user": "rule:admin_required",
"identity:delete_user": "rule:admin_required",
"identity:change_password": "rule:admin_or_owner",
"identity:get_group": "rule:admin_required",
"identity:list_groups": "rule:admin_required",
"identity:list_groups_for_user": "rule:admin_or_owner",
"identity:create_group": "rule:admin_required",
"identity:update_group": "rule:admin_required",
"identity:delete_group": "rule:admin_required",
"identity:list_users_in_group": "rule:admin_required",
"identity:remove_user_from_group": "rule:admin_required",
"identity:check_user_in_group": "rule:admin_required",
"identity:add_user_to_group": "rule:admin_required",
"identity:get_credential": "rule:admin_required",
"identity:list_credentials": "rule:admin_required",
"identity:create_credential": "rule:admin_required",
"identity:update_credential": "rule:admin_required",
"identity:delete_credential": "rule:admin_required",
"identity:ec2_get_credential": "rule:admin_or_owner",
"identity:ec2_list_credentials": "rule:admin_or_owner",
"identity:ec2_create_credential": "rule:admin_or_owner",
"identity:ec2_delete_credential": "rule:admin_required or (rule:owner and user_id:%(target.credential.user_id)s)",
"identity:get_role": "rule:admin_required",
"identity:list_roles": "rule:admin_required",
"identity:create_role": "rule:admin_required",
"identity:update_role": "rule:admin_required",
"identity:delete_role": "rule:admin_required",
"identity:check_grant": "rule:admin_required",
"identity:list_grants": "rule:admin_required",
"identity:create_grant": "rule:admin_required",
"identity:revoke_grant": "rule:admin_required",
"identity:list_role_assignments": "rule:admin_required",
"identity:get_policy": "rule:admin_required",
"identity:list_policies": "rule:admin_required",
"identity:create_policy": "rule:admin_required",
"identity:update_policy": "rule:admin_required",
"identity:delete_policy": "rule:admin_required",
"identity:check_token": "rule:admin_required",
"identity:validate_token": "rule:service_or_admin",
"identity:validate_token_head": "rule:service_or_admin",
"identity:revocation_list": "rule:service_or_admin",
"identity:revoke_token": "rule:admin_or_owner",
"identity:create_trust": "user_id:%(trust.trustor_user_id)s",
"identity:get_trust": "rule:admin_or_owner",
"identity:list_trusts": "",
"identity:list_roles_for_trust": "",
"identity:check_role_for_trust": "",
"identity:get_role_for_trust": "",
"identity:delete_trust": "",
"identity:create_consumer": "rule:admin_required",
"identity:get_consumer": "rule:admin_required",
"identity:list_consumers": "rule:admin_required",
"identity:delete_consumer": "rule:admin_required",
"identity:update_consumer": "rule:admin_required",
"identity:authorize_request_token": "rule:admin_required",
"identity:list_access_token_roles": "rule:admin_required",
"identity:get_access_token_role": "rule:admin_required",
"identity:list_access_tokens": "rule:admin_required",
"identity:get_access_token": "rule:admin_required",
"identity:delete_access_token": "rule:admin_required",
"identity:list_projects_for_endpoint": "rule:admin_required",
"identity:add_endpoint_to_project": "rule:admin_required",
"identity:check_endpoint_in_project": "rule:admin_required",
"identity:list_endpoints_for_project": "rule:admin_required",
"identity:remove_endpoint_from_project": "rule:admin_required",
"identity:create_endpoint_group": "rule:admin_required",
"identity:list_endpoint_groups": "rule:admin_required",
"identity:get_endpoint_group": "rule:admin_required",
"identity:update_endpoint_group": "rule:admin_required",
"identity:delete_endpoint_group": "rule:admin_required",
"identity:list_projects_associated_with_endpoint_group": "rule:admin_required",
"identity:list_endpoints_associated_with_endpoint_group": "rule:admin_required",
"identity:list_endpoint_groups_for_project": "rule:admin_required",
"identity:add_endpoint_group_to_project": "rule:admin_required",
"identity:remove_endpoint_group_from_project": "rule:admin_required",
"identity:create_identity_provider": "rule:admin_required",
"identity:list_identity_providers": "rule:admin_required",
"identity:get_identity_providers": "rule:admin_required",
"identity:update_identity_provider": "rule:admin_required",
"identity:delete_identity_provider": "rule:admin_required",
"identity:create_protocol": "rule:admin_required",
"identity:update_protocol": "rule:admin_required",
"identity:get_protocol": "rule:admin_required",
"identity:list_protocols": "rule:admin_required",
"identity:delete_protocol": "rule:admin_required",
"identity:create_mapping": "rule:admin_required",
"identity:get_mapping": "rule:admin_required",
"identity:list_mappings": "rule:admin_required",
"identity:delete_mapping": "rule:admin_required",
"identity:update_mapping": "rule:admin_required",
"identity:get_auth_catalog": "",
"identity:get_auth_projects": "",
"identity:get_auth_domains": "",
"identity:list_projects_for_groups": "",
"identity:list_domains_for_groups": "",
"identity:list_revoke_events": "",
"identity:create_policy_association_for_endpoint": "rule:admin_required",
"identity:check_policy_association_for_endpoint": "rule:admin_required",
"identity:delete_policy_association_for_endpoint": "rule:admin_required",
"identity:create_policy_association_for_service": "rule:admin_required",
"identity:check_policy_association_for_service": "rule:admin_required",
"identity:delete_policy_association_for_service": "rule:admin_required",
"identity:create_policy_association_for_region_and_service": "rule:admin_required",
"identity:check_policy_association_for_region_and_service": "rule:admin_required",
"identity:delete_policy_association_for_region_and_service": "rule:admin_required",
"identity:get_policy_for_endpoint": "rule:admin_required",
"identity:list_endpoints_for_policy": "rule:admin_required"
}
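
Checks like "user_id:%(user_id)s" compare a caller credential against a value pulled from the target of the API call; "identity:ec2_delete_credential" above shows the target.credential.* form. A toy illustration of that binding (not oslo.policy):

def owner_check(check, creds, target):
    attr, _, fmt = check.partition(":")   # "user_id", "%(user_id)s"
    expected = fmt % target               # fill from the call's target
    return creds.get(attr) == expected

creds = {"user_id": "alice"}
target = {"user_id": "alice"}             # e.g. the record being acted on
print(owner_check("user_id:%(user_id)s", creds, target))  # True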


@ -0,0 +1,32 @@
[composite:neutron]
use = egg:Paste#urlmap
/: neutronversions
/v2.0: neutronapi_v2_0
[composite:neutronapi_v2_0]
use = call:neutron.auth:pipeline_factory
noauth = extensions neutronapiapp_v2_0
keystone = authtoken keystonecontext extensions neutronapiapp_v2_0
[filter:keystonecontext]
paste.filter_factory = neutron.auth:NeutronKeystoneContext.factory
[filter:authtoken]
paste.filter_factory = keystoneclient.middleware.auth_token:filter_factory
admin_user=neutron
auth_port=35357
admin_password=<REDACTED>
auth_protocol=http
auth_version = v2.0
auth_uri=http://<CONTROLLER_IP>:5000/
admin_tenant_name=services
auth_host=<CONTROLLER_IP>
[filter:extensions]
paste.filter_factory = neutron.api.extensions:plugin_aware_extension_middleware_factory
[app:neutronversions]
paste.app_factory = neutron.api.versions:Versions.factory
[app:neutronapiapp_v2_0]
paste.app_factory = neutron.api.v2.router:APIRouter.factory


@ -0,0 +1,80 @@
[DEFAULT]
# Show debugging output in log (sets DEBUG log level output)
# debug = False
debug = True
# The DHCP agent will resync its state with Neutron to recover from any
# transient notification or rpc errors. The interval is number of
# seconds between attempts.
# resync_interval = 5
resync_interval = 30
# The DHCP agent requires an interface driver be set. Choose the one that best
# matches your plugin.
# interface_driver =
# Example of interface_driver option for OVS based plugins(OVS, Ryu, NEC, NVP,
# BigSwitch/Floodlight)
# interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
# Use veth for an OVS interface or not.
# Support kernels with limited namespace support
# (e.g. RHEL 6.5) so long as ovs_use_veth is set to True.
# ovs_use_veth = False
# Example of interface_driver option for LinuxBridge
# interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver
# The agent can use other DHCP drivers. Dnsmasq is the simplest and requires
# no additional setup of the DHCP server.
# dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
# Allow overlapping IP (must have a kernel built with CONFIG_NET_NS=y and an
# iproute2 package that supports namespaces).
# use_namespaces = True
use_namespaces = True
# The DHCP server can assist with providing metadata support on isolated
# networks. Setting this value to True will cause the DHCP server to append
# specific host routes to the DHCP request. The metadata service will only
# be activated when the subnet gateway_ip is None. The guest instance must
# be configured to request host routes via DHCP (Option 121).
# enable_isolated_metadata = False
enable_isolated_metadata = True
# Allows for serving metadata requests coming from a dedicated metadata
# access network whose cidr is 169.254.169.254/16 (or larger prefix), and
# is connected to a Neutron router from which the VMs send metadata
# requests. In this case DHCP Option 121 will not be injected in VMs, as
# they will be able to reach 169.254.169.254 through a router.
# This option requires enable_isolated_metadata = True
# enable_metadata_network = False
enable_metadata_network = True
# Number of threads to use during sync process. Should not exceed connection
# pool size configured on server.
# num_sync_threads = 4
# Location to store DHCP server config files
# dhcp_confs = $state_path/dhcp
# Domain to use for building the hostnames
# dhcp_domain = openstacklocal
# Override the default dnsmasq settings with this file
dnsmasq_config_file = /etc/neutron/dnsmasq.conf
# Use another DNS server before any in /etc/resolv.conf.
# dnsmasq_dns_server =
# Limit number of leases to prevent a denial-of-service.
# dnsmasq_lease_max = 16777216
# Location to DHCP lease relay UNIX domain socket
# dhcp_lease_relay_socket = $state_path/dhcp/lease_relay
# Location of Metadata Proxy UNIX domain socket
# metadata_proxy_socket = $state_path/metadata_proxy
root_helper=sudo neutron-rootwrap /etc/neutron/rootwrap.conf
state_path=/var/lib/neutron
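
The "specific host routes" mentioned for isolated metadata ride in DHCP option 121 (RFC 3442 classless static routes). A sketch of the wire encoding dnsmasq would hand out for the metadata route (addresses are examples):

import ipaddress

def encode_route(cidr, router):
    net = ipaddress.ip_network(cidr)
    sig = (net.prefixlen + 7) // 8   # significant destination octets
    return (bytes([net.prefixlen])
            + net.network_address.packed[:sig]
            + ipaddress.ip_address(router).packed)

opt121 = encode_route("169.254.169.254/32", "10.0.0.2")
print(opt121.hex())  # 20a9fea9fe0a000002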


@ -0,0 +1,3 @@
domain=csail.mit.edu
dhcp-boot=pxelinux.0,<PXE_SERVER>.csail.mit.edu,<PXE_SERVER_IP>
dhcp-option=26,9000


@ -0,0 +1,3 @@
[fwaas]
#driver = neutron.services.firewall.drivers.linux.iptables_fwaas.IptablesFwaasDriver
#enabled = True


@ -0,0 +1,75 @@
[DEFAULT]
# Show debugging output in log (sets DEBUG log level output)
# debug = False
debug = False
# L3 requires that an interface driver be set. Choose the one that best
# matches your plugin.
# interface_driver =
# Example of interface_driver option for OVS based plugins (OVS, Ryu, NEC)
# that supports L3 agent
# interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
# Use veth for an OVS interface or not.
# Support kernels with limited namespace support
# (e.g. RHEL 6.5) so long as ovs_use_veth is set to True.
# ovs_use_veth = False
# Example of interface_driver option for LinuxBridge
# interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver
# Allow overlapping IP (must have a kernel built with CONFIG_NET_NS=y and an
# iproute2 package that supports namespaces).
# use_namespaces = True
use_namespaces = True
# If use_namespaces is set to False then the agent can only configure one router.
# This is done by setting the specific router_id.
# router_id =
# Each L3 agent can be associated with at most one external network. This
# value should be set to the UUID of that external network. If empty,
# the agent will enforce that only a single external network exists and
# use that external network id
# gateway_external_network_id =
# Indicates that this L3 agent should also handle routers that do not have
# an external network gateway configured. This option should be True only
# for a single agent in a Neutron deployment, and may be False for all agents
# if all routers must have an external network gateway
# handle_internal_only_routers = True
handle_internal_only_routers = True
# Name of bridge used for external network traffic. This should be set to
# empty value for the linux bridge
# external_network_bridge = br-ex
external_network_bridge = br-ex
# TCP Port used by Neutron metadata server
# metadata_port = 9697
metadata_port = 9697
# Send this many gratuitous ARPs for HA setup. Set it below or equal to 0
# to disable this feature.
# send_arp_for_ha = 3
send_arp_for_ha = 3
# seconds between re-sync routers' data if needed
# periodic_interval = 40
periodic_interval = 40
# seconds to start to sync routers' data after
# starting agent
# periodic_fuzzy_delay = 5
periodic_fuzzy_delay = 5
# enable_metadata_proxy, which is true by default, can be set to False
# if the Nova metadata server is not available
# enable_metadata_proxy = True
enable_metadata_proxy = True
# Location of Metadata Proxy UNIX domain socket
# metadata_proxy_socket = $state_path/metadata_proxy


@ -0,0 +1,31 @@
[DEFAULT]
# Show debugging output in log (sets DEBUG log level output).
# debug = False
debug = True
# The LBaaS agent will resync its state with Neutron to recover from any
# transient notification or rpc errors. The interval is number of
# seconds between attempts.
periodic_interval = 10
# LBaas requires an interface driver be set. Choose the one that best
# matches your plugin.
# interface_driver =
# Example of interface_driver option for OVS based plugins (OVS, Ryu, NEC, NVP,
# BigSwitch/Floodlight)
interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
# Use veth for an OVS interface or not.
# Support kernels with limited namespace support
# (e.g. RHEL 6.5) so long as ovs_use_veth is set to True.
# ovs_use_veth = False
# Example of interface_driver option for LinuxBridge
# interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver
# The agent requires a driver to manage the loadbalancer. HAProxy is the
# opensource version.
device_driver = neutron.services.loadbalancer.drivers.haproxy.namespace_driver.HaproxyNSDriver
# The user group
# user_group = nogroup


@ -0,0 +1,44 @@
[DEFAULT]
# Show debugging output in log (sets DEBUG log level output)
#debug = True
debug = False
metadata_workers = 12
# The Neutron user information for accessing the Neutron API.
auth_url = http://<CONTROLLER_IP>:35357/v2.0
auth_region = RegionOne
admin_tenant_name = services
admin_user = neutron
admin_password = <REDACTED>
# cache requests in memory for 5 sec; this was a juno backport &
# leaving it disabled (the icehouse default) kills performance (and
# possibly the service under load).
# A default_ttl=0 parameter causes cache entries to never expire;
# otherwise default_ttl specifies the time in seconds a cache entry is valid for.
#
# The problem:
# https://bugs.launchpad.net/cloud-archive/+bug/1361357
#
# the cause https://review.openstack.org/#/c/95491/
cache_url = memory://?default_ttl=20
# Network service endpoint type to pull from the keystone catalog
# endpoint_type = adminURL
# IP address used by Nova metadata server
# nova_metadata_ip = 127.0.0.1
nova_metadata_ip = 127.0.0.1
# TCP Port used by Nova metadata server
# nova_metadata_port = 8775
nova_metadata_port = 8775
# When proxying metadata requests, Neutron signs the Instance-ID header with a
# shared secret to prevent spoofing. You may select any string for a secret,
# but it must match here and in the configuration used by the Nova Metadata
# Server. NOTE: Nova uses a different key: neutron_metadata_proxy_shared_secret
# metadata_proxy_shared_secret =
# Location of Metadata Proxy UNIX domain socket
# metadata_proxy_socket = $state_path/metadata_proxy
metadata_proxy_shared_secret=<REDACTED>
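
The shared secret signs the instance identity on proxied metadata requests; the scheme is, to our understanding, an HMAC-SHA256 hexdigest of the instance UUID sent as X-Instance-ID-Signature (placeholder values below):

import hashlib
import hmac

secret = b"<shared secret>"                            # placeholder
instance_id = b"00000000-0000-4000-8000-000000000000"  # placeholder UUID

sig = hmac.new(secret, instance_id, hashlib.sha256).hexdigest()
# sent as X-Instance-ID-Signature alongside X-Instance-ID; nova recomputes
# it with neutron_metadata_proxy_shared_secret and compares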


@ -0,0 +1,412 @@
[DEFAULT]
# Default log level is INFO
# verbose and debug have the same result.
# One of them will set DEBUG log level output
debug = False
#debug = True
verbose = False
#verbose = True
notify_nova_on_port_status_changes = True
notify_nova_on_port_data_changes = True
nova_url = http://<CONTROLLER>.csail.mit.edu:8774/v2
nova_admin_username = nova
# note: this is the tenant ID, not the name (the services tenant)
nova_admin_tenant_id = <REDACTED>
nova_admin_password = <REDACTED>
nova_admin_auth_url = http://<CONTROLLER>.csail.mit.edu:35357/v2.0
# Where to store Neutron state files. This directory must be writable by the
# user executing the agent.
state_path = /var/lib/neutron
# Where to store lock files
lock_path = $state_path/lock
# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s
# log_date_format = %Y-%m-%d %H:%M:%S
# use_syslog -> syslog
# log_file and log_dir -> log_dir/log_file
# (not log_file) and log_dir -> log_dir/{binary_name}.log
# use_stderr -> stderr
# (not use_stderr) and (not log_file) -> stdout
# publish_errors -> notification system
# use_syslog = False
use_syslog = False
# syslog_log_facility = LOG_USER
# use_stderr = True
# log_file =
# log_dir =
# publish_errors = False
# Address to bind the API server
# bind_host = 0.0.0.0
bind_host = 0.0.0.0
# Port to bind the API server to
# bind_port = 9696
bind_port = 9696
# Path to the extensions. Note that this can be a colon-separated list of
# paths. For example:
# api_extensions_path = extensions:/path/to/more/extensions:/even/more/extensions
# The __path__ of neutron.extensions is appended to this, so if your
# extensions are in there you don't need to specify them here
# api_extensions_path =
# Neutron plugin provider module
core_plugin = ml2
# Advanced service modules
# service_plugins =
service_plugins = router,lbaas
# Paste configuration file
# api_paste_config = api-paste.ini
# The strategy to be used for auth.
# Supported values are 'keystone'(default), 'noauth'.
# auth_strategy = keystone
auth_strategy = keystone
# Base MAC address. The first 3 octets will remain unchanged. If the
# 4th octet is not 00, it will also be used. The others will be
# randomly generated.
# 3 octet
# base_mac = fa:16:3e:00:00:00
base_mac = fa:16:3e:00:00:00
# 4 octet
# base_mac = fa:16:3e:4f:00:00
# Maximum amount of retries to generate a unique MAC address
# mac_generation_retries = 16
mac_generation_retries = 16
# DHCP Lease duration (in seconds)
dhcp_lease_duration = 86400
# dhcp_lease_duration = 1200
# Allow sending resource operation notification to DHCP agent
# dhcp_agent_notification = True
# Enable or disable bulk create/update/delete operations
# allow_bulk = True
allow_bulk = True
# Enable or disable pagination
# allow_pagination = False
# Enable or disable sorting
# allow_sorting = False
# Enable or disable overlapping IPs for subnets
# Attention: the following parameter MUST be set to False if Neutron is
# being used in conjunction with nova security groups
# allow_overlapping_ips = False
allow_overlapping_ips = False
# Ensure that configured gateway is on subnet
force_gateway_on_subnet = False
# RPC configuration options. Defined in rpc __init__
# The messaging module to use, defaults to kombu.
# rpc_backend = neutron.openstack.common.rpc.impl_kombu
rpc_backend = neutron.openstack.common.rpc.impl_kombu
# Size of RPC thread pool
# rpc_thread_pool_size = 64
# Size of RPC connection pool
# rpc_conn_pool_size = 30
# Seconds to wait for a response from call or multicall
# rpc_response_timeout = 60
# Seconds to wait before a cast expires (TTL). Only supported by impl_zmq.
# rpc_cast_timeout = 30
# Modules of exceptions that are permitted to be recreated
# upon receiving exception data from an rpc call.
# allowed_rpc_exception_modules = neutron.openstack.common.exception, nova.exception
# AMQP exchange to connect to if using RabbitMQ or QPID
# control_exchange = neutron
control_exchange = neutron
# If passed, use a fake RabbitMQ provider
# fake_rabbit = False
# Configuration options if sending notifications via kombu rpc (these are
# the defaults)
# SSL version to use (valid only if SSL enabled)
# kombu_ssl_version =
# SSL key file (valid only if SSL enabled)
# kombu_ssl_keyfile =
# SSL cert file (valid only if SSL enabled)
# kombu_ssl_certfile =
# SSL certification authority file (valid only if SSL enabled)
# kombu_ssl_ca_certs =
# IP address of the RabbitMQ installation
# rabbit_host = localhost
rabbit_host = <CONTROLLER_IP>
# Password of the RabbitMQ server
# rabbit_password = guest
rabbit_password = <REDACTED>
# Port where RabbitMQ server is running/listening
# rabbit_port = 5672
rabbit_port = 5672
# RabbitMQ single or HA cluster (host:port pairs, e.g.: host1:5672, host2:5672)
# rabbit_hosts is defaulted to '$rabbit_host:$rabbit_port'
# rabbit_hosts = localhost:5672
#rabbit_hosts = <CONTROLLER_IP>:5672
# User ID used for RabbitMQ connections
# rabbit_userid = guest
rabbit_userid = <REDACTED>
# Virtual host to use for RabbitMQ connections.
# rabbit_virtual_host = /
rabbit_virtual_host = /
# Maximum retries with trying to connect to RabbitMQ
# (the default of 0 implies an infinite retry count)
# rabbit_max_retries = 0
# RabbitMQ connection retry interval
# rabbit_retry_interval = 1
# Use HA queues in RabbitMQ (x-ha-policy: all). You need to
# wipe RabbitMQ database when changing this option. (boolean value)
# rabbit_ha_queues = false
rabbit_ha_queues = False
# QPID
# rpc_backend=neutron.openstack.common.rpc.impl_qpid
# Qpid broker hostname
# qpid_hostname = localhost
# Qpid broker port
# qpid_port = 5672
# Qpid single or HA cluster (host:port pairs, e.g.: host1:5672, host2:5672)
# qpid_hosts is defaulted to '$qpid_hostname:$qpid_port'
# qpid_hosts = localhost:5672
# Username for qpid connection
# qpid_username = ''
# Password for qpid connection
# qpid_password = ''
# Space separated list of SASL mechanisms to use for auth
# qpid_sasl_mechanisms = ''
# Seconds between connection keepalive heartbeats
# qpid_heartbeat = 60
# Transport to use, either 'tcp' or 'ssl'
# qpid_protocol = tcp
# Disable Nagle algorithm
# qpid_tcp_nodelay = True
# ZMQ
# rpc_backend=neutron.openstack.common.rpc.impl_zmq
# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP.
# The "host" option should point or resolve to this address.
# rpc_zmq_bind_address = *
# ============ Notification System Options =====================
# Notifications can be sent when network/subnet/port are created, updated or deleted.
# There are three methods of sending notifications: logging (via the
# log_file directive), rpc (via a message queue) and
# noop (no notifications sent, the default)
# Notification_driver can be defined multiple times
# Do nothing driver
# notification_driver = neutron.openstack.common.notifier.no_op_notifier
# Logging driver
# notification_driver = neutron.openstack.common.notifier.log_notifier
# RPC driver. DHCP agents need it.
# notification_driver = neutron.openstack.common.notifier.rpc_notifier
# default_notification_level is used to form actual topic name(s) or to set logging level
# default_notification_level = INFO
# default_publisher_id is a part of the notification payload
# host = myhost.com
# default_publisher_id = $host
# Defined in rpc_notifier, can be comma separated values.
# The actual topic names will be %s.%(default_notification_level)s
# notification_topics = notifications
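# e.g. with notification_topics = notifications and default_notification_level
# INFO, messages are published to the 'notifications.info' topic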
# Default maximum number of items returned in a single response.
# A value of 'infinite' or a negative value means no limit; otherwise
# the value must be greater than 0. If the number of items requested is
# greater than pagination_max_limit, the server will return only
# pagination_max_limit items.
# pagination_max_limit = -1
# Maximum number of DNS nameservers per subnet
# max_dns_nameservers = 5
# Maximum number of host routes per subnet
# max_subnet_host_routes = 20
# Maximum number of fixed ips per port
# max_fixed_ips_per_port = 5
# =========== items for agent management extension =============
# Seconds to regard the agent as down.
# agent_down_time = 5
agent_down_time = 75
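# (agents here report every 30 seconds -- see report_interval under [agent] --
# so 75 seconds allows roughly two missed reports before an agent is marked down)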
# =========== end of items for agent management extension =====
# =========== items for agent scheduler extension =============
# Driver to use for scheduling network to DHCP agent
# network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.ChanceScheduler
# Driver to use for scheduling router to a default L3 agent
# router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.ChanceScheduler
# Driver to use for scheduling a loadbalancer pool to an lbaas agent
# loadbalancer_pool_scheduler_driver = neutron.services.loadbalancer.agent_scheduler.ChanceScheduler
# Allow auto scheduling of networks to the DHCP agent. Non-hosted networks
# will be scheduled to the first DHCP agent that sends a get_active_networks
# message to the neutron server
# network_auto_schedule = True
# Allow auto scheduling of routers to the L3 agent. Non-hosted routers will be
# scheduled to the first L3 agent that sends a sync_routers message to the neutron server
# router_auto_schedule = True
# Number of DHCP agents scheduled to host a network. This enables redundant
# DHCP agents for configured networks.
# dhcp_agents_per_network = 1
dhcp_agents_per_network = 1
# =========== end of items for agent scheduler extension =====
# =========== WSGI parameters related to the API server ==============
# Number of separate worker processes to spawn. The default, 0, runs the
# worker thread in the current process. Greater than 0 launches that number of
# child processes as workers. The parent process manages them.
api_workers = 16
# Sets the value of TCP_KEEPIDLE in seconds to use for each server socket when
# starting API server. Not supported on OS X.
# tcp_keepidle = 600
# Number of seconds to keep retrying to listen
# retry_until_window = 30
# Number of backlog requests to configure the socket with.
# backlog = 4096
# Enable SSL on the API server
# use_ssl = False
# Certificate file to use when starting API server securely
# ssl_cert_file = /path/to/certfile
# Private key file to use when starting API server securely
# ssl_key_file = /path/to/keyfile
# CA certificate file to use when starting API server securely to
# verify connecting clients. This is an optional parameter only required if
# API clients need to authenticate to the API server using SSL certificates
# signed by a trusted CA
# ssl_ca_file = /path/to/cafile
# ======== end of WSGI parameters related to the API server ==========
log_dir=/var/log/neutron
[quotas]
# resource name(s) that are supported in quota features
# quota_items = network,subnet,port
# default number of resources allowed per tenant; a negative value means unlimited
# default_quota = -1
# number of networks allowed per tenant; a negative value means unlimited
# quota_network = 10
# number of subnets allowed per tenant; a negative value means unlimited
# quota_subnet = 10
# number of ports allowed per tenant; a negative value means unlimited
# quota_port = 50
quota_port=-1
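# (a negative value means unlimited ports per tenant)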
# number of security groups allowed per tenant; a negative value means unlimited
# quota_security_group = 10
# number of security group rules allowed per tenant; a negative value means unlimited
# quota_security_group_rule = 100
# default driver to use for quota checks
# quota_driver = neutron.db.quota_db.DbQuotaDriver
[agent]
# Use "sudo neutron-rootwrap /etc/neutron/rootwrap.conf" to use the real
# root filter facility.
# Change to "sudo" to skip the filtering and just run the command directly
root_helper = sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf
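# agents then prefix privileged commands with this helper, e.g. (illustrative):
#   sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf ovs-vsctl list-ports br-int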
# =========== items for agent management extension =============
# seconds between nodes reporting state to server, should be less than
# agent_down_time
# report_interval = 4
report_interval = 30
# =========== end of items for agent management extension =====
[keystone_authtoken]
auth_host = <CONTROLLER_IP>
auth_port = 35357
auth_protocol = http
auth_version = v2.0
admin_tenant_name = services
admin_user = neutron
admin_password = <REDACTED>
signing_dir = $state_path/keystone-signing
auth_uri=http://<CONTROLLER_IP>:5000/v2.0
[database]
# This line MUST be changed to actually run the plugin.
# Example:
# connection = mysql://root:pass@127.0.0.1:3306/neutron
# Replace 127.0.0.1 above with the IP address of the database used by the
# main neutron server. (Leave it as is if the database runs on this host.)
connection = mysql://neutron:<REDACTED>@<CONTROLLER_IP>/neutron
# The SQLAlchemy connection string used to connect to the slave database
# slave_connection =
# Database reconnection retry times - in the event connectivity is lost,
# a value of -1 implies an infinite retry count
# max_retries = 10
max_retries = 10
# Database reconnection interval in seconds - if the initial connection to the
# database fails
# retry_interval = 10
retry_interval = 10
# Minimum number of SQL connections to keep open in a pool
# min_pool_size = 1
# Maximum number of SQL connections to keep open in a pool
# max_pool_size = 10
# Timeout in seconds before idle sql connections are reaped
# idle_timeout = 3600
idle_timeout = 3600
# If set, use this value for max_overflow with sqlalchemy
# max_overflow = 20
# Verbosity of SQL debugging information. 0=None, 100=Everything
# connection_debug = 0
# Add python stack traces to SQL as comment strings
# connection_trace = False
# If set, use this value for pool_timeout with sqlalchemy
# pool_timeout = 10
[service_providers]
# Specify service providers (drivers) for advanced services like loadbalancer, VPN, Firewall.
# Must be in form:
# service_provider=<service_type>:<name>:<driver>[:default]
# The list of allowed service types includes LOADBALANCER, FIREWALL, VPN
# Combination of <service type> and <name> must be unique; <driver> must also be unique
# This is a multiline option; example for the default provider:
# service_provider=LOADBALANCER:name:lbaas_plugin_driver_path:default
# example of non-default provider:
# service_provider=FIREWALL:name2:firewall_driver_path
# --- Reference implementations ---
service_provider=LOADBALANCER:Haproxy:neutron.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default
@ -0,0 +1,88 @@
[ovs]
network_vlan_ranges=trunk:2112:2114
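# (i.e. VLAN tags 2112-2114 on the 'trunk' physical network are available for allocation)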
local_ip=<CONTROLLER_IP>
enable_tunneling=True
integration_bridge=br-int
tunnel_id_ranges=1:1000
tunnel_bridge=br-tun
tenant_network_type=gre
bridge_mappings=trunk:eth1-br
[agent]
tunnel_types=gre
l2_population=false
polling_interval=30
veth_mtu=9134
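# (presumably sized to leave tunnel-encapsulation headroom above the
# 9000-byte MTU used inside instances)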
[securitygroup]
enable_security_group=true
firewall_driver=neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
[ml2]
# (ListOpt) List of network type driver entrypoints to be loaded from
# the neutron.ml2.type_drivers namespace.
#
# type_drivers = local,flat,vlan,gre,vxlan
# Example: type_drivers = flat,vlan,gre,vxlan
type_drivers=vlan,gre
# (ListOpt) Ordered list of network_types to allocate as tenant
# networks. The default value 'local' is useful for single-box testing
# but provides no connectivity between hosts.
#
# tenant_network_types = local
# Example: tenant_network_types = vlan,gre,vxlan
tenant_network_types=gre
# (ListOpt) Ordered list of networking mechanism driver entrypoints
# to be loaded from the neutron.ml2.mechanism_drivers namespace.
# mechanism_drivers =
# Example: mechanism_drivers = openvswitch,mlnx
# Example: mechanism_drivers = arista
# Example: mechanism_drivers = cisco,logger
# Example: mechanism_drivers = openvswitch,brocade
# Example: mechanism_drivers = linuxbridge,brocade
mechanism_drivers = openvswitch
[ml2_type_flat]
# (ListOpt) List of physical_network names with which flat networks
# can be created. Use * to allow flat networks with arbitrary
# physical_network names.
#
# flat_networks =
# Example:flat_networks = physnet1,physnet2
# Example:flat_networks = *
[ml2_type_vlan]
# (ListOpt) List of <physical_network>[:<vlan_min>:<vlan_max>] tuples
# specifying physical_network names usable for VLAN provider and
# tenant networks, as well as ranges of VLAN tags on each
# physical_network available for allocation as tenant networks.
#
# network_vlan_ranges =
# Example: network_vlan_ranges = physnet1:1000:2999,physnet2
network_vlan_ranges=trunk:2112:2114
[ml2_type_gre]
# (ListOpt) Comma-separated list of <tun_min>:<tun_max> tuples enumerating ranges of GRE tunnel IDs that are available for tenant network allocation
# tunnel_id_ranges =
tunnel_id_ranges=1:1000
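# (i.e. GRE tunnel keys 1-1000 are available for tenant network allocation)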
[ml2_type_vxlan]
# (ListOpt) Comma-separated list of <vni_min>:<vni_max> tuples enumerating
# ranges of VXLAN VNI IDs that are available for tenant network allocation.
#
# vni_ranges =
# (StrOpt) Multicast group for the VXLAN interface. When configured, will
# enable sending all broadcast traffic to this multicast group. When left
# unconfigured, will disable multicast VXLAN mode.
#
# vxlan_group =
# Example: vxlan_group = 239.1.1.1
#[securitygroup]
# Controls if neutron security group is enabled or not.
# It should be false when you use nova security group.
# enable_security_group = True
@ -0,0 +1,71 @@
[ml2]
# (ListOpt) List of network type driver entrypoints to be loaded from
# the neutron.ml2.type_drivers namespace.
#
# type_drivers = local,flat,vlan,gre,vxlan
# Example: type_drivers = flat,vlan,gre,vxlan
# (ListOpt) Ordered list of network_types to allocate as tenant
# networks. The default value 'local' is useful for single-box testing
# but provides no connectivity between hosts.
#
# tenant_network_types = local
# Example: tenant_network_types = vlan,gre,vxlan
# (ListOpt) Ordered list of networking mechanism driver entrypoints
# to be loaded from the neutron.ml2.mechanism_drivers namespace.
# mechanism_drivers =
# Example: mechanism_drivers = openvswitch,mlnx
# Example: mechanism_drivers = arista
# Example: mechanism_drivers = cisco,logger
# Example: mechanism_drivers = openvswitch,brocade
# Example: mechanism_drivers = linuxbridge,brocade
# (ListOpt) Ordered list of extension driver entrypoints
# to be loaded from the neutron.ml2.extension_drivers namespace.
# extension_drivers =
# Example: extension_drivers = anewextensiondriver
[ml2_type_flat]
# (ListOpt) List of physical_network names with which flat networks
# can be created. Use * to allow flat networks with arbitrary
# physical_network names.
#
# flat_networks =
# Example:flat_networks = physnet1,physnet2
# Example:flat_networks = *
[ml2_type_vlan]
# (ListOpt) List of <physical_network>[:<vlan_min>:<vlan_max>] tuples
# specifying physical_network names usable for VLAN provider and
# tenant networks, as well as ranges of VLAN tags on each
# physical_network available for allocation as tenant networks.
#
# network_vlan_ranges =
# Example: network_vlan_ranges = physnet1:1000:2999,physnet2
[ml2_type_gre]
# (ListOpt) Comma-separated list of <tun_min>:<tun_max> tuples enumerating ranges of GRE tunnel IDs that are available for tenant network allocation
# tunnel_id_ranges =
[ml2_type_vxlan]
# (ListOpt) Comma-separated list of <vni_min>:<vni_max> tuples enumerating
# ranges of VXLAN VNI IDs that are available for tenant network allocation.
#
# vni_ranges =
# (StrOpt) Multicast group for the VXLAN interface. When configured, will
# enable sending all broadcast traffic to this multicast group. When left
# unconfigured, will disable multicast VXLAN mode.
#
# vxlan_group =
# Example: vxlan_group = 239.1.1.1
[securitygroup]
# Controls if neutron security group is enabled or not.
# It should be false when you use nova security group.
# enable_security_group = True
# Use ipset to speed-up the iptables security groups. Enabling ipset support
# requires that ipset is installed on L2 agent node.
# enable_ipset = True
@ -0,0 +1,100 @@
# Defines configuration options specific for Arista ML2 Mechanism driver
[ml2_arista]
# (StrOpt) EOS IP address. This is a required field. If not set, all
# communications to Arista EOS will fail.
#
# eapi_host =
# Example: eapi_host = 192.168.0.1
#
# (StrOpt) EOS command API username. This is a required field.
# If not set, all communications to Arista EOS will fail.
#
# eapi_username =
# Example: arista_eapi_username = admin
#
# (StrOpt) EOS command API password. This is a required field.
# If not set, all communications to Arista EOS will fail.
#
# eapi_password =
# Example: eapi_password = my_password
#
# (StrOpt) Defines if hostnames are sent to Arista EOS as FQDNs
# ("node1.domain.com") or as short names ("node1"). This is
# optional. If not set, a value of "True" is assumed.
#
# use_fqdn =
# Example: use_fqdn = True
#
# (IntOpt) Sync interval in seconds between Neutron plugin and EOS.
# This field defines how often the synchronization is performed.
# This is an optional field. If not set, a value of 180 seconds
# is assumed.
#
# sync_interval =
# Example: sync_interval = 60
#
# (StrOpt) Defines Region Name that is assigned to this OpenStack Controller.
# This is useful when multiple OpenStack/Neutron controllers are
# managing the same Arista HW clusters. Note that this name must
# match with the region name registered (or known) to keystone
# service. Authentication with Keystone is performed by EOS.
# This is optional. If not set, a value of "RegionOne" is assumed.
#
# region_name =
# Example: region_name = RegionOne
[l3_arista]
# (StrOpt) Primary host IP address. This is a required field. If not set, all
# communications to Arista EOS will fail. This is the host where the
# primary router is created.
#
# primary_l3_host =
# Example: primary_l3_host = 192.168.10.10
#
# (StrOpt) Primary host username. This is a required field.
# If not set, all communications to Arista EOS will fail.
#
# primary_l3_host_username =
# Example: arista_primary_l3_username = admin
#
# (StrOpt) Primary host password. This is a required field.
# If not set, all communications to Arista EOS will fail.
#
# primary_l3_host_password =
# Example: primary_l3_password = my_password
#
# (StrOpt) IP address of the second Arista switch paired as
# MLAG (Multi-chassis Link Aggregation) with the first.
# This is an optional field; however, if the mlag_config flag is set,
# then it is required. If not set in that case, all
# communications to Arista EOS will fail. If mlag_config is set
# to False, then this field is ignored.
#
# secondary_l3_host =
# Example: secondary_l3_host = 192.168.10.20
#
# (BoolOpt) Defines if Arista switches are configured in MLAG mode
# If yes, all L3 configuration is pushed to both switches
# automatically. If this flag is set, ensure that secondary_l3_host
# is set to the second switch's IP.
# This flag is Optional. If not set, a value of "False" is assumed.
#
# mlag_config =
# Example: mlag_config = True
#
# (BoolOpt) Defines if the router is created in the default VRF or
# a specific VRF. This is optional.
# If not set, a value of "False" is assumed.
#
# Example: use_vrf = True
#
# (IntOpt) Sync interval in seconds between Neutron plugin and EOS.
# This field defines how often the synchronization is performed.
# This is an optional field. If not set, a value of 180 seconds
# is assumed.
#
# l3_sync_interval =
# Example: l3_sync_interval = 60
@ -0,0 +1,15 @@
[ml2_brocade]
# username = <mgmt admin username>
# password = <mgmt admin password>
# address = <switch mgmt ip address>
# ostype = NOS
# osversion = autodetect | n.n.n
# physical_networks = physnet1,physnet2
#
# Example:
# username = admin
# password = password
# address = 10.24.84.38
# ostype = NOS
# osversion = 4.1.1
# physical_networks = physnet1,physnet2
@ -0,0 +1,118 @@
[ml2_cisco]
# (StrOpt) A short prefix to prepend to the VLAN number when creating a
# VLAN interface. For example, if an interface is being created for
# VLAN 2001 it will be named 'q-2001' using the default prefix.
#
# vlan_name_prefix = q-
# Example: vlan_name_prefix = vnet-
# (BoolOpt) A flag to enable round robin scheduling of routers for SVI.
# svi_round_robin = False
#
# (StrOpt) The name of the physical_network managed via the Cisco Nexus Switch.
# This string value must be present in the ml2_conf.ini network_vlan_ranges
# variable.
#
# managed_physical_network =
# Example: managed_physical_network = physnet1
# Cisco Nexus Switch configurations.
# Each switch to be managed by OpenStack Neutron must be configured here.
#
# Cisco Nexus Switch Format.
# [ml2_mech_cisco_nexus:<IP address of switch>]
# <hostname>=<intf_type:port> (1)
# ssh_port=<ssh port> (2)
# username=<credential username> (3)
# password=<credential password> (4)
#
# (1) For each host connected to a port on the switch, specify the hostname
# and the Nexus physical port (interface) it is connected to.
#     Valid intf_type values are 'ethernet' and 'port-channel'.
# The default setting for <intf_type:> is 'ethernet' and need not be
# added to this setting.
# (2) The TCP port for connecting via SSH to manage the switch. This is
# port number 22 unless the switch has been configured otherwise.
# (3) The username for logging into the switch to manage it.
# (4) The password for logging into the switch to manage it.
#
# Example:
# [ml2_mech_cisco_nexus:1.1.1.1]
# compute1=1/1
# compute2=ethernet:1/2
# compute3=port-channel:1
# ssh_port=22
# username=admin
# password=mySecretPassword
[ml2_cisco_apic]
# Hostname:port list of APIC controllers
# apic_hosts = 1.1.1.1:80, 1.1.1.2:8080, 1.1.1.3:80
# Username for the APIC controller
# apic_username = user
# Password for the APIC controller
# apic_password = password
# Whether or not to use SSL for connecting to the APIC controller
# apic_use_ssl = True
# How to map names to APIC: use_uuid or use_name
# apic_name_mapping = use_name
# Names for APIC objects used by Neutron
# Note: When deploying multiple clouds against one APIC,
# these names must be unique between the clouds.
# apic_vmm_domain = openstack
# apic_vlan_ns_name = openstack_ns
# apic_node_profile = openstack_profile
# apic_entity_profile = openstack_entity
# apic_function_profile = openstack_function
# apic_app_profile_name = openstack_app
# Agent timers for State reporting and topology discovery
# apic_sync_interval = 30
# apic_agent_report_interval = 30
# apic_agent_poll_interval = 2
# Specify your network topology.
# This section indicates how your compute nodes are connected to the fabric's
# switches and ports. The format is as follows:
#
# [apic_switch:<switch_id_from_the_apic>]
# <compute_host>,<compute_host> = <switchport_the_host(s)_are_connected_to>
#
# You can have multiple sections, one for each switch in your fabric that is
# participating in OpenStack, e.g.
#
# [apic_switch:17]
# ubuntu,ubuntu1 = 1/10
# ubuntu2,ubuntu3 = 1/11
#
# [apic_switch:18]
# ubuntu5,ubuntu6 = 1/1
# ubuntu7,ubuntu8 = 1/2
# Describe external connectivity.
# In this section you can specify the external network configuration in order
# for the plugin to be able to teach the fabric how to route the internal
# traffic to the outside world. The external connectivity configuration
# format is as follows:
#
# [apic_external_network:<externalNetworkName>]
# switch = <switch_id_from_the_apic>
# port = <switchport_the_external_router_is_connected_to>
# encap = <encapsulation>
# cidr_exposed = <cidr_exposed_to_the_external_router>
# gateway_ip = <ip_of_the_external_gateway>
#
# An example follows:
# [apic_external_network:network_ext]
# switch=203
# port=1/34
# encap=vlan-100
# cidr_exposed=10.10.40.2/16
# gateway_ip=10.10.40.1
@ -0,0 +1,52 @@
# Defines Configuration options for FSL SDN OS Mechanism Driver
# Cloud Resource Discovery (CRD) authorization credentials
[ml2_fslsdn]
#(StrOpt) User name for authentication to CRD.
# e.g.: user12
#
# crd_user_name =
#(StrOpt) Password for authentication to CRD.
# e.g.: secret
#
# crd_password =
#(StrOpt) Tenant name for CRD service.
# e.g.: service
#
# crd_tenant_name =
#(StrOpt) CRD auth URL.
# e.g.: http://127.0.0.1:5000/v2.0/
#
# crd_auth_url =
#(StrOpt) URL for connecting to CRD Service.
# e.g.: http://127.0.0.1:9797
#
# crd_url=
#(IntOpt) Timeout value for connecting to CRD service
# in seconds, e.g.: 30
#
# crd_url_timeout=
#(StrOpt) Region name for connecting to CRD in
# admin context, e.g.: RegionOne
#
# crd_region_name=
#(BoolOpt)If set, ignore any SSL validation issues (boolean value)
# e.g.: False
#
# crd_api_insecure=
#(StrOpt)Authorization strategy for connecting to CRD in admin
# context, e.g.: keystone
#
# crd_auth_strategy=
#(StrOpt)Location of CA certificates file to use for CRD client
# requests.
#
# crd_ca_certificates_file=
@ -0,0 +1,4 @@
[eswitch]
# (StrOpt) Type of Network Interface to allocate for VM:
# mlnx_direct or hostdev according to libvirt terminology
# vnic_type = mlnx_direct
@ -0,0 +1,28 @@
# Defines configuration options specific to the Tail-f NCS Mechanism Driver
[ml2_ncs]
# (StrOpt) Tail-f NCS HTTP endpoint for REST access to the OpenStack
# subtree.
# If this is not set then no HTTP requests will be made.
#
# url =
# Example: url = http://ncs/api/running/services/openstack
# (StrOpt) Username for HTTP basic authentication to NCS.
# This is an optional parameter. If unspecified then no authentication is used.
#
# username =
# Example: username = admin
# (StrOpt) Password for HTTP basic authentication to NCS.
# This is an optional parameter. If unspecified then no authentication is used.
#
# password =
# Example: password = admin
# (IntOpt) Timeout in seconds to wait for NCS HTTP request completion.
# This is an optional parameter, default value is 10 seconds.
#
# timeout =
# Example: timeout = 15
@ -0,0 +1,30 @@
# Configuration for the OpenDaylight MechanismDriver
[ml2_odl]
# (StrOpt) OpenDaylight REST URL
# If this is not set then no HTTP requests will be made.
#
# url =
# Example: url = http://192.168.56.1:8080/controller/nb/v2/neutron
# (StrOpt) Username for HTTP basic authentication to ODL.
#
# username =
# Example: username = admin
# (StrOpt) Password for HTTP basic authentication to ODL.
#
# password =
# Example: password = admin
# (IntOpt) Timeout in seconds to wait for ODL HTTP request completion.
# This is an optional parameter, default value is 10 seconds.
#
# timeout = 10
# Example: timeout = 15
# (IntOpt) Timeout in minutes to wait for a Tomcat session timeout.
# This is an optional parameter, default value is 30 minutes.
#
# session_timeout = 30
# Example: session_timeout = 60
@ -0,0 +1,13 @@
# Defines configuration options specific to the OpenFlow Agent Mechanism Driver
[ovs]
# Please refer to the Open vSwitch agent's configuration options.
[agent]
# (IntOpt) Number of seconds to retry acquiring an Open vSwitch datapath.
# This is an optional parameter, default value is 60 seconds.
#
# get_datapath_retry_times =
# Example: get_datapath_retry_times = 30
# For options other than the above, please refer to the Open vSwitch
# agent's configuration options.
@ -0,0 +1,31 @@
# Defines configuration options for SRIOV NIC Switch MechanismDriver
# and Agent
[ml2_sriov]
# (ListOpt) Comma-separated list of
# supported Vendor PCI Devices, in format vendor_id:product_id
#
# supported_pci_vendor_devs = 15b3:1004, 8086:10c9
# Example: supported_pci_vendor_devs = 15b3:1004
#
# (BoolOpt) Requires running SRIOV neutron agent for port binding
# agent_required = True
[sriov_nic]
# (ListOpt) Comma-separated list of <physical_network>:<network_device>
# tuples mapping physical network names to the agent's node-specific
# physical network device interfaces of SR-IOV physical function to be used
# for VLAN networks. All physical networks listed in network_vlan_ranges on
# the server should have mappings to appropriate interfaces on each agent.
#
# physical_device_mappings =
# Example: physical_device_mappings = physnet1:eth1
#
# (ListOpt) Comma-separated list of <network_device>:<vfs_to_exclude>
# tuples, mapping network_device to the agent's node-specific list of virtual
# functions that should not be used for virtual networking.
# vfs_to_exclude is a semicolon-separated list of virtual
# functions to exclude from network_device. The network_device in the
# mapping should appear in the physical_device_mappings list.
# exclude_devices =
# Example: exclude_devices = eth1:0000:07:00.2; 0000:07:00.3
@ -0,0 +1,169 @@
[ovs]
# (StrOpt) Type of network to allocate for tenant networks. The
# default value 'local' is useful only for single-box testing and
# provides no connectivity between hosts. You MUST either change this
# to 'vlan' and configure network_vlan_ranges below or change this to
# 'gre' or 'vxlan' and configure tunnel_id_ranges below in order for
# tenant networks to provide connectivity between hosts. Set to 'none'
# to disable creation of tenant networks.
#
# tenant_network_type = local
# Example: tenant_network_type = gre
# Example: tenant_network_type = vxlan
# (ListOpt) Comma-separated list of
# <physical_network>[:<vlan_min>:<vlan_max>] tuples enumerating ranges
# of VLAN IDs on named physical networks that are available for
# allocation. All physical networks listed are available for flat and
# VLAN provider network creation. Specified ranges of VLAN IDs are
# available for tenant network allocation if tenant_network_type is
# 'vlan'. If empty, only gre, vxlan and local networks may be created.
#
# network_vlan_ranges =
# Example: network_vlan_ranges = physnet1:1000:2999
# (BoolOpt) Set to True in the server and the agents to enable support
# for GRE or VXLAN networks. Requires kernel support for OVS patch ports and
# GRE or VXLAN tunneling.
#
# WARNING: This option will be deprecated in the Icehouse release, at which
# point setting tunnel_type below will be required to enable
# tunneling.
#
# enable_tunneling = False
# (StrOpt) The type of tunnel network, if any, supported by the plugin. If
# this is set, it will cause tunneling to be enabled. If this is not set and
# the option enable_tunneling is set, this will default to 'gre'.
#
# tunnel_type =
# Example: tunnel_type = gre
# Example: tunnel_type = vxlan
# (ListOpt) Comma-separated list of <tun_min>:<tun_max> tuples
# enumerating ranges of GRE or VXLAN tunnel IDs that are available for
# tenant network allocation if tenant_network_type is 'gre' or 'vxlan'.
#
# tunnel_id_ranges =
# Example: tunnel_id_ranges = 1:1000
# Do not change this parameter unless you have a good reason to.
# This is the name of the OVS integration bridge. There is one per hypervisor.
# The integration bridge acts as a virtual "patch bay". All VM VIFs are
# attached to this bridge and then "patched" according to their network
# connectivity.
#
# integration_bridge = br-int
# Only used for the agent if tunnel_id_ranges (above) is not empty for
# the server. In most cases, the default value should be fine.
#
# tunnel_bridge = br-tun
# Peer patch port in integration bridge for tunnel bridge
# int_peer_patch_port = patch-tun
# Peer patch port in tunnel bridge for integration bridge
# tun_peer_patch_port = patch-int
# Uncomment this line for the agent if tunnel_id_ranges (above) is not
# empty for the server. Set local-ip to be the local IP address of
# this hypervisor.
#
# local_ip =
# (ListOpt) Comma-separated list of <physical_network>:<bridge> tuples
# mapping physical network names to the agent's node-specific OVS
# bridge names to be used for flat and VLAN networks. The length of
# bridge names should be no more than 11 characters. Each bridge must
# exist, and should have a physical network interface configured as a
# port. All physical networks listed in network_vlan_ranges on the
# server should have mappings to appropriate bridges on each agent.
#
# bridge_mappings =
# Example: bridge_mappings = physnet1:br-eth1
[agent]
# Agent's polling interval in seconds
# polling_interval = 2
# (ListOpt) The types of tenant network tunnels supported by the agent.
# Setting this will enable tunneling support in the agent. This can be set to
# either 'gre' or 'vxlan'. If this is unset, it will default to [] and
# disable tunneling support in the agent. When running the agent with the OVS
# plugin, this value must be the same as "tunnel_type" in the "[ovs]" section.
# When running the agent with ML2, you can specify as many values here as
# your compute hosts support.
#
# tunnel_types =
# Example: tunnel_types = gre
# Example: tunnel_types = vxlan
# Example: tunnel_types = vxlan, gre
# (IntOpt) The port number to utilize if tunnel_types includes 'vxlan'. By
# default, this will make use of the Open vSwitch default value of '4789' if
# not specified.
#
# vxlan_udp_port =
# Example: vxlan_udp_port = 8472
# (IntOpt) This is the MTU size of veth interfaces.
# Do not change unless you have a good reason to.
# The default MTU size of veth interfaces is 1500.
# veth_mtu =
# Example: veth_mtu = 1504
# (BoolOpt) Flag to enable l2-population extension. This option should only be
# used in conjunction with the ml2 plugin and the l2population mechanism
# driver. It'll enable the plugin to populate remote ports' MACs and IPs
# (using fdb_add/remove RPC callbacks instead of tunnel_sync/update) on OVS
# agents in order to optimize tunnel management.
#
# l2_population = False
[securitygroup]
# Firewall driver for realizing neutron security group function.
# firewall_driver = neutron.agent.firewall.NoopFirewallDriver
# Example: firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
#-----------------------------------------------------------------------------
# Sample Configurations.
#-----------------------------------------------------------------------------
#
# 1. With VLANs on eth1.
# [database]
# connection = mysql://root:nova@127.0.0.1:3306/ovs_neutron
# [OVS]
# network_vlan_ranges = default:2000:3999
# tunnel_id_ranges =
# integration_bridge = br-int
# bridge_mappings = default:br-eth1
# [AGENT]
# Add the following setting, if you want to log to a file
#
# 2. With tunneling.
# [database]
# connection = mysql://root:nova@127.0.0.1:3306/ovs_neutron
# [OVS]
# network_vlan_ranges =
# tunnel_id_ranges = 1:1000
# integration_bridge = br-int
# tunnel_bridge = br-tun
# local_ip = 10.0.0.3
[OVS]
network_vlan_ranges=trunk:2112:2114
local_ip=<CONTROLLER_IP>
enable_tunneling=True
integration_bridge=br-int
tunnel_id_ranges=1:1000
tunnel_bridge=br-tun
tenant_network_type=gre
bridge_mappings=trunk:eth1-br
[AGENT]
polling_interval=30
veth_mtu=9134
[SECURITYGROUP]
firewall_driver=neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
@ -0,0 +1,140 @@
{
"context_is_admin": "role:admin",
"admin_or_owner": "rule:context_is_admin or tenant_id:%(tenant_id)s",
"admin_or_network_owner": "rule:context_is_admin or tenant_id:%(network:tenant_id)s",
"admin_only": "rule:context_is_admin",
"regular_user": "",
"shared": "field:networks:shared=True",
"shared_firewalls": "field:firewalls:shared=True",
"external": "field:networks:router:external=True",
"default": "rule:admin_or_owner",
"subnets:private:read": "rule:admin_or_owner",
"subnets:private:write": "rule:admin_or_owner",
"subnets:shared:read": "rule:regular_user",
"subnets:shared:write": "rule:admin_only",
"create_subnet": "rule:admin_or_network_owner",
"get_subnet": "rule:admin_or_owner or rule:shared",
"update_subnet": "rule:admin_or_network_owner",
"delete_subnet": "rule:admin_or_network_owner",
"create_network": "",
"get_network": "rule:admin_or_owner or rule:shared or rule:external",
"get_network:router:external": "rule:regular_user",
"get_network:segments": "rule:admin_only",
"get_network:provider:network_type": "rule:admin_only",
"get_network:provider:physical_network": "rule:admin_only",
"get_network:provider:segmentation_id": "rule:admin_only",
"get_network:queue_id": "rule:admin_only",
"create_network:shared": "rule:admin_only",
"create_network:router:external": "rule:admin_only",
"create_network:segments": "rule:admin_only",
"create_network:provider:network_type": "rule:admin_only",
"create_network:provider:physical_network": "rule:admin_only",
"create_network:provider:segmentation_id": "rule:admin_only",
"update_network": "rule:admin_or_owner",
"update_network:segments": "rule:admin_only",
"update_network:shared": "rule:admin_only",
"update_network:provider:network_type": "rule:admin_only",
"update_network:provider:physical_network": "rule:admin_only",
"update_network:provider:segmentation_id": "rule:admin_only",
"delete_network": "rule:admin_or_owner",
"create_port": "",
"create_port:mac_address": "",
"create_port:fixed_ips": "",
"create_port:port_security_enabled": "rule:admin_or_network_owner",
"create_port:binding:host_id": "rule:admin_only",
"create_port:binding:profile": "rule:admin_only",
"create_port:binding:vnic_type": "rule:admin_or_owner",
"create_port:mac_learning_enabled": "rule:admin_or_network_owner",
"get_port": "rule:admin_or_owner",
"get_port:queue_id": "rule:admin_only",
"get_port:binding:vif_type": "rule:admin_only",
"get_port:binding:vif_details": "rule:admin_only",
"get_port:binding:host_id": "rule:admin_only",
"get_port:binding:profile": "rule:admin_only",
"get_port:binding:vnic_type": "rule:admin_or_owner",
"update_port": "rule:admin_or_owner",
"update_port:fixed_ips": "rule:admin_or_network_owner",
"update_port:port_security_enabled": "rule:admin_or_network_owner",
"update_port:binding:host_id": "rule:admin_only",
"update_port:binding:profile": "rule:admin_only",
"update_port:binding:vnic_type": "rule:admin_or_owner",
"update_port:mac_learning_enabled": "rule:admin_or_network_owner",
"delete_port": "rule:admin_or_owner",
"create_router:external_gateway_info:enable_snat": "rule:admin_only",
"update_router:external_gateway_info:enable_snat": "rule:admin_only",
"create_firewall": "",
"get_firewall": "rule:admin_or_owner",
"create_firewall:shared": "rule:admin_only",
"get_firewall:shared": "rule:admin_only",
"update_firewall": "rule:admin_or_owner",
"delete_firewall": "rule:admin_or_owner",
"create_firewall_policy": "",
"get_firewall_policy": "rule:admin_or_owner or rule:shared_firewalls",
"create_firewall_policy:shared": "rule:admin_or_owner",
"update_firewall_policy": "rule:admin_or_owner",
"delete_firewall_policy": "rule:admin_or_owner",
"create_firewall_rule": "",
"get_firewall_rule": "rule:admin_or_owner or rule:shared_firewalls",
"create_firewall_rule:shared": "rule:admin_or_owner",
"get_firewall_rule:shared": "rule:admin_or_owner",
"update_firewall_rule": "rule:admin_or_owner",
"delete_firewall_rule": "rule:admin_or_owner",
"create_qos_queue": "rule:admin_only",
"get_qos_queue": "rule:admin_only",
"update_agent": "rule:admin_only",
"delete_agent": "rule:admin_only",
"get_agent": "rule:admin_only",
"create_dhcp-network": "rule:admin_only",
"delete_dhcp-network": "rule:admin_only",
"get_dhcp-networks": "rule:admin_only",
"create_l3-router": "rule:admin_only",
"delete_l3-router": "rule:admin_only",
"get_l3-routers": "rule:admin_only",
"get_dhcp-agents": "rule:admin_only",
"get_l3-agents": "rule:admin_only",
"get_loadbalancer-agent": "rule:admin_only",
"get_loadbalancer-pools": "rule:admin_only",
"create_router": "rule:regular_user",
"get_router": "rule:admin_or_owner",
"update_router:add_router_interface": "rule:admin_or_owner",
"update_router:remove_router_interface": "rule:admin_or_owner",
"delete_router": "rule:admin_or_owner",
"create_floatingip": "rule:regular_user",
"update_floatingip": "rule:admin_or_owner",
"delete_floatingip": "rule:admin_or_owner",
"get_floatingip": "rule:admin_or_owner",
"create_network_profile": "rule:admin_only",
"update_network_profile": "rule:admin_only",
"delete_network_profile": "rule:admin_only",
"get_network_profiles": "",
"get_network_profile": "",
"update_policy_profiles": "rule:admin_only",
"get_policy_profiles": "",
"get_policy_profile": "",
"create_metering_label": "rule:admin_only",
"delete_metering_label": "rule:admin_only",
"get_metering_label": "rule:admin_only",
"create_metering_label_rule": "rule:admin_only",
"delete_metering_label_rule": "rule:admin_only",
"get_metering_label_rule": "rule:admin_only",
"get_service_provider": "rule:regular_user",
"get_lsn": "rule:admin_only",
"create_lsn": "rule:admin_only"
}
@ -0,0 +1,34 @@
# Configuration for neutron-rootwrap
# This file should be owned by (and only-writeable by) the root user
[DEFAULT]
# List of directories to load filter definitions from (separated by ',').
# These directories MUST all be only writeable by root !
filters_path=/etc/neutron/rootwrap.d,/usr/share/neutron/rootwrap
# List of directories to search executables in, in case filters do not
# explicitly specify a full path (separated by ',')
# If not specified, defaults to system PATH environment variable.
# These directories MUST all be only writeable by root !
exec_dirs=/sbin,/usr/sbin,/bin,/usr/bin
# Enable logging to syslog
# Default value is False
use_syslog=False
# Which syslog facility to use.
# Valid values include auth, authpriv, syslog, local0, local1...
# Default value is 'syslog'
syslog_log_facility=syslog
# Which messages to log.
# INFO means log all usage
# ERROR means only log unsuccessful attempts
syslog_log_level=ERROR
[xenapi]
# XenAPI configuration is only required by the L2 agent if it is to
# target a XenServer/XCP compute host's dom0.
xenapi_connection_url=<None>
xenapi_connection_username=root
xenapi_connection_password=<None>
@ -0,0 +1,14 @@
# neutron-rootwrap command filters for nodes on which neutron is
# expected to control network
#
# This file should be owned by (and only-writeable by) the root user
# format seems to be
# cmd-name: filter-name, raw-command, user, args
[Filters]
# This is needed because we should ping
# from inside a namespace which requires root
ping: RegExpFilter, ping, root, ping, -w, \d+, -c, \d+, [0-9\.]+
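# a matching invocation would look like: ping -w 5 -c 3 10.1.2.3 (illustrative values)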
ping6: RegExpFilter, ping6, root, ping6, -w, \d+, -c, \d+, [0-9A-Fa-f:]+
@ -0,0 +1,36 @@
# neutron-rootwrap command filters for nodes on which neutron is
# expected to control network
#
# This file should be owned by (and only-writeable by) the root user
# format seems to be
# cmd-name: filter-name, raw-command, user, args
[Filters]
# dhcp-agent
dnsmasq: EnvFilter, dnsmasq, root, NEUTRON_NETWORK_ID=
# dhcp-agent uses kill as well, that's handled by the generic KillFilter
# it looks like these are the only signals needed, per
# neutron/agent/linux/dhcp.py
kill_dnsmasq: KillFilter, root, /sbin/dnsmasq, -9, -HUP
kill_dnsmasq_usr: KillFilter, root, /usr/sbin/dnsmasq, -9, -HUP
ovs-vsctl: CommandFilter, ovs-vsctl, root
ivs-ctl: CommandFilter, ivs-ctl, root
mm-ctl: CommandFilter, mm-ctl, root
dhcp_release: CommandFilter, dhcp_release, root
# metadata proxy
metadata_proxy: CommandFilter, neutron-ns-metadata-proxy, root
# If installed from source (say, by devstack), the prefix will be
# /usr/local instead of /usr/bin.
metadata_proxy_local: CommandFilter, /usr/local/bin/neutron-ns-metadata-proxy, root
# RHEL invocation of the metadata proxy will report /usr/bin/python
kill_metadata: KillFilter, root, python, -9
kill_metadata7: KillFilter, root, python2.7, -9
kill_metadata6: KillFilter, root, python2.6, -9
# ip_lib
ip: IpFilter, ip, root
ip_exec: IpNetnsExecFilter, ip, root
@ -0,0 +1,12 @@
# neutron-rootwrap command filters for nodes on which neutron is
# expected to control network
#
# This file should be owned by (and only-writeable by) the root user
# format seems to be
# cmd-name: filter-name, raw-command, user, args
[Filters]
# neutron/agent/linux/iptables_firewall.py
# "ipset", "-A", ...
ipset: CommandFilter, ipset, root
@ -0,0 +1,21 @@
# neutron-rootwrap command filters for nodes on which neutron is
# expected to control network
#
# This file should be owned by (and only-writeable by) the root user
# format seems to be
# cmd-name: filter-name, raw-command, user, args
[Filters]
# neutron/agent/linux/iptables_manager.py
# "iptables-save", ...
iptables-save: CommandFilter, iptables-save, root
iptables-restore: CommandFilter, iptables-restore, root
ip6tables-save: CommandFilter, ip6tables-save, root
ip6tables-restore: CommandFilter, ip6tables-restore, root
# neutron/agent/linux/iptables_manager.py
# "iptables", "-A", ...
iptables: CommandFilter, iptables, root
ip6tables: CommandFilter, ip6tables, root
@ -0,0 +1,49 @@
# neutron-rootwrap command filters for nodes on which neutron is
# expected to control network
#
# This file should be owned by (and only-writeable by) the root user
# format seems to be
# cmd-name: filter-name, raw-command, user, args
[Filters]
# arping
arping: CommandFilter, arping, root
# l3_agent
sysctl: CommandFilter, sysctl, root
route: CommandFilter, route, root
radvd: CommandFilter, radvd, root
# metadata proxy
metadata_proxy: CommandFilter, neutron-ns-metadata-proxy, root
# If installed from source (say, by devstack), the prefix will be
# /usr/local instead of /usr/bin.
metadata_proxy_local: CommandFilter, /usr/local/bin/neutron-ns-metadata-proxy, root
# RHEL invocation of the metadata proxy will report /usr/bin/python
kill_metadata: KillFilter, root, python, -9
kill_metadata7: KillFilter, root, python2.7, -9
kill_metadata6: KillFilter, root, python2.6, -9
kill_radvd_usr: KillFilter, root, /usr/sbin/radvd, -9, -HUP
kill_radvd: KillFilter, root, /sbin/radvd, -9, -HUP
# ip_lib
ip: IpFilter, ip, root
ip_exec: IpNetnsExecFilter, ip, root
# ovs_lib (if OVSInterfaceDriver is used)
ovs-vsctl: CommandFilter, ovs-vsctl, root
# iptables_manager
iptables-save: CommandFilter, iptables-save, root
iptables-restore: CommandFilter, iptables-restore, root
ip6tables-save: CommandFilter, ip6tables-save, root
ip6tables-restore: CommandFilter, ip6tables-restore, root
# Keepalived
keepalived: CommandFilter, keepalived, root
kill_keepalived: KillFilter, root, /usr/sbin/keepalived, -HUP, -15, -9
# l3 agent to delete floatingip's conntrack state
conntrack: CommandFilter, conntrack, root
@ -0,0 +1,26 @@
# neutron-rootwrap command filters for nodes on which neutron is
# expected to control network
#
# This file should be owned by (and only-writeable by) the root user
# format seems to be
# cmd-name: filter-name, raw-command, user, args
[Filters]
# haproxy
haproxy: CommandFilter, haproxy, root
# lbaas-agent uses kill as well, that's handled by the generic KillFilter
kill_haproxy_usr: KillFilter, root, /usr/sbin/haproxy, -9, -HUP
ovs-vsctl: CommandFilter, ovs-vsctl, root
mm-ctl: CommandFilter, mm-ctl, root
# ip_lib
ip: IpFilter, ip, root
ip_exec: IpNetnsExecFilter, ip, root
route: CommandFilter, route, root
# arping
arping: CommandFilter, arping, root
@ -0,0 +1,22 @@
# neutron-rootwrap command filters for nodes on which neutron is
# expected to control network
#
# This file should be owned by (and only-writeable by) the root user
# format seems to be
# cmd-name: filter-name, raw-command, user, args
[Filters]
# openvswitch-agent
# unclear whether both variants are necessary, but I'm transliterating
# from the old mechanism
ovs-vsctl: CommandFilter, ovs-vsctl, root
ovs-ofctl: CommandFilter, ovs-ofctl, root
kill_ovsdb_client: KillFilter, root, /usr/bin/ovsdb-client, -9
ovsdb-client: CommandFilter, ovsdb-client, root
xe: CommandFilter, xe, root
# ip_lib
ip: IpFilter, ip, root
ip_exec: IpNetnsExecFilter, ip, root

Some files were not shown because too many files have changed in this diff.