We said this would be going away back in 1.7.0 -- let's actually remove it. Change-Id: I9742dd907abea86da9259740d913924bb1ce73e7 Related-Change: Id7d6d547b103b4f23ebf5be98b88f09ec6027ce4

[DEFAULT]
# bind_ip = 0.0.0.0
bind_port = 8080
# keep_idle = 600
# bind_timeout = 30
# backlog = 4096
# swift_dir = /etc/swift
# user = swift

# Enables exposing configuration settings via HTTP GET /info.
# expose_info = true

# Key to use for admin calls that are HMAC signed.  Default is empty,
# which will disable admin calls to /info.
# admin_key = secret_admin_key
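#
# As an illustrative sketch (not a config option): assuming the HMAC-SHA1
# scheme of swift.common.utils.get_hmac over "<method>\n<expires>\n<path>",
# an admin call to /info would be signed with the admin_key and sent as
# query parameters, something like:
#   /info?swiftinfo_expires=<unix_time>&swiftinfo_sig=<hex HMAC-SHA1 of
#   "GET\n<unix_time>\n/info", keyed with admin_key>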
#
# Allows withholding sections from showing up in the public calls to /info.
# You can withhold subsections by separating the dict level with a ".".
# Default value is 'swift.valid_api_versions, swift.auto_create_account_prefix'
# which allows all registered features to be listed via HTTP GET /info except
# swift.valid_api_versions and swift.auto_create_account_prefix information.
# As an example, the following would cause the sections 'container_quotas' and
# 'tempurl' to not be listed, and the key max_failed_deletes would be removed
# from bulk_delete.
# disallowed_sections = swift.valid_api_versions, container_quotas, tempurl, bulk_delete.max_failed_deletes

# Use an integer to override the number of pre-forked processes that will
# accept connections.  Should default to the number of effective cpu
# cores in the system.  It's worth noting that individual workers will
# use many eventlet co-routines to service multiple concurrent requests.
# workers = auto
#
# Maximum concurrent requests per worker
# max_clients = 1024
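#
# For example (illustrative values, not defaults), a dedicated 8-core proxy
# node sized for roughly 8k concurrent client connections might pin:
# workers = 8
# max_clients = 1024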
#
# Set the following two lines to enable SSL. This is for testing only.
# cert_file = /etc/swift/proxy.crt
# key_file = /etc/swift/proxy.key
#
# expiring_objects_container_divisor = 86400
# expiring_objects_account_name = expiring_objects
#
# You can specify default log routing here if you want:
# log_name = swift
# log_facility = LOG_LOCAL0
# log_level = INFO
# log_headers = false
# log_address = /dev/log
# The following caps the length of log lines to the value given; no limit if
# set to 0, the default.
# log_max_line_length = 0
#
# This optional suffix (default is empty) is appended to the swift transaction
# id, allowing one to easily figure out which cluster an X-Trans-Id belongs
# to. This is very useful when managing more than one swift cluster.
# trans_id_suffix =
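#
# For example (illustrative values, not defaults), two clusters might set
# trans_id_suffix = -clusterA and trans_id_suffix = -clusterB respectively,
# so an X-Trans-Id ending in -clusterA immediately identifies its origin.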
#
# Comma separated list of functions to call to set up custom log handlers.
# Functions get passed: conf, name, log_to_console, log_route, fmt, logger,
# adapted_logger
# log_custom_handlers =
#
# If set, log_udp_host will override log_address
# log_udp_host =
# log_udp_port = 514
#
# You can enable StatsD logging here:
# log_statsd_host =
# log_statsd_port = 8125
# log_statsd_default_sample_rate = 1.0
# log_statsd_sample_rate_factor = 1.0
# log_statsd_metric_prefix =
#
# List of origin hosts that are allowed for CORS requests in addition to what
# the container has set.
# Use a comma separated list of full URLs (http://foo.bar:1234,https://foo.bar)
# cors_allow_origin =
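#
# For example (illustrative values), to additionally allow two dashboards:
# cors_allow_origin = https://dashboard.example.com,http://portal.example.com:8080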

# If True (default) then CORS requests are only allowed if their Origin header
# matches an allowed origin. Otherwise, any Origin is allowed.
# strict_cors_mode = True
#
# Comma separated list of headers to expose through Access-Control-Expose-Headers,
# in addition to the defaults and any headers set in container metadata (see
# CORS documentation).
# cors_expose_headers =
#
# client_timeout = 60.0
#
# Note: enabling eventlet_debug might reveal sensitive information, for example
# signatures for temp urls
# eventlet_debug = false
#
# You can set scheduling priority of processes. Niceness values range from -20
# (most favorable to the process) to 19 (least favorable to the process).
# nice_priority =
#
# You can set I/O scheduling class and priority of processes. I/O niceness
# class values are IOPRIO_CLASS_RT (realtime), IOPRIO_CLASS_BE (best-effort) and
# IOPRIO_CLASS_IDLE (idle). I/O niceness priority is a number which goes from
# 0 to 7. The higher the value, the lower the I/O priority of the process.
# Works only with ionice_class.
# ionice_class =
# ionice_priority =

[pipeline:main]
# This sample pipeline uses tempauth and is used for SAIO dev work and
# testing. See below for a pipeline using keystone.
pipeline = catch_errors gatekeeper healthcheck proxy-logging cache listing_formats container_sync bulk tempurl ratelimit tempauth copy container-quotas account-quotas slo dlo versioned_writes symlink proxy-logging proxy-server

# The following pipeline shows keystone integration. Comment out the one
# above and uncomment this one. Additional steps for integrating keystone are
# covered further below in the filter sections for authtoken and keystoneauth.
#pipeline = catch_errors gatekeeper healthcheck proxy-logging cache container_sync bulk tempurl ratelimit authtoken keystoneauth copy container-quotas account-quotas slo dlo versioned_writes symlink proxy-logging proxy-server

[app:proxy-server]
use = egg:swift#proxy
# You can override the default log routing for this app here:
# set log_name = proxy-server
# set log_facility = LOG_LOCAL0
# set log_level = INFO
# set log_address = /dev/log
#
# When deployed behind a proxy, load balancer, or SSL terminator that is
# configured to speak the human-readable (v1) PROXY protocol (see
# http://www.haproxy.org/download/1.7/doc/proxy-protocol.txt), you should set
# this option to true.  The proxy-server will populate the client connection
# information using the PROXY protocol and reject any connection missing a
# valid PROXY line with a 400.  Only v1 (human-readable) of the PROXY protocol
# is supported.
# require_proxy_protocol = false
#
# log_handoffs = true
# recheck_account_existence = 60
# recheck_container_existence = 60
#
# How long the proxy should cache a set of shard ranges for a container when
# the set is to be used for directing object updates.
# Note that stale shard range info should be fine; updates will still
# eventually make their way to the correct shard. As a result, you can
# usually set this much higher than the existence checks above.
# recheck_updating_shard_ranges = 3600
#
# How long the proxy should cache a set of shard ranges for a container when
# the set is to be used for gathering object listings.
# Note that stale shard range info might result in incomplete object listings
# so this value should be set less than recheck_updating_shard_ranges.
# recheck_listing_shard_ranges = 600
#
# For particularly active containers, having information age out of cache can
# be quite painful: suddenly thousands of requests per second all miss and
# have to go to disk. By (rarely) going direct to disk regardless of whether
# data is present in memcache, we can periodically refresh the data in memcache
# without causing a thundering herd. Values around 0.0 - 0.1 (i.e., one in
# every thousand requests skips cache, or fewer) are recommended.
# container_updating_shard_ranges_skip_cache_pct = 0.0
# container_listing_shard_ranges_skip_cache_pct = 0.0
#
# object_chunk_size = 65536
# client_chunk_size = 65536
#
# How long the proxy server will wait on responses from the a/c/o servers.
# node_timeout = 10
#
# How long the proxy server will wait for an initial response and to read a
# chunk of data from the object servers while serving GET / HEAD requests.
# Timeouts from these requests can be recovered from so setting this to
# something lower than node_timeout would provide quicker error recovery
# while allowing for a longer timeout for non-recoverable requests (PUTs).
# Defaults to node_timeout, should be overridden if node_timeout is set to a
# high number to prevent client timeouts from firing before the proxy server
# has a chance to retry.
# recoverable_node_timeout = node_timeout
#
# conn_timeout = 0.5
#
# How long to wait for requests to finish after a quorum has been established.
# post_quorum_timeout = 0.5
#
# How long without an error before a node's error count is reset. This will
# also be how long before a node is reenabled after suppression is triggered.
# Set to 0 to disable error-limiting.
# error_suppression_interval = 60.0
#
# How many errors can accumulate before a node is temporarily ignored.
# error_suppression_limit = 10
#
# If set to 'true' any authorized user may create and delete accounts; if
# 'false' no one, even authorized, can.
# allow_account_management = false
#
# If set to 'true' authorized accounts that do not yet exist within the Swift
# cluster will be automatically created.
# account_autocreate = false
#
# If set to a positive value, trying to create a container when the account
# already has at least this maximum containers will result in a 403 Forbidden.
# Note: This is a soft limit, meaning a user might exceed the cap for
# recheck_account_existence before the 403s kick in.
# max_containers_per_account = 0
#
# This is a comma separated list of account hashes that ignore the
# max_containers_per_account cap.
# max_containers_whitelist =
#
# Comma separated list of Host headers to which the proxy will deny requests.
# deny_host_headers =
#
# During GET and HEAD requests, storage nodes can be chosen at random
# (shuffle), by using timing measurements (timing), or by using an explicit
# region/zone match (affinity). Using timing measurements may allow for lower
# overall latency, while using affinity allows for finer control. In both the
# timing and affinity cases, equally-sorting nodes are still randomly chosen to
# spread load.
# The valid values for sorting_method are "affinity", "shuffle", or "timing".
# This option may be overridden in a per-policy configuration section.
# sorting_method = shuffle
#
# If the "timing" sorting_method is used, the timings will only be valid for
# the number of seconds configured by timing_expiry.
# timing_expiry = 300
#
# Normally, you should only be moving one replica's worth of data at a time
# when rebalancing. If you're rebalancing more aggressively, increase this
# to avoid erroneously returning a 404 when the primary assignments that
# *didn't* change get overloaded.
# rebalance_missing_suppression_count = 1
#
# By default on a GET/HEAD swift will connect to a minimum number of storage
# nodes in a minimum number of threads - for replicated data just a single
# request to a single node one at a time.  When enabled, concurrent_gets
# allows the proxy to use up to replica count threads when waiting on a
# response.  In conjunction with the concurrency_timeout option this will
# allow swift to send out GET/HEAD requests to the storage nodes concurrently
# and answer as soon as the minimum number of backend responses are available
# - in replicated contexts this will be the first backend replica to respond.
# concurrent_gets = off
#
# This parameter controls how long to wait before firing off the next
# concurrent_get thread. A value of 0 would be fully concurrent, any other
# number will stagger the firing of the threads. This number should be
# between 0 and node_timeout. The default is whatever you set for the
# conn_timeout parameter.
# concurrency_timeout = 0.5
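#
# For example (illustrative values, not defaults), to fire all GET/HEAD
# requests at the storage nodes at once and take the first good response:
# concurrent_gets = true
# concurrency_timeout = 0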
#
# By default on an EC GET request swift will connect to a minimum number of
# storage nodes in a minimum number of threads - for erasure coded data, ndata
# requests to primary nodes are started at the same time.  When greater than
# zero this option provides additional robustness and may reduce first byte
# latency by starting additional requests - up to as many as nparity.
# concurrent_ec_extra_requests = 0
#
# Set to the number of nodes to contact for a normal request. You can use
# '* replicas' at the end to have it use the number given times the number of
# replicas for the ring being used for the request.
# request_node_count = 2 * replicas
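#
# For example, with a 3-replica ring the sample value '2 * replicas' means
# up to 6 nodes will be contacted for a request.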
#
# Specifies which backend servers to prefer on reads. Format is a comma
# separated list of affinity descriptors of the form <selection>=<priority>.
# The <selection> may be r<N> for selecting nodes in region N or r<N>z<M> for
# selecting nodes in region N, zone M. The <priority> value should be a whole
# number that represents the priority to be given to the selection; lower
# numbers are higher priority.
#
# Example: first read from region 1 zone 1, then region 1 zone 2, then
# anything in region 2, then everything else:
# read_affinity = r1z1=100, r1z2=200, r2=300
# Default is empty, meaning no preference.
# This option may be overridden in a per-policy configuration section.
# read_affinity =
#
# Specifies which backend servers to prefer on object writes. Format is a comma
# separated list of affinity descriptors of the form r<N> for region N or
# r<N>z<M> for region N, zone M. If this is set, then when handling an object
# PUT request, some number (see setting write_affinity_node_count) of local
# backend servers will be tried before any nonlocal ones.
#
# Example: try to write to regions 1 and 2 before writing to any other
# nodes:
# write_affinity = r1, r2
# Default is empty, meaning no preference.
# This option may be overridden in a per-policy configuration section.
# write_affinity =
#
# The number of local (as governed by the write_affinity setting) nodes to
# attempt to contact first on writes, before any non-local ones. The value
# should be an integer number, or use '* replicas' at the end to have it use
# the number given times the number of replicas for the ring being used for the
# request.
# This option may be overridden in a per-policy configuration section.
# write_affinity_node_count = 2 * replicas
#
# The number of local (as governed by the write_affinity setting) handoff nodes
# to attempt to contact on deletion, in addition to primary nodes.
#
# Example: in a geographically distributed deployment with 2 regions and
# replicas=3, there may sometimes be 1 primary node and 2 local handoff nodes
# in one region holding an object after upload but before the object has been
# replicated to its proper locations in the other region. In this case, also
# sending the DELETE request to these local handoff nodes helps the proxy
# return the correct response. The default value 'auto' means Swift will
# calculate the number automatically as
# (replicas - len(local_primary_nodes)). This option may be overridden in a
# per-policy configuration section.
# write_affinity_handoff_delete_count = auto
#
# These are the headers whose values will only be shown to swift_owners. The
# exact definition of a swift_owner is up to the auth system in use, but
# usually indicates administrative responsibilities.
# swift_owner_headers = x-container-read, x-container-write, x-container-sync-key, x-container-sync-to, x-account-meta-temp-url-key, x-account-meta-temp-url-key-2, x-container-meta-temp-url-key, x-container-meta-temp-url-key-2, x-account-access-control
#
# You can set scheduling priority of processes. Niceness values range from -20
# (most favorable to the process) to 19 (least favorable to the process).
# nice_priority =
#
# You can set I/O scheduling class and priority of processes. I/O niceness
# class values are IOPRIO_CLASS_RT (realtime), IOPRIO_CLASS_BE (best-effort) and
# IOPRIO_CLASS_IDLE (idle). I/O niceness priority is a number which goes from
# 0 to 7. The higher the value, the lower the I/O priority of the process.
# Works only with ionice_class.
# ionice_class =
# ionice_priority =
#
# When upgrading from liberasurecode<=1.5.0, you may want to continue writing
# legacy CRCs until all nodes are upgraded and capable of reading fragments
# with zlib CRCs. liberasurecode>=1.6.2 checks for the environment variable
# LIBERASURECODE_WRITE_LEGACY_CRC; if set (value doesn't matter), it will use
# its legacy CRC. Set this option to true or false to ensure the environment
# variable is or is not set. Leave the option blank or absent to not touch
# the environment (default). For more information, see
# https://bugs.launchpad.net/liberasurecode/+bug/1886088
# write_legacy_ec_crc =

# Some proxy-server configuration options may be overridden on a per-policy
# basis by including per-policy config section(s). The value of any option
# specified in a per-policy section will override any value given in the
# proxy-server section for that policy only. Otherwise the value of these
# options will be that specified in the proxy-server section.
# The section name should refer to the policy index, not the policy name.
# [proxy-server:policy:<policy index>]
# sorting_method =
# read_affinity =
# write_affinity =
# write_affinity_node_count =
# write_affinity_handoff_delete_count =
# rebalance_missing_suppression_count = 1
# concurrent_gets = off
# concurrency_timeout = 0.5
# concurrent_ec_extra_requests = 0
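#
# For example (illustrative, assuming a policy with index 0), reads for just
# that policy could be pinned to region 1 while other policies keep the
# proxy-server defaults:
# [proxy-server:policy:0]
# sorting_method = affinity
# read_affinity = r1=100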

[filter:tempauth]
use = egg:swift#tempauth
# You can override the default log routing for this filter here:
# set log_name = tempauth
# set log_facility = LOG_LOCAL0
# set log_level = INFO
# set log_headers = false
# set log_address = /dev/log
#
# The reseller prefix will verify a token begins with this prefix before even
# attempting to validate it. Also, with authorization, only Swift storage
# accounts with this prefix will be authorized by this middleware. Useful if
# multiple auth systems are in use for one Swift cluster.
# The reseller_prefix may contain a comma separated list of items. The first
# item is used for the token as mentioned above. If second and subsequent
# items exist, the middleware will handle authorization for an account with
# that prefix. For example, for prefixes "AUTH, SERVICE", a path of
# /v1/SERVICE_account is handled the same as /v1/AUTH_account. If an empty
# (blank) reseller prefix is required, it must be first in the list. Two
# single quote characters indicate an empty (blank) reseller prefix.
# reseller_prefix = AUTH

#
# The require_group parameter names a group that must be presented by
# either X-Auth-Token or X-Service-Token. Usually this parameter is
# used only with multiple reseller prefixes (e.g., SERVICE_require_group=blah).
# By default, no group is needed. Do not use .admin.
# require_group =

# The auth prefix will cause requests beginning with this prefix to be routed
# to the auth subsystem, for granting tokens, etc.
# auth_prefix = /auth/
# token_life = 86400
#
# This allows middleware higher in the WSGI pipeline to override auth
# processing, useful for middleware such as tempurl and formpost. If you know
# you're not going to use such middleware and you want a bit of extra security,
# you can set this to false.
# allow_overrides = true
#
# This specifies what scheme to return with storage URLs:
# http, https, or default (chooses based on what the server is running as)
# This can be useful with an SSL load balancer in front of a non-SSL server.
# storage_url_scheme = default
#
# Lastly, you need to list all the accounts/users you want here. The format is:
#   user_<account>_<user> = <key> [group] [group] [...] [storage_url]
# or if you want underscores in <account> or <user>, you can base64 encode them
# (with no equal signs) and use this format (an example is given after the
# entries below):
#   user64_<account_b64>_<user_b64> = <key> [group] [group] [...] [storage_url]
# There are special groups of:
#   .reseller_admin = can do anything to any account for this auth
#   .reseller_reader = can GET/HEAD anything in any account for this auth
#   .admin = can do anything within the account
# If none of these groups are specified, the user can only access containers
# that have been explicitly allowed for them by a .admin or .reseller_admin.
# The trailing optional storage_url allows you to specify an alternate url to
# hand back to the user upon authentication. If not specified, this defaults to
# $HOST/v1/<reseller_prefix>_<account> where $HOST will do its best to resolve
# to what the requester would need to use to reach this host.
# Here are example entries, required for running the tests:
user_admin_admin = admin .admin .reseller_admin
user_admin_auditor = admin_ro .reseller_reader
user_test_tester = testing .admin
user_test_tester2 = testing2 .admin
user_test_tester3 = testing3
user_test2_tester2 = testing2 .admin
user_test5_tester5 = testing5 service
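#
# As an illustrative example of the user64 form (hypothetical account
# "my_account" and user "my_user"; base64 without padding gives
# bXlfYWNjb3VudA and bXlfdXNlcg):
# user64_bXlfYWNjb3VudA_bXlfdXNlcg = testing64 .admin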

# To enable Keystone authentication, the auth token middleware must first be
# configured. An example is shown below; please refer to the keystone
# documentation for details about the different settings.
#
# You'll also need to have the keystoneauth middleware enabled and have it in
# your main pipeline, as shown in the sample pipeline at the top of this file.
#
# The following parameters are known to work with keystonemiddleware v2.3.0
# (above v2.0.0), but checking the latest information in the documentation[1]
# is recommended.
# 1. https://docs.openstack.org/keystonemiddleware/latest/middlewarearchitecture.html#configuration
#
# [filter:authtoken]
# paste.filter_factory = keystonemiddleware.auth_token:filter_factory
# www_authenticate_uri = http://keystonehost:5000
# auth_url = http://keystonehost:5000
# auth_plugin = password
# The following credentials must match the Keystone credentials for the Swift
# service and may need to be changed to match your Keystone configuration. The
# example values shown here assume a user named 'swift' with admin role on a
# project named 'service', both being in the Keystone domain with id 'default'.
# Refer to the keystonemiddleware documentation link above [1] for other
# examples.
# project_domain_id = default
# user_domain_id = default
# project_name = service
# username = swift
# password = password
#
# delay_auth_decision defaults to False, but leaving it as False will prevent
# other auth systems, staticweb, tempurl, formpost, and ACLs from working;
# for those to work, this value must be explicitly set to True.
# delay_auth_decision = False
#
# cache = swift.cache
# include_service_catalog = False
#
# [filter:keystoneauth]
# use = egg:swift#keystoneauth
# The reseller_prefix option lists account namespaces that this middleware is
# responsible for. The prefix is placed before the Keystone project id.
# For example, for project 12345678, and prefix AUTH, the account is
# named AUTH_12345678 (i.e., path is /v1/AUTH_12345678/...).
# Several prefixes are allowed by specifying a comma-separated list
# as in: "reseller_prefix = AUTH, SERVICE". The empty string indicates a
# single blank/empty prefix. If an empty prefix is required in a list of
# prefixes, a value of '' (two single quote characters) indicates a
# blank/empty prefix. Except for the blank/empty prefix, an underscore ('_')
# character is appended to the value unless already present.
# reseller_prefix = AUTH
#
# The user must have at least one role named by operator_roles on a
# project in order to create, delete and modify containers and objects
# and to set and read privileged headers such as ACLs.
# If there are several reseller prefix items, you can prefix the
# parameter so it applies only to those accounts (for example
# the parameter SERVICE_operator_roles applies to the /v1/SERVICE_<project>
# path). If you omit the prefix, the option applies to all reseller
# prefix items. For the blank/empty prefix, prefix with '' (do not put an
# underscore after the two single quote characters).
# operator_roles = admin, swiftoperator
#
# The reseller admin role has the ability to create and delete accounts.
# reseller_admin_role = ResellerAdmin
#
# This allows middleware higher in the WSGI pipeline to override auth
# processing, useful for middleware such as tempurl and formpost. If you know
# you're not going to use such middleware and you want a bit of extra security,
# you can set this to false.
# allow_overrides = true
#
# If the service_roles parameter is present, an X-Service-Token must be
# present in the request that, when validated, grants at least one role listed
# in the parameter. The X-Service-Token may be scoped to any project.
# If there are several reseller prefix items, you can prefix the
# parameter so it applies only to those accounts (for example
# the parameter SERVICE_service_roles applies to the /v1/SERVICE_<project>
# path). If you omit the prefix, the option applies to all reseller
# prefix items. For the blank/empty prefix, prefix with '' (do not put an
# underscore after the two single quote characters).
# By default, no service_roles are required.
# service_roles =
#
# For backwards compatibility, keystoneauth will match names in cross-tenant
# access control lists (ACLs) when both the requesting user and the tenant
# are in the default domain, i.e. the domain to which existing tenants are
# migrated. The default_domain_id value configured here should be the same as
# the value used during migration of tenants to keystone domains.
# default_domain_id = default
#
# For a new installation, or an installation in which keystone projects may
# move between domains, you should disable backwards compatible name matching
# in ACLs by setting allow_names_in_acls to false:
# allow_names_in_acls = true
#
# In OpenStack terms, these reader roles are scoped for system: they
# can read anything across projects and domains.
# They are used for auditing and compliance functions.
# In Swift terms, these roles are as powerful as the reseller_admin_role,
# except that they cannot modify the cluster.
# By default the list of reader roles is empty.
# system_reader_roles =
#
# This is a reader role scoped for a Keystone project.
# An identity that has this role can read anything in a project, so it is
# basically a swiftoperator, but read-only.
# project_reader_roles =

[filter:s3api]
use = egg:swift#s3api

# s3api setup:
#
# With either tempauth or your custom auth:
# - Put s3api just before your auth filter(s) in the pipeline
# With keystone:
# - Put s3api and s3token before keystoneauth in the pipeline, but after
#   auth_token
# If you have ratelimit enabled for Swift requests, you may want to place a
# second copy after auth to also ratelimit S3 requests.
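#
# As an illustrative sketch consistent with the notes above (not the sample
# default), the auth-related portion of a keystone pipeline with S3 support
# might be ordered:
#   ... authtoken s3api s3token keystoneauth ...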
#
# Swift has no concept of the S3 resource owner; resources
# (i.e. containers and objects) created via the Swift API have no owner
# information. This option specifies how the s3api middleware handles them
# with the S3 API.  If this option is 'false', such resources will be
# invisible and no users can access them with the S3 API.  If set to 'true',
# a resource without an owner belongs to everyone and everyone can access it
# with the S3 API.  If you care about S3 compatibility, set 'false' here.  This
# option makes sense only when the s3_acl option is set to 'true' and your
# Swift cluster has resources created via the Swift API.
# allow_no_owner = false
#
# Set the region name of your Swift cluster.  Note that s3api doesn't choose
# a region for the newly created bucket.  This value is used for the
# GET Bucket location API and v4 signature calculation.
# location = us-east-1
#
# Set whether to enforce DNS-compliant bucket names. Note that S3 enforces
# these conventions in all regions except the US Standard region.
# dns_compliant_bucket_names = True
#
# Set the default maximum number of objects returned in the GET Bucket
# response.
# max_bucket_listing = 1000
#
# Set the maximum number of parts returned in the List Parts operation.
# (default: 1000, matching the S3 specification)
# If setting it larger than 10000 (swift's default container_listing_limit),
# make sure you also increase container_listing_limit in swift.conf.
# max_parts_listing = 1000
#
# Set the maximum number of objects we can delete with the Multi-Object Delete
# operation.
# max_multi_delete_objects = 1000
#
# Set the number of objects to delete at a time with the Multi-Object Delete
# operation.
# multi_delete_concurrency = 2
#
# If set to 'true', s3api uses its own metadata for ACLs
# (e.g. X-Container-Sysmeta-S3Api-Acl) to achieve the best S3 compatibility.
# If set to 'false', s3api tries to use Swift ACLs (e.g. X-Container-Read)
# instead of S3 ACLs as far as possible.
# There are some caveats to be aware of with this setting. Firstly, if it is
# set to 'false' after having been 'true', any objects or containers stored
# while it was 'true' will become accessible to all users, because their S3
# ACLs will be ignored while s3_acl is 'false'. Secondly, enabling s3_acl
# does not keep ACLs consistent between the S3 and Swift APIs: with s3_acl
# enabled, S3 ACLs only affect objects and buckets accessed via the S3 API,
# and this ACL information is not available via the Swift API, so it is not
# applied there.
# Note that s3_acl currently supports only keystone and tempauth.
# DON'T USE THIS for production before enough testing for your use cases.
# This stuff is still under development and it might cause something
# you don't expect.
# s3_acl = false
#
# Specify a (comma-separated) list of host names for your Swift cluster.
# This enables virtual-hosted style requests.
# storage_domain =
#
# Enable pipeline order check for SLO, s3token, authtoken, keystoneauth
# according to standard s3api/Swift construction using either tempauth or
# keystoneauth. If the order is incorrect, an exception is raised to stop the
# proxy. Turn auth_pipeline_check off only when you want to bypass these
# authentication middlewares in order to use other third-party (or your
# proprietary) authentication middleware.
# auth_pipeline_check = True
#
# Enable multi-part uploads. (default: true)
# This is required to store files larger than Swift's max_file_size (by
# default, 5GiB). Note that this has performance implications when deleting
# objects, as we now have to check for whether there are also segments to
# delete. The SLO middleware must be in the pipeline after s3api for this
# option to have effect.
# allow_multipart_uploads = True
#
# Set the maximum number of parts for the Upload Part operation. (default:
# 1000) When setting it larger than the default in order to match the S3
# specification (which allows 10000), make sure you also set a larger
# max_manifest_segments for the slo middleware.
# max_upload_part_num = 1000
#
# Enable returning only buckets owned by the user who requested the
# GET Service operation. (default: false)
# If you want to enable this feature, set both this and s3_acl to true.
# That might cause significant performance degradation, so set this to true
# only if your service absolutely needs this feature.
# If you set this to false, s3api returns all buckets.
# check_bucket_owner = false
#
# By default, Swift reports only S3-style access logs
# (e.g. PUT /bucket/object). If force_swift_request_proxy_log is set to
# 'true', Swift will also output Swift-style logs
# (e.g. PUT /v1/account/container/object) in addition to the S3-style logs.
# Note that requests will then be reported twice (i.e. s3api doesn't
# deduplicate them), and the Swift-style logs will also include the various
# subrequests made to achieve S3 compatibility.
# force_swift_request_proxy_log = false
#
# The AWS S3 documentation says that each part must be at least 5 MB in a
# multipart upload, except the last part.
# min_segment_size = 5242880
#
# AWS allows clock skew up to 15 mins; note that older versions of swift/swift3
# allowed at most 5 mins.
# allowable_clock_skew = 900
#
# CORS preflight requests don't contain enough information for us to
# identify the account that should be used for the real request, so
# the allowed origins must be set cluster-wide. (default: blank; all
# preflight requests will be denied)
# cors_preflight_allow_origin =
#
# AWS will return a 503 Slow Down when clients are making too many requests,
# but that can make client logs confusing if they only log/give metrics on
# status ints. Turn this on to return 429 instead.
# ratelimit_as_client_error = false

# You can override the default log routing for this filter here:
# log_name = s3api

[filter:s3token]
# s3token middleware authenticates with keystone using the s3 credentials
# provided in the request header. Please put s3token between s3api
# and keystoneauth if you're using keystoneauth.
use = egg:swift#s3token

# Prefix that will be prepended to the tenant to form the account
reseller_prefix = AUTH_

# By default, s3token will reject all invalid S3-style requests. Set this to
# True to delegate that decision to downstream WSGI components. This may be
# useful if there are multiple auth systems in the proxy pipeline.
delay_auth_decision = False

# Keystone server details. Note that this differs from how swift3 was
# configured: in particular, the Keystone API version must be included.
auth_uri = http://keystonehost:5000/v3

# Connect/read timeout to use when communicating with Keystone
http_timeout = 10.0

# Number of seconds to cache the S3 secret. Setting this to some number of
# seconds greater than zero enables caching, so the S3 authorization
# validation checks can happen locally, reducing latency for the client and
# load on Keystone.
# secret_cache_duration = 0

# If S3 secret caching is enabled, Keystone auth credentials to be used to
# validate S3 authorization must be provided here. The appropriate options
# are the same as used in the authtoken middleware above, and the values are
# likely the same as well. Note that the Keystone auth credentials used by
# s3token will need to be able to view all project credentials too.
# auth_url = http://keystonehost:5000
# auth_type = password
# project_domain_id = default
# project_name = service
# user_domain_id = default
# username = swift
# password = password

# SSL-related options
# insecure = False
# certfile =
# keyfile =

# You can override the default log routing for this filter here:
# log_name = s3token

[filter:healthcheck]
use = egg:swift#healthcheck
# An optional filesystem path, which if present, will cause the healthcheck
# URL to return "503 Service Unavailable" with a body of "DISABLED BY FILE".
# This facility may be used to temporarily remove a Swift node from a load
# balancer pool during maintenance or upgrade (remove the file to allow the
# node back into the load balancer pool).
# disable_path =

[filter:cache]
use = egg:swift#memcache
# You can override the default log routing for this filter here:
# set log_name = cache
# set log_facility = LOG_LOCAL0
# set log_level = INFO
# set log_headers = false
# set log_address = /dev/log
#
# If not set here, the value for memcache_servers will be read from
# memcache.conf (see memcache.conf-sample) or lacking that file, it will
# default to the value below. You can specify multiple servers separated with
# commas, as in: 10.1.2.3:11211,10.1.2.4:11211 (IPv6 addresses must
# follow rfc3986 section-3.2.2, i.e. [::1]:11211)
# memcache_servers = 127.0.0.1:11211
#
# Sets the maximum number of connections to each memcached server per worker
# memcache_max_connections = 2
#
# How long without an error before a server's error count is reset. This will
# also be how long before a server is reenabled after suppression is triggered.
# Set to 0 to disable error-limiting.
# error_suppression_interval = 60.0
#
# How many errors can accumulate before a server is temporarily ignored.
# error_suppression_limit = 10
#
# (Optional) Global toggle for TLS usage when communicating with
# the caching servers.
# tls_enabled =
#
# More options documented in memcache.conf-sample

[filter:ratelimit]
use = egg:swift#ratelimit
# You can override the default log routing for this filter here:
# set log_name = ratelimit
# set log_facility = LOG_LOCAL0
# set log_level = INFO
# set log_headers = false
# set log_address = /dev/log
#
# clock_accuracy should represent how accurate the proxy servers' system
# clocks are with each other. 1000 means that all the proxies' clocks are
# accurate to each other within 1 millisecond.  No ratelimit should be
# higher than the clock accuracy.
# clock_accuracy = 1000
#
# max_sleep_time_seconds = 60
#
# log_sleep_time_seconds of 0 means disabled
# log_sleep_time_seconds = 0
#
# allows for slow rates (e.g. running up to 5 seconds behind) to catch up.
# rate_buffer_seconds = 5
#
# account_ratelimit of 0 means disabled
# account_ratelimit = 0

# DEPRECATED - these will continue to work but will be replaced
# by the X-Account-Sysmeta-Global-Write-Ratelimit flag.
# Please see ratelimiting docs for details.
# These are comma separated lists of account names:
# account_whitelist = a,b
# account_blacklist = c,d

# With container_ratelimit_x = r,
# for containers of size x, limit write requests per second to r.  The
# container rate will be linearly interpolated from the values given. With
# the values below, a container of size 5 will get a rate of 75.
# container_ratelimit_0 = 100
# container_ratelimit_10 = 50
# container_ratelimit_50 = 20

# Similarly to the above container-level write limits, the following will limit
# container GET (listing) requests.
# container_listing_ratelimit_0 = 100
# container_listing_ratelimit_10 = 50
# container_listing_ratelimit_50 = 20

[filter:read_only]
use = egg:swift#read_only
# Set read_only to true to turn global read-only on.
# read_only = false
# Set allow_deletes to true to still allow deletes.
# allow_deletes = false
# Note: Put after ratelimit in the pipeline.

# Note: needs to be placed before listing_formats;
# otherwise remapped listings will always be JSON
[filter:domain_remap]
use = egg:swift#domain_remap
# You can override the default log routing for this filter here:
# set log_name = domain_remap
# set log_facility = LOG_LOCAL0
# set log_level = INFO
# set log_headers = false
# set log_address = /dev/log
#
# Specify the storage_domain that matches your cloud; multiple domains
# can be specified, separated by a comma
# storage_domain = example.com

# Specify a root path part that will be added to the start of paths if not
# already present.
# path_root = v1

# Browsers can convert a host header to lowercase, so check that the reseller
# prefix on the account is the correct case. This is done by comparing the
# items in the reseller_prefixes config option to the found prefix. If they
# match except for case, the item from reseller_prefixes will be used
# instead of the found reseller prefix. When none match, the default reseller
# prefix is used. When no default reseller prefix is configured, any request
# with an account prefix not in that list will be ignored by this middleware.
# reseller_prefixes = AUTH
# default_reseller_prefix =

# Enable legacy remapping behavior for versioned path requests:
#   c.a.example.com/v1/o -> /v1/AUTH_a/c/o
# instead of
#   c.a.example.com/v1/o -> /v1/AUTH_a/c/v1/o
# ... by default all path parts after a remapped domain are considered part of
# the object name with no special case for the path "v1"
# mangle_client_paths = False

[filter:catch_errors]
use = egg:swift#catch_errors
# You can override the default log routing for this filter here:
# set log_name = catch_errors
# set log_facility = LOG_LOCAL0
# set log_level = INFO
# set log_headers = false
# set log_address = /dev/log

[filter:cname_lookup]
# Note: this middleware requires python-dnspython
use = egg:swift#cname_lookup
# You can override the default log routing for this filter here:
# set log_name = cname_lookup
# set log_facility = LOG_LOCAL0
# set log_level = INFO
# set log_headers = false
# set log_address = /dev/log
#
# Specify the storage_domain that matches your cloud; multiple domains
# can be specified, separated by a comma
# storage_domain = example.com
#
# lookup_depth = 1
#
# Specify the nameservers to use to do the CNAME resolution. If unset, the
# system configuration is used. Multiple nameservers can be specified
# separated by a comma. Default port 53 can be overridden. IPv6 is accepted.
# Example: 127.0.0.1, 127.0.0.2, 127.0.0.3:5353, [::1], [::1]:5353
# nameservers =

# Note: Put staticweb just after your auth filter(s) in the pipeline
[filter:staticweb]
use = egg:swift#staticweb
# You can override the default log routing for this filter here:
# set log_name = staticweb
# set log_facility = LOG_LOCAL0
# set log_level = INFO
# set log_headers = false
# set log_address = /dev/log
#
# At times when it's impossible for staticweb to guess the outside
# endpoint correctly, the url_base may be used to supply the URL
# scheme and/or the host name (and port number) in order to generate
# redirects.
# Example values:
#    http://www.example.com    - redirect to www.example.com
#    https:                    - changes the scheme only
#    https://                  - same, changes the scheme only
#    //www.example.com:8080    - redirect www.example.com on port 8080
#                                (scheme unchanged)
# url_base =

# Note: Put tempurl before dlo, slo and your auth filter(s) in the pipeline
[filter:tempurl]
use = egg:swift#tempurl
# The methods allowed with Temp URLs.
# methods = GET HEAD PUT POST DELETE
#
# The headers to remove from incoming requests: a whitespace-delimited list
# of header names; names can optionally end with '*' to indicate a prefix
# match. incoming_allow_headers is a list of exceptions to these removals.
# incoming_remove_headers = x-timestamp
#
# The headers allowed as exceptions to incoming_remove_headers: a
# whitespace-delimited list of header names; names can optionally end with
# '*' to indicate a prefix match.
# incoming_allow_headers =
#
# The headers to remove from outgoing responses: a whitespace-delimited list
# of header names; names can optionally end with '*' to indicate a prefix
# match. outgoing_allow_headers is a list of exceptions to these removals.
# outgoing_remove_headers = x-object-meta-*
#
# The headers allowed as exceptions to outgoing_remove_headers: a
# whitespace-delimited list of header names; names can optionally end with
# '*' to indicate a prefix match.
# outgoing_allow_headers = x-object-meta-public-*
#
# The digest algorithm(s) supported for generating signatures;
# whitespace-delimited.
# allowed_digests = sha1 sha256 sha512
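#
# As an illustrative sketch (see the tempurl middleware docs for details),
# a temp URL signature is an HMAC, using one of the digests above and a
# temp-url key set on the account or container, over the string
# "<method>\n<expires>\n<path>", sent as query parameters, e.g.:
#   /v1/AUTH_account/c/o?temp_url_sig=<hexdigest>&temp_url_expires=<unix_time>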

# Note: Put formpost just before your auth filter(s) in the pipeline
[filter:formpost]
use = egg:swift#formpost

# Note: Just needs to be placed before the proxy-server in the pipeline.
[filter:name_check]
use = egg:swift#name_check
# forbidden_chars = '"`<>
# maximum_length = 255
# forbidden_regexp = /\./|/\.\./|/\.$|/\.\.$

# Note: Etag quoter should be placed just after cache in the pipeline.
[filter:etag-quoter]
use = egg:swift#etag_quoter
# Historically, Swift has emitted bare MD5 hex digests as ETags, which is not
# RFC compliant. With this middleware in the pipeline, users can opt-in to
# RFC-compliant ETags on a per-account or per-container basis.
#
# Set to true to enable RFC-compliant ETags cluster-wide by default. Users
# can still opt-out by setting appropriate account or container metadata.
# enable_by_default = false
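#
# For example (assuming the X-Account-Rfc-Compliant-Etags and
# X-Container-Rfc-Compliant-Etags metadata headers), a user could opt a
# single container in with a POST to the container that sets:
#   X-Container-Rfc-Compliant-Etags: true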

[filter:list-endpoints]
use = egg:swift#list_endpoints
# list_endpoints_path = /endpoints/

[filter:proxy-logging]
 | 
						|
use = egg:swift#proxy_logging
 | 
						|
# If not set, logging directives from [DEFAULT] without "access_" will be used
 | 
						|
# access_log_name = swift
 | 
						|
# access_log_facility = LOG_LOCAL0
 | 
						|
# access_log_level = INFO
 | 
						|
# access_log_address = /dev/log
 | 
						|
#
 | 
						|
# Log route for this filter. Useful if you want to have different configs for
 | 
						|
# the two proxy-logging filters.
 | 
						|
# access_log_route = proxy-server
 | 
						|
#
 | 
						|
# If set, access_log_udp_host will override access_log_address
 | 
						|
# access_log_udp_host =
 | 
						|
# access_log_udp_port = 514
 | 
						|
#
 | 
						|
# You can use log_statsd_* from [DEFAULT] or override them here:
 | 
						|
# access_log_statsd_host =
 | 
						|
# access_log_statsd_port = 8125
 | 
						|
# access_log_statsd_default_sample_rate = 1.0
 | 
						|
# access_log_statsd_sample_rate_factor = 1.0
 | 
						|
# access_log_statsd_metric_prefix =
 | 
						|
# access_log_headers = false
 | 
						|
#
 | 
						|
# If access_log_headers is True and access_log_headers_only is set only
 | 
						|
# these headers are logged. Multiple headers can be defined as comma separated
 | 
						|
# list like this: access_log_headers_only = Host, X-Object-Meta-Mtime
 | 
						|
# access_log_headers_only =
 | 
						|
#
 | 
						|
# The default log format includes several sensitive values in logs:
#   * X-Auth-Token header
#   * temp_url_sig query parameter
#   * Authorization header
#   * X-Amz-Signature query parameter
# To prevent unauthorized access of the log file from leading to unauthorized
# access of cluster data, only a portion of these values is written, with the
# remainder replaced by '...' in the log. Set reveal_sensitive_prefix to the
# number of characters to log.  Set to 0 to suppress the values entirely; set
# to something large (1000, say) to write full values. Note that some values
# may start appearing in full at values as low as 33.
# reveal_sensitive_prefix = 16
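#
# For illustration (the token value below is invented): with the default of
# 16, a header value of "AUTH_tk0123456789abcdef0123456789abcdef" would be
# written to the log as "AUTH_tk012345678...".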
#
# What HTTP methods are allowed for StatsD logging (comma-sep); request methods
# not in this list will have "BAD_METHOD" for the <verb> portion of the metric.
# log_statsd_valid_http_methods = GET,HEAD,POST,PUT,DELETE,COPY,OPTIONS
#
# Note: The double proxy-logging in the pipeline is not a mistake. The
# left-most proxy-logging is there to log requests that were handled in
# middleware and never made it through to the right-most middleware (and
# proxy server). Double logging is prevented for normal requests. See
# proxy-logging docs.
#
# Hashing algorithm for log anonymization. Must be one of the algorithms
# supported by Python's hashlib.
# log_anonymization_method = MD5
#
# Salt added during log anonymization
# log_anonymization_salt =
#
# Template used to format access logs. All words surrounded by curly brackets
# will be substituted with the appropriate values.
# log_msg_template = {client_ip} {remote_addr} {end_time.datetime} {method} {path} {protocol} {status_int} {referer} {user_agent} {auth_token} {bytes_recvd} {bytes_sent} {client_etag} {transaction_id} {headers} {request_time} {source} {log_info} {start_time} {end_time} {policy_index}
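#
# For example, a shorter template using a subset of the fields above might be:
# log_msg_template = {client_ip} {method} {path} {status_int} {transaction_id}
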
# Note: Put before both ratelimit and auth in the pipeline.
[filter:bulk]
use = egg:swift#bulk
# max_containers_per_extraction = 10000
# max_failed_extractions = 1000
# max_deletes_per_request = 10000
# max_failed_deletes = 1000
#
# In order to keep a connection active during a potentially long bulk request,
# Swift may return whitespace prepended to the actual response body. This
# whitespace will be yielded no more than every yield_frequency seconds.
# yield_frequency = 10
#
# Note: The following parameter is used during a bulk delete of objects and
# their container. Without retries, deleting the container would frequently
# fail, because it is very likely that not all replicated objects have been
# deleted by the time the middleware gets a successful response. Set this to
# the number of retries to attempt; the delay before each retry is 1.5**retry
# seconds.
# delete_container_retry_count = 0
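#
# For example (delays computed from the 1.5**retry rule above, assuming the
# retry counter starts at 1): with delete_container_retry_count = 3, the
# middleware would wait roughly 1.5, 2.25 and 3.375 seconds before the first,
# second and third retries.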
#
# To speed up the bulk delete process, multiple deletes may be executed in
# parallel. Avoid setting this too high, as it gives clients a force multiplier
# which may be used in DoS attacks. The suggested range is between 2 and 10.
# delete_concurrency = 2

# Note: Put after auth and staticweb in the pipeline.
[filter:slo]
use = egg:swift#slo
# max_manifest_segments = 1000
# max_manifest_size = 8388608
#
# Rate limiting applies only to segments smaller than this size (bytes).
# rate_limit_under_size = 1048576
#
# Start rate-limiting SLO segment serving after the Nth small segment of a
# segmented object.
# rate_limit_after_segment = 10
#
# Once segment rate-limiting kicks in for an object, limit segments served
# to N per second. 0 means no rate-limiting.
# rate_limit_segments_per_sec = 1
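#
# Worked example using the defaults above: on a GET of an SLO whose segments
# are all smaller than rate_limit_under_size, the first 10 segments are served
# as fast as possible, after which serving is limited to roughly one segment
# per second.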
#
# Time limit on GET requests (seconds)
# max_get_time = 86400
#
# When creating an SLO, multiple segment validations may be executed in
# parallel. Further, multiple deletes may be executed in parallel when deleting
# with ?multipart-manifest=delete. Use this setting to limit how many
# subrequests may be executed concurrently. Avoid setting it too high, as it
# gives clients a force multiplier which may be used in DoS attacks. The
# suggested range is between 2 and 10.
# concurrency = 2
#
# This may be used to separately tune validation and delete concurrency values.
# Default is to use the concurrency value from above; all of the same caveats
# apply regarding recommended ranges.
# delete_concurrency = 2
#
# In order to keep a connection active during a potentially long PUT request,
# clients may request that Swift send whitespace ahead of the final response
# body. This whitespace will be yielded at most every yield_frequency seconds.
# yield_frequency = 10
#
# Since SLOs may have thousands of segments, clients may request that the
# object-expirer handle the deletion of segments using query params like
# `?multipart-manifest=delete&async=on`. You may want to keep this off if it
# negatively impacts your expirers; in that case, the deletes will still
# be done as part of the client request.
# allow_async_delete = false

# Note: Put after auth and staticweb in the pipeline.
# If you don't put it in the pipeline, it will be inserted for you.
[filter:dlo]
use = egg:swift#dlo
# Start rate-limiting DLO segment serving after the Nth segment of a
# segmented object.
# rate_limit_after_segment = 10
#
# Once segment rate-limiting kicks in for an object, limit segments served
# to N per second. 0 means no rate-limiting.
# rate_limit_segments_per_sec = 1
#
# Time limit on GET requests (seconds)
# max_get_time = 86400

# Note: Put after auth in the pipeline.
[filter:container-quotas]
use = egg:swift#container_quotas

# Note: Put after auth in the pipeline.
[filter:account-quotas]
use = egg:swift#account_quotas

[filter:gatekeeper]
use = egg:swift#gatekeeper
# Set this to false if you want to allow clients to set arbitrary X-Timestamps
# on uploaded objects. This may be used to preserve timestamps when migrating
# from a previous storage system, but risks allowing users to upload
# difficult-to-delete data.
# shunt_inbound_x_timestamp = true
#
# Set this to true if you want to allow clients to access and manipulate the
# (normally internal-to-swift) null namespace by including a header like
#    X-Allow-Reserved-Names: true
# allow_reserved_names_header = false
#
# You can override the default log routing for this filter here:
# set log_name = gatekeeper
# set log_facility = LOG_LOCAL0
# set log_level = INFO
# set log_headers = false
# set log_address = /dev/log

[filter:container_sync]
use = egg:swift#container_sync
# Set this to false if you want to disallow any full URL values to be set for
# any new X-Container-Sync-To headers. This will keep any new full URLs from
# coming in, but won't change any existing values already in the cluster.
# Updating those will have to be done manually, as the true realm endpoint
# cannot always be guessed.
# allow_full_urls = true
# Set this to specify this cluster's //realm/cluster as "current" in /info
# current = //REALM/CLUSTER

# Note: Put it at the beginning of the pipeline to profile all middleware. But
# it is safer to put this after catch_errors, gatekeeper and healthcheck.
[filter:xprofile]
use = egg:swift#xprofile
# This option enables you to switch profilers; the profiler used should
# inherit from Python's standard profiler. Currently supported values include
# 'cProfile' and 'eventlet.green.profile'.
# profile_module = eventlet.green.profile
#
# This prefix will be used to combine process ID and timestamp to name the
# profile data file.  Make sure the executing user has permission to write
# into this path (missing path segments will be created, if necessary).
# If you enable profiling in more than one type of daemon, you must override
# it with a unique value like: /var/log/swift/profile/proxy.profile
# log_filename_prefix = /tmp/log/swift/profile/default.profile
#
# The profile data will be dumped to local disk at this interval (seconds),
# using the naming rule above.
# dump_interval = 5.0
#
# Be careful: this option makes the profiler dump data into files named with
# a timestamp, which means lots of files will pile up in the directory.
# dump_timestamp = false
#
# This is the path of the URL to access the mini web UI.
# path = /__profile__
#
# Clear the data when the WSGI server shuts down.
# flush_at_shutdown = false
#
# Unwind the iterator of applications.
# unwind = false

# Note: Put after slo, dlo in the pipeline.
# If you don't put it in the pipeline, it will be inserted automatically.
[filter:versioned_writes]
use = egg:swift#versioned_writes
# Enables using versioned writes middleware and exposing configuration
# settings via HTTP GET /info.
# WARNING: Setting this option bypasses the "allow_versions" option
# in the container configuration file, which will eventually be
# deprecated. See documentation for more details.
# allow_versioned_writes = false
# Enables Swift object-versioning API
# allow_object_versioning = false

# Note: Put after auth and before dlo and slo middlewares.
# If you don't put it in the pipeline, it will be inserted for you.
[filter:copy]
use = egg:swift#copy

# Note: To enable encryption, add the following 2 dependent pieces of crypto
# middleware to the proxy-server pipeline. They should be to the right of all
# other middleware apart from the final proxy-logging middleware, and in the
# order shown in this example:
# <other middleware> keymaster encryption proxy-logging proxy-server
[filter:keymaster]
use = egg:swift#keymaster

# Over time, the format of crypto metadata on disk may change slightly to
# resolve ambiguities. In general, you want to be writing the newest version,
# but to ensure that all writes can still be read during rolling upgrades,
# there's the option to write older formats as well.
# Before upgrading from Swift 2.20.0 or Swift 2.19.1 or earlier, ensure this
# is set to 1.
# Before upgrading from Swift 2.25.0 or earlier, ensure this is set to at
# most 2.
# After upgrading all proxy servers, set this to 3 (currently the highest
# version).
#
# The default is currently 2 to support upgrades with no configuration changes,
# but may change to 3 in the future.
meta_version_to_write = 2

# Sets the root secret from which encryption keys are derived. This must be set
# before first use to a value that is a base64 encoding of at least 32 bytes.
# The security of all encrypted data critically depends on this key, therefore
# it should be set to a high-entropy value. For example, a suitable value may
# be obtained by base-64 encoding a 32 byte (or longer) value generated by a
# cryptographically secure random number generator. Changing the root secret is
# likely to result in data loss.
encryption_root_secret = changeme
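#
# As a sketch of one way to generate such a value (assuming the openssl CLI
# is available; any cryptographically secure source of 32+ random bytes,
# base64-encoded, works just as well):
#   openssl rand -base64 32
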
# Multiple root secrets may be configured using options named
# 'encryption_root_secret_<secret_id>' where 'secret_id' is a unique
# identifier. This enables the root secret to be changed from time to time.
# Only one root secret is used for object PUTs or POSTs at any moment in time.
# This is specified by the 'active_root_secret_id' option. If
# 'active_root_secret_id' is not specified then the root secret specified by
# 'encryption_root_secret' is considered to be the default. Once a root secret
# has been used as the default root secret it must remain in the config file in
# order that any objects that were encrypted with it may be subsequently
# decrypted. The secret_id used to identify the key cannot change.
# encryption_root_secret_myid = changeme
# active_root_secret_id = myid

# Sets the path from which the keymaster config options should be read. This
# allows multiple processes which need to be encryption-aware (for example,
# proxy-server and container-sync) to share the same config file, ensuring
# that the encryption keys used are the same. The format expected is similar
# to other config files, with a single [keymaster] section and a single
# encryption_root_secret option. If this option is set, the root secret
# MUST NOT be set in proxy-server.conf.
# keymaster_config_path =
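#
# A minimal sketch of such a shared config file (the path and secret value
# are placeholders):
#   # /etc/swift/keymaster.conf
#   [keymaster]
#   encryption_root_secret = <base64 encoding of 32+ random bytes>
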
# To store the encryption root secret in a remote key management system (KMS)
# such as Barbican, replace the keymaster middleware with the kms_keymaster
# middleware in the proxy-server pipeline. They should be to the right of all
# other middleware apart from the final proxy-logging middleware, and in the
# order shown in this example:
# <other middleware> kms_keymaster encryption proxy-logging proxy-server
[filter:kms_keymaster]
use = egg:swift#kms_keymaster

# Sets the path from which the keymaster config options should be read. This
# allows multiple processes which need to be encryption-aware (for example,
# proxy-server and container-sync) to share the same config file, ensuring
# that the encryption keys used are the same. The format expected is similar
# to other config files, with a single [kms_keymaster] section. See the
# keymaster.conf-sample file for details on the kms_keymaster configuration
# options.
# keymaster_config_path =

# kmip_keymaster middleware may be used to fetch an encryption root secret from
# a KMIP service. It should replace, in the same position, any other keymaster
# middleware in the proxy-server pipeline, so that the middleware order is as
# shown in this example:
# <other middleware> kmip_keymaster encryption proxy-logging proxy-server
[filter:kmip_keymaster]
use = egg:swift#kmip_keymaster

# Sets the path from which the keymaster config options should be read. This
# allows multiple processes which need to be encryption-aware (for example,
# proxy-server and container-sync) to share the same config file, ensuring
# that the encryption keys used are the same. As an added benefit, the
# keymaster configuration file can have different permissions than the
# `proxy-server.conf` file. The format expected is similar
# to other config files, with a single [kmip_keymaster] section. See the
# keymaster.conf-sample file for details on the kmip_keymaster configuration
# options.
# keymaster_config_path =

[filter:encryption]
use = egg:swift#encryption

# By default all PUT or POST'ed object data and/or metadata will be encrypted.
# Encryption of new data and/or metadata may be disabled by setting
# disable_encryption to True. However, all encryption middleware should remain
# in the pipeline in order for existing encrypted data to be read.
# disable_encryption = False

# listing_formats should be just right of the first proxy-logging middleware,
# and left of most other middlewares. If it is not already present, it will
# be automatically inserted for you.
[filter:listing_formats]
use = egg:swift#listing_formats

# Note: Put after slo, dlo, versioned_writes, but before encryption in the
# pipeline.
[filter:symlink]
use = egg:swift#symlink
# Symlinks can point to other symlinks provided the number of symlinks in a
# chain does not exceed the symloop_max value. If the number of chained
# symlinks exceeds symloop_max, a 409 (HTTPConflict) error response will be
# produced.
# symloop_max = 2
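#
# For example (object names invented), with the default symloop_max = 2, a GET
# of symlink A that points at symlink B that points at a regular object is
# served, while a GET of A -> B -> C, where C is a third symlink pointing at
# the object, exceeds the limit and returns 409.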