Added my initial work on ansible-swift.
I have tried to put the files where I think they will go:

    swift_setup.yml                               -> etc/rpc_deploy/
    roles swift_{common,account,container,object} -> rpc_deployment/roles
    swift_inventory                               -> scripts/swift_inventory.py

To generate a hosts file run:

    python swift_inventory.py -s etc/rpc_deploy/swift_setup.yml -d

NOTE: -d is dry-run, so it prints to screen; without it the inventory is
written wherever you specify in the swift_setup.yml file.
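Without -d, the output location comes from the swift section of the config
below (output_directory/output_filename); with the sample values that means
/tmp/swift/hosts. A sketch, assuming the script mirrors the flags above:

    # writes the generated inventory to /tmp/swift/hosts
    python swift_inventory.py -s etc/rpc_deploy/swift_setup.yml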
commit 31ce4e14a0 (parent 541e648088)
etc/rpc_deploy/swift_setup.yml (new file, 100 lines)
@@ -0,0 +1,100 @@
---
# This swift setup file is used as a simple way of managing your swift
# ring configuration. The swift_inventory binary will generate the ansible
# inventory file for you based on this file, giving you one place to manage
# your cluster.
#
# NOTE: The swift ansible configuration has other variables that can be
# tweaked; see group_vars/ and roles/*/vars/ for other areas to tweak.

local:
  ansible_connection=local

# Swift settings are global throughout the cluster.
swift:
  part_power: 8
  output_directory: /tmp/swift
  output_filename: hosts
  user: swift
  hash_path_suffix: changeme
  hash_path_prefix: changeme
  syslog_host: 10.1.1.1:514

proxy:
  memcache_servers:
    - 127.0.0.1:11211
  authtoken:
    active: true
    delay_auth_decision: true
    auth_version: v2.0
    auth_host: keystone.local.lan
    auth_port: 35357
    auth_protocol: https
    auth_uri: http://keystonehost:5000/
    admin_tenant_name: service
    admin_user: swift
    admin_password: ADMIN
  hosts:
    - host: 10.0.0.1

account:
  repl_number: 3
  hosts:
    - host: 10.0.0.2
      drive: /srv/disk
      region: 0
      zone: 0
      weight: 100

container:
  repl_number: 3
  hosts:
    - host: 10.0.0.3
      drive: /srv/disk
      region: 0
      zone: 0
      weight: 100

storage_policies:
  default: gold
  policies:
    - name: gold
      index: 0
      type: replication
      repl_number: 3
      hosts:
        - host: 10.0.0.4
          drive: /srv/disk
          region: 0
          zone: 0
          weight: 100
        - host: 10.0.0.5
          drive: /srv/disk
          region: 0
          zone: 1
          weight: 100
        - host: 10.0.0.6
          drive: /srv/disk
          region: 1
          zone: 0
          weight: 50
        - host: 10.0.0.7
          drive: /srv/disk
          region: 1
          zone: 1
          weight: 50
    - name: silver
      index: 1
      type: replication
      repl_number: 2
      deprecated: True
      hosts:
        - host: 10.0.0.4
          drive: /srv/disk
          region: 0
          zone: 0
          weight: 100
        - host: 10.0.0.5
          drive: /srv/disk
          region: 0
          zone: 1
          weight: 100
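Since swift_inventory.py itself is not part of this commit view, the exact
shape of the inventory it generates is an assumption. A plausible sketch,
inferred from the hosts above and from the group names the swift.conf.j2
template expects (groups starting with "storagepolicy"):

    # hypothetical generated hosts file (group names inferred, not confirmed)
    [proxy]
    10.0.0.1
    [account]
    10.0.0.2
    [container]
    10.0.0.3
    [storagepolicy_gold]
    10.0.0.4
    10.0.0.5
    10.0.0.6
    10.0.0.7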
rpc_deployment/roles/swift_account/handlers/main.yml (new file, 12 lines)
@@ -0,0 +1,12 @@
---
- name: (re)start account server
  command: swift-init account-server restart

- name: (re)start account auditor
  command: swift-init account-auditor restart

- name: (re)start account replicator
  command: swift-init account-replicator restart

- name: (re)start account reaper
  command: swift-init account-reaper restart
rpc_deployment/roles/swift_account/tasks/main.yml (new file, 20 lines)
@@ -0,0 +1,20 @@
---
- name: "swift account server configuration"
  template: src=account-server.conf.j2 dest=/etc/swift/account-server.conf owner={{ user }} mode=0644
  notify:
    - (re)start account server
    - (re)start account auditor
    - (re)start account replicator
    - (re)start account reaper

- name: "Set account server to start at boot"
  cron: special_time=reboot job="swift-init account-server start"

- name: "Set account auditor to start at boot"
  cron: special_time=reboot job="swift-init account-auditor start"

- name: "Set account replicator to start at boot"
  cron: special_time=reboot job="swift-init account-replicator start"

- name: "Set account reaper to start at boot"
  cron: special_time=reboot job="swift-init account-reaper start"
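For reference, the cron tasks above should result in root crontab entries of
roughly this shape (a sketch; Ansible's special_time=reboot maps to cron's
@reboot keyword):

    @reboot swift-init account-server start
    @reboot swift-init account-auditor start
    @reboot swift-init account-replicator start
    @reboot swift-init account-reaper start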
rpc_deployment/roles/swift_account/templates/account-server.conf.j2 (new file, 194 lines)
@@ -0,0 +1,194 @@
[DEFAULT]
bind_ip = {{ inventory_hostname }}
bind_port = 6002
# bind_timeout = 30
# backlog = 4096
user = {{ user }}
# swift_dir = /etc/swift
devices = {{ drive }}
# mount_check = true
# disable_fallocate = false
#
# Use an integer to override the number of pre-forked processes that will
# accept connections.
# workers = auto
#
# Maximum concurrent requests per worker
# max_clients = 1024
#
# You can specify default log routing here if you want:
# log_name = swift
# log_facility = LOG_LOCAL0
# log_level = INFO
# log_address = /dev/log
# The following caps the length of log lines to the value given; no limit if
# set to 0, the default.
# log_max_line_length = 0
#
# comma separated list of functions to call to setup custom log handlers.
# functions get passed: conf, name, log_to_console, log_route, fmt, logger,
# adapted_logger
# log_custom_handlers =
#
# If set, log_udp_host will override log_address
# log_udp_host =
# log_udp_port = 514
#
# You can enable StatsD logging here:
# log_statsd_host = localhost
# log_statsd_port = 8125
# log_statsd_default_sample_rate = 1.0
# log_statsd_sample_rate_factor = 1.0
# log_statsd_metric_prefix =
#
# If you don't mind the extra disk space usage in overhead, you can turn this
# on to preallocate disk space with SQLite databases to decrease fragmentation.
# db_preallocation = off
#
# eventlet_debug = false
#
# You can set fallocate_reserve to the number of bytes you'd like fallocate to
# reserve, whether there is space for the given file size or not.
# fallocate_reserve = 0

[pipeline:main]
pipeline = healthcheck recon account-server

[app:account-server]
use = egg:swift#account
log_facility = LOG_LOCAL1
# You can override the default log routing for this app here:
# set log_name = account-server
# set log_facility = LOG_LOCAL0
# set log_level = INFO
# set log_requests = true
# set log_address = /dev/log
#
# auto_create_account_prefix = .
#
# Configure parameter for creating specific server
# To handle all verbs, including replication verbs, do not specify
# "replication_server" (this is the default). To only handle replication,
# set to a True value (e.g. "True" or "1"). To handle only non-replication
# verbs, set to "False". Unless you have a separate replication network, you
# should not specify any value for "replication_server".
# replication_server = false

[filter:healthcheck]
use = egg:swift#healthcheck
# An optional filesystem path, which if present, will cause the healthcheck
# URL to return "503 Service Unavailable" with a body of "DISABLED BY FILE"
# disable_path =

[filter:recon]
use = egg:swift#recon
log_facility = LOG_LOCAL2
recon_cache_path = /var/cache/swift
recon_lock_path = /var/lock/swift

[account-replicator]
# You can override the default log routing for this app here (don't use set!):
# log_name = account-replicator
log_facility = LOG_LOCAL2
# log_level = INFO
# log_address = /dev/log
#
# vm_test_mode = no
per_diff = 10000
# max_diffs = 100
# concurrency = 8
# interval = 30
#
# How long without an error before a node's error count is reset. This will
# also be how long before a node is reenabled after suppression is triggered.
# error_suppression_interval = 60
#
# How many errors can accumulate before a node is temporarily ignored.
# error_suppression_limit = 10
#
# node_timeout = 10
# conn_timeout = 0.5
#
# The replicator also performs reclamation
# reclaim_age = 604800
#
# Time in seconds to wait between replication passes
# Note: if the parameter 'interval' is defined then it will be used in place
# of run_pause.
# run_pause = 30
#
# recon_cache_path = /var/cache/swift

[account-auditor]
# You can override the default log routing for this app here (don't use set!):
# log_name = account-auditor
log_facility = LOG_LOCAL2
# log_level = INFO
# log_address = /dev/log
#
# Will audit each account at most once per interval
# interval = 1800
#
# log_facility = LOG_LOCAL0
# log_level = INFO
# accounts_per_second = 200
# recon_cache_path = /var/cache/swift

[account-reaper]
# You can override the default log routing for this app here (don't use set!):
# log_name = account-reaper
log_facility = LOG_LOCAL2
# log_level = INFO
# log_address = /dev/log
#
# concurrency = 25
# interval = 3600
# node_timeout = 10
# conn_timeout = 0.5
#
# Normally, the reaper begins deleting account information for deleted accounts
# immediately; you can set this to delay its work however. The value is in
# seconds; 2592000 = 30 days for example.
delay_reaping = 604800
#
# If the account fails to be reaped due to a persistent error, the
# account reaper will log a message such as:
# Account <name> has not been reaped since <date>
# You can search logs for this message if space is not being reclaimed
# after you delete account(s).
# Default is 2592000 seconds (30 days). This is in addition to any time
# requested by delay_reaping.
# reap_warn_after = 2592000

# Note: Put it at the beginning of the pipeline to profile all middleware. But
# it is safer to put this after healthcheck.
[filter:xprofile]
use = egg:swift#xprofile
# This option enables you to switch profilers, which should inherit from the
# python standard profiler. Currently the supported value can be 'cProfile',
# 'eventlet.green.profile' etc.
# profile_module = eventlet.green.profile
#
# This prefix will be used to combine process ID and timestamp to name the
# profile data file. Make sure the executing user has permission to write
# into this path (missing path segments will be created, if necessary).
# If you enable profiling in more than one type of daemon, you must override
# it with a unique value like: /var/log/swift/profile/account.profile
# log_filename_prefix = /tmp/log/swift/profile/default.profile
#
# the profile data will be dumped to local disk based on above naming rule
# in this interval.
# dump_interval = 5.0
#
# Be careful, this option will enable the profiler to dump data into the file
# with a timestamp, which means there will be lots of files piled up in the
# directory.
# dump_timestamp = false
#
# This is the path of the URL to access the mini web UI.
# path = /__profile__
#
# Clear the data when the wsgi server shuts down.
# flush_at_shutdown = false
#
# unwind the iterator of applications
# unwind = false
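The log_facility values above (LOG_LOCAL1/LOG_LOCAL2), together with the
syslog_host key in swift_setup.yml, suggest the logs are meant to be forwarded
via syslog. A minimal sketch, assuming rsyslog on the storage nodes (no such
config ships in this commit):

    # /etc/rsyslog.d/swift.conf (hypothetical)
    local1.*;local2.*  @10.1.1.1:514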
rpc_deployment/roles/swift_account/vars/main.yml (new file, 1 line)
@@ -0,0 +1 @@
---
rpc_deployment/roles/swift_common/handlers/main.yml (new file, 6 lines)
@@ -0,0 +1,6 @@
---
- name: "Install swift dependencies"
  pip: requirements=/opt/swift/swift/requirements.txt

- name: "Install swift"
  shell: chdir=/opt/swift/swift python setup.py install
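These two handlers are equivalent to running the following by hand (a sketch
of what the pip and shell modules do here):

    pip install -r /opt/swift/swift/requirements.txt
    cd /opt/swift/swift && python setup.py install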
rpc_deployment/roles/swift_common/tasks/debian.yml (new file, 4 lines)
@@ -0,0 +1,4 @@
---
- name: "Install Debian/Ubuntu common swift packages"
  apt: pkg={{ item }} state=installed update_cache=yes
  with_items: "{{ swift_common_packages.debian }}"
rpc_deployment/roles/swift_common/tasks/main.yml (new file, 34 lines)
@@ -0,0 +1,34 @@
---
- name: "Setup hosts file"
  lineinfile: dest=/etc/hosts regexp='.*{{ item }}$' line="{{ hostvars[item].ansible_default_ipv4.address }} {{ item }}" state=present
  when: hostvars[item].ansible_default_ipv4.address is defined
  with_items: "{{ groups['all'] }}"

# If target is a debian/ubuntu system
- include: debian.yml
  when: ansible_os_family == 'Debian'

# If target is a centos/redhat system
- include: redhat.yml
  when: ansible_os_family == 'RedHat'

- name: "Create a swift group"
  group: name={{ user }} state=present system=yes

- name: "Create a swift user"
  user: name={{ user }} state=present shell=/bin/false home=/opt/swift system=yes

- name: "Checkout git repo"
  git: repo={{ swift_repo }} version={{ swift_repo_tag }} dest=/opt/swift/swift update=yes
  notify:
    - Install swift dependencies
    - Install swift

- name: "Swift directory"
  file: path=/etc/swift owner={{ user }} group={{ user }} state=directory

- name: "Swift lock directory"
  file: path=/var/lock/swift owner={{ user }} group={{ user }} state=directory

- name: "Swift log directory"
  file: path=/var/log/swift owner={{ user }} group=adm state=directory
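For a host named e.g. swift-object-01 (hypothetical name) whose default IPv4
address is 10.0.0.4, the lineinfile task above yields an /etc/hosts entry of
this shape:

    10.0.0.4 swift-object-01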
rpc_deployment/roles/swift_common/tasks/redhat.yml (new file, 5 lines)
@@ -0,0 +1,5 @@
---

- name: "Install RedHat/CentOS common swift packages"
  yum: pkg={{ item }} state=installed
  with_items: "{{ swift_common_packages.redhat }}"
rpc_deployment/roles/swift_common/templates/swift.conf.j2 (new file, 97 lines)
@@ -0,0 +1,97 @@
[swift-hash]

# swift_hash_path_suffix and swift_hash_path_prefix are used as part of the
# hashing algorithm when determining data placement in the cluster.
# These values should remain secret and MUST NOT change
# once a cluster has been deployed.

swift_hash_path_suffix = {{ hash_path_suffix }}
swift_hash_path_prefix = {{ hash_path_prefix }}

# Storage Policies
{% for group in groups %}
{% if group.startswith("storagepolicy") %}
{% set vars = hostvars[groups[group][0]] %}
[storage-policy:{{ vars["index"] }}]
name = {{ vars["policy_name"] }}
{% if "deprecated" in vars %}
deprecated = {{ vars["deprecated"] }}
{% endif %}
{% if "default" in vars %}
default = {{ vars["default"] }}
{% endif %}
{% endif %}
{% endfor %}

[swift-constraints]

# max_file_size is the largest "normal" object that can be saved in
# the cluster. This is also the limit on the size of each segment of
# a "large" object when using the large object manifest support.
# This value is set in bytes. Setting it to lower than 1MiB will cause
# some tests to fail. It is STRONGLY recommended to leave this value at
# the default (5 * 2**30 + 2).

#max_file_size = 5368709122


# max_meta_name_length is the max number of bytes in the utf8 encoding
# of the name portion of a metadata header.

#max_meta_name_length = 128


# max_meta_value_length is the max number of bytes in the utf8 encoding
# of a metadata value

#max_meta_value_length = 256


# max_meta_count is the max number of metadata keys that can be stored
# on a single account, container, or object

#max_meta_count = 90


# max_meta_overall_size is the max number of bytes in the utf8 encoding
# of the metadata (keys + values)

#max_meta_overall_size = 4096

# max_header_size is the max number of bytes in the utf8 encoding of each
# header. Using 8192 as default because eventlet uses 8192 as max size of
# header line. This value may need to be increased when using identity
# v3 API tokens including more than 7 catalog entries.
# See also include_service_catalog in proxy-server.conf-sample
# (documented in overview_auth.rst)

#max_header_size = 8192


# max_object_name_length is the max number of bytes in the utf8 encoding
# of an object name

#max_object_name_length = 1024


# container_listing_limit is the default (and max) number of items
# returned for a container listing request

#container_listing_limit = 10000


# account_listing_limit is the default (and max) number of items returned
# for an account listing request
#account_listing_limit = 10000


# max_account_name_length is the max number of bytes in the utf8 encoding
# of an account name

#max_account_name_length = 256


# max_container_name_length is the max number of bytes in the utf8 encoding
# of a container name

#max_container_name_length = 256
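With the gold/silver policies from swift_setup.yml, the storage-policy loop
above is intended to render sections along these lines (a sketch; it assumes
swift_inventory.py exposes index, policy_name, default and deprecated as
hostvars on the storagepolicy groups, which is not shown in this commit):

    [storage-policy:0]
    name = gold
    default = True

    [storage-policy:1]
    name = silver
    deprecated = True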
rpc_deployment/roles/swift_common/vars/main.yml (new file, 22 lines)
@@ -0,0 +1,22 @@
---
swift_repo: https://github.com/openstack/swift.git
swift_repo_tag: master

swift_common_packages:
  debian:
    - curl
    - python-pip
    - rsync
    - openssh-server
    - git-core
    - python-setuptools
    - python-dev
    - gcc

  redhat:
    - curl
    - gcc
    - rsync
    - git-core
    - python-setuptools
    - python-pip
rpc_deployment/roles/swift_container/handlers/main.yml (new file, 12 lines)
@@ -0,0 +1,12 @@
---
- name: (re)start container server
  command: swift-init container-server restart

- name: (re)start container auditor
  command: swift-init container-auditor restart

- name: (re)start container replicator
  command: swift-init container-replicator restart

- name: (re)start container updater
  command: swift-init container-updater restart
rpc_deployment/roles/swift_container/tasks/main.yml (new file, 20 lines)
@@ -0,0 +1,20 @@
---
- name: "swift container server configuration"
  template: src=container-server.conf.j2 dest=/etc/swift/container-server.conf owner={{ user }} mode=0644
  notify:
    - (re)start container server
    - (re)start container auditor
    - (re)start container replicator
    - (re)start container updater

- name: "Set container server to start at boot"
  cron: special_time=reboot job="swift-init container-server start"

- name: "Set container auditor to start at boot"
  cron: special_time=reboot job="swift-init container-auditor start"

- name: "Set container replicator to start at boot"
  cron: special_time=reboot job="swift-init container-replicator start"

- name: "Set container updater to start at boot"
  cron: special_time=reboot job="swift-init container-updater start"
rpc_deployment/roles/swift_container/templates/container-server.conf.j2 (new file, 205 lines)
@@ -0,0 +1,205 @@
[DEFAULT]
bind_ip = {{ inventory_hostname }}
bind_port = 6001
# bind_timeout = 30
# backlog = 4096
user = {{ user }}
# swift_dir = /etc/swift
devices = {{ drive }}
# mount_check = true
# disable_fallocate = false
#
# Use an integer to override the number of pre-forked processes that will
# accept connections.
# workers = auto
#
# Maximum concurrent requests per worker
# max_clients = 1024
#
# This is a comma separated list of hosts allowed in the X-Container-Sync-To
# field for containers. This is the old-style of using container sync. It is
# strongly recommended to use the new style of a separate
# container-sync-realms.conf -- see container-sync-realms.conf-sample
# allowed_sync_hosts = 127.0.0.1
#
# You can specify default log routing here if you want:
# log_name = swift
# log_facility = LOG_LOCAL0
# log_level = INFO
# log_address = /dev/log
# The following caps the length of log lines to the value given; no limit if
# set to 0, the default.
# log_max_line_length = 0
#
# comma separated list of functions to call to setup custom log handlers.
# functions get passed: conf, name, log_to_console, log_route, fmt, logger,
# adapted_logger
# log_custom_handlers =
#
# If set, log_udp_host will override log_address
# log_udp_host =
# log_udp_port = 514
#
# You can enable StatsD logging here:
# log_statsd_host = localhost
# log_statsd_port = 8125
# log_statsd_default_sample_rate = 1.0
# log_statsd_sample_rate_factor = 1.0
# log_statsd_metric_prefix =
#
# If you don't mind the extra disk space usage in overhead, you can turn this
# on to preallocate disk space with SQLite databases to decrease fragmentation.
# db_preallocation = off
#
# eventlet_debug = false
#
# You can set fallocate_reserve to the number of bytes you'd like fallocate to
# reserve, whether there is space for the given file size or not.
# fallocate_reserve = 0

[pipeline:main]
pipeline = healthcheck recon container-server

[app:container-server]
use = egg:swift#container
log_facility = LOG_LOCAL1
# You can override the default log routing for this app here:
# set log_name = container-server
# set log_facility = LOG_LOCAL0
# set log_level = INFO
# set log_requests = true
# set log_address = /dev/log
#
# node_timeout = 3
# conn_timeout = 0.5
# allow_versions = false
# auto_create_account_prefix = .
#
# Configure parameter for creating specific server
# To handle all verbs, including replication verbs, do not specify
# "replication_server" (this is the default). To only handle replication,
# set to a True value (e.g. "True" or "1"). To handle only non-replication
# verbs, set to "False". Unless you have a separate replication network, you
# should not specify any value for "replication_server".
# replication_server = false

[filter:healthcheck]
use = egg:swift#healthcheck
# An optional filesystem path, which if present, will cause the healthcheck
# URL to return "503 Service Unavailable" with a body of "DISABLED BY FILE"
# disable_path =

[filter:recon]
use = egg:swift#recon
log_facility = LOG_LOCAL2
recon_cache_path = /var/cache/swift
recon_lock_path = /var/lock/swift

[container-replicator]
# You can override the default log routing for this app here (don't use set!):
# log_name = container-replicator
log_facility = LOG_LOCAL2
# log_level = INFO
# log_address = /dev/log
#
# vm_test_mode = no
# per_diff = 1000
# max_diffs = 100
# concurrency = 8
# interval = 30
# node_timeout = 10
# conn_timeout = 0.5
#
# The replicator also performs reclamation
# reclaim_age = 604800
#
# Time in seconds to wait between replication passes
# Note: if the parameter 'interval' is defined then it will be used in place
# of run_pause.
# run_pause = 30
#
# recon_cache_path = /var/cache/swift

[container-updater]
# You can override the default log routing for this app here (don't use set!):
# log_name = container-updater
log_facility = LOG_LOCAL2
# log_level = INFO
# log_address = /dev/log
#
# interval = 300
# concurrency = 4
node_timeout = 15
conn_timeout = 5
#
# slowdown will sleep that amount between containers
# slowdown = 0.01
#
# Seconds to suppress updating an account that has generated an error
# account_suppression_time = 60
#
# recon_cache_path = /var/cache/swift

[container-auditor]
# You can override the default log routing for this app here (don't use set!):
# log_name = container-auditor
log_facility = LOG_LOCAL2
# log_level = INFO
# log_address = /dev/log
#
# Will audit each container at most once per interval
# interval = 1800
#
# containers_per_second = 200
# recon_cache_path = /var/cache/swift

[container-sync]
# You can override the default log routing for this app here (don't use set!):
# log_name = container-sync
# log_facility = LOG_LOCAL0
# log_level = INFO
# log_address = /dev/log
#
# If you need to use an HTTP Proxy, set it here; defaults to no proxy.
# You can also set this to a comma separated list of HTTP Proxies and they will
# be randomly used (simple load balancing).
# sync_proxy = http://10.1.1.1:8888,http://10.1.1.2:8888
#
# Will sync each container at most once per interval
# interval = 300
#
# Maximum amount of time to spend syncing each container per pass
# container_time = 60

# Note: Put it at the beginning of the pipeline to profile all middleware. But
# it is safer to put this after healthcheck.
[filter:xprofile]
use = egg:swift#xprofile
# This option enables you to switch profilers, which should inherit from the
# python standard profiler. Currently the supported value can be 'cProfile',
# 'eventlet.green.profile' etc.
# profile_module = eventlet.green.profile
#
# This prefix will be used to combine process ID and timestamp to name the
# profile data file. Make sure the executing user has permission to write
# into this path (missing path segments will be created, if necessary).
# If you enable profiling in more than one type of daemon, you must override
# it with a unique value like: /var/log/swift/profile/container.profile
# log_filename_prefix = /tmp/log/swift/profile/default.profile
#
# the profile data will be dumped to local disk based on above naming rule
# in this interval.
# dump_interval = 5.0
#
# Be careful, this option will enable the profiler to dump data into the file
# with a timestamp, which means there will be lots of files piled up in the
# directory.
# dump_timestamp = false
#
# This is the path of the URL to access the mini web UI.
# path = /__profile__
#
# Clear the data when the wsgi server shuts down.
# flush_at_shutdown = false
#
# unwind the iterator of applications
# unwind = false
rpc_deployment/roles/swift_object/handlers/main.yml (new file, 12 lines)
@@ -0,0 +1,12 @@
---
- name: (re)start object server
  command: swift-init object-server restart

- name: (re)start object auditor
  command: swift-init object-auditor restart

- name: (re)start object replicator
  command: swift-init object-replicator restart

- name: (re)start object updater
  command: swift-init object-updater restart
rpc_deployment/roles/swift_object/tasks/main.yml (new file, 20 lines)
@@ -0,0 +1,20 @@
---
- name: "swift object server configuration"
  template: src=object-server.conf.j2 dest=/etc/swift/object-server.conf owner={{ user }} mode=0644
  notify:
    - (re)start object server
    - (re)start object auditor
    - (re)start object replicator
    - (re)start object updater

- name: "Set object server to start at boot"
  cron: special_time=reboot job="swift-init object-server start"

- name: "Set object auditor to start at boot"
  cron: special_time=reboot job="swift-init object-auditor start"

- name: "Set object replicator to start at boot"
  cron: special_time=reboot job="swift-init object-replicator start"

- name: "Set object updater to start at boot"
  cron: special_time=reboot job="swift-init object-updater start"
rpc_deployment/roles/swift_object/templates/object-server.conf.j2 (new file, 281 lines)
@@ -0,0 +1,281 @@
[DEFAULT]
bind_ip = {{ inventory_hostname }}
bind_port = 6000
# bind_timeout = 30
# backlog = 4096
user = {{ user }}
swift_dir = /etc/swift
devices = {{ drive }}
# mount_check = true
# disable_fallocate = false
# expiring_objects_container_divisor = 86400
# expiring_objects_account_name = expiring_objects
#
# Use an integer to override the number of pre-forked processes that will
# accept connections.
# workers = auto
#
# Maximum concurrent requests per worker
# max_clients = 1024
#
# You can specify default log routing here if you want:
# log_name = swift
# log_facility = LOG_LOCAL0
# log_level = INFO
# log_address = /dev/log
# The following caps the length of log lines to the value given; no limit if
# set to 0, the default.
# log_max_line_length = 0
#
# comma separated list of functions to call to setup custom log handlers.
# functions get passed: conf, name, log_to_console, log_route, fmt, logger,
# adapted_logger
# log_custom_handlers =
#
# If set, log_udp_host will override log_address
# log_udp_host =
# log_udp_port = 514
#
# You can enable StatsD logging here:
# log_statsd_host = localhost
# log_statsd_port = 8125
# log_statsd_default_sample_rate = 1.0
# log_statsd_sample_rate_factor = 1.0
# log_statsd_metric_prefix =
#
# eventlet_debug = false
#
# You can set fallocate_reserve to the number of bytes you'd like fallocate to
# reserve, whether there is space for the given file size or not.
# fallocate_reserve = 0
#
# Time to wait while attempting to connect to another backend node.
# conn_timeout = 0.5
# Time to wait while sending each chunk of data to another backend node.
# node_timeout = 3
# Time to wait while receiving each chunk of data from a client or another
# backend node.
# client_timeout = 60
#
# network_chunk_size = 65536
# disk_chunk_size = 65536

[pipeline:main]
pipeline = healthcheck recon object-server

[app:object-server]
use = egg:swift#object
log_facility = LOG_LOCAL1
# You can override the default log routing for this app here:
# set log_name = object-server
# set log_facility = LOG_LOCAL0
# set log_level = INFO
# set log_requests = true
# set log_address = /dev/log
#
# max_upload_time = 86400
# slow = 0
#
# Objects smaller than this are not evicted from the buffercache once read
# keep_cache_size = 5424880
#
# If true, objects for authenticated GET requests may be kept in buffer cache
# if small enough
# keep_cache_private = false
#
# on PUTs, sync data every n MB
# mb_per_sync = 512
mb_per_sync = 64
#
# Comma separated list of headers that can be set in metadata on an object.
# This list is in addition to X-Object-Meta-* headers and cannot include
# Content-Type, etag, Content-Length, or deleted
# allowed_headers = Content-Disposition, Content-Encoding, X-Delete-At, X-Object-Manifest, X-Static-Large-Object
#
# auto_create_account_prefix = .
#
# A value of 0 means "don't use thread pools". A reasonable starting point is
# 4.
# threads_per_disk = 0
#
# Configure parameter for creating specific server
# To handle all verbs, including replication verbs, do not specify
# "replication_server" (this is the default). To only handle replication,
# set to a True value (e.g. "True" or "1"). To handle only non-replication
# verbs, set to "False". Unless you have a separate replication network, you
# should not specify any value for "replication_server".
# replication_server = false
#
# Set to restrict the number of concurrent incoming REPLICATION requests
# Set to 0 for unlimited
# Note that REPLICATION is currently an ssync only item
# replication_concurrency = 4
#
# Restricts incoming REPLICATION requests to one per device,
# replication_concurrency above allowing. This can help control I/O to each
# device, but you may wish to set this to False to allow multiple REPLICATION
# requests (up to the above replication_concurrency setting) per device.
# replication_one_per_device = True
#
# Number of seconds to wait for an existing replication device lock before
# giving up.
# replication_lock_timeout = 15
#
# These next two settings control when the REPLICATION subrequest handler will
# abort an incoming REPLICATION attempt. An abort will occur if there are at
# least threshold number of failures and the value of failures / successes
# exceeds the ratio. The defaults of 100 and 1.0 means that at least 100
# failures have to occur and there have to be more failures than successes for
# an abort to occur.
# replication_failure_threshold = 100
# replication_failure_ratio = 1.0

[filter:healthcheck]
use = egg:swift#healthcheck
# An optional filesystem path, which if present, will cause the healthcheck
# URL to return "503 Service Unavailable" with a body of "DISABLED BY FILE"
# disable_path =

[filter:recon]
use = egg:swift#recon
log_facility = LOG_LOCAL2
recon_cache_path = /var/cache/swift
recon_lock_path = /var/lock/swift

[object-replicator]
# You can override the default log routing for this app here (don't use set!):
# log_name = object-replicator
log_facility = LOG_LOCAL2
# log_level = INFO
# log_address = /dev/log
#
# vm_test_mode = no
# daemonize = on
# run_pause = 30
concurrency = 6
# stats_interval = 300
#
# The sync method to use; default is rsync but you can use ssync to try the
# EXPERIMENTAL all-swift-code-no-rsync-callouts method. Once ssync is verified
# as having performance comparable to, or better than, rsync, we plan to
# deprecate rsync so we can move on with more features for replication.
# sync_method = rsync
#
# max duration of a partition rsync
# rsync_timeout = 900
#
# bandwidth limit for rsync in kB/s. 0 means unlimited
# rsync_bwlimit = 0
#
# passed to rsync for io op timeout
# rsync_io_timeout = 30
#
# node_timeout = <whatever's in the DEFAULT section or 10>
# max duration of an http request; this is for REPLICATE finalization calls and
# so should be longer than node_timeout
# http_timeout = 60
#
# attempts to kill all workers if nothing replicates for lockup_timeout seconds
# lockup_timeout = 1800
#
# The replicator also performs reclamation
# reclaim_age = 604800
#
# ring_check_interval = 15
# recon_cache_path = /var/cache/swift
#
# limits how long rsync error log lines are
# 0 means to log the entire line
# rsync_error_log_line_length = 0
#
# handoffs_first and handoff_delete are options for a special case
# such as disk full in the cluster. These two options SHOULD NOT BE
# CHANGED, except for such extreme situations (e.g. disks filled up
# or are about to fill up. Anyway, DO NOT let your drives fill up).
# handoffs_first is the flag to replicate handoffs prior to canonical
# partitions. It allows one to force syncing and deleting handoffs quickly.
# If set to a True value (e.g. "True" or "1"), partitions
# that are not supposed to be on the node will be replicated first.
# handoffs_first = False
#
# handoff_delete is the number of replicas which are ensured in swift.
# If a number less than the number of replicas is set, object-replicator
# could delete local handoffs even if not all replicas are ensured in the
# cluster. Object-replicator would remove local handoff partition directories
# after syncing a partition when the number of successful responses is greater
# than or equal to this number. By default (auto), handoff partitions will be
# removed when they have successfully replicated to all the canonical nodes.
# handoff_delete = auto

[object-updater]
# You can override the default log routing for this app here (don't use set!):
# log_name = object-updater
log_facility = LOG_LOCAL2
# log_level = INFO
# log_address = /dev/log
#
# interval = 300
concurrency = 3
# node_timeout = <whatever's in the DEFAULT section or 10>
# slowdown will sleep that amount between objects
# slowdown = 0.01
#
# recon_cache_path = /var/cache/swift
node_timeout = 60
conn_timeout = 5

[object-auditor]
# You can override the default log routing for this app here (don't use set!):
# log_name = object-auditor
log_facility = LOG_LOCAL2
# log_level = INFO
# log_address = /dev/log
#
# You can set the disk chunk size that the auditor uses making it larger if
# you like for more efficient local auditing of larger objects
# disk_chunk_size = 65536
# files_per_second = 20
# concurrency = 1
# bytes_per_second = 10000000
# log_time = 3600
# zero_byte_files_per_second = 50
# recon_cache_path = /var/cache/swift

# Takes a comma separated list of ints. If set, the object auditor will
# increment a counter for every object whose size is <= to the given break
# points and report the result after a full scan.
# object_size_stats =

# Note: Put it at the beginning of the pipeline to profile all middleware. But
# it is safer to put this after healthcheck.
[filter:xprofile]
use = egg:swift#xprofile
# This option enables you to switch profilers, which should inherit from the
# python standard profiler. Currently the supported value can be 'cProfile',
# 'eventlet.green.profile' etc.
# profile_module = eventlet.green.profile
#
# This prefix will be used to combine process ID and timestamp to name the
# profile data file. Make sure the executing user has permission to write
# into this path (missing path segments will be created, if necessary).
# If you enable profiling in more than one type of daemon, you must override
# it with a unique value like: /var/log/swift/profile/object.profile
# log_filename_prefix = /tmp/log/swift/profile/default.profile
#
# the profile data will be dumped to local disk based on above naming rule
# in this interval.
# dump_interval = 5.0
#
# Be careful, this option will enable the profiler to dump data into the file
# with a timestamp, which means there will be lots of files piled up in the
# directory.
# dump_timestamp = false
#
# This is the path of the URL to access the mini web UI.
# path = /__profile__
#
# Clear the data when the wsgi server shuts down.
# flush_at_shutdown = false
#
# unwind the iterator of applications
# unwind = false
rpc_deployment/roles/swift_proxy/handlers/main.yml (new file, 3 lines)
@@ -0,0 +1,3 @@
---
- name: (re)start proxy server
  command: swift-init proxy-server restart
rpc_deployment/roles/swift_proxy/tasks/main.yml (new file, 11 lines)
@@ -0,0 +1,11 @@
---
- name: "install the keystone auth_token middleware"
  pip: name=keystonemiddleware

- name: "swift proxy server configuration"
  template: src=proxy-server.conf.j2 dest=/etc/swift/proxy-server.conf owner={{ user }} mode=0644
  notify:
    - (re)start proxy server

- name: "Set proxy server to start at boot"
  cron: special_time=reboot job="swift-init proxy-server start"
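Once the proxy role has run, a quick smoke test (a sketch) against the
healthcheck middleware configured in proxy-server.conf.j2, using the proxy
host and bind_port from the sample swift_setup.yml:

    curl -i http://10.0.0.1:8080/healthcheck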
rpc_deployment/roles/swift_proxy/templates/proxy-server.conf.j2 (new file, 622 lines)
@@ -0,0 +1,622 @@
|
||||
[DEFAULT]
|
||||
bind_ip = {{ inventory_host }}
|
||||
bind_port = 8080
|
||||
# bind_timeout = 30
|
||||
# backlog = 4096
|
||||
# swift_dir = /etc/swift
|
||||
user = {{ user }}
|
||||
|
||||
# Enables exposing configuration settings via HTTP GET /info.
|
||||
# expose_info = true
|
||||
|
||||
# Key to use for admin calls that are HMAC signed. Default is empty,
|
||||
# which will disable admin calls to /info.
|
||||
# admin_key = secret_admin_key
|
||||
#
|
||||
# Allows the ability to withhold sections from showing up in the public calls
|
||||
# to /info. You can withhold subsections by separating the dict level with a
|
||||
# ".". The following would cause the sections 'container_quotas' and 'tempurl'
|
||||
# to not be listed, and the key max_failed_deletes would be removed from
|
||||
# bulk_delete. Default is empty, allowing all registered fetures to be listed
|
||||
# via HTTP GET /info.
|
||||
# disallowed_sections = container_quotas, tempurl, bulk_delete.max_failed_deletes
|
||||
|
||||
# Use an integer to override the number of pre-forked processes that will
|
||||
# accept connections. Should default to the number of effective cpu
|
||||
# cores in the system. It's worth noting that individual workers will
|
||||
# use many eventlet co-routines to service multiple concurrent requests.
|
||||
# workers = auto
|
||||
#
|
||||
# Maximum concurrent requests per worker
|
||||
# max_clients = 1024
|
||||
#
|
||||
# Set the following two lines to enable SSL. This is for testing only.
|
||||
# cert_file = /etc/swift/proxy.crt
|
||||
# key_file = /etc/swift/proxy.key
|
||||
#
|
||||
# expiring_objects_container_divisor = 86400
|
||||
# expiring_objects_account_name = expiring_objects
|
||||
#
|
||||
# You can specify default log routing here if you want:
|
||||
# log_name = swift
|
||||
# log_facility = LOG_LOCAL0
|
||||
# log_level = INFO
|
||||
# log_headers = false
|
||||
# log_address = /dev/log
|
||||
# The following caps the length of log lines to the value given; no limit if
|
||||
# set to 0, the default.
|
||||
# log_max_line_length = 0
|
||||
#
|
||||
# This optional suffix (default is empty) that would be appended to the swift transaction
|
||||
# id allows one to easily figure out from which cluster that X-Trans-Id belongs to.
|
||||
# This is very useful when one is managing more than one swift cluster.
|
||||
# trans_id_suffix =
|
||||
#
|
||||
# comma separated list of functions to call to setup custom log handlers.
|
||||
# functions get passed: conf, name, log_to_console, log_route, fmt, logger,
|
||||
# adapted_logger
|
||||
# log_custom_handlers =
|
||||
#
|
||||
# If set, log_udp_host will override log_address
|
||||
# log_udp_host =
|
||||
# log_udp_port = 514
|
||||
#
|
||||
# You can enable StatsD logging here:
|
||||
# log_statsd_host = localhost
|
||||
# log_statsd_port = 8125
|
||||
# log_statsd_default_sample_rate = 1.0
|
||||
# log_statsd_sample_rate_factor = 1.0
|
||||
# log_statsd_metric_prefix =
|
||||
#
|
||||
# Use a comma separated list of full url (http://foo.bar:1234,https://foo.bar)
|
||||
# cors_allow_origin =
|
||||
# strict_cors_mode = True
|
||||
#
|
||||
# client_timeout = 60
|
||||
# eventlet_debug = false
|
||||
|
||||
[pipeline:main]
|
||||
{% if authtoken_active %}
|
||||
pipeline = catch_errors gatekeeper healthcheck proxy-logging cache container_sync bulk tempurl ratelimit authtoken keystoneauth container-quotas account-quotas slo dlo proxy-logging proxy-server
|
||||
{% else %}
|
||||
pipeline = catch_errors gatekeeper healthcheck proxy-logging cache container_sync bulk tempurl ratelimit tempauth container-quotas account-quotas slo dlo proxy-logging proxy-server
|
||||
{% endif %}
|
||||
[app:proxy-server]
|
||||
use = egg:swift#proxy
|
||||
log_facility = LOG_LOCAL0
|
||||
# You can override the default log routing for this app here:
|
||||
# set log_name = proxy-server
|
||||
# set log_facility = LOG_LOCAL0
|
||||
# set log_level = INFO
|
||||
# set log_address = /dev/log
|
||||
#
|
||||
# log_handoffs = true
|
||||
# recheck_account_existence = 60
|
||||
# recheck_container_existence = 60
|
||||
# object_chunk_size = 65536
|
||||
# client_chunk_size = 65536
|
||||
#
|
||||
# How long the proxy server will wait on responses from the a/c/o servers.
|
||||
node_timeout = 60
|
||||
#
|
||||
# How long the proxy server will wait for an initial response and to read a
|
||||
# chunk of data from the object servers while serving GET / HEAD requests.
|
||||
# Timeouts from these requests can be recovered from so setting this to
|
||||
# something lower than node_timeout would provide quicker error recovery
|
||||
# while allowing for a longer timeout for non-recoverable requests (PUTs).
|
||||
# Defaults to node_timeout, should be overriden if node_timeout is set to a
|
||||
# high number to prevent client timeouts from firing before the proxy server
|
||||
# has a chance to retry.
|
||||
# recoverable_node_timeout = node_timeout
|
||||
#
|
||||
conn_timeout = 3.5
|
||||
#
|
||||
# How long to wait for requests to finish after a quorum has been established.
|
||||
# post_quorum_timeout = 0.5
|
||||
#
|
||||
# How long without an error before a node's error count is reset. This will
|
||||
# also be how long before a node is reenabled after suppression is triggered.
|
||||
# error_suppression_interval = 60
|
||||
#
|
||||
# How many errors can accumulate before a node is temporarily ignored.
|
||||
# error_suppression_limit = 10
|
||||
#
|
||||
# If set to 'true' any authorized user may create and delete accounts; if
|
||||
# 'false' no one, even authorized, can.
|
||||
# allow_account_management = false
|
||||
#
|
||||
# Set object_post_as_copy = false to turn on fast posts where only the metadata
|
||||
# changes are stored anew and the original data file is kept in place. This
|
||||
# makes for quicker posts; but since the container metadata isn't updated in
|
||||
# this mode, features like container sync won't be able to sync posts.
|
||||
# object_post_as_copy = true
|
||||
#
|
||||
# If set to 'true' authorized accounts that do not yet exist within the Swift
|
||||
# cluster will be automatically created.
|
||||
account_autocreate = true
|
||||
#
|
||||
# If set to a positive value, trying to create a container when the account
|
||||
# already has at least this maximum containers will result in a 403 Forbidden.
|
||||
# Note: This is a soft limit, meaning a user might exceed the cap for
|
||||
# recheck_account_existence before the 403s kick in.
|
||||
# max_containers_per_account = 0
|
||||
#
|
||||
# This is a comma separated list of account hashes that ignore the
|
||||
# max_containers_per_account cap.
|
||||
# max_containers_whitelist =
|
||||
#
|
||||
# Comma separated list of Host headers to which the proxy will deny requests.
|
||||
# deny_host_headers =
|
||||
#
|
||||
# Prefix used when automatically creating accounts.
|
||||
# auto_create_account_prefix = .
|
||||
#
|
||||
# Depth of the proxy put queue.
|
||||
# put_queue_depth = 10
|
||||
#
|
||||
# Storage nodes can be chosen at random (shuffle), by using timing
|
||||
# measurements (timing), or by using an explicit match (affinity).
|
||||
# Using timing measurements may allow for lower overall latency, while
|
||||
# using affinity allows for finer control. In both the timing and
|
||||
# affinity cases, equally-sorting nodes are still randomly chosen to
|
||||
# spread load.
|
||||
# The valid values for sorting_method are "affinity", "shuffle", and "timing".
|
||||
# sorting_method = shuffle
|
||||
#
|
||||
# If the "timing" sorting_method is used, the timings will only be valid for
|
||||
# the number of seconds configured by timing_expiry.
|
||||
# timing_expiry = 300
|
||||
#
|
||||
# The maximum time (seconds) that a large object connection is allowed to last.
|
||||
# max_large_object_get_time = 86400
|
||||
#
|
||||
# Set to the number of nodes to contact for a normal request. You can use
|
||||
# '* replicas' at the end to have it use the number given times the number of
|
||||
# replicas for the ring being used for the request.
|
||||
# request_node_count = 2 * replicas
|
||||
#
|
||||
# Which backend servers to prefer on reads. Format is r<N> for region
|
||||
# N or r<N>z<M> for region N, zone M. The value after the equals is
|
||||
# the priority; lower numbers are higher priority.
|
||||
#
|
||||
# Example: first read from region 1 zone 1, then region 1 zone 2, then
|
||||
# anything in region 2, then everything else:
|
||||
# read_affinity = r1z1=100, r1z2=200, r2=300
|
||||
# Default is empty, meaning no preference.
|
||||
# read_affinity =
|
||||
#
|
||||
# Which backend servers to prefer on writes. Format is r<N> for region
|
||||
# N or r<N>z<M> for region N, zone M. If this is set, then when
|
||||
# handling an object PUT request, some number (see setting
|
||||
# write_affinity_node_count) of local backend servers will be tried
|
||||
# before any nonlocal ones.
|
||||
#
|
||||
# Example: try to write to regions 1 and 2 before writing to any other
|
||||
# nodes:
|
||||
# write_affinity = r1, r2
|
||||
# Default is empty, meaning no preference.
|
||||
# write_affinity =
|
||||
#
|
||||
# The number of local (as governed by the write_affinity setting)
|
||||
# nodes to attempt to contact first, before any non-local ones. You
|
||||
# can use '* replicas' at the end to have it use the number given
|
||||
# times the number of replicas for the ring being used for the
|
||||
# request.
|
||||
# write_affinity_node_count = 2 * replicas
|
||||
#
|
||||
# These are the headers whose values will only be shown to swift_owners. The
|
||||
# exact definition of a swift_owner is up to the auth system in use, but
|
||||
# usually indicates administrative responsibilities.
|
||||
# swift_owner_headers = x-container-read, x-container-write, x-container-sync-key, x-container-sync-to, x-account-meta-temp-url-key, x-account-meta-temp-url-key-2, x-account-access-control
|
||||
|
||||
[filter:tempauth]
|
||||
use = egg:swift#tempauth
|
||||
# You can override the default log routing for this filter here:
|
||||
# set log_name = tempauth
|
||||
# set log_facility = LOG_LOCAL0
|
||||
# set log_level = INFO
|
||||
# set log_headers = false
|
||||
# set log_address = /dev/log
|
||||
#
|
||||
# The reseller prefix will verify a token begins with this prefix before even
|
||||
# attempting to validate it. Also, with authorization, only Swift storage
|
||||
# accounts with this prefix will be authorized by this middleware. Useful if
|
||||
# multiple auth systems are in use for one Swift cluster.
|
||||
# reseller_prefix = AUTH
|
||||
#
|
||||
# The auth prefix will cause requests beginning with this prefix to be routed
|
||||
# to the auth subsystem, for granting tokens, etc.
|
||||
# auth_prefix = /auth/
|
||||
# token_life = 86400
|
||||
#
|
||||
# This allows middleware higher in the WSGI pipeline to override auth
|
||||
# processing, useful for middleware such as tempurl and formpost. If you know
|
||||
# you're not going to use such middleware and you want a bit of extra security,
|
||||
# you can set this to false.
|
||||
# allow_overrides = true
|
||||
#
|
||||
# This specifies what scheme to return with storage urls:
|
||||
# http, https, or default (chooses based on what the server is running as)
|
||||
# This can be useful with an SSL load balancer in front of a non-SSL server.
|
||||
# storage_url_scheme = default
|
||||
#
|
||||
# Lastly, you need to list all the accounts/users you want here. The format is:
|
||||
# user_<account>_<user> = <key> [group] [group] [...] [storage_url]
|
||||
# or if you want underscores in <account> or <user>, you can base64 encode them
|
||||
# (with no equal signs) and use this format:
|
||||
# user64_<account_b64>_<user_b64> = <key> [group] [group] [...] [storage_url]
|
||||
# There are special groups of:
|
||||
# .reseller_admin = can do anything to any account for this auth
|
||||
# .admin = can do anything within the account
|
||||
# If neither of these groups are specified, the user can only access containers
|
||||
# that have been explicitly allowed for them by a .admin or .reseller_admin.
|
||||
# The trailing optional storage_url allows you to specify an alternate url to
|
||||
# hand back to the user upon authentication. If not specified, this defaults to
|
||||
# $HOST/v1/<reseller_prefix>_<account> where $HOST will do its best to resolve
|
||||
# to what the requester would need to use to reach this host.
|
||||
# Here are example entries, required for running the tests:
|
||||
{% if not authtoken_active %}
|
||||
user_admin_admin = admin .admin .reseller_admin
|
||||
user_test_tester = testing .admin
|
||||
user_test2_tester2 = testing2 .admin
|
||||
user_test_tester3 = testing3
|
||||
{% endif %}

# To enable Keystone authentication you first need to configure the auth
# token middleware. An example is given below; please refer to Keystone's
# documentation for details about the different settings.
#
# You'll also need the keystoneauth middleware enabled in your main
# pipeline, so instead of tempauth there you use: authtoken keystoneauth
#
{% if authtoken_active %}
[filter:authtoken]
paste.filter_factory = keystonemiddleware.auth_token:filter_factory
auth_host = {{ auth_host }}
auth_port = {{ auth_port }}
auth_protocol = {{ auth_protocol }}
auth_uri = {{ auth_uri }}
admin_tenant_name = {{ admin_tenant_name }}
admin_user = {{ admin_user }}
admin_password = {{ admin_password }}
delay_auth_decision = {{ delay_auth_decision }}
# cache = swift.cache
# include_service_catalog = False
#
[filter:keystoneauth]
use = egg:swift#keystoneauth
# operator_roles lists the roles whose users are allowed to manage a
# tenant, create containers and grant ACLs to others.
operator_roles = admin, swiftoperator
# The reseller admin role has the ability to create and delete accounts
reseller_admin_role = reseller_admin
{% endif %}

[filter:healthcheck]
use = egg:swift#healthcheck
# An optional filesystem path, which if present, will cause the healthcheck
# URL to return "503 Service Unavailable" with a body of "DISABLED BY FILE".
# This facility may be used to temporarily remove a Swift node from a load
# balancer pool during maintenance or upgrade (remove the file to allow the
# node back into the load balancer pool).
# disable_path =
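#
# For example (the path is hypothetical): with disable_path = /etc/swift/disabled,
# touching /etc/swift/disabled before maintenance makes the healthcheck URL
# return 503 so the load balancer drains this node; removing the file lets it
# rejoin the pool.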

[filter:cache]
use = egg:swift#memcache
# You can override the default log routing for this filter here:
# set log_name = cache
# set log_facility = LOG_LOCAL0
# set log_level = INFO
# set log_headers = false
# set log_address = /dev/log
#
# If not set here, the value for memcache_servers will be read from
# memcache.conf (see memcache.conf-sample) or lacking that file, it will
# default to the value below. You can specify multiple servers separated with
# commas, as in: 10.1.2.3:11211,10.1.2.4:11211
memcache_servers = {{ memcache_servers }}
#
# Sets how memcache values are serialized and deserialized:
# 0 = older, insecure pickle serialization
# 1 = json serialization but pickles can still be read (still insecure)
# 2 = json serialization only (secure and the default)
# If not set here, the value for memcache_serialization_support will be read
# from /etc/swift/memcache.conf (see memcache.conf-sample).
# To avoid an instant full cache flush, existing installations should
# upgrade with 0, then set to 1 and reload, then after some time (24 hours)
# set to 2 and reload.
# In the future, the ability to use pickle serialization will be removed.
memcache_serialization_support = 2
#
# Sets the maximum number of connections to each memcached server per worker
# memcache_max_connections = 2

[filter:ratelimit]
use = egg:swift#ratelimit
# You can override the default log routing for this filter here:
# set log_name = ratelimit
# set log_facility = LOG_LOCAL0
# set log_level = INFO
# set log_headers = false
# set log_address = /dev/log
#
# clock_accuracy should represent how accurate the proxy servers' system clocks
# are with each other. 1000 means that all the proxies' clocks are accurate to
# each other within 1 millisecond. No ratelimit should be higher than the
# clock accuracy.
# clock_accuracy = 1000
#
# max_sleep_time_seconds = 60
#
# log_sleep_time_seconds of 0 means disabled
# log_sleep_time_seconds = 0
#
# allows for slow rates (e.g. running up to 5 seconds behind) to catch up.
# rate_buffer_seconds = 5
#
# account_ratelimit of 0 means disabled
# account_ratelimit = 0

# these are comma separated lists of account names
# account_whitelist = a,b
# account_blacklist = c,d

# with container_ratelimit_x = r
# for containers of size x, limit write requests per second to r. The container
# rate will be linearly interpolated from the values given. With the values
# below, a container of size 5 will get a rate of 75.
# container_ratelimit_0 = 100
# container_ratelimit_10 = 50
# container_ratelimit_50 = 20
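#
# That 75 comes from interpolating between the two nearest configured sizes:
# size 5 lies halfway between 0 (rate 100) and 10 (rate 50), so
# 100 + (5 - 0) / (10 - 0) * (50 - 100) = 75 write requests per second.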

# Similarly to the above container-level write limits, the following will limit
# container GET (listing) requests.
# container_listing_ratelimit_0 = 100
# container_listing_ratelimit_10 = 50
# container_listing_ratelimit_50 = 20

[filter:domain_remap]
use = egg:swift#domain_remap
# You can override the default log routing for this filter here:
# set log_name = domain_remap
# set log_facility = LOG_LOCAL0
# set log_level = INFO
# set log_headers = false
# set log_address = /dev/log
#
# storage_domain = example.com
# path_root = v1
# reseller_prefixes = AUTH

[filter:catch_errors]
use = egg:swift#catch_errors
# You can override the default log routing for this filter here:
# set log_name = catch_errors
# set log_facility = LOG_LOCAL0
# set log_level = INFO
# set log_headers = false
# set log_address = /dev/log

[filter:cname_lookup]
# Note: this middleware requires python-dnspython
use = egg:swift#cname_lookup
# You can override the default log routing for this filter here:
# set log_name = cname_lookup
# set log_facility = LOG_LOCAL0
# set log_level = INFO
# set log_headers = false
# set log_address = /dev/log
#
# Specify the storage_domain that matches your cloud; multiple domains
# can be specified, separated by a comma
# storage_domain = example.com
#
# lookup_depth = 1

# Note: Put staticweb just after your auth filter(s) in the pipeline
[filter:staticweb]
use = egg:swift#staticweb

# Note: Put tempurl before dlo, slo and your auth filter(s) in the pipeline
[filter:tempurl]
use = egg:swift#tempurl
# The methods allowed with Temp URLs.
# methods = GET HEAD PUT POST DELETE
#
# The headers to remove from incoming requests. Simply a whitespace delimited
# list of header names and names can optionally end with '*' to indicate a
# prefix match. incoming_allow_headers is a list of exceptions to these
# removals.
# incoming_remove_headers = x-timestamp
#
# The headers allowed as exceptions to incoming_remove_headers. Simply a
# whitespace delimited list of header names and names can optionally end with
# '*' to indicate a prefix match.
# incoming_allow_headers =
#
# The headers to remove from outgoing responses. Simply a whitespace delimited
# list of header names and names can optionally end with '*' to indicate a
# prefix match. outgoing_allow_headers is a list of exceptions to these
# removals.
# outgoing_remove_headers = x-object-meta-*
#
# The headers allowed as exceptions to outgoing_remove_headers. Simply a
# whitespace delimited list of header names and names can optionally end with
# '*' to indicate a prefix match.
# outgoing_allow_headers = x-object-meta-public-*
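#
# A sketch of how a client typically signs a temp URL (key and path are
# hypothetical): the signature is an HMAC-SHA1 over "method\nexpires\npath"
# using the account's temp-url key, e.g. in Python:
#
#   import hmac
#   from hashlib import sha1
#   from time import time
#   expires = int(time() + 3600)
#   path = '/v1/AUTH_account/container/object'
#   sig = hmac.new(b'mykey', ('GET\n%d\n%s' % (expires, path)).encode(),
#                  sha1).hexdigest()
#
# The object URL then gets ?temp_url_sig=<sig>&temp_url_expires=<expires>.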

# Note: Put formpost just before your auth filter(s) in the pipeline
[filter:formpost]
use = egg:swift#formpost

# Note: Just needs to be placed before the proxy-server in the pipeline.
[filter:name_check]
use = egg:swift#name_check
# forbidden_chars = '"`<>
# maximum_length = 255
# forbidden_regexp = /\./|/\.\./|/\.$|/\.\.$

[filter:list-endpoints]
use = egg:swift#list_endpoints
# list_endpoints_path = /endpoints/

[filter:proxy-logging]
use = egg:swift#proxy_logging
# If not set, logging directives from [DEFAULT] without "access_" will be used
# access_log_name = swift
# access_log_facility = LOG_LOCAL0
# access_log_level = INFO
# access_log_address = /dev/log
#
# If set, access_log_udp_host will override access_log_address
# access_log_udp_host =
# access_log_udp_port = 514
#
# You can use log_statsd_* from [DEFAULT] or override them here:
# access_log_statsd_host = localhost
# access_log_statsd_port = 8125
# access_log_statsd_default_sample_rate = 1.0
# access_log_statsd_sample_rate_factor = 1.0
# access_log_statsd_metric_prefix =
# access_log_headers = false
#
# If access_log_headers is True and access_log_headers_only is set, only
# these headers are logged. Multiple headers can be defined as a comma separated
# list like this: access_log_headers_only = Host, X-Object-Meta-Mtime
# access_log_headers_only =
#
# By default, the X-Auth-Token is logged. To obscure the value,
# set reveal_sensitive_prefix to the number of characters to log.
# For example, if set to 12, only the first 12 characters of the
# token appear in the log. An unauthorized access of the log file
# won't allow unauthorized usage of the token. However, the first
# 12 or so characters are unique enough that you can trace/debug
# token usage. Set to 0 to suppress the token completely (replaced
# by '...' in the log).
# Note: reveal_sensitive_prefix will not affect the value
# logged with access_log_headers=True.
# reveal_sensitive_prefix = 16
#
# What HTTP methods are allowed for StatsD logging (comma-sep); request methods
# not in this list will have "BAD_METHOD" for the <verb> portion of the metric.
# log_statsd_valid_http_methods = GET,HEAD,POST,PUT,DELETE,COPY,OPTIONS
#
# Note: The double proxy-logging in the pipeline is not a mistake. The
# left-most proxy-logging is there to log requests that were handled in
# middleware and never made it through to the right-most middleware (and
# proxy server). Double logging is prevented for normal requests. See
# proxy-logging docs.

# Note: Put before both ratelimit and auth in the pipeline.
[filter:bulk]
use = egg:swift#bulk
# max_containers_per_extraction = 10000
# max_failed_extractions = 1000
# max_deletes_per_request = 10000
# max_failed_deletes = 1000

# In order to keep a connection active during a potentially long bulk request,
# Swift may return whitespace prepended to the actual response body. This
# whitespace will be yielded no more than every yield_frequency seconds.
# yield_frequency = 10

# Note: The following parameter is used during a bulk delete of objects and
# their container. This can frequently fail because it is likely that not all
# replicated objects have been deleted by the time the middleware gets a
# successful response. The number of retries is configurable; the wait between
# each retry will be 1.5**retry seconds.

# delete_container_retry_count = 0

# Note: Put after auth in the pipeline.
[filter:container-quotas]
use = egg:swift#container_quotas

# Note: Put after auth and staticweb in the pipeline.
[filter:slo]
use = egg:swift#slo
# max_manifest_segments = 1000
# max_manifest_size = 2097152
# min_segment_size = 1048576
# Start rate-limiting SLO segment serving after the Nth segment of a
# segmented object.
# rate_limit_after_segment = 10
#
# Once segment rate-limiting kicks in for an object, limit segments served
# to N per second. 0 means no rate-limiting.
# rate_limit_segments_per_sec = 0
#
# Time limit on GET requests (seconds)
# max_get_time = 86400

# Note: Put after auth and staticweb in the pipeline.
# If you don't put it in the pipeline, it will be inserted for you.
[filter:dlo]
use = egg:swift#dlo
# Start rate-limiting DLO segment serving after the Nth segment of a
# segmented object.
# rate_limit_after_segment = 10
#
# Once segment rate-limiting kicks in for an object, limit segments served
# to N per second. 0 means no rate-limiting.
# rate_limit_segments_per_sec = 1
#
# Time limit on GET requests (seconds)
# max_get_time = 86400

[filter:account-quotas]
use = egg:swift#account_quotas

[filter:gatekeeper]
use = egg:swift#gatekeeper
# You can override the default log routing for this filter here:
# set log_name = gatekeeper
# set log_facility = LOG_LOCAL0
# set log_level = INFO
# set log_headers = false
# set log_address = /dev/log

[filter:container_sync]
use = egg:swift#container_sync
# Set this to false if you want to disallow any full url values to be set for
# any new X-Container-Sync-To headers. This will keep any new full urls from
# coming in, but won't change any existing values already in the cluster.
# Updating those will have to be done manually, as knowing what the true realm
# endpoint should be cannot always be guessed.
# allow_full_urls = true
# Set this to specify this cluster's //realm/cluster as "current" in /info
# current = //REALM/CLUSTER

# Note: Put it at the beginning of the pipeline to profile all middleware. But
# it is safer to put this after catch_errors, gatekeeper and healthcheck.
[filter:xprofile]
use = egg:swift#xprofile
# This option enables you to switch profilers; the profiler should inherit
# from Python's standard profiler. Currently the supported values are
# 'cProfile', 'eventlet.green.profile', etc.
# profile_module = eventlet.green.profile
#
# This prefix will be used to combine process ID and timestamp to name the
# profile data file. Make sure the executing user has permission to write
# into this path (missing path segments will be created, if necessary).
# If you enable profiling in more than one type of daemon, you must override
# it with a unique value like: /var/log/swift/profile/proxy.profile
# log_filename_prefix = /tmp/log/swift/profile/default.profile
#
# The profile data will be dumped to local disk, following the naming rule
# above, at this interval.
# dump_interval = 5.0
#
# Be careful: this option makes the profiler dump data into timestamped files,
# which means lots of files can pile up in the directory.
# dump_timestamp = false
#
# This is the path of the URL to access the mini web UI.
# path = /__profile__
#
# Clear the data when the wsgi server shuts down.
# flush_at_shutdown = false
#
# unwind the iterator of applications
# unwind = false
309
scripts/swift_inventory.py
Normal file
@ -0,0 +1,309 @@
#!/usr/bin/env python
from __future__ import print_function

import datetime
import sys
import yaml

from optparse import OptionParser
from os.path import exists, isdir, join

VERSION = '0.1'
USAGE = "usage: %prog [options] -s <swift setup yaml>"

DEFAULT_PART_POWER = 8
DEFAULT_REPL_NUM = 3
DEFAULT_REGION = 0
DEFAULT_ZONE = 0
DEFAULT_WEIGHT = 100
DEFAULT_DRIVE = "/srv/disk"
DEFAULT_OUTPUT_DIR = "/etc/ansible"
DEFAULT_OUTPUT_FILENAME = "hosts"

RETURN_NOT_DEFINED = 3

# FILE formatted strings
HEADER = """# This file was generated using the %s version %s at %s
[local]
localhost ansible_connection=local

[proxy]"""

CATCH_ALL_GROUPS = """
[object:children]
storagepolicy

[swift:children]
proxy
account
container
object

[swift:vars]"""

DRIVE_FORMAT = "%(host)s drive=%(drive)s region=%(region)s zone=%(zone)s "
DRIVE_FORMAT += "weight=%(weight)s"
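# e.g. "10.0.0.2 drive=/srv/disk region=0 zone=0 weight=100"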

DEFAULT_AUTHTOKEN_SETTINGS = {
    'auth_version': 'v2.0',
    'auth_host': 'keystone',
    'auth_port': '35357',
    'auth_protocol': 'https',
    'admin_tenant_name': 'service',
    'admin_user': 'swift',
    'admin_password': 'ADMIN',
}


def main(setup, verbose=False, dry_run=False, overwrite=True):
    # Parse the setup file, which should be yaml
    _swift = {}
    _drives = {}
    try:
        with open(setup) as yaml_stream:
            _swift = yaml.safe_load(yaml_stream)
    except Exception as err:
        print("ERROR: Failed to parse yaml file: %s" % err)
        return 2

    def _section_defined(section):
        if section not in _swift:
            print("ERROR: no %s section defined" % section)
            return False
        return True

    def _get_output_fd(filename):
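        # Dry runs write nothing; with -C/--copy an existing inventory file is
        # preserved by writing to <filename>_1, <filename>_2, ... instead.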
        if dry_run:
            return None
        elif not overwrite and exists(filename):
            i = 1
            while exists("%s_%d" % (filename, i)):
                i += 1
            return open("%s_%d" % (filename, i), 'w')
        else:
            return open(filename, 'w')

    def _write_to_file(fd, data):
        if not fd or verbose:
            print(data)

        if fd:
            if not data.endswith('\n'):
                data += "\n"
            fd.write(data)
            fd.flush()

    def _get_drive(drive):
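        # Build the "host drive=... region=... zone=... weight=..." inventory
        # line, caching it by host+drive so the same device renders identically
        # wherever it appears.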
        _drive = {
            'drive': DEFAULT_DRIVE,
            'region': DEFAULT_REGION,
            'zone': DEFAULT_ZONE,
            'weight': DEFAULT_WEIGHT}

        if "drive" not in drive:
            drive["drive"] = DEFAULT_DRIVE

        key = "%(host)s%(drive)s" % drive
        if key in _drives:
            return _drives[key]
        else:
            _drive.update(drive)
            data = DRIVE_FORMAT % _drive
            _drives[key] = data
            return data

    # First attempt to get swift settings
    if not _section_defined("swift"):
        return RETURN_NOT_DEFINED

    swift_options = [
        "part_power=%s" % (_swift['swift'].get('part_power',
                                               DEFAULT_PART_POWER)),
        "user=%s" % (_swift['swift'].get('user', 'swift')),
        "swift_hash_path_suffix=%s" % (_swift['swift'].get("hash_path_suffix")),
        "swift_hash_path_prefix=%s" % (_swift['swift'].get("hash_path_prefix")),
        "syslog_host=%s" % (_swift['swift'].get('syslog_host',
                                                'localhost:514')),
    ]
    output_path = _swift['swift'].get("output_directory", DEFAULT_OUTPUT_DIR)
    output_file = _swift['swift'].get("output_filename",
                                      DEFAULT_OUTPUT_FILENAME)
    if not isdir(output_path):
        print("Output path '%s' doesn't exist" % output_path)
        return 4

    output_file = join(output_path, output_file)
    output_fd = _get_output_fd(output_file)

    n = datetime.datetime.now()
    _write_to_file(output_fd, HEADER % (__file__, VERSION, n.ctime()))

if not _section_defined("proxy"):
|
||||
return RETURN_NOT_DEFINED
|
||||
|
||||
# Parse proxies
|
||||
# TODO: Add read anfinity and pipeline here?
|
||||
for proxy in _swift["proxy"]["hosts"]:
|
||||
_write_to_file(output_fd, "%s" % (proxy["host"]))
|
||||
_write_to_file(output_fd, "\n[proxy:vars]")
|
||||
_mc_servers = _swift["proxy"].get('memcache_servers')
|
||||
memcache_servers = ",".join(_mc_servers) if _mc_servers else \
|
||||
'127.0.0.1:11211'
|
||||
_write_to_file(output_fd, "memcache_servers=%s" % (memcache_servers))
|
||||
_at = _swift["proxy"].get('authtoken')
|
||||
if _at:
|
||||
authtoken = DEFAULT_AUTHTOKEN_SETTINGS
|
||||
authtoken.update(_at)
|
||||
at_active = authtoken.get("active", False)
|
||||
if at_active:
|
||||
_write_to_file(output_fd, "authtoken_active=true")
|
||||
_write_to_file(output_fd, "delay_auth_decision="
|
||||
"%(delay_auth_decision)s" % authtoken)
|
||||
_write_to_file(output_fd, "auth_version="
|
||||
"%(auth_version)s" % authtoken)
|
||||
_write_to_file(output_fd, "auth_host="
|
||||
"%(auth_host)s" % authtoken)
|
||||
_write_to_file(output_fd, "auth_port="
|
||||
"%(auth_port)s" % authtoken)
|
||||
_write_to_file(output_fd, "auth_protocol="
|
||||
"%(auth_protocol)s" % authtoken)
|
||||
_write_to_file(output_fd, "auth_uri="
|
||||
"%(auth_uri)s" % authtoken)
|
||||
_write_to_file(output_fd, "admin_tenant_name="
|
||||
"%(admin_tenant_name)s" % authtoken)
|
||||
_write_to_file(output_fd, "admin_user="
|
||||
"%(admin_user)s" % authtoken)
|
||||
_write_to_file(output_fd, "admin_password="
|
||||
"%(admin_password)s" % authtoken)
|
||||
else:
|
||||
_write_to_file(output_fd, "authtoken_active=false")
|
||||
|
||||
_write_to_file(output_fd, "\n[account]")
|
||||
|
||||
if not _section_defined("account"):
|
||||
return RETURN_NOT_DEFINED
|
||||
|
||||
for account in _swift["account"]["hosts"]:
|
||||
data = _get_drive(account)
|
||||
_write_to_file(output_fd, data)
|
||||
|
||||
_write_to_file(output_fd, "\n[account:vars]")
|
||||
repl_num = _swift["account"].get("repl_number", DEFAULT_REPL_NUM)
|
||||
_write_to_file(output_fd, "repl_number=%d" % (repl_num))
|
||||
|
||||
# Container section
|
||||
_write_to_file(output_fd, "\n[container]")
|
||||
|
||||
if not _section_defined("container"):
|
||||
return RETURN_NOT_DEFINED
|
||||
|
||||
for container in _swift["container"]["hosts"]:
|
||||
data = _get_drive(container)
|
||||
_write_to_file(output_fd, data)
|
||||
|
||||
_write_to_file(output_fd, "\n[container:vars]")
|
||||
repl_num = _swift["container"].get("repl_number", DEFAULT_REPL_NUM)
|
||||
_write_to_file(output_fd, "repl_number=%d" % (repl_num))
|
||||
|
||||
    # Objects / Storage policies
    _storage_policies = {}
    _storage_policies_idx = {}
    if not _section_defined("storage_policies"):
        return RETURN_NOT_DEFINED

    if "policies" not in _swift["storage_policies"]:
        print("ERROR: No storage policies defined")
        return 4

    for policy in _swift["storage_policies"]["policies"]:
        if policy["name"] in _storage_policies:
            print("ERROR: Storage policy '%s' already defined" % policy["name"])
            return 5

        if policy["index"] in _storage_policies_idx:
            print("ERROR: Storage policy index '%s' already defined" %
                  policy["index"])
            return 5

        _storage_policies[policy['name']] = "storagepolicy_%(name)s" % policy
        _storage_policies_idx[policy['index']] = policy["name"]

        _write_to_file(output_fd,
                       "\n[%s]" % (_storage_policies[policy['name']]))

        # print the storage policy hosts.
        for drive in policy.get("hosts", []):
            data = _get_drive(drive)
            _write_to_file(output_fd, data)

        _write_to_file(output_fd,
                       "\n[%s:vars]" % (_storage_policies[policy['name']]))
        _write_to_file(output_fd, "index=%d" % (policy['index']))
        _write_to_file(output_fd, "policy_name=%s" % (policy['name']))
        policy_type = policy.get("type", 'replication')
        _write_to_file(output_fd, "type=%s" % (policy_type))

        depricated = policy.get("depricated", False)
        if depricated:
            _write_to_file(output_fd, "depricated=True")

        default = policy.get("default", False)
        if default:
            _write_to_file(output_fd, "default=True")

        if policy_type == 'replication':
            repl_num = policy.get("repl_number", DEFAULT_REPL_NUM)
            _write_to_file(output_fd, "repl_num=%d" % (repl_num))

    # Write out the storage policy catch all group
    _write_to_file(output_fd, "\n[storagepolicy:children]")
    for longname in _storage_policies.values():
        _write_to_file(output_fd, "%s" % (longname))

    _write_to_file(output_fd, "\n[storagepolicy:vars]")
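    # The default policy may be given either by name or by ring index.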
    if 'default' in _swift["storage_policies"]:
        default_sp = _swift["storage_policies"]["default"]
        if default_sp in _storage_policies:
            _write_to_file(output_fd, "default=%s" % (default_sp))
        elif default_sp in _storage_policies_idx:
            _write_to_file(output_fd,
                           "default=%s" % (_storage_policies_idx[default_sp]))
        else:
            print("ERROR: Default storage policy '%s' doesn't exist" %
                  default_sp)

    # Write out the object and swift catchall groups
    _write_to_file(output_fd, CATCH_ALL_GROUPS)

    # Now write out the global swift options that were gathered in the file
    for option in swift_options:
        _write_to_file(output_fd, option)

    # Done
    if output_fd:
        output_fd.flush()
        output_fd.close()
    return 0


if __name__ == "__main__":
    parser = OptionParser(USAGE)
    parser.add_option("-s", "--setup", dest="setup",
                      help="Specify the swift setup file.", metavar="FILE")
    parser.add_option("-v", "--verbose", action="store_true", dest="verbose",
                      default=False, help="Be more verbose")
    parser.add_option("-d", "--dryrun", action="store_true", dest="dry_run",
                      default=False, help="Print result out to stdout.")
    parser.add_option("-C", "--copy", action="store_false", dest="overwrite",
                      default=True, help="Make a copy if inventory file exists")
    parser.add_option("-i", "--import", dest="ring_folder", metavar="FILE",
                      help="Attempt to build a swift setup file"
                           " from the Swift builder files. Pass directory here")

    options, args = parser.parse_args(sys.argv[1:])
    if not options.setup or not exists(options.setup):
        print("Swift setup file not found or doesn't exist")
        parser.print_help()
        sys.exit(1)

    sys.exit(main(options.setup, options.verbose, options.dry_run,
                  options.overwrite))
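
A dry run (swift_inventory.py -s <setup yaml> -d) prints the generated
inventory to stdout. With a single proxy, account and container host (the IPs
below are hypothetical), the output looks roughly like:

    # This file was generated using swift_inventory.py version 0.1 at ...
    [local]
    localhost ansible_connection=local

    [proxy]
    192.0.2.10

    [proxy:vars]
    memcache_servers=127.0.0.1:11211
    authtoken_active=false

    [account]
    192.0.2.11 drive=/srv/disk region=0 zone=0 weight=100

    [account:vars]
    repl_number=3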