Merge pull request #341 from andymcc/swift-complete

Add Swift Support
Andy McCrae 2014-10-22 08:31:47 +01:00
commit 91f9f84784
38 changed files with 2645 additions and 2 deletions

View File

@ -0,0 +1,94 @@
---
# Set up swift group variables when using swift (not required if swift is not used).
# part_power is required under swift and cannot be changed once the ring is built.
# For account/container, min_part_hours and repl_number are the only settings that can be specified.
# These two can also be set at the "swift" level to act as defaults;
# otherwise the built-in defaults are used (repl_number of 3 and min_part_hours of 1).
# For storage policies, a name and a unique index are required, as well as repl_number and
# min_part_hours, which fall back to the defaults if not specified.
# There MUST be a storage policy with index 0 configured, which will be the default for legacy containers (created pre-storage policies).
# One policy can be set to "default: yes"; it becomes the default storage policy for newly created (non-legacy) containers.
# The index value must be unique.
# Storage policies can be set to "deprecated: yes", which means they will not be used.
# global_overrides:
# swift:
# part_power: 8
# account:
# repl_number: 3
# min_part_hours: 1
# container:
# repl_number: 3
# storage_policies:
# - policy:
# name: gold
# index: 0
# repl_number: 3
# default: yes
# - policy:
# name: silver
# index: 1
# repl_number: 2
# deprecated: yes
# User-defined Swift proxy hosts - not required when swift is not used.
# A swift-proxy container will be deployed on each of these hosts.
# It is recommended that these mirror the infra_hosts.
# swift-proxy_hosts:
# infra1:
# ip: 172.29.236.100
# infra2:
# ip: 172.29.236.101
# infra3:
# ip: 172.29.236.102
# User-defined object storage hosts - this group is not required.
# Host-specific settings go under swift_vars:
# region - the swift region (optional).
# zone - the swift zone (optional, defaults to 0).
# mount_point - where the drives are mounted on the server.
# drives - a list of drives in the server (each must have at least a name).
# The four variables above are "host specific".
# weight: a disk's weight (defaults to 100 if not specified).
# repl_ip: IP used specifically for object replication (optional).
# repl_port: port used specifically for object replication (optional).
# groups: a list of groups to add the drive to. A group is either a storage policy or the account or container servers
# (if not specified, the drive is added to all groups: account, container and every storage policy).
# The four settings above can be specified per host, per drive, or both;
# when both are given, the per-drive value takes precedence for that drive.
# ip can be specified in swift_vars to override the host's IP,
# or per drive to override it for that specific drive.
# swift_hosts:
# object_storage1:
# ip: 172.29.236.108
# container_vars:
# swift_vars:
# region: 0
# zone: 0
# groups:
# - silver
# - account
# mount_point: /srv/node
# drives:
# - name: sdb
# ip: 172.10.100.100
# repl_ip: 10.10.0.1
# repl_port: 54321
# groups:
# - gold
# - account
# - container
# - name: sdc
# weight: 150
# - name: sdd
# - name: sde
#
# object_storage2:
# ip: 172.29.236.109
# container_vars:
# swift_vars:
# region: 0
# zone: 1
# mount_point: /srv/node
# drives:
# - name: sdb
# - name: sdc
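For reference, a minimal uncommented version of the stanza documented above might look like the following sketch (values taken straight from the commented example; a single "gold" policy at index 0 marked as the default):

global_overrides:
  swift:
    part_power: 8
    storage_policies:
      - policy:
          name: gold
          index: 0
          default: yes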

View File

@ -116,6 +116,18 @@ component_skel:
utility:
belongs_to:
- utility_all
swift_proxy:
belongs_to:
- swift_all
swift_acc:
belongs_to:
- swift_all
swift_obj:
belongs_to:
- swift_all
swift_cont:
belongs_to:
- swift_all
container_skel:
cinder_api_container:
belongs_to:
@ -250,6 +262,7 @@ container_skel:
- storage_containers
- log_containers
- network_containers
- swift_containers
contains:
- rsyslog
utility_container:
@ -257,6 +270,29 @@ container_skel:
- infra_containers
contains:
- utility
swift_proxy_container:
belongs_to:
- infra_containers
contains:
- swift_proxy
swift_acc_container:
is_metal: true
belongs_to:
- swift_containers
contains:
- swift_acc
swift_obj_container:
is_metal: true
belongs_to:
- swift_containers
contains:
- swift_obj
swift_cont_container:
is_metal: true
belongs_to:
- swift_containers
contains:
- swift_cont
physical_skel:
network_containers:
belongs_to:
@ -288,3 +324,15 @@ physical_skel:
storage_hosts:
belongs_to:
- hosts
swift_containers:
belongs_to:
- all_containers
swift_hosts:
belongs_to:
- hosts
swift-proxy_containers:
belongs_to:
- all_containers
swift-proxy_hosts:
belongs_to:
- hosts

View File

@ -15,7 +15,7 @@
# This is the md5 of the environment file
# this will ensure consistency when deploying.
environment_version: 701a1a44b7d77473f3b930f21f78cddf   # was e0955a92a761d5845520a82dcca596af
# User defined CIDR used for containers
# Global cidr/s used for everything.

View File

@ -145,3 +145,8 @@ rpc_support_holland_password:
## Kibana Options
kibana_password:
# Swift Options:
swift_service_password:
swift_container_mysql_password:
swift_hash_path_suffix:
swift_hash_path_prefix:

View File

@ -0,0 +1,70 @@
---
# Copyright 2014, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# The variables file used by the playbooks for the swift_hosts & swift_proxy groups.
# It does not need to be imported explicitly via vars_files; the values are auto-populated.
authtoken_active: True
delay_auth_decision: true
## Service Name
service_name: swift
# Verbosity Options
debug: False
verbose: True
# only used when the lxc vg is present on the target
container_lvm_fstype: ext4
container_lvm_fssize: 5GB
# Swift default ports
swift_proxy_port: "8888"
swift_object_port: "6000"
swift_container_port: "6001"
swift_account_port: "6002"
# Swift default variables
swift_default_replication_number: 3
swift_default_min_part_hours: 1
swift_default_host_zone: 0
swift_default_drive_weight: 100
## DB
container_mysql_user: swift
container_mysql_password: "{{ swift_container_mysql_password }}"
container_database: swift
## Swift Auth
service_admin_tenant_name: "service"
service_admin_username: "swift"
service_admin_password: "{{ swift_service_password }}"
## Swift User / Group
system_user: swift
system_group: swift
## Service Names
service_names:
- swift-object
- swift-account
- swift-container
- swift-proxy
container_directories:
- /var/log/swift
- /var/lock/swift
- /etc/swift
- /etc/swift/rings/

View File

@ -0,0 +1,19 @@
---
# Copyright 2014, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
- include: swift-common.yml
- include: swift-build-rings.yml
- include: swift-proxy.yml
- include: swift-storage.yml

View File

@ -0,0 +1,35 @@
---
# Copyright 2014, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
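# Ring-building flow: collect the md5sums of any existing rings from the storage
# and proxy hosts, build/update the rings on the local deployment host, then
# distribute the resulting rings back out to the swift hosts and proxies.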
- hosts: swift_hosts:swift_proxy
user: root
roles:
- swift_ring_md5sum
- hosts: local
user: root
roles:
- { role: container_common, tags: [ 'directories' ] }
- openstack_common
- swift_common
- swift_ring_builder
vars_files:
- inventory/group_vars/swift_all.yml
- vars/repo_packages/swift.yml
- hosts: swift_hosts:swift_proxy
user: root
roles:
- swift_ring_distribute

View File

@ -0,0 +1,26 @@
---
# Copyright 2014, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
- hosts: swift_proxy:swift_hosts
user: root
roles:
- common
- common_sudoers
- container_common
- openstack_common
- openstack_openrc
- galera_client_cnf
vars_files:
- vars/repo_packages/swift.yml

View File

@ -0,0 +1,30 @@
---
# Copyright 2014, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
- hosts: swift_proxy
user: root
roles:
- swift_common
- swift_proxy
vars_files:
- inventory/group_vars/swift_all.yml
- hosts: swift_proxy[0]
user: root
roles:
- keystone_add_service
vars_files:
- vars/openstack_service_vars/swift_proxy_endpoint.yml

View File

@ -0,0 +1,25 @@
---
# Copyright 2014, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
- hosts: swift_hosts
user: root
roles:
- swift_common
- swift_storage_setup
- swift_container
- swift_object
- swift_account
vars_files:
- inventory/group_vars/swift_all.yml

View File

@ -22,3 +22,5 @@
recurse=true
when: container_directories is defined
with_items: container_directories
tags:
- directories

View File

@ -0,0 +1,27 @@
---
# Copyright 2014, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO: Replace these with init scripts
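# Each restart command is given twice ("cmd || cmd") so that, if the first
# swift-init call fails, it is retried once before the handler reports an error.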
- name: Restart account server
shell: swift-init account-server restart || swift-init account-server restart
- name: Restart account auditor
shell: swift-init account-auditor restart || swift-init account-auditor restart
- name: Restart account replicator
shell: swift-init account-replicator restart || swift-init account-replicator restart
- name: Restart account reaper
shell: swift-init account-reaper restart || swift-init account-reaper restart

View File

@ -0,0 +1,50 @@
---
# Copyright 2014, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
- name: "swift account server configuration"
template: >
src="account-server.conf.j2"
dest="/etc/swift/account-server.conf"
owner={{ system_user }}
mode=0644
notify:
- Restart account server
- Restart account auditor
- Restart account replicator
- Restart account reaper
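# Until real init scripts exist (see the TODO in the handlers), @reboot cron
# entries are used so the services come back up after a host reboot.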
- name: "Set account server to start at boot"
cron: >
name="Restart account-server on boot"
special_time=reboot
job="swift-init account-server start"
- name: "Set account auditor to start at boot"
cron: >
name="Restart account-auditor on boot"
special_time=reboot
job="swift-init account-auditor start"
- name: "Set account replicator to start at boot"
cron: >
name="Restart account-replicator on boot"
special_time=reboot
job="swift-init account-replicator start"
- name: "Set account reaper to start at boot"
cron: >
name="Restart account-reaper on boot"
special_time=reboot
job="swift-init account-reaper start"

View File

@ -0,0 +1,194 @@
[DEFAULT]
bind_ip = {{ container_address }}
bind_port = {{ swift_account_port }}
# bind_timeout = 30
# backlog = 4096
user = {{ system_user }}
# swift_dir = /etc/swift
devices = {{ swift_vars.mount_point }}
# mount_check = true
# disable_fallocate = false
#
# Use an integer to override the number of pre-forked processes that will
# accept connections.
# workers = auto
#
# Maximum concurrent requests per worker
# max_clients = 1024
#
# You can specify default log routing here if you want:
# log_name = swift
log_facility = LOG_LOCAL2
# log_level = INFO
# log_address = /dev/log
# The following caps the length of log lines to the value given; no limit if
# set to 0, the default.
# log_max_line_length = 0
#
# comma separated list of functions to call to setup custom log handlers.
# functions get passed: conf, name, log_to_console, log_route, fmt, logger,
# adapted_logger
# log_custom_handlers =
#
# If set, log_udp_host will override log_address
# log_udp_host =
# log_udp_port = 514
#
# You can enable StatsD logging here:
# log_statsd_host = localhost
# log_statsd_port = 8125
# log_statsd_default_sample_rate = 1.0
# log_statsd_sample_rate_factor = 1.0
# log_statsd_metric_prefix =
#
# If you don't mind the extra disk space usage in overhead, you can turn this
# on to preallocate disk space with SQLite databases to decrease fragmentation.
# db_preallocation = off
#
# eventlet_debug = false
#
# You can set fallocate_reserve to the number of bytes you'd like fallocate to
# reserve, whether there is space for the given file size or not.
# fallocate_reserve = 0
[pipeline:main]
pipeline = healthcheck recon account-server
[app:account-server]
use = egg:swift#account
log_facility = LOG_LOCAL1
# You can override the default log routing for this app here:
# set log_name = account-server
# set log_facility = LOG_LOCAL0
# set log_level = INFO
# set log_requests = true
# set log_address = /dev/log
#
# auto_create_account_prefix = .
#
# Configure parameter for creating specific server
# To handle all verbs, including replication verbs, do not specify
# "replication_server" (this is the default). To only handle replication,
# set to a True value (e.g. "True" or "1"). To handle only non-replication
# verbs, set to "False". Unless you have a separate replication network, you
# should not specify any value for "replication_server".
# replication_server = false
[filter:healthcheck]
use = egg:swift#healthcheck
# An optional filesystem path, which if present, will cause the healthcheck
# URL to return "503 Service Unavailable" with a body of "DISABLED BY FILE"
# disable_path =
[filter:recon]
use = egg:swift#recon
log_facility = LOG_LOCAL2
recon_cache_path = /var/cache/swift
recon_lock_path = /var/lock/swift
[account-replicator]
# You can override the default log routing for this app here (don't use set!):
# log_name = account-replicator
log_facility = LOG_LOCAL2
# log_level = INFO
# log_address = /dev/log
#
# vm_test_mode = no
per_diff = 10000
# max_diffs = 100
# concurrency = 8
# interval = 30
#
# How long without an error before a node's error count is reset. This will
# also be how long before a node is reenabled after suppression is triggered.
# error_suppression_interval = 60
#
# How many errors can accumulate before a node is temporarily ignored.
# error_suppression_limit = 10
#
# node_timeout = 10
# conn_timeout = 0.5
#
# The replicator also performs reclamation
# reclaim_age = 604800
#
# Time in seconds to wait between replication passes
# Note: if the parameter 'interval' is defined then it will be used in place
# of run_pause.
# run_pause = 30
#
# recon_cache_path = /var/cache/swift
[account-auditor]
# You can override the default log routing for this app here (don't use set!):
# log_name = account-auditor
log_facility = LOG_LOCAL2
# log_level = INFO
# log_address = /dev/log
#
# Will audit each account at most once per interval
# interval = 1800
#
# log_facility = LOG_LOCAL0
# log_level = INFO
# accounts_per_second = 200
# recon_cache_path = /var/cache/swift
[account-reaper]
# You can override the default log routing for this app here (don't use set!):
# log_name = account-reaper
log_facility = LOG_LOCAL2
# log_level = INFO
# log_address = /dev/log
#
# concurrency = 25
# interval = 3600
# node_timeout = 10
# conn_timeout = 0.5
#
# Normally, the reaper begins deleting account information for deleted accounts
# immediately; you can set this to delay its work however. The value is in
# seconds; 2592000 = 30 days for example.
delay_reaping = 604800
#
# If the account fails to be reaped due to a persistent error, the
# account reaper will log a message such as:
# Account <name> has not been reaped since <date>
# You can search logs for this message if space is not being reclaimed
# after you delete account(s).
# Default is 2592000 seconds (30 days). This is in addition to any time
# requested by delay_reaping.
# reap_warn_after = 2592000
# Note: Put it at the beginning of the pipeline to profile all middleware. But
# it is safer to put this after healthcheck.
[filter:xprofile]
use = egg:swift#xprofile
# This option enables you to switch profilers, which should inherit from the python
# standard profiler. Currently the supported value can be 'cProfile',
# 'eventlet.green.profile' etc.
# profile_module = eventlet.green.profile
#
# This prefix will be used to combine process ID and timestamp to name the
# profile data file. Make sure the executing user has permission to write
# into this path (missing path segments will be created, if necessary).
# If you enable profiling in more than one type of daemon, you must override
# it with a unique value like: /var/log/swift/profile/account.profile
# log_filename_prefix = /tmp/log/swift/profile/default.profile
#
# the profile data will be dumped to local disk based on above naming rule
# in this interval.
# dump_interval = 5.0
#
# Be careful, this option will enable profiler to dump data into the file with
# time stamp which means there will be lots of files piled up in the directory.
# dump_timestamp = false
#
# This is the path of the URL to access the mini web UI.
# path = /__profile__
#
# Clear the data when the wsgi server shutdown.
# flush_at_shutdown = false
#
# unwind the iterator of applications
# unwind = false
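The account server template above, and the container and object server templates later in this change, follow the same layout: they bind to the host's container_address on the default account/container/object ports (6002/6001/6000), serve the drives under swift_vars.mount_point, and log to LOG_LOCAL2/3/4, which the rsyslog template further down routes into per-service log files. As a rough illustration only, assuming container_address resolves to the example host address 172.29.236.108 and the example mount_point of /srv/node, the rendered account-server [DEFAULT] section would begin roughly as:

[DEFAULT]
bind_ip = 172.29.236.108
bind_port = 6002
user = swift
devices = /srv/node
log_facility = LOG_LOCAL2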

View File

@ -0,0 +1,17 @@
---
# Copyright 2014, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
- name: restart rsyslog
service: name=rsyslog state=restarted

View File

@ -0,0 +1,31 @@
---
# Copyright 2014, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
- name: "Create logging directory"
file:
path: "{{ item }}"
state: "directory"
owner: "syslog"
group: "syslog"
with_items:
- /var/log/swift
- /openstack/log/{{ inventory_hostname }}
- name: "Drop swift rsyslog conf"
template:
src: "swift-rsyslog.conf.j2"
dest: "/etc/rsyslog.d/10-swift.conf"
notify:
- restart rsyslog

View File

@ -0,0 +1,23 @@
---
# Copyright 2014, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
- include: log_setup.yml
- name: "Drop swift.conf template"
template: >
src="swift.conf.j2"
dest="/etc/swift/swift.conf"
owner={{ system_user }}
mode=0644

View File

@ -0,0 +1,22 @@
# Uncomment the following to have a log containing all logs together
#local1,local2,local3,local4.* /openstack/log/{{ inventory_hostname }}/all.log
# Uncomment the following to have hourly proxy logs for stats processing
#$template HourlyProxyLog,"/openstack/log/{{ inventory_hostname }}/hourly/%$YEAR%%$MONTH%%$DAY%%$HOUR%"
#local1.*;local1.!notice ?HourlyProxyLog
local1.*;local1.!notice /var/log/swift/proxy.log
local1.notice /var/log/swift/proxy.error
local1.* ~
local2.*;local2.!notice /openstack/log/{{ inventory_hostname }}/account.log
local2.notice /openstack/log/{{ inventory_hostname }}/account.error
local2.* ~
local3.*;local3.!notice /openstack/log/{{ inventory_hostname }}/container.log
local3.notice /openstack/log/{{ inventory_hostname }}/container.error
local3.* ~
local4.*;local4.!notice /openstack/log/{{ inventory_hostname }}/object.log
local4.notice /openstack/log/{{ inventory_hostname }}/object.error
local4.* ~

View File

@ -0,0 +1,94 @@
[swift-hash]
# swift_hash_path_suffix and swift_hash_path_prefix are used as part of
# the hashing algorithm when determining data placement in the cluster.
# These values should remain secret and MUST NOT change
# once a cluster has been deployed.
swift_hash_path_suffix = {{ swift_hash_path_suffix }}
swift_hash_path_prefix = {{ swift_hash_path_prefix }}
# Storage Policies
{% for policy in swift.storage_policies %}
[storage-policy:{{ policy.policy.index }}]
name = {{ policy.policy.name }}
{% if policy.policy.deprecated is defined %}
deprecated = {{ policy.policy.deprecated }}
{% endif %}
{% if policy.policy.default is defined %}
default = {{ policy.policy.default }}
{% endif %}
{% endfor %}
[swift-constraints]
# max_file_size is the largest "normal" object that can be saved in
# the cluster. This is also the limit on the size of each segment of
# a "large" object when using the large object manifest support.
# This value is set in bytes. Setting it to lower than 1MiB will cause
# some tests to fail. It is STRONGLY recommended to leave this value at
# the default (5 * 2**30 + 2).
#max_file_size = 5368709122
# max_meta_name_length is the max number of bytes in the utf8 encoding
# of the name portion of a metadata header.
#max_meta_name_length = 128
# max_meta_value_length is the max number of bytes in the utf8 encoding
# of a metadata value
#max_meta_value_length = 256
# max_meta_count is the max number of metadata keys that can be stored
# on a single account, container, or object
#max_meta_count = 90
# max_meta_overall_size is the max number of bytes in the utf8 encoding
# of the metadata (keys + values)
#max_meta_overall_size = 4096
# max_header_size is the max number of bytes in the utf8 encoding of each
# header. Using 8192 as default because eventlet uses 8192 as max size of
# header line. This value may need to be increased when using identity
# v3 API tokens including more than 7 catalog entries.
# See also include_service_catalog in proxy-server.conf-sample
# (documented in overview_auth.rst)
#max_header_size = 8192
# max_object_name_length is the max number of bytes in the utf8 encoding
# of an object name
#max_object_name_length = 1024
# container_listing_limit is the default (and max) number of items
# returned for a container listing request
#container_listing_limit = 10000
# account_listing_limit is the default (and max) number of items returned
# for an account listing request
#account_listing_limit = 10000
# max_account_name_length is the max number of bytes in the utf8 encoding
# of an account name
#max_account_name_length = 256
# max_container_name_length is the max number of bytes in the utf8 encoding
# of a container name
#max_container_name_length = 256
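As an illustration only, with the example "gold" and "silver" policies from the conf.d file earlier in this change, the storage-policy loop above would render roughly as follows (YAML "yes" values come through Jinja as "True", which swift's config parsing treats as true; repl_number and min_part_hours are not written here, they are consumed when the rings are built):

[storage-policy:0]
name = gold
default = True

[storage-policy:1]
name = silver
deprecated = True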

View File

@ -0,0 +1,27 @@
---
# Copyright 2014, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO: Replace these with init scripts
- name: Restart container server
shell: swift-init container-server restart || swift-init container-server restart
- name: Restart container auditor
shell: swift-init container-auditor restart || swift-init container-auditor restart
- name: Restart container replicator
shell: swift-init container-replicator restart || swift-init container-replicator restart
- name: Restart container updater
shell: swift-init container-updater restart || swift-init container-updater restart

View File

@ -0,0 +1,50 @@
---
# Copyright 2014, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
- name: "swift container server configuration"
template: >
src="container-server.conf.j2"
dest="/etc/swift/container-server.conf"
owner={{ system_user }}
mode=0644
notify:
- Restart container server
- Restart container auditor
- Restart container replicator
- Restart container updater
- name: "Set container server to start at boot"
cron: >
name="Restart container-server on boot"
special_time=reboot
job="swift-init container-server start"
- name: "Set container auditor to start at boot"
cron: >
name="Restart container-auditor at boot"
special_time=reboot
job="swift-init container-auditor start"
- name: "Set container replicator to start at boot"
cron: >
name="Restart container-replicator at boot"
special_time=reboot
job="swift-init container-replicator start"
- name: "Set container updater to start at boot"
cron: >
name="Restart container-updater at boot"
special_time=reboot
job="swift-init container-updater start"

View File

@ -0,0 +1,205 @@
[DEFAULT]
bind_ip = {{ container_address }}
bind_port = {{ swift_container_port }}
# bind_timeout = 30
# backlog = 4096
user = {{ system_user }}
# swift_dir = /etc/swift
devices = {{ swift_vars.mount_point }}
# mount_check = true
# disable_fallocate = false
#
# Use an integer to override the number of pre-forked processes that will
# accept connections.
# workers = auto
#
# Maximum concurrent requests per worker
# max_clients = 1024
#
# This is a comma separated list of hosts allowed in the X-Container-Sync-To
# field for containers. This is the old-style of using container sync. It is
# strongly recommended to use the new style of a separate
# container-sync-realms.conf -- see container-sync-realms.conf-sample
# allowed_sync_hosts = 127.0.0.1
#
# You can specify default log routing here if you want:
# log_name = swift
log_facility = LOG_LOCAL3
# log_level = INFO
# log_address = /dev/log
# The following caps the length of log lines to the value given; no limit if
# set to 0, the default.
# log_max_line_length = 0
#
# comma separated list of functions to call to setup custom log handlers.
# functions get passed: conf, name, log_to_console, log_route, fmt, logger,
# adapted_logger
# log_custom_handlers =
#
# If set, log_udp_host will override log_address
# log_udp_host =
# log_udp_port = 514
#
# You can enable StatsD logging here:
# log_statsd_host = localhost
# log_statsd_port = 8125
# log_statsd_default_sample_rate = 1.0
# log_statsd_sample_rate_factor = 1.0
# log_statsd_metric_prefix =
#
# If you don't mind the extra disk space usage in overhead, you can turn this
# on to preallocate disk space with SQLite databases to decrease fragmentation.
# db_preallocation = off
#
# eventlet_debug = false
#
# You can set fallocate_reserve to the number of bytes you'd like fallocate to
# reserve, whether there is space for the given file size or not.
# fallocate_reserve = 0
[pipeline:main]
pipeline = healthcheck recon container-server
[app:container-server]
use = egg:swift#container
log_facility = LOG_LOCAL1
# You can override the default log routing for this app here:
# set log_name = container-server
# set log_facility = LOG_LOCAL0
# set log_level = INFO
# set log_requests = true
# set log_address = /dev/log
#
# node_timeout = 3
# conn_timeout = 0.5
# allow_versions = false
# auto_create_account_prefix = .
#
# Configure parameter for creating specific server
# To handle all verbs, including replication verbs, do not specify
# "replication_server" (this is the default). To only handle replication,
# set to a True value (e.g. "True" or "1"). To handle only non-replication
# verbs, set to "False". Unless you have a separate replication network, you
# should not specify any value for "replication_server".
# replication_server = false
[filter:healthcheck]
use = egg:swift#healthcheck
# An optional filesystem path, which if present, will cause the healthcheck
# URL to return "503 Service Unavailable" with a body of "DISABLED BY FILE"
# disable_path =
[filter:recon]
use = egg:swift#recon
log_facility = LOG_LOCAL2
recon_cache_path = /var/cache/swift
recon_lock_path = /var/lock/swift
[container-replicator]
# You can override the default log routing for this app here (don't use set!):
# log_name = container-replicator
log_facility = LOG_LOCAL2
# log_level = INFO
# log_address = /dev/log
#
# vm_test_mode = no
# per_diff = 1000
# max_diffs = 100
# concurrency = 8
# interval = 30
# node_timeout = 10
# conn_timeout = 0.5
#
# The replicator also performs reclamation
# reclaim_age = 604800
#
# Time in seconds to wait between replication passes
# Note: if the parameter 'interval' is defined then it will be used in place
# of run_pause.
# run_pause = 30
#
# recon_cache_path = /var/cache/swift
[container-updater]
# You can override the default log routing for this app here (don't use set!):
# log_name = container-updater
log_facility = LOG_LOCAL2
# log_level = INFO
# log_address = /dev/log
#
# interval = 300
# concurrency = 4
node_timeout = 15
conn_timeout = 5
#
# slowdown will sleep that amount between containers
# slowdown = 0.01
#
# Seconds to suppress updating an account that has generated an error
# account_suppression_time = 60
#
# recon_cache_path = /var/cache/swift
[container-auditor]
# You can override the default log routing for this app here (don't use set!):
# log_name = container-auditor
log_facility = LOG_LOCAL2
# log_level = INFO
# log_address = /dev/log
#
# Will audit each container at most once per interval
# interval = 1800
#
# containers_per_second = 200
# recon_cache_path = /var/cache/swift
[container-sync]
# You can override the default log routing for this app here (don't use set!):
# log_name = container-sync
# log_facility = LOG_LOCAL0
# log_level = INFO
# log_address = /dev/log
#
# If you need to use an HTTP Proxy, set it here; defaults to no proxy.
# You can also set this to a comma separated list of HTTP Proxies and they will
# be randomly used (simple load balancing).
# sync_proxy = http://10.1.1.1:8888,http://10.1.1.2:8888
#
# Will sync each container at most once per interval
# interval = 300
#
# Maximum amount of time to spend syncing each container per pass
# container_time = 60
# Note: Put it at the beginning of the pipeline to profile all middleware. But
# it is safer to put this after healthcheck.
[filter:xprofile]
use = egg:swift#xprofile
# This option enables you to switch profilers, which should inherit from the python
# standard profiler. Currently the supported value can be 'cProfile',
# 'eventlet.green.profile' etc.
# profile_module = eventlet.green.profile
#
# This prefix will be used to combine process ID and timestamp to name the
# profile data file. Make sure the executing user has permission to write
# into this path (missing path segments will be created, if necessary).
# If you enable profiling in more than one type of daemon, you must override
# it with a unique value like: /var/log/swift/profile/container.profile
# log_filename_prefix = /tmp/log/swift/profile/default.profile
#
# the profile data will be dumped to local disk based on above naming rule
# in this interval.
# dump_interval = 5.0
#
# Be careful, this option will enable profiler to dump data into the file with
# time stamp which means there will be lots of files piled up in the directory.
# dump_timestamp = false
#
# This is the path of the URL to access the mini web UI.
# path = /__profile__
#
# Clear the data when the wsgi server shutdown.
# flush_at_shutdown = false
#
# unwind the iterator of applications
# unwind = false

View File

@ -0,0 +1,27 @@
---
# Copyright 2014, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO: Replace these with init scripts
- name: Restart object server
shell: swift-init object-server restart || swift-init object-server restart
- name: Restart object auditor
shell: swift-init object-auditor restart || swift-init object-auditor restart
- name: Restart object replicator
shell: swift-init object-replicator restart || swift-init object-replicator restart
- name: Restart object updater
shell: swift-init object-updater restart || swift-init object-updater restart

View File

@ -0,0 +1,50 @@
---
# Copyright 2014, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
- name: "swift object server configuration"
template: >
src="object-server.conf.j2"
dest="/etc/swift/object-server.conf"
owner={{ system_user }}
mode=0644
notify:
- Restart object server
- Restart object auditor
- Restart object replicator
- Restart object updater
- name: "Set object server to start at boot"
cron: >
name="Restart object-server on boot"
special_time=reboot
job="swift-init object-server start"
- name: "Set object auditor to start at boot"
cron: >
name="Restart object-auditor on boot"
special_time=reboot
job="swift-init object-auditor start"
- name: "Set object replicator to start at boot"
cron: >
name="Restart object-replicator on boot"
special_time=reboot
job="swift-init object-replicator start"
- name: "Set object updater to start at boot"
cron: >
name="Restart object-updater on boot"
special_time=reboot
job="swift-init object-updater start"

View File

@ -0,0 +1,281 @@
[DEFAULT]
bind_ip = {{ container_address }}
bind_port = {{ swift_object_port }}
# bind_timeout = 30
# backlog = 4096
user = {{ system_user }}
swift_dir = /etc/swift
devices = {{ swift_vars.mount_point }}
# mount_check = true
# disable_fallocate = false
# expiring_objects_container_divisor = 86400
# expiring_objects_account_name = expiring_objects
#
# Use an integer to override the number of pre-forked processes that will
# accept connections.
# workers = auto
#
# Maximum concurrent requests per worker
# max_clients = 1024
#
# You can specify default log routing here if you want:
# log_name = swift
log_facility = LOG_LOCAL4
# log_level = INFO
# log_address = /dev/log
# The following caps the length of log lines to the value given; no limit if
# set to 0, the default.
# log_max_line_length = 0
#
# comma separated list of functions to call to setup custom log handlers.
# functions get passed: conf, name, log_to_console, log_route, fmt, logger,
# adapted_logger
# log_custom_handlers =
#
# If set, log_udp_host will override log_address
# log_udp_host =
# log_udp_port = 514
#
# You can enable StatsD logging here:
# log_statsd_host = localhost
# log_statsd_port = 8125
# log_statsd_default_sample_rate = 1.0
# log_statsd_sample_rate_factor = 1.0
# log_statsd_metric_prefix =
#
# eventlet_debug = false
#
# You can set fallocate_reserve to the number of bytes you'd like fallocate to
# reserve, whether there is space for the given file size or not.
# fallocate_reserve = 0
#
# Time to wait while attempting to connect to another backend node.
# conn_timeout = 0.5
# Time to wait while sending each chunk of data to another backend node.
# node_timeout = 3
# Time to wait while receiving each chunk of data from a client or another
# backend node.
# client_timeout = 60
#
# network_chunk_size = 65536
# disk_chunk_size = 65536
[pipeline:main]
pipeline = healthcheck recon object-server
[app:object-server]
use = egg:swift#object
log_facility = LOG_LOCAL1
# You can override the default log routing for this app here:
# set log_name = object-server
# set log_facility = LOG_LOCAL0
# set log_level = INFO
# set log_requests = true
# set log_address = /dev/log
#
# max_upload_time = 86400
# slow = 0
#
# Objects smaller than this are not evicted from the buffercache once read
# keep_cache_size = 5424880
#
# If true, objects for authenticated GET requests may be kept in buffer cache
# if small enough
# keep_cache_private = false
#
# on PUTs, sync data every n MB
# mb_per_sync = 512
mb_per_sync = 64
#
# Comma separated list of headers that can be set in metadata on an object.
# This list is in addition to X-Object-Meta-* headers and cannot include
# Content-Type, etag, Content-Length, or deleted
# allowed_headers = Content-Disposition, Content-Encoding, X-Delete-At, X-Object-Manifest, X-Static-Large-Object
#
# auto_create_account_prefix = .
#
# A value of 0 means "don't use thread pools". A reasonable starting point is
# 4.
# threads_per_disk = 0
#
# Configure parameter for creating specific server
# To handle all verbs, including replication verbs, do not specify
# "replication_server" (this is the default). To only handle replication,
# set to a True value (e.g. "True" or "1"). To handle only non-replication
# verbs, set to "False". Unless you have a separate replication network, you
# should not specify any value for "replication_server".
# replication_server = false
#
# Set to restrict the number of concurrent incoming REPLICATION requests
# Set to 0 for unlimited
# Note that REPLICATION is currently an ssync only item
# replication_concurrency = 4
#
# Restricts incoming REPLICATION requests to one per device,
# replication_concurrency above allowing. This can help control I/O to each
# device, but you may wish to set this to False to allow multiple REPLICATION
# requests (up to the above replication_concurrency setting) per device.
# replication_one_per_device = True
#
# Number of seconds to wait for an existing replication device lock before
# giving up.
# replication_lock_timeout = 15
#
# These next two settings control when the REPLICATION subrequest handler will
# abort an incoming REPLICATION attempt. An abort will occur if there are at
# least threshold number of failures and the value of failures / successes
# exceeds the ratio. The defaults of 100 and 1.0 means that at least 100
# failures have to occur and there have to be more failures than successes for
# an abort to occur.
# replication_failure_threshold = 100
# replication_failure_ratio = 1.0
[filter:healthcheck]
use = egg:swift#healthcheck
# An optional filesystem path, which if present, will cause the healthcheck
# URL to return "503 Service Unavailable" with a body of "DISABLED BY FILE"
# disable_path =
[filter:recon]
use = egg:swift#recon
log_facility = LOG_LOCAL2
recon_cache_path = /var/cache/swift
recon_lock_path = /var/lock/swift
[object-replicator]
# You can override the default log routing for this app here (don't use set!):
# log_name = object-replicator
log_facility = LOG_LOCAL2
# log_level = INFO
# log_address = /dev/log
#
# vm_test_mode = no
# daemonize = on
# run_pause = 30
concurrency = 6
# stats_interval = 300
#
# The sync method to use; default is rsync but you can use ssync to try the
# EXPERIMENTAL all-swift-code-no-rsync-callouts method. Once ssync is verified
# as having performance comparable to, or better than, rsync, we plan to
# deprecate rsync so we can move on with more features for replication.
# sync_method = rsync
#
# max duration of a partition rsync
# rsync_timeout = 900
#
# bandwidth limit for rsync in kB/s. 0 means unlimited
# rsync_bwlimit = 0
#
# passed to rsync for io op timeout
# rsync_io_timeout = 30
#
# node_timeout = <whatever's in the DEFAULT section or 10>
# max duration of an http request; this is for REPLICATE finalization calls and
# so should be longer than node_timeout
# http_timeout = 60
#
# attempts to kill all workers if nothing replicates for lockup_timeout seconds
# lockup_timeout = 1800
#
# The replicator also performs reclamation
# reclaim_age = 604800
#
# ring_check_interval = 15
# recon_cache_path = /var/cache/swift
#
# limits how long rsync error log lines are
# 0 means to log the entire line
# rsync_error_log_line_length = 0
#
# handoffs_first and handoff_delete are options for a special case
# such as disks filling up in the cluster. These two options SHOULD NOT BE
# CHANGED, except in such extreme situations (e.g. disks are full
# or about to fill up; in any case, DO NOT let your drives fill up).
# handoffs_first is the flag to replicate handoffs prior to canonical
# partitions. It allows handoffs to be synced and deleted quickly.
# If set to a True value (e.g. "True" or "1"), partitions
# that are not supposed to be on the node will be replicated first.
# handoffs_first = False
#
# handoff_delete is the number of replicas which are ensured in swift.
# If a number lower than the number of replicas is set, the object-replicator
# may delete local handoffs even though not all replicas are ensured in the
# cluster. The object-replicator removes local handoff partition directories
# after syncing a partition when the number of successful responses is greater
# than or equal to this number. By default (auto), handoff partitions are only
# removed once they have successfully replicated to all the canonical nodes.
# handoff_delete = auto
[object-updater]
# You can override the default log routing for this app here (don't use set!):
# log_name = object-updater
log_facility = LOG_LOCAL2
# log_level = INFO
# log_address = /dev/log
#
# interval = 300
# node_timeout = <whatever's in the DEFAULT section or 10>
# slowdown will sleep that amount between objects
# slowdown = 0.01
#
# recon_cache_path = /var/cache/swift
concurrency = 3
node_timeout = 60
conn_timeout = 5
[object-auditor]
# You can override the default log routing for this app here (don't use set!):
# log_name = object-auditor
log_facility = LOG_LOCAL2
# log_level = INFO
# log_address = /dev/log
#
# You can set the disk chunk size that the auditor uses making it larger if
# you like for more efficient local auditing of larger objects
# disk_chunk_size = 65536
# files_per_second = 20
# concurrency = 1
# bytes_per_second = 10000000
# log_time = 3600
# zero_byte_files_per_second = 50
# recon_cache_path = /var/cache/swift
# Takes a comma separated list of ints. If set, the object auditor will
# increment a counter for every object whose size is <= to the given break
# points and report the result after a full scan.
# object_size_stats =
# Note: Put it at the beginning of the pipeline to profile all middleware. But
# it is safer to put this after healthcheck.
[filter:xprofile]
use = egg:swift#xprofile
# This option enables you to switch profilers, which should inherit from the python
# standard profiler. Currently the supported value can be 'cProfile',
# 'eventlet.green.profile' etc.
# profile_module = eventlet.green.profile
#
# This prefix will be used to combine process ID and timestamp to name the
# profile data file. Make sure the executing user has permission to write
# into this path (missing path segments will be created, if necessary).
# If you enable profiling in more than one type of daemon, you must override
# it with a unique value like: /var/log/swift/profile/object.profile
# log_filename_prefix = /tmp/log/swift/profile/default.profile
#
# the profile data will be dumped to local disk based on above naming rule
# in this interval.
# dump_interval = 5.0
#
# Be careful, this option will enable profiler to dump data into the file with
# time stamp which means there will be lots of files piled up in the directory.
# dump_timestamp = false
#
# This is the path of the URL to access the mini web UI.
# path = /__profile__
#
# Clear the data when the wsgi server shutdown.
# flush_at_shutdown = false
#
# unwind the iterator of applications
# unwind = false

View File

@ -0,0 +1,18 @@
---
# Copyright 2014, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO: Replace this with init scripts
- name: Restart proxy server
shell: swift-init proxy-server restart || swift-init proxy-server restart

View File

@ -0,0 +1,28 @@
---
# Copyright 2014, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
- name: swift proxy server configuration
template: >
src="proxy-server.conf.j2"
dest="/etc/swift/proxy-server.conf"
owner={{ service_admin_username }}
mode=0644
notify: Restart proxy server
- name: Set proxy server to start at boot
cron: >
name="Restart swift proxy at boot"
special_time=reboot
job="swift-init proxy-server start"

View File

@ -0,0 +1,623 @@
[DEFAULT]
bind_ip = 0.0.0.0
bind_port = {{ swift_proxy_port }}
# bind_timeout = 30
# backlog = 4096
# swift_dir = /etc/swift
user = {{ system_user }}
# Enables exposing configuration settings via HTTP GET /info.
# expose_info = true
# Key to use for admin calls that are HMAC signed. Default is empty,
# which will disable admin calls to /info.
# admin_key = secret_admin_key
#
# Allows the ability to withhold sections from showing up in the public calls
# to /info. You can withhold subsections by separating the dict level with a
# ".". The following would cause the sections 'container_quotas' and 'tempurl'
# to not be listed, and the key max_failed_deletes would be removed from
# bulk_delete. Default is empty, allowing all registered features to be listed
# via HTTP GET /info.
# disallowed_sections = container_quotas, tempurl, bulk_delete.max_failed_deletes
# Use an integer to override the number of pre-forked processes that will
# accept connections. Should default to the number of effective cpu
# cores in the system. It's worth noting that individual workers will
# use many eventlet co-routines to service multiple concurrent requests.
# workers = auto
#
# Maximum concurrent requests per worker
# max_clients = 1024
#
# Set the following two lines to enable SSL. This is for testing only.
# cert_file = /etc/swift/proxy.crt
# key_file = /etc/swift/proxy.key
#
# expiring_objects_container_divisor = 86400
# expiring_objects_account_name = expiring_objects
#
# You can specify default log routing here if you want:
# log_name = swift
log_facility = LOG_LOCAL1
# log_level = INFO
# log_headers = false
# log_address = /dev/log
# The following caps the length of log lines to the value given; no limit if
# set to 0, the default.
# log_max_line_length = 0
#
# This optional suffix (default is empty), appended to the swift transaction
# id, allows one to easily figure out which cluster an X-Trans-Id belongs to.
# This is very useful when one is managing more than one swift cluster.
# trans_id_suffix =
#
# comma separated list of functions to call to setup custom log handlers.
# functions get passed: conf, name, log_to_console, log_route, fmt, logger,
# adapted_logger
# log_custom_handlers =
#
# If set, log_udp_host will override log_address
# log_udp_host =
# log_udp_port = 514
#
# You can enable StatsD logging here:
# log_statsd_host = localhost
# log_statsd_port = 8125
# log_statsd_default_sample_rate = 1.0
# log_statsd_sample_rate_factor = 1.0
# log_statsd_metric_prefix =
#
# Use a comma separated list of full url (http://foo.bar:1234,https://foo.bar)
# cors_allow_origin =
# strict_cors_mode = True
#
# client_timeout = 60
# eventlet_debug = false
[pipeline:main]
{% if authtoken_active %}
pipeline = catch_errors gatekeeper healthcheck proxy-logging cache container_sync bulk tempurl ratelimit authtoken keystoneauth container-quotas account-quotas slo dlo proxy-logging proxy-server
{% else %}
pipeline = catch_errors gatekeeper healthcheck proxy-logging cache container_sync bulk tempurl ratelimit tempauth container-quotas account-quotas slo dlo proxy-logging proxy-server
{% endif %}
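# authtoken_active (defaulted to True in the swift group vars) selects the
# keystone authtoken/keystoneauth pipeline; otherwise the pipeline falls back to tempauth.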
[app:proxy-server]
use = egg:swift#proxy
log_facility = LOG_LOCAL0
# You can override the default log routing for this app here:
# set log_name = proxy-server
# set log_facility = LOG_LOCAL0
# set log_level = INFO
# set log_address = /dev/log
#
# log_handoffs = true
# recheck_account_existence = 60
# recheck_container_existence = 60
# object_chunk_size = 65536
# client_chunk_size = 65536
#
# How long the proxy server will wait on responses from the a/c/o servers.
node_timeout = 60
#
# How long the proxy server will wait for an initial response and to read a
# chunk of data from the object servers while serving GET / HEAD requests.
# Timeouts from these requests can be recovered from so setting this to
# something lower than node_timeout would provide quicker error recovery
# while allowing for a longer timeout for non-recoverable requests (PUTs).
# Defaults to node_timeout, should be overridden if node_timeout is set to a
# high number to prevent client timeouts from firing before the proxy server
# has a chance to retry.
# recoverable_node_timeout = node_timeout
#
conn_timeout = 3.5
#
# How long to wait for requests to finish after a quorum has been established.
# post_quorum_timeout = 0.5
#
# How long without an error before a node's error count is reset. This will
# also be how long before a node is reenabled after suppression is triggered.
# error_suppression_interval = 60
#
# How many errors can accumulate before a node is temporarily ignored.
# error_suppression_limit = 10
#
# If set to 'true' any authorized user may create and delete accounts; if
# 'false' no one, even authorized, can.
# allow_account_management = false
#
# Set object_post_as_copy = false to turn on fast posts where only the metadata
# changes are stored anew and the original data file is kept in place. This
# makes for quicker posts; but since the container metadata isn't updated in
# this mode, features like container sync won't be able to sync posts.
# object_post_as_copy = true
#
# If set to 'true' authorized accounts that do not yet exist within the Swift
# cluster will be automatically created.
account_autocreate = true
#
# If set to a positive value, trying to create a container when the account
# already has at least this maximum containers will result in a 403 Forbidden.
# Note: This is a soft limit, meaning a user might exceed the cap for
# recheck_account_existence before the 403s kick in.
# max_containers_per_account = 0
#
# This is a comma separated list of account hashes that ignore the
# max_containers_per_account cap.
# max_containers_whitelist =
#
# Comma separated list of Host headers to which the proxy will deny requests.
# deny_host_headers =
#
# Prefix used when automatically creating accounts.
# auto_create_account_prefix = .
#
# Depth of the proxy put queue.
# put_queue_depth = 10
#
# Storage nodes can be chosen at random (shuffle), by using timing
# measurements (timing), or by using an explicit match (affinity).
# Using timing measurements may allow for lower overall latency, while
# using affinity allows for finer control. In both the timing and
# affinity cases, equally-sorting nodes are still randomly chosen to
# spread load.
# The valid values for sorting_method are "affinity", "shuffle", and "timing".
# sorting_method = shuffle
#
# If the "timing" sorting_method is used, the timings will only be valid for
# the number of seconds configured by timing_expiry.
# timing_expiry = 300
#
# The maximum time (seconds) that a large object connection is allowed to last.
# max_large_object_get_time = 86400
#
# Set to the number of nodes to contact for a normal request. You can use
# '* replicas' at the end to have it use the number given times the number of
# replicas for the ring being used for the request.
# request_node_count = 2 * replicas
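# For illustration (not an upstream note): with a 3-replica ring,
# "2 * replicas" allows up to 6 nodes to be contacted for a request.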
#
# Which backend servers to prefer on reads. Format is r<N> for region
# N or r<N>z<M> for region N, zone M. The value after the equals is
# the priority; lower numbers are higher priority.
#
# Example: first read from region 1 zone 1, then region 1 zone 2, then
# anything in region 2, then everything else:
# read_affinity = r1z1=100, r1z2=200, r2=300
# Default is empty, meaning no preference.
# read_affinity =
#
# Which backend servers to prefer on writes. Format is r<N> for region
# N or r<N>z<M> for region N, zone M. If this is set, then when
# handling an object PUT request, some number (see setting
# write_affinity_node_count) of local backend servers will be tried
# before any nonlocal ones.
#
# Example: try to write to regions 1 and 2 before writing to any other
# nodes:
# write_affinity = r1, r2
# Default is empty, meaning no preference.
# write_affinity =
#
# The number of local (as governed by the write_affinity setting)
# nodes to attempt to contact first, before any non-local ones. You
# can use '* replicas' at the end to have it use the number given
# times the number of replicas for the ring being used for the
# request.
# write_affinity_node_count = 2 * replicas
#
# These are the headers whose values will only be shown to swift_owners. The
# exact definition of a swift_owner is up to the auth system in use, but
# usually indicates administrative responsibilities.
# swift_owner_headers = x-container-read, x-container-write, x-container-sync-key, x-container-sync-to, x-account-meta-temp-url-key, x-account-meta-temp-url-key-2, x-account-access-control
[filter:tempauth]
use = egg:swift#tempauth
# You can override the default log routing for this filter here:
# set log_name = tempauth
# set log_facility = LOG_LOCAL0
# set log_level = INFO
# set log_headers = false
# set log_address = /dev/log
#
# The reseller prefix will verify a token begins with this prefix before even
# attempting to validate it. Also, with authorization, only Swift storage
# accounts with this prefix will be authorized by this middleware. Useful if
# multiple auth systems are in use for one Swift cluster.
# reseller_prefix = AUTH
#
# The auth prefix will cause requests beginning with this prefix to be routed
# to the auth subsystem, for granting tokens, etc.
# auth_prefix = /auth/
# token_life = 86400
#
# This allows middleware higher in the WSGI pipeline to override auth
# processing, useful for middleware such as tempurl and formpost. If you know
# you're not going to use such middleware and you want a bit of extra security,
# you can set this to false.
# allow_overrides = true
#
# This specifies what scheme to return with storage urls:
# http, https, or default (chooses based on what the server is running as)
# This can be useful with an SSL load balancer in front of a non-SSL server.
# storage_url_scheme = default
#
# Lastly, you need to list all the accounts/users you want here. The format is:
# user_<account>_<user> = <key> [group] [group] [...] [storage_url]
# or if you want underscores in <account> or <user>, you can base64 encode them
# (with no equal signs) and use this format:
# user64_<account_b64>_<user_b64> = <key> [group] [group] [...] [storage_url]
# There are special groups of:
# .reseller_admin = can do anything to any account for this auth
# .admin = can do anything within the account
# If neither of these groups are specified, the user can only access containers
# that have been explicitly allowed for them by a .admin or .reseller_admin.
# The trailing optional storage_url allows you to specify an alternate url to
# hand back to the user upon authentication. If not specified, this defaults to
# $HOST/v1/<reseller_prefix>_<account> where $HOST will do its best to resolve
# to what the requester would need to use to reach this host.
# Here are example entries, required for running the tests:
{% if not authtoken_active %}
user_admin_admin = admin .admin .reseller_admin
user_test_tester = testing .admin
user_test2_tester2 = testing2 .admin
user_test_tester3 = testing3
{% endif %}
# To enable Keystone authentication you first need to configure the auth
# token middleware. An example is shown below; please refer to Keystone's
# documentation for details about the different settings.
#
# You will also need the keystoneauth middleware enabled and in your main
# pipeline, so instead of tempauth you can change the pipeline entry to:
# authtoken keystoneauth
#
{% if authtoken_active %}
[filter:authtoken]
paste.filter_factory = keystonemiddleware.auth_token:filter_factory
auth_host = {{ auth_host }}
auth_port = {{ auth_port }}
auth_protocol = {{ auth_protocol }}
auth_uri = {{ auth_protocol }}://{{ auth_host }}:{{ auth_port }}
admin_tenant_name = {{ service_admin_tenant_name }}
admin_user = {{ service_admin_username }}
admin_password = {{ service_admin_password }}
delay_auth_decision = {{ delay_auth_decision }}
# cache = swift.cache
# include_service_catalog = False
{% endif %}
#
[filter:keystoneauth]
use = egg:swift#keystoneauth
# operator_roles lists the roles whose users are allowed to manage a tenant,
# create containers, and grant ACLs to others.
operator_roles = admin, swiftoperator
# The reseller admin role has the ability to create and delete accounts
reseller_admin_role = reseller_admin
[filter:healthcheck]
use = egg:swift#healthcheck
# An optional filesystem path, which if present, will cause the healthcheck
# URL to return "503 Service Unavailable" with a body of "DISABLED BY FILE".
# This facility may be used to temporarily remove a Swift node from a load
# balancer pool during maintenance or upgrade (remove the file to allow the
# node back into the load balancer pool).
# disable_path =
[filter:cache]
use = egg:swift#memcache
# You can override the default log routing for this filter here:
# set log_name = cache
# set log_facility = LOG_LOCAL0
# set log_level = INFO
# set log_headers = false
# set log_address = /dev/log
#
# If not set here, the value for memcache_servers will be read from
# memcache.conf (see memcache.conf-sample) or lacking that file, it will
# default to the value below. You can specify multiple servers separated with
# commas, as in: 10.1.2.3:11211,10.1.2.4:11211
memcache_servers = {{ internal_vip_address }}:{{ memcached_port }}
#
# Sets how memcache values are serialized and deserialized:
# 0 = older, insecure pickle serialization
# 1 = json serialization but pickles can still be read (still insecure)
# 2 = json serialization only (secure and the default)
# If not set here, the value for memcache_serialization_support will be read
# from /etc/swift/memcache.conf (see memcache.conf-sample).
# To avoid an instant full cache flush, existing installations should
# upgrade with 0, then set to 1 and reload, then after some time (24 hours)
# set to 2 and reload.
# In the future, the ability to use pickle serialization will be removed.
memcache_serialization_support = 2
#
# Sets the maximum number of connections to each memcached server per worker
# memcache_max_connections = 2
[filter:ratelimit]
use = egg:swift#ratelimit
# You can override the default log routing for this filter here:
# set log_name = ratelimit
# set log_facility = LOG_LOCAL0
# set log_level = INFO
# set log_headers = false
# set log_address = /dev/log
#
# clock_accuracy should represent how accurate the proxy servers' system clocks
# are with each other. 1000 means that all the proxies' clock are accurate to
# each other within 1 millisecond. No ratelimit should be higher than the
# clock accuracy.
# clock_accuracy = 1000
#
# max_sleep_time_seconds = 60
#
# log_sleep_time_seconds of 0 means disabled
# log_sleep_time_seconds = 0
#
# Allows for slow rates (e.g. running up to 5 seconds behind) to catch up.
# rate_buffer_seconds = 5
#
# account_ratelimit of 0 means disabled
# account_ratelimit = 0
# these are comma separated lists of account names
# account_whitelist = a,b
# account_blacklist = c,d
# With container_ratelimit_x = r, containers of size x are limited to r write
# requests per second. The container rate is linearly interpolated from the
# values given; with the values below, a container of size 5 will get a rate
# of 75 (see the worked example after the values).
# container_ratelimit_0 = 100
# container_ratelimit_10 = 50
# container_ratelimit_50 = 20
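# Worked example for the interpolation above (illustrative): a container of
# size 5 falls halfway between the points (0, 100) and (10, 50), so its write
# rate is 100 + (50 - 100) * (5 - 0) / (10 - 0) = 75 requests per second.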
# Similarly to the above container-level write limits, the following will limit
# container GET (listing) requests.
# container_listing_ratelimit_0 = 100
# container_listing_ratelimit_10 = 50
# container_listing_ratelimit_50 = 20
[filter:domain_remap]
use = egg:swift#domain_remap
# You can override the default log routing for this filter here:
# set log_name = domain_remap
# set log_facility = LOG_LOCAL0
# set log_level = INFO
# set log_headers = false
# set log_address = /dev/log
#
# storage_domain = example.com
# path_root = v1
# reseller_prefixes = AUTH
[filter:catch_errors]
use = egg:swift#catch_errors
# You can override the default log routing for this filter here:
# set log_name = catch_errors
# set log_facility = LOG_LOCAL0
# set log_level = INFO
# set log_headers = false
# set log_address = /dev/log
[filter:cname_lookup]
# Note: this middleware requires python-dnspython
use = egg:swift#cname_lookup
# You can override the default log routing for this filter here:
# set log_name = cname_lookup
# set log_facility = LOG_LOCAL0
# set log_level = INFO
# set log_headers = false
# set log_address = /dev/log
#
# Specify the storage_domain that match your cloud, multiple domains
# can be specified separated by a comma
# storage_domain = example.com
#
# lookup_depth = 1
# Note: Put staticweb just after your auth filter(s) in the pipeline
[filter:staticweb]
use = egg:swift#staticweb
# Note: Put tempurl before dlo, slo and your auth filter(s) in the pipeline
[filter:tempurl]
use = egg:swift#tempurl
# The methods allowed with Temp URLs.
# methods = GET HEAD PUT POST DELETE
#
# The headers to remove from incoming requests. Simply a whitespace delimited
# list of header names and names can optionally end with '*' to indicate a
# prefix match. incoming_allow_headers is a list of exceptions to these
# removals.
# incoming_remove_headers = x-timestamp
#
# The headers allowed as exceptions to incoming_remove_headers. Simply a
# whitespace delimited list of header names and names can optionally end with
# '*' to indicate a prefix match.
# incoming_allow_headers =
#
# The headers to remove from outgoing responses. Simply a whitespace delimited
# list of header names and names can optionally end with '*' to indicate a
# prefix match. outgoing_allow_headers is a list of exceptions to these
# removals.
# outgoing_remove_headers = x-object-meta-*
#
# The headers allowed as exceptions to outgoing_remove_headers. Simply a
# whitespace delimited list of header names and names can optionally end with
# '*' to indicate a prefix match.
# outgoing_allow_headers = x-object-meta-public-*
# Note: Put formpost just before your auth filter(s) in the pipeline
[filter:formpost]
use = egg:swift#formpost
# Note: Just needs to be placed before the proxy-server in the pipeline.
[filter:name_check]
use = egg:swift#name_check
# forbidden_chars = '"`<>
# maximum_length = 255
# forbidden_regexp = /\./|/\.\./|/\.$|/\.\.$
[filter:list-endpoints]
use = egg:swift#list_endpoints
# list_endpoints_path = /endpoints/
[filter:proxy-logging]
use = egg:swift#proxy_logging
# If not set, logging directives from [DEFAULT] without "access_" will be used
# access_log_name = swift
# access_log_facility = LOG_LOCAL0
# access_log_level = INFO
# access_log_address = /dev/log
#
# If set, access_log_udp_host will override access_log_address
# access_log_udp_host =
# access_log_udp_port = 514
#
# You can use log_statsd_* from [DEFAULT] or override them here:
# access_log_statsd_host = localhost
# access_log_statsd_port = 8125
# access_log_statsd_default_sample_rate = 1.0
# access_log_statsd_sample_rate_factor = 1.0
# access_log_statsd_metric_prefix =
# access_log_headers = false
#
# If access_log_headers is True and access_log_headers_only is set, only
# these headers are logged. Multiple headers can be defined as comma separated
# list like this: access_log_headers_only = Host, X-Object-Meta-Mtime
# access_log_headers_only =
#
# By default, the X-Auth-Token is logged. To obscure the value,
# set reveal_sensitive_prefix to the number of characters to log.
# For example, if set to 12, only the first 12 characters of the
# token appear in the log. An unauthorized access of the log file
# won't allow unauthorized usage of the token. However, the first
# 12 or so characters is unique enough that you can trace/debug
# token usage. Set to 0 to suppress the token completely (replaced
# by '...' in the log).
# Note: reveal_sensitive_prefix will not affect the value
# logged with access_log_headers=True.
# reveal_sensitive_prefix = 16
#
# What HTTP methods are allowed for StatsD logging (comma-sep); request methods
# not in this list will have "BAD_METHOD" for the <verb> portion of the metric.
# log_statsd_valid_http_methods = GET,HEAD,POST,PUT,DELETE,COPY,OPTIONS
#
# Note: The double proxy-logging in the pipeline is not a mistake. The
# left-most proxy-logging is there to log requests that were handled in
# middleware and never made it through to the right-most middleware (and
# proxy server). Double logging is prevented for normal requests. See
# proxy-logging docs.
# Note: Put before both ratelimit and auth in the pipeline.
[filter:bulk]
use = egg:swift#bulk
# max_containers_per_extraction = 10000
# max_failed_extractions = 1000
# max_deletes_per_request = 10000
# max_failed_deletes = 1000
# In order to keep a connection active during a potentially long bulk request,
# Swift may return whitespace prepended to the actual response body. This
# whitespace will be yielded no more than every yield_frequency seconds.
# yield_frequency = 10
# Note: The following parameter is used during a bulk delete of objects and
# their container. Deleting the container would frequently fail because it is
# very likely that not all replicated objects have been deleted by the time
# the middleware got a successful response. The number of retries can be
# configured here; the number of seconds to wait between each retry is
# 1.5**retry.
# delete_container_retry_count = 0
# Note: Put after auth in the pipeline.
[filter:container-quotas]
use = egg:swift#container_quotas
# Note: Put after auth and staticweb in the pipeline.
[filter:slo]
use = egg:swift#slo
# max_manifest_segments = 1000
# max_manifest_size = 2097152
# min_segment_size = 1048576
# Start rate-limiting SLO segment serving after the Nth segment of a
# segmented object.
# rate_limit_after_segment = 10
#
# Once segment rate-limiting kicks in for an object, limit segments served
# to N per second. 0 means no rate-limiting.
# rate_limit_segments_per_sec = 0
#
# Time limit on GET requests (seconds)
# max_get_time = 86400
# Note: Put after auth and staticweb in the pipeline.
# If you don't put it in the pipeline, it will be inserted for you.
[filter:dlo]
use = egg:swift#dlo
# Start rate-limiting DLO segment serving after the Nth segment of a
# segmented object.
# rate_limit_after_segment = 10
#
# Once segment rate-limiting kicks in for an object, limit segments served
# to N per second. 0 means no rate-limiting.
# rate_limit_segments_per_sec = 1
#
# Time limit on GET requests (seconds)
# max_get_time = 86400
[filter:account-quotas]
use = egg:swift#account_quotas
[filter:gatekeeper]
use = egg:swift#gatekeeper
# You can override the default log routing for this filter here:
# set log_name = gatekeeper
# set log_facility = LOG_LOCAL0
# set log_level = INFO
# set log_headers = false
# set log_address = /dev/log
[filter:container_sync]
use = egg:swift#container_sync
# Set this to false if you want to disallow any full url values to be set for
# any new X-Container-Sync-To headers. This will keep any new full urls from
# coming in, but won't change any existing values already in the cluster.
# Updating those will have to be done manually, as the true realm endpoint
# cannot always be guessed.
# allow_full_urls = true
# Set this to specify this cluster's //realm/cluster as "current" in /info
# current = //REALM/CLUSTER
# Note: Put it at the beginning of the pipeline to profile all middleware, but
# it is safer to put this after catch_errors, gatekeeper and healthcheck.
[filter:xprofile]
use = egg:swift#xprofile
# This option enables you to switch profilers; the profiler should inherit
# from the Python standard profiler. Currently the supported values are
# 'cProfile', 'eventlet.green.profile', etc.
# profile_module = eventlet.green.profile
#
# This prefix will be used to combine process ID and timestamp to name the
# profile data file. Make sure the executing user has permission to write
# into this path (missing path segments will be created, if necessary).
# If you enable profiling in more than one type of daemon, you must override
# it with a unique value like: /var/log/swift/profile/proxy.profile
# log_filename_prefix = /tmp/log/swift/profile/default.profile
#
# The profile data will be dumped to local disk based on the above naming rule
# at this interval.
# dump_interval = 5.0
#
# Be careful: this option makes the profiler dump data into timestamped files,
# which means a lot of files will pile up in the directory.
# dump_timestamp = false
#
# This is the path of the URL to access the mini web UI.
# path = /__profile__
#
# Clear the data when the WSGI server shuts down.
# flush_at_shutdown = false
#
# unwind the iterator of applications
# unwind = false

View File

@ -0,0 +1,30 @@
---
# Copyright 2014, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
- name: Get md5sum of local builder files
shell: cat /etc/swift/rings/*.builder 2>/dev/null | md5sum | cut -d " " -f1
register: md5sum
- name: Get empty md5sum
shell: echo -n | md5sum | cut -d " " -f1
register: empty_md5sum
# Fail if the remote host's builder files are not empty AND
# do not match the md5sum of the local host's builder files.
- name: Check md5sum of builder files
fail:
msg: "The builder files on the remote host {{ item }} do not match the local host, and are not empty on the remote host"
when: ("{{ hostvars[item]['builder_md5sum'] }}" != "{{ empty_md5sum.stdout }}") and ("{{ hostvars[item]['builder_md5sum'] }}" != "{{ md5sum.stdout }}")
with_items: groups['swift_proxy'] + groups['swift_hosts']

View File

@ -0,0 +1,38 @@
---
# Copyright 2014, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
- include: check_ring.yml
- name: "make sure scripts directory exists"
file: >
state=directory
path={{ item }}
owner=root
group=root
mode=0755
with_items:
- /etc/swift/rings
- /etc/swift/scripts
- name: "Copy the swift_rings.py file"
template: >
src=swift_rings.py
dest="/etc/swift/scripts/swift_rings.py"
mode=0700
- name: "build rings"
command: /usr/bin/python /etc/swift/scripts/swift_rings.py -s /etc/rpc_deploy/rpc_inventory.json
args:
chdir: /etc/swift/rings/
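# For troubleshooting, the same rings script can be run by hand, e.g.
# (illustrative):
#   cd /etc/swift/rings && /usr/bin/python /etc/swift/scripts/swift_rings.py \
#     -s /etc/rpc_deploy/rpc_inventory.json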

View File

@ -0,0 +1,334 @@
#!/usr/bin/env python
# Copyright 2014, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from optparse import OptionParser
from os.path import exists
from swift.cli.ringbuilder import main as rb_main
import pickle
import sys
import threading
import json
USAGE = "usage: %prog -s <rpc_inventory.json>"
DEV_KEY = "%(ip)s:%(port)d/%(device)s"
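# DEV_KEY renders to something like "10.240.0.1:6000/sdb" (illustrative) and is
# used to match drives from the inventory against devices already present in a
# builder file.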
DEFAULT_REPL = {{ swift_default_replication_number }}
DEFAULT_MIN_PART_HOURS = {{ swift_default_min_part_hours }}
DEFAULT_HOST_ZONE = {{ swift_default_host_zone }}
DEFAULT_HOST_WEIGHT = {{ swift_default_drive_weight }}
DEFAULT_ACCOUNT_PORT = {{ swift_account_port }}
DEFAULT_CONTAINER_PORT = {{ swift_container_port }}
DEFAULT_OBJECT_PORT = {{ swift_object_port }}
DEFAULT_SECTION_PORT = {
'account': DEFAULT_ACCOUNT_PORT,
'container': DEFAULT_CONTAINER_PORT,
'object': DEFAULT_OBJECT_PORT,
}
DEFAULT_GROUP_MAP = {
'account': 'account',
{% for policy in swift.storage_policies %}
{% if policy.policy.index == 0 %}
'object': '{{ policy.policy.name }}',
{% else %}
'object-{{ policy.policy.index}}': '{{ policy.policy.name }}',
{% endif %}
{% endfor %}
'container': 'container'
}
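# DEFAULT_GROUP_MAP maps a builder/ring name ("account", "container", "object",
# "object-N") to the group name that drives list under "groups" in their
# swift_vars; a drive is only added to a ring when it belongs to that group.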
DEFAULT_GROUPS = [
'account',
{% for policy in swift.storage_policies %}
'{{ policy.policy.name }}',
{% endfor %}
'container'
]
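# DEFAULT_GROUPS is the set of groups a drive joins when its swift_vars do not
# specify any: the account ring, the container ring and every storage policy.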
class RingValidationError(Exception):
pass
def create_buildfile(build_file, part_power, repl, min_part_hours,
update=False, data=None, validate=False):
if update:
# build file exists, so let's just update the existing build file
if not data:
data = get_build_file_data(build_file)
if data is None:
data = {}
if repl != data.get('replicas') and not validate:
run_and_wait(rb_main, ["swift-ring-builder", build_file,
"set_replicas", repl])
if min_part_hours != data.get('min_part_hours') and not validate:
run_and_wait(rb_main, ["swift-ring-builder", build_file,
"set_min_part_hours", min_part_hours])
if part_power != data.get('part_power'):
raise RingValidationError('Part power cannot be changed! '
'you must rebuild the ring if you need '
'to change it.\nRing part power: %s '
'Inventory part power: %s'
%(data.get('part_power'), part_power))
elif not validate:
run_and_wait(rb_main, ["swift-ring-builder", build_file, "create",
part_power, repl, min_part_hours])
def change_host_weight(build_file, host_search_str, weight):
run_and_wait(rb_main, ["swift-ring-builder", build_file, "set_weight",
host_search_str, weight])
def remove_host_from_ring(build_file, host):
run_and_wait(rb_main, ["swift-ring-builder", build_file, "remove",
host])
def update_host_in_ring(build_file, new_host, old_host, validate=False):
if new_host.get('zone', 0) != old_host['zone']:
devstr = DEV_KEY % new_host
raise RingValidationError('Cannot update zone on %s, this can only be '
'done when the drive is added' % (devstr))
if new_host.get('region', 1) != old_host['region']:
devstr = DEV_KEY % new_host
raise RingValidationError('Cannot update region on %s, this can only '
'be done when the drive is added' % (devstr))
try:
r_ip = new_host.get('repl_ip', new_host['ip'])
r_port = new_host.get('repl_port', new_host['port'])
weight = new_host.get('weight', DEFAULT_HOST_WEIGHT)
if r_ip != old_host['replication_ip'] or \
r_port != old_host['replication_port']:
host_d = {'r_ip': r_ip, 'r_port': r_port}
host_d.update(new_host)
host_str = "%(ip)s:%(port)dR%(r_ip)s:%(r_port)d/%(name)s" % host_d
if not validate:
run_and_wait(rb_main, ["swift-ring-builder", build_file,
"set_info", DEV_KEY % new_host,
host_str])
except Exception as ex:
raise RingValidationError(ex)
if weight != old_host['weight'] and not validate:
change_host_weight(build_file, DEV_KEY % new_host, weight)
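# Builds the device string understood by "swift-ring-builder <file> add",
# e.g. (illustrative): r1z1-10.240.0.1:6000R10.240.1.1:6010/sdb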
def add_host_to_ring(build_file, host, validate=False):
host_str = ""
try:
if host.get('region') is not None:
host_str += 'r%(region)d' % host
host_str += "z%d" % (host.get('zone', DEFAULT_HOST_ZONE))
host_str += "-%(ip)s:%(port)d" % host
if host.get('repl_port'):
r_ip = host.get('repl_ip', host['ip'])
host_str += "R%s:%d" % (r_ip, host['repl_port'])
host_str += "/%(name)s" % host
weight = host.get('weight', DEFAULT_HOST_WEIGHT)
except Exception as ex:
raise RingValidationError(ex)
if not validate:
run_and_wait(rb_main, ["swift-ring-builder", build_file, 'add',
host_str, str(weight)])
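# swift.cli.ringbuilder's main() finishes by calling exit(); running it in a
# separate thread (where SystemExit only ends that thread) is presumably what
# lets this script make several ring-builder calls in one process.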
def run_and_wait(func, *args):
t = threading.Thread(target=func, args=args)
t.start()
return t.join()
def has_section(conf, section):
return True if conf.get(section) else False
def check_section(conf, section):
if not has_section(conf, section):
print("Section %s doesn't exist" % (section))
sys.exit(2)
def get_build_file_data(build_file):
build_file_data = None
if exists(build_file):
try:
with open(build_file) as bf_stream:
build_file_data = pickle.load(bf_stream)
except Exception as ex:
print("Error: failed to load build file '%s': %s" % (build_file,
ex))
build_file_data = None
return build_file_data
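# build_ring: create or update <section>.builder, add/update/remove the devices
# whose "groups" include this ring's group, then rebalance (skipped when only
# validating).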
def build_ring(section, conf, part_power, hosts, validate=False):
# Create the build file
build_file = "%s.builder" % (section)
build_file_data = get_build_file_data(build_file)
repl = conf.get('repl_number', DEFAULT_REPL)
min_part_hours = conf.get('min_part_hours',
DEFAULT_MIN_PART_HOURS)
update = build_file_data is not None
create_buildfile(build_file, part_power, repl, min_part_hours, update,
data=build_file_data, validate=validate)
old_hosts = {}
if update:
for i, dev in enumerate(build_file_data['devs']):
if dev is not None:
old_hosts[DEV_KEY % dev] = i
section_key = section.split('-')[0]
service_port = conf.get('port', DEFAULT_SECTION_PORT[section_key])
for host in hosts:
host_vars = hosts[host]
host_vars['device'] = host_vars['name']
host_vars['port'] = service_port
host_vars['groups'] = host_vars.get('groups', DEFAULT_GROUPS)
if DEFAULT_GROUP_MAP[section] in host_vars['groups']:
host_key = DEV_KEY % host_vars
if host_key in old_hosts:
old_host = build_file_data['devs'][old_hosts[host_key]]
update_host_in_ring(build_file, host_vars, old_host,
validate=validate)
old_hosts.pop(host_key)
else:
add_host_to_ring(build_file, host_vars, validate=validate)
if old_hosts and not validate:
# Devices left in old_hosts no longer appear in the inventory, so remove
# them from the ring.
for host in old_hosts:
remove_host_from_ring(build_file, host)
# Rebalance ring
if not validate:
run_and_wait(rb_main, ["swift-ring-builder", build_file, "rebalance"])
def main(setup):
# load the json file
try:
with open(setup) as json_stream:
_inventory = json.load(json_stream)
except Exception as ex:
print("Failed to load json string %s" % (ex))
return 1
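# The inventory is expected to look roughly like this (illustrative,
# abbreviated):
#   {"swift_hosts": {"hosts": ["storage1", ...]},
#    "_meta": {"hostvars": {"storage1": {"container_address": "10.0.0.1",
#                                        "swift_vars": {"drives": [...]}}}},
#    "all": {"vars": {"swift": {"part_power": 8, ...}}}}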
_hosts = {}
if _inventory.get("swift_hosts"):
for host in _inventory['swift_hosts']['hosts']:
host_config = _inventory['_meta']['hostvars'][host]
host_vars = host_config['swift_vars']
host_ip = host_vars.get('ip', host_config['container_address'])
if not host_vars.get('drives'):
continue
host_drives = host_vars.get('drives')
for host_drive in host_drives:
host_drive['ip'] = host_drive.get('ip', host_ip)
if host_vars.get('groups'):
host_drive['groups'] = \
host_drive.get('groups', host_vars['groups'])
if host_vars.get('repl_ip'):
host_drive['repl_ip'] = \
host_drive.get('repl_ip', host_vars['repl_ip'])
if host_vars.get('repl_port'):
host_drive['repl_port'] = \
host_drive.get('repl_port', host_vars['repl_port'])
if host_vars.get('weight'):
host_drive['weight'] = \
host_drive.get('weight', host_vars['weight'])
key = "%s/%s" % (host_drive['ip'], host_drive['name'])
if key in _hosts:
print("%s already definined - duplicate device" % key)
return 1
_hosts[key] = host_drive
global_vars = _inventory['all']['vars']
check_section(global_vars, 'swift')
swift_vars = global_vars['swift']
if not swift_vars.get('part_power'):
print('No part_power specified - please set a part_power value')
return 1
part_power = swift_vars.get('part_power')
# If the repl_number or min_part_hours are set at a "global" level in the
# conf, let's use them here - otherwise use the overall defaults.
default_repl_num = swift_vars.get('repl_number', DEFAULT_REPL)
default_min_part_hours = swift_vars.get('min_part_hours',
DEFAULT_MIN_PART_HOURS)
ring_calls = []
# Create account ring - if the section is empty create an empty dict
# so defaults are used
if not has_section(swift_vars, 'account'):
swift_vars['account'] = {'repl_number': default_repl_num,
'min_part_hours': default_min_part_hours}
ring_calls.append(('account', swift_vars['account'], part_power))
# Create container ring - if the section is empty create an empty dict
# so defaults are used
if not has_section(swift_vars, 'container'):
swift_vars['container'] = {'repl_number': default_repl_num,
'min_part_hours': default_min_part_hours}
ring_calls.append(('container', swift_vars['container'], part_power))
# Create object rings (storage policies)
check_section(swift_vars, 'storage_policies')
indexes = set()
for policy in swift_vars['storage_policies']:
policy = policy['policy']
if policy['index'] in indexes:
print("Storage Policy index %d already in use" % (policy['index']))
return 4
if policy['index'] == 0:
buildfilename = 'object'
else:
buildfilename = 'object-%d' % (policy['index'])
indexes.add(policy['index'])
# Set default port/min_part_hours/repl_number
if 'min_part_hours' not in policy:
policy['min_part_hours'] = default_min_part_hours
if 'repl_number' not in policy:
policy['repl_number'] = default_repl_num
if 'port' not in policy:
policy['port'] = DEFAULT_OBJECT_PORT
ring_calls.append((buildfilename, policy, part_power))
# Now that we have gathered all the options for building/updating the rings,
# let's validate them
kargs = {'validate': True, 'hosts': _hosts}
for ring_call in ring_calls:
try:
build_ring(*ring_call, **kargs)
except RingValidationError as ex:
print(ex)
return 2
# If the validation passes lets go ahead and build the rings.
kargs.pop('validate')
for ring_call in ring_calls:
build_ring(*ring_call, **kargs)
if __name__ == "__main__":
parser = OptionParser(USAGE)
parser.add_option("-s", "--setup", dest="setup",
help="Specify the swift setup file.", metavar="FILE",
default="/etc/rpc_deploy/rpc_inventory.json")
options, args = parser.parse_args(sys.argv[1:])
if options.setup and not exists(options.setup):
print("Swift setup file not found or doesn't exist")
parser.print_help()
sys.exit(1)
sys.exit(main(options.setup))

View File

@ -0,0 +1,25 @@
---
# Copyright 2014, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
- name: "Copy the rings over"
copy: >
src={{ item }}
dest=/etc/swift/
mode=0644
owner={{ system_user }}
group={{ system_group }}
with_fileglob:
- /etc/swift/rings/*.ring.gz
- /etc/swift/rings/*.builder

View File

@ -0,0 +1,22 @@
---
# Copyright 2014, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
- name: Get md5sum of builder files
shell: cat /etc/swift/*.builder 2>/dev/null | md5sum | cut -d " " -f1
register: md5sum
- name: Register a fact for the md5sum
set_fact:
builder_md5sum: "{{ md5sum.stdout }}"

View File

@ -0,0 +1,22 @@
---
# Copyright 2014, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
- name: "Set ownership on mounted drives"
file:
dest: "{{ swift_vars.mount_point }}/{{ item.name }}"
owner: "{{ system_user }}"
group: "{{ system_group }}"
state: "directory"
with_items: swift_vars.drives

View File

@ -178,3 +178,8 @@ haproxy_config:
hap_backend_alg: source hap_backend_alg: source
hap_backend_options: hap_backend_options:
- "ssl-hello-chk" - "ssl-hello-chk"
- service:
hap_service_name: swift_proxy
hap_backend_nodes: "{{ groups['swift_proxy'] }}"
hap_port: 8888
hap_balance_type: http

View File

@ -0,0 +1,22 @@
---
# Copyright 2014, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
service_type: object-store
service_description: "Object Storage Service"
service_password: "{{ swift_service_password }}"
service_publicurl: "http://{{ external_vip_address }}:{{ swift_proxy_port }}/v1/AUTH_%(tenant_id)s"
service_adminurl: "http://{{ internal_vip_address }}:{{ swift_proxy_port }}/v1/AUTH_%(tenant_id)s"
service_internalurl: "http://{{ internal_vip_address }}:{{ swift_proxy_port }}/v1/AUTH_%(tenant_id)s"

View File

@ -21,6 +21,28 @@ repo_path: "{{ repo_package_name }}_{{ git_install_branch | replace('/', '_') }}
git_repo: https://github.com/openstack/swift git_repo: https://github.com/openstack/swift
git_fallback_repo: https://git.openstack.org/openstack/swift git_fallback_repo: https://git.openstack.org/openstack/swift
git_dest: "/opt/{{ repo_path }}" git_dest: "/opt/{{ repo_path }}"
git_install_branch: master git_etc_example: etc/
git_install_branch: stable/juno
service_pip_dependencies:
- pywbem
- ecdsa
- MySQL-python
- python-memcached
- pycrypto
- python-cinderclient
- python-keystoneclient
- keystonemiddleware
container_packages:
- curl
- python-pip
- rsync
- openssh-server
- git-core
- python-setuptools
- python-dev
- gcc
- libffi-dev
pip_wheel_name: swift pip_wheel_name: swift

View File

@ -32,5 +32,7 @@ pushd /root/ansible-lxc-rpc
# Openstack Service Setup # Openstack Service Setup
ansible-playbook -e @/etc/rpc_deploy/user_variables.yml playbooks/openstack/openstack-setup.yml ansible-playbook -e @/etc/rpc_deploy/user_variables.yml playbooks/openstack/openstack-setup.yml
ansible-playbook -e @/etc/rpc_deploy/user_variables.yml playbooks/openstack/swift-all.yml
popd popd
popd popd