Add templates, allow for automatic zone assignment.

config.yaml (18 lines changed)

@@ -33,17 +33,19 @@ options:
     default: 1
     type: int
     description: Minimum hours between balances
-  storage-zone-distribution:
-    default: "service-unit"
+  zone-assignment:
+    default: "manual"
     type: string
     description: |
-      Storage zone distribution policy that the charm will use when
-      configuring and initializing the storage ring upon new swift-storage
-      relations (see README). Options include:
+      Which policy to use when assigning new storage nodes to zones.
       .
-      service-unit - Storage zones configured per swift-storage service unit.
-      machine-unit - Storage zones configured per swift-storage machine-unit.
-      manual - Storage zones configured manually per swift-storage service.
+      manual - Allow swift-storage services to request zone membership.
+      auto - Assign new swift-storage units to zones automatically.
+      .
+      The configured replica minimum must be met by an equal number of storage
+      zones before the storage ring will be initially balanced. Deployment
+      requirements differ based on the zone-assignment policy configured; see
+      this charm's README for details.
   # CA Cert info
   use-https:
     default: "yes"
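
To illustrate the auto policy described above, assume three replicas (the
replica minimum is a separate config option; 3 is only assumed here):
successive storage units fill empty zones first, then cycle through the
least-populated ones. A minimal Python 2 sketch driving the _get_zone()
helper added later in this commit; FakeBuilder is a hypothetical stand-in
for swift's RingBuilder, of which only the replicas and devs attributes
are read:

    # Hypothetical stand-in for swift's RingBuilder.
    class FakeBuilder(object):
        def __init__(self, replicas):
            self.replicas = replicas
            self.devs = []

    builder = FakeBuilder(replicas=3)
    for unit in range(6):
        zone = _get_zone(builder)              # helper from this commit
        builder.devs.append({'zone': zone})
        print 'unit %d -> zone %d' % (unit, zone)
    # Prints zones 1, 2, 3, 1, 2, 3 (tie-breaking among equally filled
    # zones depends on dict ordering).
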
@@ -115,9 +115,10 @@ def proxy_changed():
     account_port = utils.config_get('account-ring-port')
     object_port = utils.config_get('object-ring-port')
     container_port = utils.config_get('container-ring-port')
+    zone = swift.get_zone(utils.config_get('zone-assignment'))
     node_settings = {
         'ip': utils.get_host_ip(utils.relation_get('private-address')),
-        'zone': utils.relation_get('zone'),
+        'zone': zone,
         'account_port': utils.relation_get('account_port'),
         'object_port': utils.relation_get('object_port'),
         'container_port': utils.relation_get('container_port'),

@@ -95,6 +95,7 @@ def render_config(config_file, context):
     # load os release-specific templates.
     cfile = os.path.basename(config_file)
     templates_dir = os.path.join(utils.TEMPLATES_DIR, os_release)
+    context['os_release'] = os_release
     return utils.render_template(cfile, context, templates_dir)
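
utils.render_template() is not itself part of this diff; given the jinja2
import and the TEMPLATES_DIR change at the bottom of this commit, it
presumably looks something like the following sketch (function name and
signature inferred from the call site above):

    import jinja2

    def render_template(template_name, context, template_dir):
        # Load the named template from the release-specific directory and
        # render it with the hook's context dict.
        loader = jinja2.FileSystemLoader(template_dir)
        env = jinja2.Environment(loader=loader)
        return env.get_template(template_name).render(context)
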
@@ -157,6 +158,8 @@ def get_keystone_auth():


 def write_proxy_config():

+    bind_port = utils.config_get('bind-port')
+    workers = utils.config_get('workers')
     if workers == '0':
         import multiprocessing
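
The line that consumes workers == '0' is elided from this hunk; the usual
idiom, and presumably what the hidden line does, is to fall back to one
worker per core (an assumption, not shown in the diff):

    if workers == '0':
        import multiprocessing
        workers = multiprocessing.cpu_count()   # assumed fallback
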
@@ -164,7 +167,7 @@ def write_proxy_config():

     ctxt = {
         'proxy_ip': utils.get_host_ip(),
-        'bind_port': utils.config_get('bind-port'),
+        'bind_port': bind_port,
         'workers': workers,
         'operator_roles': utils.config_get('operator-roles')
     }

@@ -179,12 +182,14 @@ def write_proxy_config():
     ks_auth = get_keystone_auth()
     if ks_auth:
         utils.juju_log('INFO', 'Enabling Keystone authentication.')
-        ctxt = (ctxt.items() + ks_auth.items())
+        for k, v in ks_auth.iteritems():
+            ctxt[k] = v

     with open(SWIFT_PROXY_CONF, 'w') as conf:
         conf.write(render_config(SWIFT_PROXY_CONF, ctxt))

     proxy_control('restart')
+    subprocess.check_call(['open-port', bind_port])

 def configure_ssl():
     # this should be expanded to cover setting up user-specified certificates
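
The replaced line was a real bug: in Python 2, dict.items() returns a list,
so the old expression rebound ctxt to a list of tuples and render_config()
would no longer have received a dict. The new loop merges in place. A
standalone illustration:

    ctxt = {'bind_port': '8080'}
    ks_auth = {'auth_host': '10.0.0.1'}

    broken = (ctxt.items() + ks_auth.items())   # list of tuples, not a dict
    for k, v in ks_auth.iteritems():            # the fix: merge in place
        ctxt[k] = v
    assert isinstance(ctxt, dict) and ctxt['auth_host'] == '10.0.0.1'
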
@@ -247,8 +252,8 @@ def exists_in_ring(ring_path, node):
     node['port'] = ring_port(ring_path, node)

     for dev in ring['devs']:
-        d = [(i, dev[i]) for i in dev if i in node]
-        n = [(i, node[i]) for i in node if i in dev]
+        d = [(i, dev[i]) for i in dev if i in node and i != 'zone']
+        n = [(i, node[i]) for i in node if i in dev and i != 'zone']
         if sorted(d) == sorted(n):
             msg = 'Node already exists in ring (%s).' % ring_path
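
Dropping 'zone' from the comparison means a node that reports in under a
different zone still matches its existing ring entry instead of being added
a second time; that matters now that zones can be assigned automatically
between hook runs. A standalone illustration:

    dev = {'ip': '10.0.0.2', 'port': 6000, 'zone': 1}    # existing ring entry
    node = {'ip': '10.0.0.2', 'port': 6000, 'zone': 2}   # same node, new zone
    d = [(i, dev[i]) for i in dev if i in node and i != 'zone']
    n = [(i, node[i]) for i in node if i in dev and i != 'zone']
    assert sorted(d) == sorted(n)   # still detected as the same node
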
@@ -284,18 +289,50 @@ def add_to_ring(ring_path, node):
     utils.juju_log('INFO', msg)


-def determine_zone(policy):
-    '''Determine which storage zone a specific machine unit belongs to based
-    on configured storage-zone-distrbution policy.'''
-    if policy == 'service-unit':
-        this_relid = os.getenv('JUJU_RELATION_ID')
-        relids = utils.relation_ids('swift-proxy')
-        zone = (relids.index(this_relid) + 1)
-    elif policy == 'machine-unit':
-        pass
-    elif policy == 'manual':
-        zone = utils.relation_get('zone')
-    return zone
+def _get_zone(ring_builder):
+    replicas = ring_builder.replicas
+    zones = [d['zone'] for d in ring_builder.devs]
+    if not zones:
+        return 1
+    if len(zones) < replicas:
+        return sorted(zones).pop() + 1
+
+    zone_distrib = {}
+    for z in zones:
+        zone_distrib[z] = zone_distrib.get(z, 0) + 1
+
+    if len(set([total for total in zone_distrib.itervalues()])) == 1:
+        # all zones are equal, start assigning to zone 1 again.
+        return 1
+
+    return sorted(zone_distrib, key=zone_distrib.get).pop(0)
+
+
+def get_zone(assignment_policy):
+    '''Determine the appropriate zone depending on configured assignment
+    policy.
+
+    Manual assignment relies on each storage zone being deployed as a
+    separate service unit, with its desired zone set as a configuration
+    option.
+
+    Auto assignment distributes swift-storage machine units across a number
+    of zones equal to the configured minimum replicas. This allows for a
+    single swift-storage service unit, with each 'add-unit'd machine unit
+    being assigned to a different zone.
+    '''
+    if assignment_policy == 'manual':
+        return utils.relation_get('zone')
+    elif assignment_policy == 'auto':
+        potential_zones = []
+        for ring in SWIFT_RINGS.itervalues():
+            builder = _load_builder(ring)
+            potential_zones.append(_get_zone(builder))
+        return set(potential_zones).pop()
+    else:
+        utils.juju_log('ERROR',
+                       'Invalid zone assignment policy: %s' % assignment_policy)
+        sys.exit(1)


 def balance_ring(ring_path):
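
A quick check of _get_zone()'s branches, again with a hypothetical stand-in
builder (only the replicas and devs attributes are read):

    class FakeBuilder(object):
        def __init__(self, replicas, zones):
            self.replicas = replicas
            self.devs = [{'zone': z} for z in zones]

    print _get_zone(FakeBuilder(3, []))            # empty ring -> zone 1
    print _get_zone(FakeBuilder(3, [1, 2]))        # fewer devs than replicas -> 3
    print _get_zone(FakeBuilder(3, [1, 2, 3]))     # evenly filled -> wrap to 1
    print _get_zone(FakeBuilder(3, [1, 1, 2, 3]))  # uneven -> least-filled (2)
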

hooks/templates/essex/memcached.conf (new file, 47 lines)

@@ -0,0 +1,47 @@
# memcached default config file
# 2003 - Jay Bonci <jaybonci@debian.org>
# This configuration file is read by the start-memcached script provided as
# part of the Debian GNU/Linux distribution.

# Run memcached as a daemon. This command is implied, and is not needed for the
# daemon to run. See the README.Debian that comes with this package for more
# information.
-d

# Log memcached's output to /var/log/memcached
logfile /var/log/memcached.log

# Be verbose
# -v

# Be even more verbose (print client commands as well)
# -vv

# Start with a cap of 64 megs of memory. It's reasonable, and the daemon default
# Note that the daemon will grow to this size, but does not start out holding this much
# memory
-m 64

# Default connection port is 11211
-p 11211

# Run the daemon as root. The start-memcached will default to running as root if no
# -u command is present in this config file
-u memcache

# Specify which IP address to listen on. The default is to listen on all IP addresses
# This parameter is one of the only security measures that memcached has, so make sure
# it's listening on a firewalled interface.
-l {{ proxy_ip }}

# Limit the number of simultaneous incoming connections. The daemon default is 1024
# -c 1024

# Lock down all paged memory. Consult with the README and homepage before you do this
# -k

# Return error when memory is exhausted (rather than removing items)
# -M

# Maximize core file limit
# -r

hooks/templates/essex/proxy-server.conf (new file, 64 lines)

@@ -0,0 +1,64 @@
[DEFAULT]
bind_port = {{ bind_port }}
workers = {{ workers }}
user = swift
{% if ssl %}
cert_file = {{ ssl_cert }}
key_file = {{ ssl_key }}
{% endif %}

{% if auth_type == 'keystone' %}
[pipeline:main]
pipeline = healthcheck cache swift3 s3token authtoken keystone proxy-server
{% else %}
[pipeline:main]
pipeline = healthcheck cache tempauth proxy-server
{% endif %}

[app:proxy-server]
use = egg:swift#proxy
allow_account_management = true
{% if auth_type == 'keystone' %}account_autocreate = true{% endif %}

[filter:tempauth]
use = egg:swift#tempauth
user_system_root = testpass .admin https://{{ proxy_ip }}:8080/v1/AUTH_system

[filter:healthcheck]
use = egg:swift#healthcheck

[filter:cache]
use = egg:swift#memcache
memcache_servers = {{ proxy_ip }}:11211

{% if auth_type == 'keystone' %}
[filter:keystone]
paste.filter_factory = keystone.middleware.swift_auth:filter_factory
operator_roles = {{ operator_roles }}

[filter:authtoken]
paste.filter_factory = keystone.middleware.auth_token:filter_factory
auth_host = {{ keystone_host }}
auth_port = {{ auth_port }}
auth_protocol = {{ auth_protocol }}
auth_uri = {{ auth_protocol }}://{{ keystone_host }}:{{ service_port }}
admin_tenant_name = {{ service_tenant }}
admin_user = {{ service_user }}
admin_password = {{ service_password }}
{% if os_release != 'essex' %}signing_dir = /etc/swift{% endif %}

[filter:s3token]
paste.filter_factory = keystone.middleware.s3_token:filter_factory
service_host = {{ keystone_host }}
service_port = {{ service_port }}
auth_port = {{ auth_port }}
auth_host = {{ keystone_host }}
auth_protocol = {{ auth_protocol }}
auth_token = {{ admin_token }}
admin_token = {{ admin_token }}

[filter:swift3]
{% if os_release == 'essex' %}use = egg:swift#swift3{% else %}use = egg:swift3#swift3
{% endif %}
{% endif %}
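
The variables above are presumably filled in by write_proxy_config(),
get_keystone_auth() and render_config() from the hook changes earlier in
this commit. A sample context for a keystone-backed render; every value is
purely illustrative:

    ctxt = {
        'bind_port': '8080',
        'workers': 2,
        'ssl': False,
        'auth_type': 'keystone',
        'proxy_ip': '10.0.0.1',
        'operator_roles': 'Member,Admin',
        'keystone_host': '10.0.0.2',
        'auth_port': '35357',
        'auth_protocol': 'http',
        'service_port': '5000',
        'service_tenant': 'services',
        'service_user': 'swift',
        'service_password': 'secret',
        'admin_token': 'example-token',
        'os_release': 'essex',
    }
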

hooks/templates/essex/swift-rings (new file, 7 lines)

@@ -0,0 +1,7 @@
<Directory {{ www_dir }}>
    Order deny,allow
    {% for host in allowed_hosts %}
    Allow from {{ host }}
    {% endfor %}
    Deny from all
</Directory>

hooks/templates/essex/swift.conf (new file, 4 lines)

@@ -0,0 +1,4 @@
[swift-hash]
# random unique string that can never change (DO NOT LOSE)
swift_hash_path_suffix = {{ swift_hash }}

hooks/templates/folsom/memcached.conf (new symbolic link)

@@ -0,0 +1 @@
../essex/memcached.conf

hooks/templates/folsom/proxy-server.conf (new symbolic link)

@@ -0,0 +1 @@
../essex/proxy-server.conf

hooks/templates/folsom/swift-rings (new symbolic link)

@@ -0,0 +1 @@
../essex/swift-rings

hooks/templates/folsom/swift.conf (new symbolic link)

@@ -0,0 +1 @@
../essex/swift.conf

@@ -34,7 +34,7 @@ def install(*pkgs):
         cmd.append(pkg)
     subprocess.check_call(cmd)

-TEMPLATES_DIR = 'templates'
+TEMPLATES_DIR = 'hooks/templates'

 try:
     import jinja2