Add templates, allow for automatic zone assignment.
This commit is contained in:
18
config.yaml
18
config.yaml
@@ -33,17 +33,19 @@ options:
|
|||||||
default: 1
|
default: 1
|
||||||
type: int
|
type: int
|
||||||
description: Minimum hours between balances
|
description: Minimum hours between balances
|
||||||
storage-zone-distribution:
|
zone-assignment:
|
||||||
default: "service-unit"
|
default: "manual"
|
||||||
type: string
|
type: string
|
||||||
description: |
|
description: |
|
||||||
Storage zone distribution policy that the charm will use when
|
Which policy to use when assigning new storage nodes to zones.
|
||||||
configuring and initializing the storage ring upon new swift-storage
|
|
||||||
relations (see README). Options include:
|
|
||||||
.
|
.
|
||||||
service-unit - Storage zones configured per swift-storage service unit.
|
manual - Allow swift-storage services to request zone membership.
|
||||||
machine-unit - Storage zones configured per swift-storage machine-unit.
|
auto - Assign new swift-storage units to zones automatically.
|
||||||
manual - Storage zones configured manually per swift-storage service.
|
.
|
||||||
|
The configured replica minimum must be met by an equal number of storage
|
||||||
|
zones before the storage ring will be initially balanced. Deployment
|
||||||
|
requirements differ based on the zone-assignment policy configured, see
|
||||||
|
this charm's README for details.
|
||||||
# CA Cert info
|
# CA Cert info
|
||||||
use-https:
|
use-https:
|
||||||
default: "yes"
|
default: "yes"
|
||||||
|
|||||||
@@ -115,9 +115,10 @@ def proxy_changed():
|
|||||||
account_port = utils.config_get('account-ring-port')
|
account_port = utils.config_get('account-ring-port')
|
||||||
object_port = utils.config_get('object-ring-port')
|
object_port = utils.config_get('object-ring-port')
|
||||||
container_port = utils.config_get('container-ring-port')
|
container_port = utils.config_get('container-ring-port')
|
||||||
|
zone = swift.get_zone(utils.config_get('zone-assignment'))
|
||||||
node_settings = {
|
node_settings = {
|
||||||
'ip': utils.get_host_ip(utils.relation_get('private-address')),
|
'ip': utils.get_host_ip(utils.relation_get('private-address')),
|
||||||
'zone': utils.relation_get('zone'),
|
'zone': zone,
|
||||||
'account_port': utils.relation_get('account_port'),
|
'account_port': utils.relation_get('account_port'),
|
||||||
'object_port': utils.relation_get('object_port'),
|
'object_port': utils.relation_get('object_port'),
|
||||||
'container_port': utils.relation_get('container_port'),
|
'container_port': utils.relation_get('container_port'),
|
||||||
|
|||||||
@@ -95,6 +95,7 @@ def render_config(config_file, context):
|
|||||||
# load os release-specific templates.
|
# load os release-specific templates.
|
||||||
cfile = os.path.basename(config_file)
|
cfile = os.path.basename(config_file)
|
||||||
templates_dir = os.path.join(utils.TEMPLATES_DIR, os_release)
|
templates_dir = os.path.join(utils.TEMPLATES_DIR, os_release)
|
||||||
|
context['os_release'] = os_release
|
||||||
return utils.render_template(cfile, context, templates_dir)
|
return utils.render_template(cfile, context, templates_dir)
|
||||||
|
|
||||||
|
|
||||||
@@ -157,6 +158,8 @@ def get_keystone_auth():
|
|||||||
|
|
||||||
|
|
||||||
def write_proxy_config():
|
def write_proxy_config():
|
||||||
|
|
||||||
|
bind_port = utils.config_get('bind-port')
|
||||||
workers = utils.config_get('workers')
|
workers = utils.config_get('workers')
|
||||||
if workers == '0':
|
if workers == '0':
|
||||||
import multiprocessing
|
import multiprocessing
|
||||||
@@ -164,7 +167,7 @@ def write_proxy_config():
|
|||||||
|
|
||||||
ctxt = {
|
ctxt = {
|
||||||
'proxy_ip': utils.get_host_ip(),
|
'proxy_ip': utils.get_host_ip(),
|
||||||
'bind_port': utils.config_get('bind-port'),
|
'bind_port': bind_port,
|
||||||
'workers': workers,
|
'workers': workers,
|
||||||
'operator_roles': utils.config_get('operator-roles')
|
'operator_roles': utils.config_get('operator-roles')
|
||||||
}
|
}
|
||||||
@@ -179,12 +182,14 @@ def write_proxy_config():
|
|||||||
ks_auth = get_keystone_auth()
|
ks_auth = get_keystone_auth()
|
||||||
if ks_auth:
|
if ks_auth:
|
||||||
utils.juju_log('INFO', 'Enabling Keystone authentication.')
|
utils.juju_log('INFO', 'Enabling Keystone authentication.')
|
||||||
ctxt = (ctxt.items() + ks_auth.items())
|
for k, v in ks_auth.iteritems():
|
||||||
|
ctxt[k] = v
|
||||||
|
|
||||||
with open(SWIFT_PROXY_CONF, 'w') as conf:
|
with open(SWIFT_PROXY_CONF, 'w') as conf:
|
||||||
conf.write(render_config(SWIFT_PROXY_CONF, ctxt))
|
conf.write(render_config(SWIFT_PROXY_CONF, ctxt))
|
||||||
|
|
||||||
proxy_control('restart')
|
proxy_control('restart')
|
||||||
|
subprocess.check_call(['open-port', bind_port])
|
||||||
|
|
||||||
def configure_ssl():
|
def configure_ssl():
|
||||||
# this should be expanded to cover setting up user-specified certificates
|
# this should be expanded to cover setting up user-specified certificates
|
||||||
@@ -247,8 +252,8 @@ def exists_in_ring(ring_path, node):
|
|||||||
node['port'] = ring_port(ring_path, node)
|
node['port'] = ring_port(ring_path, node)
|
||||||
|
|
||||||
for dev in ring['devs']:
|
for dev in ring['devs']:
|
||||||
d = [(i, dev[i]) for i in dev if i in node]
|
d = [(i, dev[i]) for i in dev if i in node and i != 'zone']
|
||||||
n = [(i, node[i]) for i in node if i in dev]
|
n = [(i, node[i]) for i in node if i in dev and i != 'zone']
|
||||||
if sorted(d) == sorted(n):
|
if sorted(d) == sorted(n):
|
||||||
|
|
||||||
msg = 'Node already exists in ring (%s).' % ring_path
|
msg = 'Node already exists in ring (%s).' % ring_path
|
||||||
@@ -284,18 +289,50 @@ def add_to_ring(ring_path, node):
|
|||||||
utils.juju_log('INFO', msg)
|
utils.juju_log('INFO', msg)
|
||||||
|
|
||||||
|
|
||||||
def determine_zone(policy):
|
def _get_zone(ring_builder):
|
||||||
'''Determine which storage zone a specific machine unit belongs to based
|
replicas = ring_builder.replicas
|
||||||
on configured storage-zone-distribution policy.'''
|
zones = [d['zone'] for d in ring_builder.devs]
|
||||||
if policy == 'service-unit':
|
if not zones:
|
||||||
this_relid = os.getenv('JUJU_RELATION_ID')
|
return 1
|
||||||
relids = utils.relation_ids('swift-proxy')
|
if len(zones) < replicas:
|
||||||
zone = (relids.index(this_relid) + 1)
|
return sorted(zones).pop() + 1
|
||||||
elif policy == 'machine-unit':
|
|
||||||
pass
|
zone_distrib = {}
|
||||||
elif policy == 'manual':
|
for z in zones:
|
||||||
zone = utils.relation_get('zone')
|
zone_distrib[z] = zone_distrib.get(z, 0) + 1
|
||||||
return zone
|
|
||||||
|
if len(set([total for total in zone_distrib.itervalues()])) == 1:
|
||||||
|
# all zones are equal, start assigning to zone 1 again.
|
||||||
|
return 1
|
||||||
|
|
||||||
|
return sorted(zone_distrib, key=zone_distrib.get).pop(0)
|
||||||
|
|
||||||
|
|
||||||
|
def get_zone(assignment_policy):
|
||||||
|
''' Determine the appropriate zone depending on configured assignment
|
||||||
|
policy.
|
||||||
|
|
||||||
|
Manual assignment relies on each storage zone being deployed as a
|
||||||
|
separate service unit with its desired zone set as a configuration
|
||||||
|
option.
|
||||||
|
|
||||||
|
Auto assignment distributes swift-storage machine units across a number
|
||||||
|
of zones equal to the configured minimum replicas. This allows for a
|
||||||
|
single swift-storage service unit, with each 'add-unit'd machine unit
|
||||||
|
being assigned to a different zone.
|
||||||
|
'''
|
||||||
|
if assignment_policy == 'manual':
|
||||||
|
return utils.relation_get('zone')
|
||||||
|
elif assignment_policy == 'auto':
|
||||||
|
potential_zones = []
|
||||||
|
for ring in SWIFT_RINGS.itervalues():
|
||||||
|
builder = _load_builder(ring)
|
||||||
|
potential_zones.append(_get_zone(builder))
|
||||||
|
return set(potential_zones).pop()
|
||||||
|
else:
|
||||||
|
utils.juju_log('Invalid zone assignment policy: %s' %\
|
||||||
|
assignment_policy)
|
||||||
|
sys.exit(1)
|
||||||
|
|
||||||
|
|
||||||
def balance_ring(ring_path):
|
def balance_ring(ring_path):
|
||||||
|
|||||||
47
hooks/templates/essex/memcached.conf
Normal file
47
hooks/templates/essex/memcached.conf
Normal file
@@ -0,0 +1,47 @@
|
|||||||
|
# memcached default config file
|
||||||
|
# 2003 - Jay Bonci <jaybonci@debian.org>
|
||||||
|
# This configuration file is read by the start-memcached script provided as
|
||||||
|
# part of the Debian GNU/Linux distribution.
|
||||||
|
|
||||||
|
# Run memcached as a daemon. This command is implied, and is not needed for the
|
||||||
|
# daemon to run. See the README.Debian that comes with this package for more
|
||||||
|
# information.
|
||||||
|
-d
|
||||||
|
|
||||||
|
# Log memcached's output to /var/log/memcached
|
||||||
|
logfile /var/log/memcached.log
|
||||||
|
|
||||||
|
# Be verbose
|
||||||
|
# -v
|
||||||
|
|
||||||
|
# Be even more verbose (print client commands as well)
|
||||||
|
# -vv
|
||||||
|
|
||||||
|
# Start with a cap of 64 megs of memory. It's reasonable, and the daemon default
|
||||||
|
# Note that the daemon will grow to this size, but does not start out holding this much
|
||||||
|
# memory
|
||||||
|
-m 64
|
||||||
|
|
||||||
|
# Default connection port is 11211
|
||||||
|
-p 11211
|
||||||
|
|
||||||
|
# Run the daemon as root. The start-memcached will default to running as root if no
|
||||||
|
# -u command is present in this config file
|
||||||
|
-u memcache
|
||||||
|
|
||||||
|
# Specify which IP address to listen on. The default is to listen on all IP addresses
|
||||||
|
# This parameter is one of the only security measures that memcached has, so make sure
|
||||||
|
# it's listening on a firewalled interface.
|
||||||
|
-l {{ proxy_ip }}
|
||||||
|
|
||||||
|
# Limit the number of simultaneous incoming connections. The daemon default is 1024
|
||||||
|
# -c 1024
|
||||||
|
|
||||||
|
# Lock down all paged memory. Consult with the README and homepage before you do this
|
||||||
|
# -k
|
||||||
|
|
||||||
|
# Return error when memory is exhausted (rather than removing items)
|
||||||
|
# -M
|
||||||
|
|
||||||
|
# Maximize core file limit
|
||||||
|
# -r
|
||||||
64
hooks/templates/essex/proxy-server.conf
Normal file
64
hooks/templates/essex/proxy-server.conf
Normal file
@@ -0,0 +1,64 @@
|
|||||||
|
[DEFAULT]
|
||||||
|
bind_port = {{ bind_port }}
|
||||||
|
workers = {{ workers }}
|
||||||
|
user = swift
|
||||||
|
{% if ssl %}
|
||||||
|
cert_file = {{ ssl_cert }}
|
||||||
|
key_file = {{ ssl_key }}
|
||||||
|
{% endif %}
|
||||||
|
|
||||||
|
{% if auth_type == 'keystone' %}
|
||||||
|
[pipeline:main]
|
||||||
|
pipeline = healthcheck cache swift3 s3token authtoken keystone proxy-server
|
||||||
|
{% else %}
|
||||||
|
[pipeline:main]
|
||||||
|
pipeline = healthcheck cache tempauth proxy-server
|
||||||
|
{% endif %}
|
||||||
|
|
||||||
|
[app:proxy-server]
|
||||||
|
use = egg:swift#proxy
|
||||||
|
allow_account_management = true
|
||||||
|
{% if auth_type == 'keystone' %}account_autocreate = true{% endif %}
|
||||||
|
|
||||||
|
[filter:tempauth]
|
||||||
|
use = egg:swift#tempauth
|
||||||
|
user_system_root = testpass .admin https://{{ proxy_ip }}:8080/v1/AUTH_system
|
||||||
|
|
||||||
|
[filter:healthcheck]
|
||||||
|
use = egg:swift#healthcheck
|
||||||
|
|
||||||
|
[filter:cache]
|
||||||
|
use = egg:swift#memcache
|
||||||
|
memcache_servers = {{ proxy_ip }}:11211
|
||||||
|
|
||||||
|
{% if auth_type == 'keystone' %}
|
||||||
|
[filter:keystone]
|
||||||
|
paste.filter_factory = keystone.middleware.swift_auth:filter_factory
|
||||||
|
operator_roles = {{ operator_roles }}
|
||||||
|
|
||||||
|
[filter:authtoken]
|
||||||
|
paste.filter_factory = keystone.middleware.auth_token:filter_factory
|
||||||
|
auth_host = {{ keystone_host }}
|
||||||
|
auth_port = {{ auth_port }}
|
||||||
|
auth_protocol = {{ auth_protocol }}
|
||||||
|
auth_uri = {{ auth_protocol }}://{{ keystone_host }}:{{ service_port }}
|
||||||
|
admin_tenant_name = {{ service_tenant }}
|
||||||
|
admin_user = {{ service_user }}
|
||||||
|
admin_password = {{ service_password }}
|
||||||
|
{% if os_release != 'essex' %}signing_dir = /etc/swift{% endif %}
|
||||||
|
|
||||||
|
|
||||||
|
[filter:s3token]
|
||||||
|
paste.filter_factory = keystone.middleware.s3_token:filter_factory
|
||||||
|
service_host = {{ keystone_host }}
|
||||||
|
service_port = {{ service_port }}
|
||||||
|
auth_port = {{ auth_port }}
|
||||||
|
auth_host = {{ keystone_host }}
|
||||||
|
auth_protocol = {{ auth_protocol }}
|
||||||
|
auth_token = {{ admin_token }}
|
||||||
|
admin_token = {{ admin_token }}
|
||||||
|
|
||||||
|
[filter:swift3]
|
||||||
|
{% if os_release == 'essex' %}use = egg:swift#swift3{% else %}use = egg:swift3#swift3
|
||||||
|
{% endif %}
|
||||||
|
{% endif %}
|
||||||
7
hooks/templates/essex/swift-rings
Normal file
7
hooks/templates/essex/swift-rings
Normal file
@@ -0,0 +1,7 @@
|
|||||||
|
<Directory {{ www_dir }}>
|
||||||
|
Order deny,allow
|
||||||
|
{% for host in allowed_hosts %}
|
||||||
|
Allow from {{ host }}
|
||||||
|
{% endfor %}
|
||||||
|
Deny from all
|
||||||
|
</Directory>
|
||||||
4
hooks/templates/essex/swift.conf
Normal file
4
hooks/templates/essex/swift.conf
Normal file
@@ -0,0 +1,4 @@
|
|||||||
|
[swift-hash]
|
||||||
|
# random unique string that can never change (DO NOT LOSE)
|
||||||
|
swift_hash_path_suffix = {{ swift_hash }}
|
||||||
|
|
||||||
1
hooks/templates/folsom/memcached.conf
Symbolic link
1
hooks/templates/folsom/memcached.conf
Symbolic link
@@ -0,0 +1 @@
|
|||||||
|
../essex/memcached.conf
|
||||||
1
hooks/templates/folsom/proxy-server.conf
Symbolic link
1
hooks/templates/folsom/proxy-server.conf
Symbolic link
@@ -0,0 +1 @@
|
|||||||
|
../essex/proxy-server.conf
|
||||||
1
hooks/templates/folsom/swift-rings
Symbolic link
1
hooks/templates/folsom/swift-rings
Symbolic link
@@ -0,0 +1 @@
|
|||||||
|
../essex/swift-rings
|
||||||
1
hooks/templates/folsom/swift.conf
Symbolic link
1
hooks/templates/folsom/swift.conf
Symbolic link
@@ -0,0 +1 @@
|
|||||||
|
../essex/swift.conf
|
||||||
@@ -34,7 +34,7 @@ def install(*pkgs):
|
|||||||
cmd.append(pkg)
|
cmd.append(pkg)
|
||||||
subprocess.check_call(cmd)
|
subprocess.check_call(cmd)
|
||||||
|
|
||||||
TEMPLATES_DIR = 'templates'
|
TEMPLATES_DIR = 'hooks/templates'
|
||||||
|
|
||||||
try:
|
try:
|
||||||
import jinja2
|
import jinja2
|
||||||
|
|||||||
Reference in New Issue
Block a user