#!/usr/bin/python
#
# Copyright 2016 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import sys
import time
from subprocess import (
    check_call,
    CalledProcessError,
)
from lib.swift_utils import (
    SwiftProxyCharmException,
    register_configs,
    restart_map,
    services,
    determine_packages,
    ensure_swift_dir,
    SWIFT_RINGS,
    get_www_dir,
    initialize_ring,
    SWIFT_HA_RES,
    get_zone,
    do_openstack_upgrade,
    setup_ipv6,
    update_rings,
    balance_rings,
    fully_synced,
    sync_proxy_rings,
    broadcast_rings_available,
    mark_www_rings_deleted,
    SwiftProxyClusterRPC,
    get_first_available_value,
    all_responses_equal,
    ensure_www_dir_permissions,
    sync_builders_and_rings_if_changed,
    cluster_sync_rings,
    is_most_recent_timestamp,
    timestamps_available,
    assess_status,
    try_initialize_swauth,
)
import charmhelpers.contrib.openstack.utils as openstack
from charmhelpers.contrib.openstack.ha.utils import (
    update_dns_ha_resource_params,
)
from charmhelpers.contrib.hahelpers.cluster import (
    get_hacluster_config,
    is_elected_leader,
)
from charmhelpers.core.hookenv import (
    config,
    local_unit,
    remote_unit,
    unit_get,
    relation_set,
    relation_ids,
    relation_get,
    related_units,
    log,
    DEBUG,
    INFO,
    WARNING,
    Hooks, UnregisteredHookError,
    open_port,
    status_set,
)
from charmhelpers.core.host import (
    service_reload,
    service_restart,
    service_stop,
    service_start,
)
from charmhelpers.fetch import (
    apt_install,
    apt_update,
)
from charmhelpers.payload.execd import execd_preinstall
from charmhelpers.contrib.openstack.ip import (
    canonical_url,
    PUBLIC,
    INTERNAL,
    ADMIN,
)
from charmhelpers.contrib.network.ip import (
    get_iface_for_address,
    get_netmask_for_address,
    get_address_in_network,
    get_ipv6_addr,
    is_ipv6,
    format_ipv6_addr,
)
from charmhelpers.contrib.openstack.context import ADDRESS_TYPES
from charmhelpers.contrib.charmsupport import nrpe
from charmhelpers.contrib.hardening.harden import harden

extra_pkgs = [
    "haproxy",
    "python-jinja2"
]

hooks = Hooks()
CONFIGS = register_configs()
restart_on_change = openstack.pausable_restart_on_change


@hooks.hook('install.real')
@harden()
def install():
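    """Install hook: set up the apt source, install the proxy packages and
    prepare the www directory used to serve rings.
    """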
    status_set('maintenance', 'Executing pre-install')
    execd_preinstall()
    src = config('openstack-origin')
    if src != 'distro':
        openstack.configure_installation_source(src)

    status_set('maintenance', 'Installing apt packages')
    apt_update(fatal=True)
    rel = openstack.get_os_codename_install_source(src)
    pkgs = determine_packages(rel)
    apt_install(pkgs, fatal=True)
    apt_install(extra_pkgs, fatal=True)
    ensure_swift_dir()
    # configure a directory on webserver for distributing rings.
    ensure_www_dir_permissions(get_www_dir())


@hooks.hook('config-changed')
@restart_on_change(restart_map())
@harden()
def config_changed():
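    """Config-changed hook: initialise rings on the leader, apply ipv6/https
    settings and update/balance rings as configured.
    """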
    if is_elected_leader(SWIFT_HA_RES):
        log("Leader established, generating ring builders", level=INFO)
        # initialize new storage rings.
        for path in SWIFT_RINGS.itervalues():
            if not os.path.exists(path):
                initialize_ring(path,
                                config('partition-power'),
                                config('replicas'),
                                config('min-hours'))

    if config('prefer-ipv6'):
        status_set('maintenance', 'Configuring ipv6')
        setup_ipv6()

    configure_https()
    open_port(config('bind-port'))
    update_nrpe_config()

    # Determine whether or not we should do an upgrade.
    if not config('action-managed-upgrade') and \
            openstack.openstack_upgrade_available('python-swift'):
        status_set('maintenance', 'Running openstack upgrade')
        do_openstack_upgrade(CONFIGS)

    status_set('maintenance', 'Updating and (maybe) balancing rings')
    update_rings(min_part_hours=config('min-hours'),
                 rebalance=not config('disable-ring-balance'))

    if not config('disable-ring-balance') and is_elected_leader(SWIFT_HA_RES):
        # Try ring balance. If rings are balanced, no sync will occur.
        balance_rings()

    for r_id in relation_ids('identity-service'):
        keystone_joined(relid=r_id)

    for r_id in relation_ids('cluster'):
        cluster_joined(relation_id=r_id)

    for r_id in relation_ids('object-store'):
        object_store_joined(relation_id=r_id)

    try_initialize_swauth()


@hooks.hook('identity-service-relation-joined')
def keystone_joined(relid=None):
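    """Register swift and s3 endpoint details on the identity-service
    relation.
    """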
    port = config('bind-port')
    admin_url = '%s:%s' % (canonical_url(CONFIGS, ADMIN), port)
    internal_url = ('%s:%s/v1/AUTH_$(tenant_id)s' %
                    (canonical_url(CONFIGS, INTERNAL), port))
    public_url = ('%s:%s/v1/AUTH_$(tenant_id)s' %
                  (canonical_url(CONFIGS, PUBLIC), port))
    region = config('region')

    s3_public_url = ('%s:%s' %
                     (canonical_url(CONFIGS, PUBLIC), port))
    s3_internal_url = ('%s:%s' %
                       (canonical_url(CONFIGS, INTERNAL), port))
    s3_admin_url = '%s:%s' % (canonical_url(CONFIGS, ADMIN), port)

    relation_set(relation_id=relid,
                 region=None, public_url=None,
                 internal_url=None, admin_url=None, service=None,
                 swift_service='swift', swift_region=region,
                 swift_public_url=public_url,
                 swift_internal_url=internal_url,
                 swift_admin_url=admin_url,
                 s3_service='s3', s3_region=region,
                 s3_public_url=s3_public_url,
                 s3_admin_url=s3_admin_url,
                 s3_internal_url=s3_internal_url)


@hooks.hook('identity-service-relation-changed')
@restart_on_change(restart_map())
def keystone_changed():
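    """Identity-service relation changed - re-evaluate https configuration."""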
    configure_https()


@hooks.hook('swift-storage-relation-joined')
def storage_joined():
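    """Handle a new storage unit: non-leaders stop the proxy and mark any
    published rings deleted until a fresh sync arrives from the leader.
    """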
    if not is_elected_leader(SWIFT_HA_RES):
        log("New storage relation joined - stopping proxy until ring builder "
            "synced", level=INFO)
        service_stop('swift-proxy')

        # This unit is not currently responsible for distributing rings but
        # may become so at some time in the future so we do this to avoid the
        # possibility of storage nodes getting out-of-date rings by
        # deprecating any existing ones from the www dir.
        mark_www_rings_deleted()

    try_initialize_swauth()


def get_host_ip(rid=None, unit=None):
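    """Return the relation-supplied host IP, preferring a formatted IPv6
    address when prefer-ipv6 is set.
    """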
    addr = relation_get('private-address', rid=rid, unit=unit)
    if config('prefer-ipv6'):
        host_ip = format_ipv6_addr(addr)
        if host_ip:
            return host_ip
        else:
            msg = ("Did not get IPv6 address from storage relation "
                   "(got=%s)" % (addr))
            log(msg, level=WARNING)

    return openstack.get_host_ip(addr)


def update_rsync_acls():
"""Get Host IP of each storage unit and broadcast acl to all units."""
hosts = []
if not is_elected_leader(SWIFT_HA_RES):
log("Skipping rsync acl update since not leader", level=DEBUG)
return
# Get all unit addresses
for rid in relation_ids('swift-storage'):
for unit in related_units(rid):
hosts.append(get_host_ip(rid=rid, unit=unit))
rsync_hosts = ' '.join(hosts)
log("Broadcasting acl '%s' to all storage units" % (rsync_hosts),
level=DEBUG)
# We add a timestamp so that the storage units know which is the newest
settings = {'rsync_allowed_hosts': rsync_hosts,
'timestamp': time.time()}
for rid in relation_ids('swift-storage'):
2015-11-27 09:48:45 +00:00
relation_set(relation_id=rid, **settings)
2013-09-27 13:02:37 +01:00
@hooks.hook('swift-storage-relation-changed')
@restart_on_change(restart_map())
def storage_changed():
    """Storage relation.

    Only the leader unit can update and distribute rings so if we are not the
    leader we ignore this event and wait for a resync request from the leader.
    """
    if not is_elected_leader(SWIFT_HA_RES):
        log("Not the leader - deferring storage relation change to leader "
            "unit.", level=DEBUG)
        return

    log("Storage relation changed - processing", level=DEBUG)
    host_ip = get_host_ip()
    if not host_ip:
        log("No host ip found in storage relation - deferring storage "
            "relation", level=WARNING)
        return

    update_rsync_acls()
    zone = get_zone(config('zone-assignment'))
    node_settings = {
        'ip': host_ip,
        'zone': zone,
        'account_port': relation_get('account_port'),
        'object_port': relation_get('object_port'),
        'container_port': relation_get('container_port'),
    }

    if None in node_settings.itervalues():
        missing = [k for k, v in node_settings.iteritems() if v is None]
        log("Relation not ready - some required values not provided by "
            "relation (missing=%s)" % (', '.join(missing)), level=INFO)
        return None

    for k in ['zone', 'account_port', 'object_port', 'container_port']:
        node_settings[k] = int(node_settings[k])

    CONFIGS.write_all()

    # Allow for multiple devs per unit, passed along as a : separated list
    # Update and balance rings.
    nodes = []
    devs = relation_get('device')
    if devs:
        for dev in devs.split(':'):
            node = {k: v for k, v in node_settings.items()}
            node['device'] = dev
            nodes.append(node)

    # NOTE(jamespage): ensure that disable-ring-balance is observed
    #                  whilst new storage is added - rebalance will
    #                  happen when configuration is toggled later
    update_rings(nodes, rebalance=not config('disable-ring-balance'))
    if not openstack.is_unit_paused_set():
        # Restart proxy here in case no config changes made (so
        # restart_on_change() ineffective).
        service_restart('swift-proxy')


@hooks.hook('swift-storage-relation-broken')
@restart_on_change(restart_map())
def storage_broken():
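    """Storage relation departed - rewrite all service configs."""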
    CONFIGS.write_all()


@hooks.hook('object-store-relation-joined')
def object_store_joined(relation_id=None):
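    """Publish this proxy's swift url to object-store consumers."""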
    relation_data = {
        'swift-url':
        "{}:{}".format(canonical_url(CONFIGS, INTERNAL), config('bind-port'))
    }
    relation_set(relation_id=relation_id, **relation_data)


@hooks.hook('cluster-relation-joined')
def cluster_joined(relation_id=None):
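    """Advertise this unit's addresses (per network type) to cluster peers."""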
    settings = {}

    for addr_type in ADDRESS_TYPES:
        netaddr_cfg = 'os-{}-network'.format(addr_type)
        address = get_address_in_network(config(netaddr_cfg))
        if address:
            settings['{}-address'.format(addr_type)] = address

    if config('prefer-ipv6'):
        private_addr = get_ipv6_addr(exc_list=[config('vip')])[0]
        settings['private-address'] = private_addr
    else:
        settings['private-address'] = unit_get('private-address')

    relation_set(relation_id=relation_id, relation_settings=settings)


def is_all_peers_stopped(responses):
    """Establish whether all peers have stopped their proxy services.

    Each peer unit will set stop-proxy-service-ack to rq value to indicate
    that it has stopped its proxy service. We wait for all units to be
    stopped before triggering a sync. Peer services will be restarted once
    their rings are synced with the leader.

    To be safe, default expectation is that api is still running.
    """
    rq_key = SwiftProxyClusterRPC.KEY_STOP_PROXY_SVC
    ack_key = SwiftProxyClusterRPC.KEY_STOP_PROXY_SVC_ACK
    token = relation_get(attribute=rq_key, unit=local_unit())
    if not token or token != responses[0].get(ack_key):
        log("Token mismatch, rq and ack tokens differ (expected ack=%s, "
            "got=%s)" %
            (token, responses[0].get(ack_key)), level=DEBUG)
        return False

    if not all_responses_equal(responses, ack_key):
        log("Not all ack responses are equal. Either we are still waiting "
            "for responses or we were not the request originator.",
            level=DEBUG)
        return False

    return True


def cluster_leader_actions():
    """Cluster relation hook actions to be performed by leader units.

    NOTE: must be called by leader from cluster relation hook.
    """
    log("Cluster changed by unit=%s (local is leader)" % (remote_unit()),
        level=DEBUG)

    rx_settings = relation_get() or {}
    tx_settings = relation_get(unit=local_unit()) or {}

    rx_rq_token = rx_settings.get(SwiftProxyClusterRPC.KEY_STOP_PROXY_SVC)
    rx_ack_token = rx_settings.get(SwiftProxyClusterRPC.KEY_STOP_PROXY_SVC_ACK)

    tx_rq_token = tx_settings.get(SwiftProxyClusterRPC.KEY_STOP_PROXY_SVC)
    tx_ack_token = tx_settings.get(SwiftProxyClusterRPC.KEY_STOP_PROXY_SVC_ACK)

    rx_leader_changed = \
        rx_settings.get(SwiftProxyClusterRPC.KEY_NOTIFY_LEADER_CHANGED)
    if rx_leader_changed:
        log("Leader change notification received and this is leader so "
            "retrying sync.", level=INFO)
        # FIXME: check that we were previously part of a successful sync to
        # ensure we have good rings.
        cluster_sync_rings(peers_only=tx_settings.get('peers-only', False),
                           token=rx_leader_changed)
        return

    rx_resync_request = \
        rx_settings.get(SwiftProxyClusterRPC.KEY_REQUEST_RESYNC)
    resync_request_ack_key = SwiftProxyClusterRPC.KEY_REQUEST_RESYNC_ACK
    tx_resync_request_ack = tx_settings.get(resync_request_ack_key)
    if rx_resync_request and tx_resync_request_ack != rx_resync_request:
        log("Unit '%s' has requested a resync" % (remote_unit()),
            level=INFO)
        cluster_sync_rings(peers_only=True)
        relation_set(**{resync_request_ack_key: rx_resync_request})
        return

    # If we have received an ack token ensure it is not associated with a
    # request we received from another peer. If it is, this would indicate
    # a leadership change during a sync and this unit will abort the sync or
    # attempt to restore the original leader so as to be able to complete
    # the sync.
    if rx_ack_token and rx_ack_token == tx_rq_token:
        # Find out if all peer units have been stopped.
        responses = []
        for rid in relation_ids('cluster'):
            for unit in related_units(rid):
                responses.append(relation_get(rid=rid, unit=unit))

        # Ensure all peers stopped before starting sync
        if is_all_peers_stopped(responses):
            key = 'peers-only'
            if not all_responses_equal(responses, key, must_exist=False):
                msg = ("Did not get equal response from every peer unit for "
                       "'%s'" % (key))
                raise SwiftProxyCharmException(msg)

            peers_only = bool(get_first_available_value(responses, key,
                                                        default=0))
            log("Syncing rings and builders (peers-only=%s)" % (peers_only),
                level=DEBUG)
            broadcast_rings_available(broker_token=rx_ack_token,
                                      storage=not peers_only)
        else:
            key = SwiftProxyClusterRPC.KEY_STOP_PROXY_SVC_ACK
            acks = ', '.join([rsp[key] for rsp in responses if key in rsp])
            log("Not all peer apis stopped - skipping sync until all peers "
                "ready (current='%s', token='%s')" % (acks, tx_ack_token),
                level=INFO)
    elif ((rx_ack_token and (rx_ack_token == tx_ack_token)) or
          (rx_rq_token and (rx_rq_token == rx_ack_token))):
        log("It appears that the cluster leader has changed mid-sync - "
            "stopping proxy service", level=WARNING)
        service_stop('swift-proxy')
        broker = rx_settings.get('builder-broker')
        if broker:
            # If we get here, manual intervention will be required in order
            # to restore the cluster.
            msg = ("Failed to restore previous broker '%s' as leader" %
                   (broker))
            raise SwiftProxyCharmException(msg)
        else:
            msg = ("No builder-broker on rx_settings relation from '%s' - "
                   "unable to attempt leader restore" % (remote_unit()))
            raise SwiftProxyCharmException(msg)
    else:
        log("Not taking any sync actions", level=DEBUG)

    CONFIGS.write_all()


def cluster_non_leader_actions():
    """Cluster relation hook actions to be performed by non-leader units.

    NOTE: must be called by non-leader from cluster relation hook.
    """
    log("Cluster changed by unit=%s (local is non-leader)" % (remote_unit()),
        level=DEBUG)
    rx_settings = relation_get() or {}
    tx_settings = relation_get(unit=local_unit()) or {}

    token = rx_settings.get(SwiftProxyClusterRPC.KEY_NOTIFY_LEADER_CHANGED)
    if token:
        log("Leader-changed notification received from peer unit. Since "
            "this most likely occurred during a ring sync proxies will "
            "be disabled until the leader is restored and a fresh sync "
            "request is sent out", level=WARNING)
        service_stop("swift-proxy")
        return

    rx_rq_token = rx_settings.get(SwiftProxyClusterRPC.KEY_STOP_PROXY_SVC)

    # Check whether we have been requested to stop proxy service
    if rx_rq_token:
        log("Peer request to stop proxy service received (%s) - sending ack" %
            (rx_rq_token), level=INFO)
        service_stop('swift-proxy')
        peers_only = rx_settings.get('peers-only', None)
        rq = SwiftProxyClusterRPC().stop_proxy_ack(echo_token=rx_rq_token,
                                                   echo_peers_only=peers_only)
        relation_set(relation_settings=rq)
        return

    # Check if there are any builder files we can sync from the leader.
    broker = rx_settings.get('builder-broker', None)
    broker_token = rx_settings.get('broker-token', None)
    broker_timestamp = rx_settings.get('broker-timestamp', None)
    tx_ack_token = tx_settings.get(SwiftProxyClusterRPC.KEY_STOP_PROXY_SVC_ACK)
    if not broker:
        log("No ring/builder update available", level=DEBUG)
        if not openstack.is_unit_paused_set():
            service_start('swift-proxy')

        return
    elif broker_token:
        if tx_ack_token:
            if broker_token == tx_ack_token:
                log("Broker and ACK tokens match (%s)" % (broker_token),
                    level=DEBUG)
            else:
                log("Received ring/builder update notification but tokens do "
                    "not match (broker-token=%s/ack-token=%s)" %
                    (broker_token, tx_ack_token), level=WARNING)
                return
        else:
            log("Broker token available without handshake, assuming we just "
                "joined and rings won't change", level=DEBUG)
    else:
        log("Not taking any sync actions", level=DEBUG)
        return

    # If we upgrade from a cluster that did not use timestamps, the new peer
    # will need to request a re-sync from the leader
    if not is_most_recent_timestamp(broker_timestamp):
        if not timestamps_available(excluded_unit=remote_unit()):
            log("Requesting resync")
            rq = SwiftProxyClusterRPC().request_resync(broker_token)
            relation_set(relation_settings=rq)
        else:
            log("Did not receive most recent broker timestamp but timestamps "
                "are available - waiting for next timestamp", level=INFO)

        return

    log("Ring/builder update available", level=DEBUG)
    builders_only = int(rx_settings.get('sync-only-builders', 0))
    path = os.path.basename(get_www_dir())
    try:
        sync_proxy_rings('http://%s/%s' % (broker, path),
                         rings=not builders_only)
    except CalledProcessError:
        log("Ring builder sync failed, builders not yet available - "
            "leader not ready?", level=WARNING)
        return

    # Re-enable the proxy once all builders and rings are synced
    if fully_synced():
        log("Ring builders synced - starting proxy", level=INFO)
        CONFIGS.write_all()
        if not openstack.is_unit_paused_set():
            service_start('swift-proxy')
    else:
        log("Not all builders and rings synced yet - waiting for peer sync "
            "before starting proxy", level=INFO)


@hooks.hook('cluster-relation-changed')
@restart_on_change(restart_map())
def cluster_changed():
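    """Delegate cluster relation changes to leader/non-leader handlers."""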
    if is_elected_leader(SWIFT_HA_RES):
        cluster_leader_actions()
    else:
        cluster_non_leader_actions()


@hooks.hook('ha-relation-changed')
@sync_builders_and_rings_if_changed
def ha_relation_changed():
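    """Once hacluster reports 'clustered', re-advertise keystone endpoints so
    clients switch to the VIP.
    """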
    clustered = relation_get('clustered')
    if clustered:
        log("Cluster configured, notifying other services and updating "
            "keystone endpoint configuration", level=INFO)
        # Tell all related services to start using the VIP instead
        for r_id in relation_ids('identity-service'):
            keystone_joined(relid=r_id)


@hooks.hook('ha-relation-joined')
def ha_relation_joined(relation_id=None):
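    """Provide the hacluster charm with haproxy and VIP resources to manage."""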
    # Obtain the config values necessary for the cluster config. These
    # include multicast port and interface to bind to.
    cluster_config = get_hacluster_config()

    # Obtain resources
    resources = {'res_swift_haproxy': 'lsb:haproxy'}
    resource_params = {'res_swift_haproxy': 'op monitor interval="5s"'}

    if config('dns-ha'):
        update_dns_ha_resource_params(relation_id=relation_id,
                                      resources=resources,
                                      resource_params=resource_params)
    else:
        vip_group = []
        for vip in cluster_config['vip'].split():
            if is_ipv6(vip):
                res_swift_vip = 'ocf:heartbeat:IPv6addr'
                vip_params = 'ipv6addr'
            else:
                res_swift_vip = 'ocf:heartbeat:IPaddr2'
                vip_params = 'ip'

            iface = get_iface_for_address(vip)
            if iface is not None:
                vip_key = 'res_swift_{}_vip'.format(iface)
                resources[vip_key] = res_swift_vip
                resource_params[vip_key] = (
                    'params {ip}="{vip}" cidr_netmask="{netmask}"'
                    ' nic="{iface}"'
                    ''.format(ip=vip_params,
                              vip=vip,
                              iface=iface,
                              netmask=get_netmask_for_address(vip))
                )
                vip_group.append(vip_key)

        if len(vip_group) >= 1:
            relation_set(groups={'grp_swift_vips': ' '.join(vip_group)})

    init_services = {'res_swift_haproxy': 'haproxy'}
    clones = {'cl_swift_haproxy': 'res_swift_haproxy'}
    relation_set(relation_id=relation_id,
                 init_services=init_services,
                 corosync_bindiface=cluster_config['ha-bindiface'],
                 corosync_mcastport=cluster_config['ha-mcastport'],
                 resources=resources,
                 resource_params=resource_params,
                 clones=clones)


def configure_https():
"""Enables SSL API Apache config if appropriate and kicks identity-service
2013-09-27 13:02:37 +01:00
with any required api updates.
"""
2013-09-27 13:02:37 +01:00
# need to write all to ensure changes to the entire request pipeline
# propagate (c-api, haprxy, apache)
CONFIGS.write_all()
if 'https' in CONFIGS.complete_contexts():
cmd = ['a2ensite', 'openstack_https_frontend']
check_call(cmd)
2013-09-27 13:02:37 +01:00
else:
cmd = ['a2dissite', 'openstack_https_frontend']
check_call(cmd)
2013-09-27 13:02:37 +01:00
# Apache 2.4 required enablement of configuration
if os.path.exists('/usr/sbin/a2enconf'):
check_call(['a2enconf', 'swift-rings'])
if not openstack.is_unit_paused_set():
# TODO: improve this by checking if local CN certs are available
# first then checking reload status (see LP #1433114).
service_reload('apache2', restart_on_failure=True)
2013-09-27 13:02:37 +01:00
for rid in relation_ids('identity-service'):
keystone_joined(relid=rid)
env_vars = {'OPENSTACK_SERVICE_SWIFT': 'proxy-server',
'OPENSTACK_PORT_API': config('bind-port'),
'OPENSTACK_PORT_MEMCACHED': 11211}
openstack.save_script_rc(**env_vars)
2013-09-27 13:02:37 +01:00
@hooks.hook('nrpe-external-master-relation-joined',
'nrpe-external-master-relation-changed')
def update_nrpe_config():
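    """Set up NRPE service checks, haproxy checks and a proxy healthcheck."""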
    # python-dbus is used by check_upstart_job
    apt_install('python-dbus')
    hostname = nrpe.get_nagios_hostname()
    current_unit = nrpe.get_nagios_unit_name()
    nrpe_setup = nrpe.NRPE(hostname=hostname)
    nrpe.copy_nrpe_checks()
    nrpe.add_init_service_checks(nrpe_setup, services(), current_unit)
    nrpe.add_haproxy_checks(nrpe_setup, current_unit)
    nrpe_setup.add_check(
        shortname="swift-proxy-healthcheck",
        description="Check Swift Proxy Healthcheck",
        check_cmd="/usr/lib/nagios/plugins/check_http \
                  -I localhost -u /healthcheck -p 8070 \
                  -e \"OK\""
    )
    nrpe_setup.write()


@hooks.hook('upgrade-charm')
@harden()
def upgrade_charm():
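    """Upgrade-charm hook: re-broadcast rsync ACLs to storage units."""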
    update_rsync_acls()


@hooks.hook('update-status')
@harden()
def update_status():
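    """Update-status hook: status itself is assessed via assess_status() in
    main().
    """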
    log('Updating status.')


def main():
    try:
        hooks.execute(sys.argv)
    except UnregisteredHookError as e:
        log('Unknown hook {} - skipping.'.format(e), level=DEBUG)
    assess_status(CONFIGS)


if __name__ == '__main__':
    main()