Enhanced pause and resume for maintenance mode

Adds improved pause and resume support to the charm so that the unit
stays paused during maintenance operations.
Syncs the latest version of charm-helpers for maintenance mode.

Change-Id: I6e793744709b65c89afb6f0fc460f9d96755bf82
Alex Kavanagh 2016-03-17 18:31:50 +00:00
parent 783cdd2dc5
commit d4cf177cc5
12 changed files with 957 additions and 318 deletions
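For orientation, the change follows one pattern throughout the hooks: a 'unit-paused' flag is recorded in the local kv() store when the pause action runs, and any code path that would normally restart a service first checks that flag. A minimal sketch of that guard (maybe_restart() is illustrative only; the helper names come from the charm-helpers sync in this commit):

from charmhelpers.contrib.openstack.utils import is_unit_paused_set
from charmhelpers.core.host import service_restart

def maybe_restart(service_name):
    # Skip restarts while the unit is recorded as paused in the local kv store.
    if not is_unit_paused_set():
        service_restart(service_name)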

.gitignore

@@ -5,3 +5,6 @@ tags
 .testrepository
 *.sw[nop]
 *.pyc
+.unit-state.db
+tests/*.img
+trusty


@@ -3,12 +3,13 @@
 import sys
 import os
-from charmhelpers.core.host import service_pause, service_resume
 from charmhelpers.core.hookenv import action_fail
-from charmhelpers.core.unitdata import HookData, kv
-from hooks.glance_utils import services, assess_status
-from hooks.glance_relations import CONFIGS
+from hooks.glance_utils import (
+    pause_unit_helper,
+    resume_unit_helper,
+    register_configs,
+)
 def pause(args):
@@ -16,13 +17,7 @@ def pause(args):
     @raises Exception if any services fail to stop
     """
-    for service in services():
-        stopped = service_pause(service)
-        if not stopped:
-            raise Exception("{} didn't stop cleanly.".format(service))
-    with HookData()():
-        kv().set('unit-paused', True)
-    assess_status(CONFIGS)
+    pause_unit_helper(register_configs())
 def resume(args):
@@ -30,13 +25,7 @@ def resume(args):
     @raises Exception if any services fail to start
     """
-    for service in services():
-        started = service_resume(service)
-        if not started:
-            raise Exception("{} didn't start cleanly.".format(service))
-    with HookData()():
-        kv().set('unit-paused', False)
-    assess_status(CONFIGS)
+    resume_unit_helper(register_configs())
 # A dictionary of all the defined actions to callables (which take


@ -27,7 +27,11 @@ import cinderclient.v1.client as cinder_client
import glanceclient.v1.client as glance_client import glanceclient.v1.client as glance_client
import heatclient.v1.client as heat_client import heatclient.v1.client as heat_client
import keystoneclient.v2_0 as keystone_client import keystoneclient.v2_0 as keystone_client
import novaclient.v1_1.client as nova_client from keystoneclient.auth.identity import v3 as keystone_id_v3
from keystoneclient import session as keystone_session
from keystoneclient.v3 import client as keystone_client_v3
import novaclient.client as nova_client
import pika import pika
import swiftclient import swiftclient
@ -38,6 +42,8 @@ from charmhelpers.contrib.amulet.utils import (
DEBUG = logging.DEBUG DEBUG = logging.DEBUG
ERROR = logging.ERROR ERROR = logging.ERROR
NOVA_CLIENT_VERSION = "2"
class OpenStackAmuletUtils(AmuletUtils): class OpenStackAmuletUtils(AmuletUtils):
"""OpenStack amulet utilities. """OpenStack amulet utilities.
@ -139,7 +145,7 @@ class OpenStackAmuletUtils(AmuletUtils):
return "role {} does not exist".format(e['name']) return "role {} does not exist".format(e['name'])
return ret return ret
def validate_user_data(self, expected, actual): def validate_user_data(self, expected, actual, api_version=None):
"""Validate user data. """Validate user data.
Validate a list of actual user data vs a list of expected user Validate a list of actual user data vs a list of expected user
@ -150,10 +156,15 @@ class OpenStackAmuletUtils(AmuletUtils):
for e in expected: for e in expected:
found = False found = False
for act in actual: for act in actual:
if e['name'] == act.name:
a = {'enabled': act.enabled, 'name': act.name, a = {'enabled': act.enabled, 'name': act.name,
'email': act.email, 'tenantId': act.tenantId, 'email': act.email, 'id': act.id}
'id': act.id} if api_version == 3:
if e['name'] == a['name']: a['default_project_id'] = getattr(act,
'default_project_id',
'none')
else:
a['tenantId'] = act.tenantId
found = True found = True
ret = self._validate_dict_data(e, a) ret = self._validate_dict_data(e, a)
if ret: if ret:
@ -188,15 +199,30 @@ class OpenStackAmuletUtils(AmuletUtils):
return cinder_client.Client(username, password, tenant, ept) return cinder_client.Client(username, password, tenant, ept)
def authenticate_keystone_admin(self, keystone_sentry, user, password, def authenticate_keystone_admin(self, keystone_sentry, user, password,
tenant): tenant=None, api_version=None,
keystone_ip=None):
"""Authenticates admin user with the keystone admin endpoint.""" """Authenticates admin user with the keystone admin endpoint."""
self.log.debug('Authenticating keystone admin...') self.log.debug('Authenticating keystone admin...')
unit = keystone_sentry unit = keystone_sentry
service_ip = unit.relation('shared-db', if not keystone_ip:
keystone_ip = unit.relation('shared-db',
'mysql:shared-db')['private-address'] 'mysql:shared-db')['private-address']
ep = "http://{}:35357/v2.0".format(service_ip.strip().decode('utf-8')) base_ep = "http://{}:35357".format(keystone_ip.strip().decode('utf-8'))
if not api_version or api_version == 2:
ep = base_ep + "/v2.0"
return keystone_client.Client(username=user, password=password, return keystone_client.Client(username=user, password=password,
tenant_name=tenant, auth_url=ep) tenant_name=tenant, auth_url=ep)
else:
ep = base_ep + "/v3"
auth = keystone_id_v3.Password(
user_domain_name='admin_domain',
username=user,
password=password,
domain_name='admin_domain',
auth_url=ep,
)
sess = keystone_session.Session(auth=auth)
return keystone_client_v3.Client(session=sess)
def authenticate_keystone_user(self, keystone, user, password, tenant): def authenticate_keystone_user(self, keystone, user, password, tenant):
"""Authenticates a regular user with the keystone public endpoint.""" """Authenticates a regular user with the keystone public endpoint."""
@ -225,7 +251,8 @@ class OpenStackAmuletUtils(AmuletUtils):
self.log.debug('Authenticating nova user ({})...'.format(user)) self.log.debug('Authenticating nova user ({})...'.format(user))
ep = keystone.service_catalog.url_for(service_type='identity', ep = keystone.service_catalog.url_for(service_type='identity',
endpoint_type='publicURL') endpoint_type='publicURL')
return nova_client.Client(username=user, api_key=password, return nova_client.Client(NOVA_CLIENT_VERSION,
username=user, api_key=password,
project_id=tenant, auth_url=ep) project_id=tenant, auth_url=ep)
def authenticate_swift_user(self, keystone, user, password, tenant): def authenticate_swift_user(self, keystone, user, password, tenant):


@ -24,6 +24,7 @@ import os
import sys import sys
import re import re
import itertools import itertools
import functools
import six import six
import tempfile import tempfile
@ -69,7 +70,15 @@ from charmhelpers.contrib.python.packages import (
pip_install, pip_install,
) )
from charmhelpers.core.host import lsb_release, mounts, umount, service_running from charmhelpers.core.host import (
lsb_release,
mounts,
umount,
service_running,
service_pause,
service_resume,
restart_on_change_helper,
)
from charmhelpers.fetch import apt_install, apt_cache, install_remote from charmhelpers.fetch import apt_install, apt_cache, install_remote
from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk
from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device
@ -128,7 +137,7 @@ SWIFT_CODENAMES = OrderedDict([
('liberty', ('liberty',
['2.3.0', '2.4.0', '2.5.0']), ['2.3.0', '2.4.0', '2.5.0']),
('mitaka', ('mitaka',
['2.5.0']), ['2.5.0', '2.6.0']),
]) ])
# >= Liberty version->codename mapping # >= Liberty version->codename mapping
@ -763,7 +772,8 @@ def _git_clone_and_install_single(repo, branch, depth, parent_dir, http_proxy,
os.mkdir(parent_dir) os.mkdir(parent_dir)
juju_log('Cloning git repo: {}, branch: {}'.format(repo, branch)) juju_log('Cloning git repo: {}, branch: {}'.format(repo, branch))
repo_dir = install_remote(repo, dest=parent_dir, branch=branch, depth=depth) repo_dir = install_remote(
repo, dest=parent_dir, branch=branch, depth=depth)
venv = os.path.join(parent_dir, 'venv') venv = os.path.join(parent_dir, 'venv')
@ -862,66 +872,155 @@ def os_workload_status(configs, required_interfaces, charm_func=None):
return wrap return wrap
def set_os_workload_status(configs, required_interfaces, charm_func=None, services=None, ports=None): def set_os_workload_status(configs, required_interfaces, charm_func=None,
""" services=None, ports=None):
Set workload status based on complete contexts. """Set the state of the workload status for the charm.
status-set missing or incomplete contexts
and juju-log details of missing required data.
charm_func is a charm specific function to run checking
for charm specific requirements such as a VIP setting.
This function also checks for whether the services defined are ACTUALLY This calls _determine_os_workload_status() to get the new state, message
running and that the ports they advertise are open and being listened to. and sets the status using status_set()
@param services - OPTIONAL: a [{'service': <string>, 'ports': [<int>]] @param configs: a templating.OSConfigRenderer() object
The ports are optional. @param required_interfaces: {generic: [specific, specific2, ...]}
If services is a [<string>] then ports are ignored. @param charm_func: a callable function that returns state, message. The
@param ports - OPTIONAL: an [<int>] representing ports that should be signature is charm_func(configs) -> (state, message)
open. @param services: list of strings OR dictionary specifying services/ports
@returns None @param ports: OPTIONAL list of port numbers.
@returns state, message: the new workload status, user message
""" """
incomplete_rel_data = incomplete_relation_data(configs, required_interfaces) state, message = _determine_os_workload_status(
configs, required_interfaces, charm_func, services, ports)
status_set(state, message)
def _determine_os_workload_status(
configs, required_interfaces, charm_func=None,
services=None, ports=None):
"""Determine the state of the workload status for the charm.
This function returns the new workload status for the charm based
on the state of the interfaces, the paused state and whether the
services are actually running and any specified ports are open.
This checks:
1. if the unit should be paused, that it is actually paused. If so the
state is 'maintenance' + message, else 'broken'.
2. that the interfaces/relations are complete. If they are not then
it sets the state to either 'broken' or 'waiting' and an appropriate
message.
3. If all the relation data is set, then it checks that the actual
services really are running. If not it sets the state to 'broken'.
If everything is okay then the state returns 'active'.
@param configs: a templating.OSConfigRenderer() object
@param required_interfaces: {generic: [specific, specific2, ...]}
@param charm_func: a callable function that returns state, message. The
signature is charm_func(configs) -> (state, message)
@param services: list of strings OR dictionary specifying services/ports
@param ports: OPTIONAL list of port numbers.
@returns state, message: the new workload status, user message
"""
state, message = _ows_check_if_paused(services, ports)
if state is None:
state, message = _ows_check_generic_interfaces(
configs, required_interfaces)
if state != 'maintenance' and charm_func:
# _ows_check_charm_func() may modify the state, message
state, message = _ows_check_charm_func(
state, message, lambda: charm_func(configs))
if state is None:
state, message = _ows_check_services_running(services, ports)
if state is None:
state = 'active' state = 'active'
missing_relations = [] message = "Unit is ready"
incomplete_relations = [] juju_log(message, 'INFO')
message = None
charm_state = None
charm_message = None
for generic_interface in incomplete_rel_data.keys(): return state, message
def _ows_check_if_paused(services=None, ports=None):
"""Check if the unit is supposed to be paused, and if so check that the
services/ports (if passed) are actually stopped/not being listened to.
if the unit isn't supposed to be paused, just return None, None
@param services: OPTIONAL services spec or list of service names.
@param ports: OPTIONAL list of port numbers.
@returns state, message or None, None
"""
if is_unit_paused_set():
state, message = check_actually_paused(services=services,
ports=ports)
if state is None:
# we're paused okay, so set maintenance and return
state = "maintenance"
message = "Paused. Use 'resume' action to resume normal service."
return state, message
return None, None
def _ows_check_generic_interfaces(configs, required_interfaces):
"""Check the complete contexts to determine the workload status.
- Checks for missing or incomplete contexts
- juju log details of missing required data.
- determines the correct workload status
- creates an appropriate message for status_set(...)
if there are no problems then the function returns None, None
@param configs: a templating.OSConfigRenderer() object
@params required_interfaces: {generic_interface: [specific_interface], }
@returns state, message or None, None
"""
incomplete_rel_data = incomplete_relation_data(configs,
required_interfaces)
state = None
message = None
missing_relations = set()
incomplete_relations = set()
for generic_interface, relations_states in incomplete_rel_data.items():
related_interface = None related_interface = None
missing_data = {} missing_data = {}
# Related or not? # Related or not?
for interface in incomplete_rel_data[generic_interface]: for interface, relation_state in relations_states.items():
if incomplete_rel_data[generic_interface][interface].get('related'): if relation_state.get('related'):
related_interface = interface related_interface = interface
missing_data = incomplete_rel_data[generic_interface][interface].get('missing_data') missing_data = relation_state.get('missing_data')
# No relation ID for the generic_interface break
# No relation ID for the generic_interface?
if not related_interface: if not related_interface:
juju_log("{} relation is missing and must be related for " juju_log("{} relation is missing and must be related for "
"functionality. ".format(generic_interface), 'WARN') "functionality. ".format(generic_interface), 'WARN')
state = 'blocked' state = 'blocked'
if generic_interface not in missing_relations: missing_relations.add(generic_interface)
missing_relations.append(generic_interface)
else: else:
# Relation ID exists but no related unit
if not missing_data: if not missing_data:
# Edge case relation ID exists but departing # Edge case - relation ID exists but departing
if ('departed' in hook_name() or 'broken' in hook_name()) \ _hook_name = hook_name()
and related_interface in hook_name(): if (('departed' in _hook_name or 'broken' in _hook_name) and
related_interface in _hook_name):
state = 'blocked' state = 'blocked'
if generic_interface not in missing_relations: missing_relations.add(generic_interface)
missing_relations.append(generic_interface)
juju_log("{} relation's interface, {}, " juju_log("{} relation's interface, {}, "
"relationship is departed or broken " "relationship is departed or broken "
"and is required for functionality." "and is required for functionality."
"".format(generic_interface, related_interface), "WARN") "".format(generic_interface, related_interface),
"WARN")
# Normal case relation ID exists but no related unit # Normal case relation ID exists but no related unit
# (joining) # (joining)
else: else:
juju_log("{} relations's interface, {}, is related but has" juju_log("{} relations's interface, {}, is related but has"
" no units in the relation." " no units in the relation."
"".format(generic_interface, related_interface), "INFO") "".format(generic_interface, related_interface),
"INFO")
# Related unit exists and data missing on the relation # Related unit exists and data missing on the relation
else: else:
juju_log("{} relation's interface, {}, is related awaiting " juju_log("{} relation's interface, {}, is related awaiting "
@ -930,9 +1029,8 @@ def set_os_workload_status(configs, required_interfaces, charm_func=None, servic
", ".join(missing_data)), "INFO") ", ".join(missing_data)), "INFO")
if state != 'blocked': if state != 'blocked':
state = 'waiting' state = 'waiting'
if generic_interface not in incomplete_relations \ if generic_interface not in missing_relations:
and generic_interface not in missing_relations: incomplete_relations.add(generic_interface)
incomplete_relations.append(generic_interface)
if missing_relations: if missing_relations:
message = "Missing relations: {}".format(", ".join(missing_relations)) message = "Missing relations: {}".format(", ".join(missing_relations))
@ -945,9 +1043,22 @@ def set_os_workload_status(configs, required_interfaces, charm_func=None, servic
"".format(", ".join(incomplete_relations)) "".format(", ".join(incomplete_relations))
state = 'waiting' state = 'waiting'
# Run charm specific checks return state, message
if charm_func:
charm_state, charm_message = charm_func(configs)
def _ows_check_charm_func(state, message, charm_func_with_configs):
"""Run a custom check function for the charm to see if it wants to
change the state. This is only run if not in 'maintenance' and
tests to see if the new state is more important than the previous
one determined by the interfaces/relations check.
@param state: the previously determined state so far.
@param message: the user orientated message so far.
@param charm_func: a callable function that returns state, message
@returns state, message strings.
"""
if charm_func_with_configs:
charm_state, charm_message = charm_func_with_configs()
if charm_state != 'active' and charm_state != 'unknown': if charm_state != 'active' and charm_state != 'unknown':
state = workload_state_compare(state, charm_state) state = workload_state_compare(state, charm_state)
if message: if message:
@ -956,72 +1067,151 @@ def set_os_workload_status(configs, required_interfaces, charm_func=None, servic
message = "{}, {}".format(message, charm_message) message = "{}, {}".format(message, charm_message)
else: else:
message = charm_message message = charm_message
return state, message
# If the charm thinks the unit is active, check that the actual services
# really are active. def _ows_check_services_running(services, ports):
if services is not None and state == 'active': """Check that the services that should be running are actually running
# if we're passed the dict() then just grab the values as a list. and that any ports specified are being listened to.
@param services: list of strings OR dictionary specifying services/ports
@param ports: list of ports
@returns state, message: strings or None, None
"""
messages = []
state = None
if services is not None:
services = _extract_services_list_helper(services)
services_running, running = _check_running_services(services)
if not all(running):
messages.append(
"Services not running that should be: {}"
.format(", ".join(_filter_tuples(services_running, False))))
state = 'blocked'
# also verify that the ports that should be open are open
# NB, that ServiceManager objects only OPTIONALLY have ports
map_not_open, ports_open = (
_check_listening_on_services_ports(services))
if not all(ports_open):
# find which service has missing ports. They are in service
# order which makes it a bit easier.
message_parts = {service: ", ".join([str(v) for v in open_ports])
for service, open_ports in map_not_open.items()}
message = ", ".join(
["{}: [{}]".format(s, sp) for s, sp in message_parts.items()])
messages.append(
"Services with ports not open that should be: {}"
.format(message))
state = 'blocked'
if ports is not None:
# and we can also check ports which we don't know the service for
ports_open, ports_open_bools = _check_listening_on_ports_list(ports)
if not all(ports_open_bools):
messages.append(
"Ports which should be open, but are not: {}"
.format(", ".join([str(p) for p, v in ports_open
if not v])))
state = 'blocked'
if state is not None:
message = "; ".join(messages)
return state, message
return None, None
def _extract_services_list_helper(services):
"""Extract an OrderedDict of {service: [ports]} of the supplied services
for use by the other functions.
The services object can either be:
- None : no services were passed (an empty dict is returned)
- a list of strings
- A dictionary (optionally OrderedDict) {service_name: {'service': ..}}
- An array of [{'service': service_name, ...}, ...]
@param services: see above
@returns OrderedDict(service: [ports], ...)
"""
if services is None:
return {}
if isinstance(services, dict): if isinstance(services, dict):
services = services.values() services = services.values()
# either extract the list of services from the dictionary, or if # either extract the list of services from the dictionary, or if
# it is a simple string, use that. i.e. works with mixed lists. # it is a simple string, use that. i.e. works with mixed lists.
_s = [] _s = OrderedDict()
for s in services: for s in services:
if isinstance(s, dict) and 'service' in s: if isinstance(s, dict) and 'service' in s:
_s.append(s['service']) _s[s['service']] = s.get('ports', [])
if isinstance(s, str): if isinstance(s, str):
_s.append(s) _s[s] = []
services_running = [service_running(s) for s in _s] return _s
if not all(services_running):
not_running = [s for s, running in zip(_s, services_running)
if not running]
message = ("Services not running that should be: {}"
.format(", ".join(not_running)))
state = 'blocked'
# also verify that the ports that should be open are open
# NB, that ServiceManager objects only OPTIONALLY have ports
port_map = OrderedDict([(s['service'], s['ports'])
for s in services if 'ports' in s])
if state == 'active' and port_map:
all_ports = list(itertools.chain(*port_map.values()))
ports_open = [port_has_listener('0.0.0.0', p)
for p in all_ports]
if not all(ports_open):
not_opened = [p for p, opened in zip(all_ports, ports_open)
if not opened]
map_not_open = OrderedDict()
for service, ports in port_map.items():
closed_ports = set(ports).intersection(not_opened)
if closed_ports:
map_not_open[service] = closed_ports
# find which service has missing ports. They are in service
# order which makes it a bit easier.
message = (
"Services with ports not open that should be: {}"
.format(
", ".join([
"{}: [{}]".format(
service,
", ".join([str(v) for v in ports]))
for service, ports in map_not_open.items()])))
state = 'blocked'
if ports is not None and state == 'active':
# and we can also check ports which we don't know the service for def _check_running_services(services):
"""Check that the services dict provided is actually running and provide
a list of (service, boolean) tuples for each service.
Returns both a zipped list of (service, boolean) and a list of booleans
in the same order as the services.
@param services: OrderedDict of strings: [ports], one for each service to
check.
@returns [(service, boolean), ...], : results for checks
[boolean] : just the result of the service checks
"""
services_running = [service_running(s) for s in services]
return list(zip(services, services_running)), services_running
def _check_listening_on_services_ports(services, test=False):
"""Check that the unit is actually listening (has the port open) on the
ports that the service specifies are open. If test is True then the
function returns the services with ports that are open rather than
closed.
Returns an OrderedDict of service: ports and a list of booleans
@param services: OrderedDict(service: [port, ...], ...)
@param test: default=False, if False, test for closed, otherwise open.
@returns OrderedDict(service: [port-not-open, ...]...), [boolean]
"""
test = not(not(test)) # ensure test is True or False
all_ports = list(itertools.chain(*services.values()))
ports_states = [port_has_listener('0.0.0.0', p) for p in all_ports]
map_ports = OrderedDict()
matched_ports = [p for p, opened in zip(all_ports, ports_states)
if opened == test] # essentially opened xor test
for service, ports in services.items():
set_ports = set(ports).intersection(matched_ports)
if set_ports:
map_ports[service] = set_ports
return map_ports, ports_states
def _check_listening_on_ports_list(ports):
"""Check that the ports list given are being listened to
Returns a list of ports being listened to and a list of the
booleans.
@param ports: LIST of port numbers.
@returns [(port_num, boolean), ...], [boolean]
"""
ports_open = [port_has_listener('0.0.0.0', p) for p in ports] ports_open = [port_has_listener('0.0.0.0', p) for p in ports]
if not all(ports_open): return zip(ports, ports_open), ports_open
message = (
"Ports which should be open, but are not: {}"
.format(", ".join([str(p) for p, v in zip(ports, ports_open)
if not v])))
state = 'blocked'
# Set to active if all requirements have been met
if state == 'active':
message = "Unit is ready"
juju_log(message, "INFO")
status_set(state, message) def _filter_tuples(services_states, state):
"""Return a simple list from a list of tuples according to the condition
@param services_states: LIST of (string, boolean): service and running
state.
@param state: Boolean to match the tuple against.
@returns [LIST of strings] that matched the tuple RHS.
"""
return [s for s, b in services_states if b == state]
def workload_state_compare(current_workload_state, workload_state): def workload_state_compare(current_workload_state, workload_state):
@ -1046,8 +1236,7 @@ def workload_state_compare(current_workload_state, workload_state):
def incomplete_relation_data(configs, required_interfaces): def incomplete_relation_data(configs, required_interfaces):
""" """Check complete contexts against required_interfaces
Check complete contexts against required_interfaces
Return dictionary of incomplete relation data. Return dictionary of incomplete relation data.
configs is an OSConfigRenderer object with configs registered configs is an OSConfigRenderer object with configs registered
@ -1072,19 +1261,13 @@ def incomplete_relation_data(configs, required_interfaces):
'shared-db': {'related': True}}} 'shared-db': {'related': True}}}
""" """
complete_ctxts = configs.complete_contexts() complete_ctxts = configs.complete_contexts()
incomplete_relations = [] incomplete_relations = [
for svc_type in required_interfaces.keys(): svc_type
# Avoid duplicates for svc_type, interfaces in required_interfaces.items()
found_ctxt = False if not set(interfaces).intersection(complete_ctxts)]
for interface in required_interfaces[svc_type]: return {
if interface in complete_ctxts: i: configs.get_incomplete_context_data(required_interfaces[i])
found_ctxt = True for i in incomplete_relations}
if not found_ctxt:
incomplete_relations.append(svc_type)
incomplete_context_data = {}
for i in incomplete_relations:
incomplete_context_data[i] = configs.get_incomplete_context_data(required_interfaces[i])
return incomplete_context_data
def do_action_openstack_upgrade(package, upgrade_callback, configs): def do_action_openstack_upgrade(package, upgrade_callback, configs):
@ -1145,3 +1328,245 @@ def remote_restart(rel_name, remote_service=None):
relation_set(relation_id=rid, relation_set(relation_id=rid,
relation_settings=trigger, relation_settings=trigger,
) )
def check_actually_paused(services=None, ports=None):
"""Check that services listed in the services object and ports
are actually closed (not listened to), to verify that the unit is
properly paused.
@param services: See _extract_services_list_helper
@returns status, : string for status (None if okay)
message : string for problem for status_set
"""
state = None
message = None
messages = []
if services is not None:
services = _extract_services_list_helper(services)
services_running, services_states = _check_running_services(services)
if any(services_states):
# there shouldn't be any running so this is a problem
messages.append("these services running: {}"
.format(", ".join(
_filter_tuples(services_running, True))))
state = "blocked"
ports_open, ports_open_bools = (
_check_listening_on_services_ports(services, True))
if any(ports_open_bools):
message_parts = {service: ", ".join([str(v) for v in open_ports])
for service, open_ports in ports_open.items()}
message = ", ".join(
["{}: [{}]".format(s, sp) for s, sp in message_parts.items()])
messages.append(
"these service:ports are open: {}".format(message))
state = 'blocked'
if ports is not None:
ports_open, bools = _check_listening_on_ports_list(ports)
if any(bools):
messages.append(
"these ports which should be closed, but are open: {}"
.format(", ".join([str(p) for p, v in ports_open if v])))
state = 'blocked'
if messages:
message = ("Services should be paused but {}"
.format(", ".join(messages)))
return state, message
def set_unit_paused():
"""Set the unit to a paused state in the local kv() store.
This does NOT actually pause the unit
"""
with unitdata.HookData()() as t:
kv = t[0]
kv.set('unit-paused', True)
def clear_unit_paused():
"""Clear the unit from a paused state in the local kv() store
This does NOT actually restart any services - it only clears the
local state.
"""
with unitdata.HookData()() as t:
kv = t[0]
kv.set('unit-paused', False)
def is_unit_paused_set():
"""Return the state of the kv().get('unit-paused').
This does NOT verify that the unit really is paused.
To help with units that don't have HookData() (testing)
if it raises an exception, return False
"""
try:
with unitdata.HookData()() as t:
kv = t[0]
# transform something truth-y into a Boolean.
return not(not(kv.get('unit-paused')))
except:
return False
def pause_unit(assess_status_func, services=None, ports=None,
charm_func=None):
"""Pause a unit by stopping the services and setting 'unit-paused'
in the local kv() store.
Also checks that the services have stopped and ports are no longer
being listened to.
An optional charm_func() can be called that can either raise an
Exception or return a non-None message to indicate that the unit
didn't pause cleanly.
The signature for charm_func is:
charm_func() -> message: string
charm_func() is executed after any services are stopped, if supplied.
The services object can either be:
- None : no services were passed (an empty dict is returned)
- a list of strings
- A dictionary (optionally OrderedDict) {service_name: {'service': ..}}
- An array of [{'service': service_name, ...}, ...]
@param assess_status_func: (f() -> message: string | None) or None
@param services: OPTIONAL see above
@param ports: OPTIONAL list of port
@param charm_func: function to run for custom charm pausing.
@returns None
@raises Exception(message) on an error for action_fail().
"""
services = _extract_services_list_helper(services)
messages = []
if services:
for service in services.keys():
stopped = service_pause(service)
if not stopped:
messages.append("{} didn't stop cleanly.".format(service))
if charm_func:
try:
message = charm_func()
if message:
messages.append(message)
except Exception as e:
messages.append(str(e))
set_unit_paused()
if assess_status_func:
message = assess_status_func()
if message:
messages.append(message)
if messages:
raise Exception("Couldn't pause: {}".format("; ".join(messages)))
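For illustration, pause_unit() accepts either a plain list of service names or the list-of-dicts form used by ServiceManager definitions; both normalise to the same {service: [ports]} mapping via _extract_services_list_helper(). The service names and port below are examples only:

from charmhelpers.contrib.openstack.utils import pause_unit

# A plain list of service names (no port information):
pause_unit(None, services=['glance-api', 'glance-registry'])

# The equivalent list-of-dicts form, with an optional 'ports' entry:
pause_unit(None, services=[{'service': 'glance-api', 'ports': [9292]},
                           {'service': 'glance-registry'}])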
def resume_unit(assess_status_func, services=None, ports=None,
charm_func=None):
"""Resume a unit by starting the services and clearing 'unit-paused'
in the local kv() store.
Also checks that the services have started and ports are being listened to.
An optional charm_func() can be called that can either raise an
Exception or return a non-None message to indicate that the unit
didn't resume cleanly.
The signature for charm_func is:
charm_func() -> message: string
charm_func() is executed after any services are started, if supplied.
The services object can either be:
- None : no services were passed (an empty dict is returned)
- a list of strings
- A dictionary (optionally OrderedDict) {service_name: {'service': ..}}
- An array of [{'service': service_name, ...}, ...]
@param assess_status_func: (f() -> message: string | None) or None
@param services: OPTIONAL see above
@param ports: OPTIONAL list of port
@param charm_func: function to run for custom charm resuming.
@returns None
@raises Exception(message) on an error for action_fail().
"""
services = _extract_services_list_helper(services)
messages = []
if services:
for service in services.keys():
started = service_resume(service)
if not started:
messages.append("{} didn't start cleanly.".format(service))
if charm_func:
try:
message = charm_func()
if message:
messages.append(message)
except Exception as e:
messages.append(str(e))
clear_unit_paused()
if assess_status_func:
message = assess_status_func()
if message:
messages.append(message)
if messages:
raise Exception("Couldn't resume: {}".format("; ".join(messages)))
def make_assess_status_func(*args, **kwargs):
"""Creates an assess_status_func() suitable for handing to pause_unit()
and resume_unit().
This uses the _determine_os_workload_status(...) function to determine
what the workload_status should be for the unit. If the unit is
not in maintenance or active states, then the message is returned to
the caller. This is so an action that doesn't result in either a
complete pause or complete resume can signal failure with an action_fail()
"""
def _assess_status_func():
state, message = _determine_os_workload_status(*args, **kwargs)
status_set(state, message)
if state not in ['maintenance', 'active']:
return message
return None
return _assess_status_func
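As a sketch of how a charm might use this factory (the interface map and service name are placeholders, not values taken from this commit):

from charmhelpers.contrib.openstack.utils import make_assess_status_func

REQUIRED_INTERFACES = {'identity': ['identity-service']}  # placeholder

def assess_status(configs):
    # Sets the workload status as a side effect and returns a message
    # only if the unit ends up outside 'maintenance' or 'active'.
    return make_assess_status_func(
        configs, REQUIRED_INTERFACES,
        charm_func=None, services=['glance-api'], ports=None)()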
def pausable_restart_on_change(restart_map, stopstart=False):
"""A restart_on_change decorator that checks to see if the unit is
paused. If it is paused then the decorated function doesn't fire.
This is provided as a helper, as the @restart_on_change(...) decorator
is in core.host, yet the openstack specific helpers are in this file
(contrib.openstack.utils). Thus, this needs to be an optional feature
for openstack charms (or charms that wish to use the openstack
pause/resume type features).
It is used as follows:
from contrib.openstack.utils import (
pausable_restart_on_change as restart_on_change)
@restart_on_change(restart_map, stopstart=<boolean>)
def some_hook(...):
pass
see core.utils.restart_on_change() for more details.
@param f: the function to decorate
@param restart_map: the restart map {conf_file: [services]}
@param stopstart: DEFAULT false; whether to stop, start or just restart
@returns decorator to use a restart_on_change with pausability
"""
def wrap(f):
@functools.wraps(f)
def wrapped_f(*args, **kwargs):
if is_unit_paused_set():
return f(*args, **kwargs)
# otherwise, normal restart_on_change functionality
return restart_on_change_helper(
(lambda: f(*args, **kwargs)), restart_map, stopstart)
return wrapped_f
return wrap


@ -24,6 +24,8 @@
# Adam Gandelman <adamg@ubuntu.com> # Adam Gandelman <adamg@ubuntu.com>
# #
import bisect import bisect
import errno
import hashlib
import six import six
import os import os
@ -163,7 +165,7 @@ class Pool(object):
:return: None :return: None
""" """
# read-only is easy, writeback is much harder # read-only is easy, writeback is much harder
mode = get_cache_mode(cache_pool) mode = get_cache_mode(self.service, cache_pool)
if mode == 'readonly': if mode == 'readonly':
check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, 'none']) check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, 'none'])
check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool]) check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool])
@ -171,7 +173,7 @@ class Pool(object):
elif mode == 'writeback': elif mode == 'writeback':
check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, 'forward']) check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, 'forward'])
# Flush the cache and wait for it to return # Flush the cache and wait for it to return
check_call(['ceph', '--id', self.service, '-p', cache_pool, 'cache-flush-evict-all']) check_call(['rados', '--id', self.service, '-p', cache_pool, 'cache-flush-evict-all'])
check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove-overlay', self.name]) check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove-overlay', self.name])
check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool]) check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool])
@ -259,6 +261,134 @@ class ErasurePool(Pool):
Returns json formatted output""" Returns json formatted output"""
def get_mon_map(service):
"""
Returns the current monitor map.
:param service: six.string_types. The Ceph user name to run the command under
:return: json string. :raise: ValueError if the monmap fails to parse.
Also raises CalledProcessError if our ceph command fails
"""
try:
mon_status = check_output(
['ceph', '--id', service,
'mon_status', '--format=json'])
try:
return json.loads(mon_status)
except ValueError as v:
log("Unable to parse mon_status json: {}. Error: {}".format(
mon_status, v.message))
raise
except CalledProcessError as e:
log("mon_status command failed with message: {}".format(
e.message))
raise
def hash_monitor_names(service):
"""
Uses the get_mon_map() function to get information about the monitor
cluster.
Hash the name of each monitor. Return a sorted list of monitor hashes
in an ascending order.
:param service: six.string_types. The Ceph user name to run the command under
:rtype : dict. json dict of monitor name, ip address and rank
example: {
'name': 'ip-172-31-13-165',
'rank': 0,
'addr': '172.31.13.165:6789/0'}
"""
try:
hash_list = []
monitor_list = get_mon_map(service=service)
if monitor_list['monmap']['mons']:
for mon in monitor_list['monmap']['mons']:
hash_list.append(
hashlib.sha224(mon['name'].encode('utf-8')).hexdigest())
return sorted(hash_list)
else:
return None
except (ValueError, CalledProcessError):
raise
def monitor_key_delete(service, key):
"""
Delete a key and value pair from the monitor cluster
:param service: six.string_types. The Ceph user name to run the command under
Deletes a key value pair on the monitor cluster.
:param key: six.string_types. The key to delete.
"""
try:
check_output(
['ceph', '--id', service,
'config-key', 'del', str(key)])
except CalledProcessError as e:
log("Monitor config-key del failed with message: {}".format(
e.output))
raise
def monitor_key_set(service, key, value):
"""
Sets a key value pair on the monitor cluster.
:param service: six.string_types. The Ceph user name to run the command under
:param key: six.string_types. The key to set.
:param value: The value to set. This will be converted to a string
before setting
"""
try:
check_output(
['ceph', '--id', service,
'config-key', 'put', str(key), str(value)])
except CalledProcessError as e:
log("Monitor config-key put failed with message: {}".format(
e.output))
raise
def monitor_key_get(service, key):
"""
Gets the value of an existing key in the monitor cluster.
:param service: six.string_types. The Ceph user name to run the command under
:param key: six.string_types. The key to search for.
:return: Returns the value of that key or None if not found.
"""
try:
output = check_output(
['ceph', '--id', service,
'config-key', 'get', str(key)])
return output
except CalledProcessError as e:
log("Monitor config-key get failed with message: {}".format(
e.output))
return None
def monitor_key_exists(service, key):
"""
Searches for the existence of a key in the monitor cluster.
:param service: six.string_types. The Ceph user name to run the command under
:param key: six.string_types. The key to search for
:return: Returns True if the key exists, False if not and raises an
exception if an unknown error occurs. :raise: CalledProcessError if
an unknown error occurs
"""
try:
check_call(
['ceph', '--id', service,
'config-key', 'exists', str(key)])
# I can return true here regardless because Ceph returns
# ENOENT if the key wasn't found
return True
except CalledProcessError as e:
if e.returncode == errno.ENOENT:
return False
else:
log("Unknown error from ceph config-get exists: {} {}".format(
e.returncode, e.output))
raise
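A short, illustrative round trip with the new monitor key helpers; the 'admin' Ceph user and the key name are assumptions for the example:

from charmhelpers.core.hookenv import log
from charmhelpers.contrib.storage.linux.ceph import (
    monitor_key_set,
    monitor_key_get,
    monitor_key_exists,
    monitor_key_delete,
)

monitor_key_set('admin', 'charm-test-key', 'in-progress')
if monitor_key_exists('admin', 'charm-test-key'):
    # Read the value back, then clean up.
    log("stored value: {}".format(monitor_key_get('admin', 'charm-test-key')))
    monitor_key_delete('admin', 'charm-test-key')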
def get_erasure_profile(service, name): def get_erasure_profile(service, name):
""" """
:param service: six.string_types. The Ceph user name to run the command under :param service: six.string_types. The Ceph user name to run the command under


@ -912,6 +912,24 @@ def payload_status_set(klass, pid, status):
subprocess.check_call(cmd) subprocess.check_call(cmd)
@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
def resource_get(name):
"""used to fetch the resource path of the given name.
<name> must match a name of defined resource in metadata.yaml
returns either a path or False if resource not available
"""
if not name:
return False
cmd = ['resource-get', name]
try:
return subprocess.check_output(cmd).decode('UTF-8')
except subprocess.CalledProcessError:
return False
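An illustrative call (the resource name is hypothetical and must match an entry in metadata.yaml):

from charmhelpers.core.hookenv import resource_get, log, WARNING

path = resource_get('software-bundle')
if not path:
    # The resource has not been attached/downloaded yet.
    log('software-bundle resource is not yet available', level=WARNING)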
@cached @cached
def juju_version(): def juju_version():
"""Full version string (eg. '1.23.3.1-trusty-amd64')""" """Full version string (eg. '1.23.3.1-trusty-amd64')"""
@ -976,3 +994,16 @@ def _run_atexit():
for callback, args, kwargs in reversed(_atexit): for callback, args, kwargs in reversed(_atexit):
callback(*args, **kwargs) callback(*args, **kwargs)
del _atexit[:] del _atexit[:]
@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
def network_get_primary_address(binding):
'''
Retrieve the primary network address for a named binding
:param binding: string. The name of a relation or extra-binding
:return: string. The primary IP address for the named binding
:raise: NotImplementedError if run on Juju < 2.0
'''
cmd = ['network-get', '--primary-address', binding]
return subprocess.check_output(cmd).strip()


@ -30,6 +30,8 @@ import random
import string import string
import subprocess import subprocess
import hashlib import hashlib
import functools
import itertools
from contextlib import contextmanager from contextlib import contextmanager
from collections import OrderedDict from collections import OrderedDict
@ -428,27 +430,47 @@ def restart_on_change(restart_map, stopstart=False):
restarted if any file matching the pattern got changed, created restarted if any file matching the pattern got changed, created
or removed. Standard wildcards are supported, see documentation or removed. Standard wildcards are supported, see documentation
for the 'glob' module for more information. for the 'glob' module for more information.
@param restart_map: {path_file_name: [service_name, ...]
@param stopstart: DEFAULT false; whether to stop, start OR restart
@returns result from decorated function
""" """
def wrap(f): def wrap(f):
@functools.wraps(f)
def wrapped_f(*args, **kwargs): def wrapped_f(*args, **kwargs):
checksums = {path: path_hash(path) for path in restart_map} return restart_on_change_helper(
f(*args, **kwargs) (lambda: f(*args, **kwargs)), restart_map, stopstart)
restarts = []
for path in restart_map:
if path_hash(path) != checksums[path]:
restarts += restart_map[path]
services_list = list(OrderedDict.fromkeys(restarts))
if not stopstart:
for service_name in services_list:
service('restart', service_name)
else:
for action in ['stop', 'start']:
for service_name in services_list:
service(action, service_name)
return wrapped_f return wrapped_f
return wrap return wrap
def restart_on_change_helper(lambda_f, restart_map, stopstart=False):
"""Helper function to perform the restart_on_change function.
This is provided for decorators to restart services if files described
in the restart_map have changed after an invocation of lambda_f().
@param lambda_f: function to call.
@param restart_map: {file: [service, ...]}
@param stopstart: whether to stop, start or restart a service
@returns result of lambda_f()
"""
checksums = {path: path_hash(path) for path in restart_map}
r = lambda_f()
# create a list of lists of the services to restart
restarts = [restart_map[path]
for path in restart_map
if path_hash(path) != checksums[path]]
# create a flat list of ordered services without duplicates from lists
services_list = list(OrderedDict.fromkeys(itertools.chain(*restarts)))
if services_list:
actions = ('stop', 'start') if stopstart else ('restart',)
for action in actions:
for service_name in services_list:
service(action, service_name)
return r
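A sketch of using the helper directly, outside the decorator; the config path and service name are illustrative:

from charmhelpers.core.host import restart_on_change_helper

def write_config_and_restart(render_func):
    # Restart glance-api only if the rendered file's hash actually changed.
    return restart_on_change_helper(
        render_func,
        {'/etc/glance/glance-api.conf': ['glance-api']},
        stopstart=False)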
def lsb_release(): def lsb_release():
"""Return /etc/lsb-release in a dict""" """Return /etc/lsb-release in a dict"""
d = {} d = {}


@ -43,7 +43,7 @@ from charmhelpers.core.hookenv import (
status_set, status_set,
) )
from charmhelpers.core.host import ( from charmhelpers.core.host import (
restart_on_change, # restart_on_change,
service_reload, service_reload,
service_restart, service_restart,
service_stop, service_stop,
@ -65,6 +65,8 @@ from charmhelpers.contrib.openstack.utils import (
openstack_upgrade_available, openstack_upgrade_available,
os_release, os_release,
sync_db_with_multi_ipv6_addresses, sync_db_with_multi_ipv6_addresses,
pausable_restart_on_change as restart_on_change,
is_unit_paused_set,
) )
from charmhelpers.contrib.storage.linux.ceph import ( from charmhelpers.contrib.storage.linux.ceph import (
send_request_if_needed, send_request_if_needed,
@ -280,6 +282,8 @@ def ceph_changed():
CONFIGS.write(ceph_config_file()) CONFIGS.write(ceph_config_file())
# Ensure that glance-api is restarted since only now can we # Ensure that glance-api is restarted since only now can we
# guarantee that ceph resources are ready. # guarantee that ceph resources are ready.
# Don't restart if the unit is in maintenance mode
if not is_unit_paused_set():
service_restart('glance-api') service_restart('glance-api')
else: else:
send_request_if_needed(get_ceph_request()) send_request_if_needed(get_ceph_request())
@ -489,6 +493,7 @@ def configure_https():
# TODO: improve this by checking if local CN certs are available # TODO: improve this by checking if local CN certs are available
# first then checking reload status (see LP #1433114). # first then checking reload status (see LP #1433114).
if not is_unit_paused_set():
service_reload('apache2', restart_on_failure=True) service_reload('apache2', restart_on_failure=True)
for r_id in relation_ids('identity-service'): for r_id in relation_ids('identity-service'):


@ -26,7 +26,6 @@ from charmhelpers.core.hookenv import (
relation_ids, relation_ids,
service_name, service_name,
status_get, status_get,
status_set,
) )
@ -63,6 +62,10 @@ from charmhelpers.contrib.openstack.utils import (
configure_installation_source, configure_installation_source,
os_release, os_release,
set_os_workload_status, set_os_workload_status,
is_unit_paused_set,
make_assess_status_func,
pause_unit,
resume_unit,
) )
from charmhelpers.core.templating import render from charmhelpers.core.templating import render
@ -71,10 +74,7 @@ from charmhelpers.core.decorators import (
retry_on_exception, retry_on_exception,
) )
from charmhelpers.core.unitdata import ( from charmhelpers.core.unitdata import kv
HookData,
kv,
)
CLUSTER_RES = "grp_glance_vips" CLUSTER_RES = "grp_glance_vips"
@ -286,6 +286,8 @@ def do_openstack_upgrade(configs):
[service_stop(s) for s in services()] [service_stop(s) for s in services()]
if is_elected_leader(CLUSTER_RES): if is_elected_leader(CLUSTER_RES):
migrate_database() migrate_database()
# Don't start services if the unit is supposed to be paused.
if not is_unit_paused_set():
[service_start(s) for s in services()] [service_start(s) for s in services()]
@ -433,6 +435,8 @@ def git_post_install(projects_yaml):
render('git.upstart', '/etc/init/glance-registry.conf', render('git.upstart', '/etc/init/glance-registry.conf',
glance_registry_context, perms=0o644, templates_dir=templates_dir) glance_registry_context, perms=0o644, templates_dir=templates_dir)
# Don't restart services if the unit is supposed to be paused.
if not is_unit_paused_set():
service_restart('glance-api') service_restart('glance-api')
service_restart('glance-registry') service_restart('glance-registry')
@ -503,32 +507,71 @@ def swift_temp_url_key():
return connect_and_post() return connect_and_post()
def is_paused(status_get=status_get):
"""Is the unit paused?"""
with HookData()():
if kv().get('unit-paused'):
return True
else:
return False
def assess_status(configs): def assess_status(configs):
"""Assess status of current unit """Assess status of current unit
Decides what the state of the unit should be based on the current Decides what the state of the unit should be based on the current
configuration. configuration.
SIDE EFFECT: calls set_os_workload_status(...) which sets the workload
status of the unit.
Also calls status_set(...) directly if paused state isn't complete.
@param configs: a templating.OSConfigRenderer() object @param configs: a templating.OSConfigRenderer() object
@returns None - this function is executed for its side-effect
""" """
assess_status_func(configs)()
if is_paused():
status_set("maintenance",
"Paused. Use 'resume' action to resume normal service.")
return
# set the status according to the current state of the contexts def assess_status_func(configs):
set_os_workload_status( """Helper function to create the function that will assess_status() for
configs, REQUIRED_INTERFACES, charm_func=check_optional_relations) the unit.
Uses charmhelpers.contrib.openstack.utils.make_assess_status_func() to
create the appropriate status function and then returns it.
Used directly by assess_status() and also for pausing and resuming
the unit.
NOTE(ajkavanagh) ports are not checked due to race hazards with services
that don't behave synchronously w.r.t. their service scripts. e.g.
apache2.
@param configs: a templating.OSConfigRenderer() object
@return f() -> None : a function that assesses the unit's workload status
"""
return make_assess_status_func(
configs, REQUIRED_INTERFACES,
charm_func=check_optional_relations,
services=services(), ports=None)
def pause_unit_helper(configs):
"""Helper function to pause a unit, and then call assess_status(...) in
effect, so that the status is correctly updated.
Uses charmhelpers.contrib.openstack.utils.pause_unit() to do the work.
@param configs: a templating.OSConfigRenderer() object
@returns None - this function is executed for its side-effect
"""
_pause_resume_helper(pause_unit, configs)
def resume_unit_helper(configs):
"""Helper function to resume a unit, and then call assess_status(...) in
effect, so that the status is correctly updated.
Uses charmhelpers.contrib.openstack.utils.resume_unit() to do the work.
@param configs: a templating.OSConfigRenderer() object
@returns None - this function is executed for its side-effect
"""
_pause_resume_helper(resume_unit, configs)
def _pause_resume_helper(f, configs):
"""Helper function that uses the make_assess_status_func(...) from
charmhelpers.contrib.openstack.utils to create an assess_status(...)
function that can be used with the pause/resume of the unit
@param f: the function to be used with the assess_status(...) function
@returns None - this function is executed for its side-effect
"""
# TODO(ajkavanagh) - ports= has been left off because of the race hazard
# that exists due to service_start()
f(assess_status_func(configs),
services=services(),
ports=None)
PASTE_INI_MARKER = 'paste-ini-marker' PASTE_INI_MARKER = 'paste-ini-marker'


@ -27,7 +27,11 @@ import cinderclient.v1.client as cinder_client
import glanceclient.v1.client as glance_client import glanceclient.v1.client as glance_client
import heatclient.v1.client as heat_client import heatclient.v1.client as heat_client
import keystoneclient.v2_0 as keystone_client import keystoneclient.v2_0 as keystone_client
import novaclient.v1_1.client as nova_client from keystoneclient.auth.identity import v3 as keystone_id_v3
from keystoneclient import session as keystone_session
from keystoneclient.v3 import client as keystone_client_v3
import novaclient.client as nova_client
import pika import pika
import swiftclient import swiftclient
@ -38,6 +42,8 @@ from charmhelpers.contrib.amulet.utils import (
DEBUG = logging.DEBUG DEBUG = logging.DEBUG
ERROR = logging.ERROR ERROR = logging.ERROR
NOVA_CLIENT_VERSION = "2"
class OpenStackAmuletUtils(AmuletUtils): class OpenStackAmuletUtils(AmuletUtils):
"""OpenStack amulet utilities. """OpenStack amulet utilities.
@ -139,7 +145,7 @@ class OpenStackAmuletUtils(AmuletUtils):
return "role {} does not exist".format(e['name']) return "role {} does not exist".format(e['name'])
return ret return ret
def validate_user_data(self, expected, actual): def validate_user_data(self, expected, actual, api_version=None):
"""Validate user data. """Validate user data.
Validate a list of actual user data vs a list of expected user Validate a list of actual user data vs a list of expected user
@ -150,10 +156,15 @@ class OpenStackAmuletUtils(AmuletUtils):
for e in expected: for e in expected:
found = False found = False
for act in actual: for act in actual:
if e['name'] == act.name:
a = {'enabled': act.enabled, 'name': act.name, a = {'enabled': act.enabled, 'name': act.name,
'email': act.email, 'tenantId': act.tenantId, 'email': act.email, 'id': act.id}
'id': act.id} if api_version == 3:
if e['name'] == a['name']: a['default_project_id'] = getattr(act,
'default_project_id',
'none')
else:
a['tenantId'] = act.tenantId
found = True found = True
ret = self._validate_dict_data(e, a) ret = self._validate_dict_data(e, a)
if ret: if ret:
@ -188,15 +199,30 @@ class OpenStackAmuletUtils(AmuletUtils):
return cinder_client.Client(username, password, tenant, ept) return cinder_client.Client(username, password, tenant, ept)
def authenticate_keystone_admin(self, keystone_sentry, user, password, def authenticate_keystone_admin(self, keystone_sentry, user, password,
tenant): tenant=None, api_version=None,
keystone_ip=None):
"""Authenticates admin user with the keystone admin endpoint.""" """Authenticates admin user with the keystone admin endpoint."""
self.log.debug('Authenticating keystone admin...') self.log.debug('Authenticating keystone admin...')
unit = keystone_sentry unit = keystone_sentry
service_ip = unit.relation('shared-db', if not keystone_ip:
keystone_ip = unit.relation('shared-db',
'mysql:shared-db')['private-address'] 'mysql:shared-db')['private-address']
ep = "http://{}:35357/v2.0".format(service_ip.strip().decode('utf-8')) base_ep = "http://{}:35357".format(keystone_ip.strip().decode('utf-8'))
if not api_version or api_version == 2:
ep = base_ep + "/v2.0"
return keystone_client.Client(username=user, password=password, return keystone_client.Client(username=user, password=password,
tenant_name=tenant, auth_url=ep) tenant_name=tenant, auth_url=ep)
else:
ep = base_ep + "/v3"
auth = keystone_id_v3.Password(
user_domain_name='admin_domain',
username=user,
password=password,
domain_name='admin_domain',
auth_url=ep,
)
sess = keystone_session.Session(auth=auth)
return keystone_client_v3.Client(session=sess)
def authenticate_keystone_user(self, keystone, user, password, tenant): def authenticate_keystone_user(self, keystone, user, password, tenant):
"""Authenticates a regular user with the keystone public endpoint.""" """Authenticates a regular user with the keystone public endpoint."""
@ -225,7 +251,8 @@ class OpenStackAmuletUtils(AmuletUtils):
self.log.debug('Authenticating nova user ({})...'.format(user)) self.log.debug('Authenticating nova user ({})...'.format(user))
ep = keystone.service_catalog.url_for(service_type='identity', ep = keystone.service_catalog.url_for(service_type='identity',
endpoint_type='publicURL') endpoint_type='publicURL')
return nova_client.Client(username=user, api_key=password, return nova_client.Client(NOVA_CLIENT_VERSION,
username=user, api_key=password,
project_id=tenant, auth_url=ep) project_id=tenant, auth_url=ep)
def authenticate_swift_user(self, keystone, user, password, tenant): def authenticate_swift_user(self, keystone, user, password, tenant):


@ -1,14 +1,14 @@
import os import os
import mock import mock
from mock import patch
from test_utils import CharmTestCase from test_utils import CharmTestCase
from mock import patch
os.environ['JUJU_UNIT_NAME'] = 'glance' os.environ['JUJU_UNIT_NAME'] = 'glance'
with patch('actions.hooks.glance_utils.is_paused') as is_paused:
with patch('actions.hooks.glance_utils.register_configs') as configs: with patch('actions.hooks.glance_utils.register_configs') as configs:
configs.return_value = 'test-config'
import actions.actions import actions.actions
@ -16,93 +16,22 @@ class PauseTestCase(CharmTestCase):
def setUp(self): def setUp(self):
super(PauseTestCase, self).setUp( super(PauseTestCase, self).setUp(
actions.actions, ["service_pause", "HookData", "kv", actions.actions, ["pause_unit_helper"])
"assess_status"])
def test_pauses_services(self): def test_pauses_services(self):
"""Pause action pauses all Glance services."""
pause_calls = []
def fake_service_pause(svc):
pause_calls.append(svc)
return True
self.service_pause.side_effect = fake_service_pause
actions.actions.pause([]) actions.actions.pause([])
self.assertItemsEqual( self.pause_unit_helper.assert_called_once_with('test-config')
pause_calls,
['glance-api', 'glance-registry', 'haproxy', 'apache2'])
def test_bails_out_early_on_error(self):
"""Pause action fails early if there are errors stopping a service."""
pause_calls = []
def maybe_kill(svc):
if svc == "glance-registry":
return False
else:
pause_calls.append(svc)
return True
self.service_pause.side_effect = maybe_kill
self.assertRaisesRegexp(
Exception, "glance-registry didn't stop cleanly.",
actions.actions.pause, [])
self.assertEqual(pause_calls, ['haproxy', 'glance-api'])
def test_pause_sets_value(self):
"""Pause action sets the unit-paused value to True."""
self.HookData()().return_value = True
actions.actions.pause([])
self.kv().set.assert_called_with('unit-paused', True)
class ResumeTestCase(CharmTestCase): class ResumeTestCase(CharmTestCase):
def setUp(self): def setUp(self):
super(ResumeTestCase, self).setUp( super(ResumeTestCase, self).setUp(
actions.actions, ["service_resume", "HookData", "kv", actions.actions, ["resume_unit_helper"])
"assess_status"])
def test_resumes_services(self): def test_pauses_services(self):
"""Resume action resumes all Glance services."""
resume_calls = []
def fake_service_resume(svc):
resume_calls.append(svc)
return True
self.service_resume.side_effect = fake_service_resume
actions.actions.resume([]) actions.actions.resume([])
self.assertItemsEqual( self.resume_unit_helper.assert_called_once_with('test-config')
resume_calls,
['glance-api', 'glance-registry', 'haproxy', 'apache2'])
def test_bails_out_early_on_error(self):
"""Resume action fails early if there are errors starting a service."""
resume_calls = []
def maybe_kill(svc):
if svc == "glance-registry":
return False
else:
resume_calls.append(svc)
return True
self.service_resume.side_effect = maybe_kill
self.assertRaisesRegexp(
Exception, "glance-registry didn't start cleanly.",
actions.actions.resume, [])
self.assertEqual(resume_calls, ['haproxy', 'glance-api'])
def test_resume_sets_value(self):
"""Resume action sets the unit-paused value to False."""
self.HookData()().return_value = True
actions.actions.resume([])
self.kv().set.assert_called_with('unit-paused', False)
class MainTestCase(CharmTestCase): class MainTestCase(CharmTestCase):


@ -287,39 +287,47 @@ class TestGlanceUtils(CharmTestCase):
] ]
self.assertEquals(service_restart.call_args_list, expected) self.assertEquals(service_restart.call_args_list, expected)
@patch.object(utils, 'HookData') def test_assess_status(self):
@patch.object(utils, 'kv') with patch.object(utils, 'assess_status_func') as asf:
def test_is_paused(self, kv, HookData): callee = MagicMock()
"""test_is_paused: Test is_paused() returns value asf.return_value = callee
from kv('unit-paused')""" utils.assess_status('test-config')
HookData()().return_value = True asf.assert_called_once_with('test-config')
kv().get.return_value = True callee.assert_called_once_with()
self.assertEqual(utils.is_paused(), True)
kv().get.assert_called_with('unit-paused')
kv().get.return_value = False
self.assertEqual(utils.is_paused(), False)
@patch.object(utils, 'is_paused') @patch.object(utils, 'REQUIRED_INTERFACES')
@patch.object(utils, 'status_set') @patch.object(utils, 'services')
def test_assess_status(self, status_set, is_paused): @patch.object(utils, 'make_assess_status_func')
"""test_assess_status: verify that it does pick the right status""" def test_assess_status_func(self,
# check that paused status does the right thing make_assess_status_func,
is_paused.return_value = True services,
utils.assess_status(None) REQUIRED_INTERFACES):
status_set.assert_called_with( services.return_value = 's1'
"maintenance", utils.assess_status_func('test-config')
"Paused. Use 'resume' action to resume normal service.") # ports=None whilst port checks are disabled.
make_assess_status_func.assert_called_once_with(
'test-config', REQUIRED_INTERFACES,
charm_func=utils.check_optional_relations,
services='s1', ports=None)
# if it isn't paused, the assess_status() calls def test_pause_unit_helper(self):
# set_os_workload_status() with patch.object(utils, '_pause_resume_helper') as prh:
is_paused.return_value = False utils.pause_unit_helper('random-config')
with patch.object(utils, 'set_os_workload_status') \ prh.assert_called_once_with(utils.pause_unit, 'random-config')
as set_os_workload_status: with patch.object(utils, '_pause_resume_helper') as prh:
utils.assess_status("TEST CONFIG") utils.resume_unit_helper('random-config')
set_os_workload_status.assert_called_with( prh.assert_called_once_with(utils.resume_unit, 'random-config')
"TEST CONFIG",
utils.REQUIRED_INTERFACES, @patch.object(utils, 'services')
charm_func=utils.check_optional_relations) def test_pause_resume_helper(self, services):
f = MagicMock()
services.return_value = 's1'
with patch.object(utils, 'assess_status_func') as asf:
asf.return_value = 'assessor'
utils._pause_resume_helper(f, 'some-config')
asf.assert_called_once_with('some-config')
# ports=None whilst port checks are disabled.
f.assert_called_once_with('assessor', services='s1', ports=None)
@patch.object(utils, 'os') @patch.object(utils, 'os')
@patch.object(utils, 'kv') @patch.object(utils, 'kv')