[hopem,r=wolsen,james-page]

Ensure services are restarted after the db is initialised.

Closes-Bug: 1457267
This commit is contained in:
Billy Olsen 2015-06-04 10:04:03 -07:00
commit 1fcaffbb9b
4 changed files with 95 additions and 3 deletions

View File

@ -24,7 +24,8 @@ from cinder_utils import (
CINDER_CONF,
CINDER_API_CONF,
ceph_config_file,
setup_ipv6
setup_ipv6,
check_db_initialised,
)
from charmhelpers.core.hookenv import (
@ -40,7 +41,7 @@ from charmhelpers.core.hookenv import (
unit_get,
log,
ERROR,
INFO
INFO,
)
from charmhelpers.fetch import (
@ -365,6 +366,11 @@ def cluster_joined(relation_id=None):
relation_id=relation_id,
relation_settings={'{}-address'.format(addr_type): address}
)
# Only do if this is fired by cluster rel
if not relation_id:
check_db_initialised()
if config('prefer-ipv6'):
private_addr = get_ipv6_addr(exc_list=[config('vip')])[0]
relation_set(relation_id=relation_id,
@ -375,6 +381,7 @@ def cluster_joined(relation_id=None):
'cluster-relation-departed')
@restart_on_change(restart_map(), stopstart=True)
def cluster_changed():
    """Handle a cluster (peer) relation change.

    A peer may have advertised that the database has been initialised;
    restart services for that notification (if pending) before rewriting
    all configuration files.
    """
    check_db_initialised()
    CONFIGS.write_all()

View File

@ -1,6 +1,7 @@
import os
import shutil
import subprocess
import uuid
from collections import OrderedDict
from copy import copy
@ -12,8 +13,12 @@ from charmhelpers.contrib.python.packages import (
from charmhelpers.core.hookenv import (
charm_dir,
config,
local_unit,
relation_get,
relation_set,
relation_ids,
log,
DEBUG,
service_name
)
@ -127,6 +132,9 @@ DEFAULT_LOOPBACK_SIZE = '5G'
# Cluster resource used to determine leadership when hacluster'd
CLUSTER_RES = 'grp_cinder_vips'
CINDER_DB_INIT_RKEY = 'cinder-db-initialised'
CINDER_DB_INIT_ECHO_RKEY = 'cinder-db-initialised-echo'
class CinderCharmError(Exception):
pass
@ -305,6 +313,15 @@ def restart_map():
return OrderedDict(_map)
def enabled_services():
    """Return the de-duplicated list of services managed by this charm.

    Aggregates the service lists from every restart_map() entry.

    :returns: list of service names (order not guaranteed).
    """
    svcs = set()
    # Only the values (service lists) are needed; .values() is also
    # portable across Python 2 and 3, unlike the previous .iteritems().
    for services in restart_map().values():
        svcs.update(services)
    return list(svcs)
def services():
''' Returns a list of services associate with this charm '''
_services = []
@ -451,10 +468,39 @@ def _parse_block_device(block_device):
return ('/dev/{}'.format(block_device), 0)
def check_db_initialised():
    """Restart services if a peer has notified us of db initialisation.

    Once the database has been migrated, the migrating unit advertises
    CINDER_DB_INIT_RKEY on the cluster relation. When this unit sees a
    notification that (a) it has not already echoed back and (b) did not
    originate from itself, it restarts its services and echoes the
    notification id so the restart happens at most once per notification.
    """
    settings = relation_get() or {}
    if not settings:
        return

    init_id = settings.get(CINDER_DB_INIT_RKEY)
    if not init_id:
        return

    echoed_init_id = relation_get(unit=local_unit(),
                                  attribute=CINDER_DB_INIT_ECHO_RKEY)
    # The notification id is "<unit-name>-<uuid>". Use a prefix match
    # rather than a bare substring test so that e.g. 'unit/1' does not
    # spuriously match a notification that came from 'unit/11'.
    from_self = init_id.startswith('%s-' % local_unit())
    if init_id != echoed_init_id and not from_self:
        log("Restarting cinder services following db initialisation",
            level=DEBUG)
        for svc in enabled_services():
            service_restart(svc)

        # Echo the id so we don't restart again for this notification.
        relation_set(**{CINDER_DB_INIT_ECHO_RKEY: init_id})
def migrate_database():
    """Initialise a new database or migrate an existing one.

    Runs 'cinder-manage db sync', then restarts local services and
    notifies peers on the cluster relation(s) so they restart theirs too
    (see check_db_initialised()).

    :raises: subprocess.CalledProcessError if the db sync fails.
    """
    subprocess.check_call(['cinder-manage', 'db', 'sync'])

    peer_rids = relation_ids('cluster')
    if peer_rids:
        log("Notifying peer(s) that db is initialised and restarting "
            "services", level=DEBUG)
        # Restart local services exactly once, not once per relation as
        # the loop previously did.
        for svc in enabled_services():
            service_restart(svc)

        # Tag the notification with our unit name so peers can ignore
        # notifications that originated from themselves; use a single id
        # for all relations so echo matching stays consistent. (Avoid
        # 'id' as a name - it shadows the builtin.)
        init_id = "%s-%s" % (local_unit(), uuid.uuid4())
        for r_id in peer_rids:
            relation_set(relation_id=r_id,
                         **{CINDER_DB_INIT_RKEY: init_id})
def set_ceph_env_variables(service):

View File

@ -14,10 +14,14 @@ TO_PATCH = [
# helpers.core.hookenv
'config',
'log',
'relation_get',
'relation_set',
'local_unit',
# helpers.core.host
'mounts',
'umount',
'mkdir',
'service_restart',
# ceph utils
# storage_utils
'create_lvm_physical_volume',
@ -26,6 +30,7 @@ TO_PATCH = [
'is_lvm_physical_volume',
'list_lvm_volume_group',
'relation_ids',
'relation_set',
'remove_lvm_physical_volume',
'ensure_loopback_device',
'is_block_device',
@ -406,11 +411,19 @@ class TestCinderUtils(CharmTestCase):
cinder_utils.extend_lvm_volume_group('test', '/dev/sdb')
_call.assert_called_with(['vgextend', 'test', '/dev/sdb'])
@patch.object(cinder_utils, 'local_unit', lambda *args: 'unit/0')
@patch.object(cinder_utils, 'uuid')
def test_migrate_database(self, mock_uuid):
    """It migrates the database with cinder-manage and notifies peers.

    The peer notification must carry the '<unit>-<uuid>' id on the
    cluster relation so peers can detect (and echo) the db init event.
    """
    uuid = 'a-great-uuid'
    mock_uuid.uuid4.return_value = uuid
    rid = 'cluster:0'
    self.relation_ids.return_value = [rid]
    # Expected peer notification payload.
    args = {'cinder-db-initialised': "unit/0-%s" % uuid}
    with patch('subprocess.check_call') as check_call:
        cinder_utils.migrate_database()
        check_call.assert_called_with(['cinder-manage', 'db', 'sync'])
        self.relation_set.assert_called_with(relation_id=rid, **args)
@patch('os.path.exists')
def test_register_configs_apache(self, exists):
@ -673,3 +686,26 @@ class TestCinderUtils(CharmTestCase):
call('cinder-scheduler'),
]
self.assertEquals(service_restart.call_args_list, expected)
@patch.object(cinder_utils, 'local_unit', lambda *args: 'unit/0')
def test_check_db_initialised_by_self(self):
    """No echo (or restart) when the notification came from this unit."""
    # No peer settings at all -> nothing to do.
    self.relation_get.return_value = {}
    cinder_utils.check_db_initialised()
    self.assertFalse(self.relation_set.called)
    # Notification tagged with our own unit name -> ignored.
    self.relation_get.return_value = {'cinder-db-initialised':
                                      'unit/0-1234'}
    cinder_utils.check_db_initialised()
    self.assertFalse(self.relation_set.called)
@patch.object(cinder_utils, 'local_unit', lambda *args: 'unit/0')
def test_check_db_initialised(self):
    """A peer's notification is acknowledged via the echo key."""
    # No peer settings -> no echo.
    self.relation_get.return_value = {}
    cinder_utils.check_db_initialised()
    self.assertFalse(self.relation_set.called)
    # Notification from another unit -> echoed back on the relation.
    self.relation_get.return_value = {'cinder-db-initialised':
                                      'unit/1-1234'}
    cinder_utils.check_db_initialised()
    calls = [call(**{'cinder-db-initialised-echo': 'unit/1-1234'})]
    self.relation_set.assert_has_calls(calls)

View File

@ -63,6 +63,7 @@ class TestClusterHooks(CharmTestCase):
super(TestClusterHooks, self).setUp(hooks, TO_PATCH)
self.config.side_effect = self.test_config.get
@patch.object(hooks, 'check_db_initialised', lambda *args, **kwargs: None)
@patch('charmhelpers.core.host.service')
@patch('charmhelpers.core.host.file_hash')
def test_cluster_hook(self, file_hash, service):
@ -88,12 +89,14 @@ class TestClusterHooks(CharmTestCase):
call('start', 'apache2')]
self.assertEquals(ex, service.call_args_list)
@patch.object(hooks, 'check_db_initialised', lambda *args, **kwargs: None)
def test_cluster_joined_hook(self):
self.config.side_effect = self.test_config.get
self.get_address_in_network.return_value = None
hooks.hooks.execute(['hooks/cluster-relation-joined'])
self.assertFalse(self.relation_set.called)
@patch.object(hooks, 'check_db_initialised', lambda *args, **kwargs: None)
def test_cluster_joined_hook_multinet(self):
self.config.side_effect = self.test_config.get
self.get_address_in_network.side_effect = [