[hopem,r=]

Ensure services are restarted after db initialised.

Closes-Bug: 1457267
This commit is contained in:
Edward Hope-Morley 2015-05-22 12:31:45 -07:00
parent f3c77b7769
commit 619ce06579
4 changed files with 76 additions and 3 deletions

View File

@ -24,7 +24,8 @@ from cinder_utils import (
CINDER_CONF,
CINDER_API_CONF,
ceph_config_file,
setup_ipv6
setup_ipv6,
check_db_initialised,
)
from charmhelpers.core.hookenv import (
@ -40,7 +41,7 @@ from charmhelpers.core.hookenv import (
unit_get,
log,
ERROR,
INFO
INFO,
)
from charmhelpers.fetch import (
@ -365,6 +366,11 @@ def cluster_joined(relation_id=None):
relation_id=relation_id,
relation_settings={'{}-address'.format(addr_type): address}
)
# Only do if this is fired by cluster rel
if not relation_id:
check_db_initialised(passthrough=True)
if config('prefer-ipv6'):
private_addr = get_ipv6_addr(exc_list=[config('vip')])[0]
relation_set(relation_id=relation_id,
@ -375,6 +381,7 @@ def cluster_joined(relation_id=None):
'cluster-relation-departed')
@restart_on_change(restart_map(), stopstart=True)
def cluster_changed():
check_db_initialised()
CONFIGS.write_all()

View File

@ -1,6 +1,7 @@
import os
import shutil
import subprocess
import uuid
from collections import OrderedDict
from copy import copy
@ -8,8 +9,12 @@ from copy import copy
from charmhelpers.core.hookenv import (
charm_dir,
config,
local_unit,
relation_get,
relation_set,
relation_ids,
log,
DEBUG,
service_name
)
@ -295,6 +300,15 @@ def restart_map():
return OrderedDict(_map)
def enabled_services():
    """Return a deduplicated list of all services managed by this charm.

    Derived from restart_map(), which maps config file -> list of services
    that depend on it; we only need the union of the service lists.
    """
    svcs = set()
    # Only the values are needed, so iterate them directly rather than
    # unpacking (key, value) pairs.
    for svc_list in restart_map().itervalues():
        svcs.update(svc_list)
    return list(svcs)
def services():
''' Returns a list of services associate with this charm '''
_services = []
@ -441,10 +455,37 @@ def _parse_block_device(block_device):
return ('/dev/{}'.format(block_device), 0)
def check_db_initialised(passthrough=False):
    """Check if we have received db init'd notify and restart services if we
    have not already.

    A peer advertises db initialisation by setting 'cinder-db-initialised'
    to a fresh uuid on the cluster relation (see migrate_database()). We
    echo the value back on the relation once processed so we only react to
    each notification once.

    :param passthrough: if True, skip restarting services and only echo the
                        notification back on the relation (used when fired
                        from a hook other than cluster-relation-changed).
    """
    key = 'cinder-db-initialised'
    settings = relation_get() or {}
    if not settings:
        return

    db_init = settings.get(key)
    # The value we last echoed back for this unit; if it matches, this
    # notification has already been handled.
    echoed_db_init = relation_get(unit=local_unit(), attribute=key)
    if db_init and db_init != echoed_db_init:
        if not passthrough:
            log("Restarting cinder services following db initialisation",
                level=DEBUG)
            for svc in enabled_services():
                service_restart(svc)
        else:
            log("Passthrough db init", level=DEBUG)

        # Echo the notification so peers (and we) know it was processed.
        relation_set(**{key: db_init})
def migrate_database():
    """Run cinder-manage to initialise a new database or migrate existing.

    Once the sync has completed, notify cluster peers by setting a fresh
    uuid on each cluster relation; peers react to the changed value in
    check_db_initialised() and restart their services.
    """
    subprocess.check_call(['cinder-manage', 'db', 'sync'])
    # Notify peers so that services get restarted.
    log("Notifying peer(s) that db is initialised", level=DEBUG)
    key = 'cinder-db-initialised'
    for r_id in relation_ids('cluster'):
        # NOTE: the original called enabled_services() here and discarded
        # the result - it has no side effects, so the dead call is removed.
        relation_set(relation_id=r_id, **{key: str(uuid.uuid4())})
def set_ceph_env_variables(service):

View File

@ -14,10 +14,14 @@ TO_PATCH = [
# helpers.core.hookenv
'config',
'log',
'relation_get',
'relation_set',
'local_unit',
# helpers.core.host
'mounts',
'umount',
'mkdir',
'service_restart',
# ceph utils
# storage_utils
'create_lvm_physical_volume',
@ -26,6 +30,7 @@ TO_PATCH = [
'is_lvm_physical_volume',
'list_lvm_volume_group',
'relation_ids',
'relation_set',
'remove_lvm_physical_volume',
'ensure_loopback_device',
'is_block_device',
@ -406,11 +411,18 @@ class TestCinderUtils(CharmTestCase):
cinder_utils.extend_lvm_volume_group('test', '/dev/sdb')
_call.assert_called_with(['vgextend', 'test', '/dev/sdb'])
def test_migrate_database(self):
@patch.object(cinder_utils, 'uuid')
def test_migrate_database(self, mock_uuid):
    """It migrates database with cinder-manage and notifies peers.

    migrate_database() must run 'cinder-manage db sync' and then set the
    generated uuid as 'cinder-db-initialised' on each cluster relation.
    """
    # Pin uuid4 so the relation_set payload is predictable.
    uuid = 'a-great-uuid'
    mock_uuid.uuid4.return_value = uuid
    rid = 'cluster:0'
    self.relation_ids.return_value = [rid]
    args = {'cinder-db-initialised': uuid}
    with patch('subprocess.check_call') as check_call:
        cinder_utils.migrate_database()
        check_call.assert_called_with(['cinder-manage', 'db', 'sync'])
        self.relation_set.assert_called_with(relation_id=rid, **args)
@patch('os.path.exists')
def test_register_configs_apache(self, exists):
@ -663,3 +675,13 @@ class TestCinderUtils(CharmTestCase):
call('cinder-scheduler'),
]
self.assertEquals(service_restart.call_args_list, expected)
def test_check_db_initialised(self):
    """check_db_initialised() only reacts to a peer's notification.

    With no relation data nothing is echoed back; once a peer has set
    'cinder-db-initialised', the value is echoed via relation_set.
    """
    # No notification on the relation -> no-op.
    self.relation_get.return_value = {}
    cinder_utils.check_db_initialised()
    self.assertFalse(self.relation_set.called)
    # Peer notification present -> value is echoed back.
    self.relation_get.return_value = {'cinder-db-initialised': '1234'}
    cinder_utils.check_db_initialised()
    calls = [call(**{'cinder-db-initialised': '1234'})]
    self.relation_set.assert_has_calls(calls)

View File

@ -63,6 +63,7 @@ class TestClusterHooks(CharmTestCase):
super(TestClusterHooks, self).setUp(hooks, TO_PATCH)
self.config.side_effect = self.test_config.get
@patch.object(hooks, 'check_db_initialised', lambda *args, **kwargs: None)
@patch('charmhelpers.core.host.service')
@patch('charmhelpers.core.host.file_hash')
def test_cluster_hook(self, file_hash, service):
@ -88,12 +89,14 @@ class TestClusterHooks(CharmTestCase):
call('start', 'apache2')]
self.assertEquals(ex, service.call_args_list)
# check_db_initialised needs relation data not available in this test, so
# stub it out with a no-op.
@patch.object(hooks, 'check_db_initialised', lambda *args, **kwargs: None)
def test_cluster_joined_hook(self):
    """cluster-relation-joined sets no relation data when no address is
    resolved for the configured network (and ipv6 is not preferred).
    """
    self.config.side_effect = self.test_config.get
    self.get_address_in_network.return_value = None
    hooks.hooks.execute(['hooks/cluster-relation-joined'])
    self.assertFalse(self.relation_set.called)
@patch.object(hooks, 'check_db_initialised', lambda *args, **kwargs: None)
def test_cluster_joined_hook_multinet(self):
self.config.side_effect = self.test_config.get
self.get_address_in_network.side_effect = [