cinder-backup service should be optional

The charm lists cinder-backup as a core service that is always restarted
when its config files change. However, cinder-backup is only present when
the cinder-backup subordinate charm has installed it. The new pause/resume
code puts the charm into a blocked state in that case, so it needs fixing.
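The fix is to build the restart/resource map dynamically and only add
cinder-backup when the backup-backend relation is present. A minimal,
self-contained sketch of that idea (relation_ids below is a stand-in for
the charm-helpers hook call, and the config path and service names are
illustrative only):

    from collections import OrderedDict
    from copy import deepcopy

    def relation_ids(relname, _fake_relations={'backup-backend': []}):
        # Stand-in for charmhelpers.core.hookenv.relation_ids in this sketch.
        return _fake_relations.get(relname, [])

    BASE_RESOURCE_MAP = OrderedDict([
        ('/etc/cinder/cinder.conf', {
            'contexts': [],
            'services': ['cinder-api', 'cinder-volume', 'cinder-scheduler'],
        }),
    ])

    def resource_map():
        # Only manage cinder-backup when the subordinate relation exists.
        rmap = deepcopy(BASE_RESOURCE_MAP)
        if relation_ids('backup-backend'):
            rmap['/etc/cinder/cinder.conf']['services'].append('cinder-backup')
        return rmap

    print(resource_map()['/etc/cinder/cinder.conf']['services'])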

This charm was also not following some of the OpenStack charm conventions,
such as driving register_configs from a resource_map; it also used
'hook_contexts' rather than 'contexts' as the key in the resource map dict.
These have been fixed.
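With the resource map in place, register_configs just walks the map and
registers each config file against its 'contexts' list, and restart_map
falls out of the same data. A rough standalone sketch of the pattern (the
ConfigRenderer class and the context names here are placeholders, not the
real charm-helpers OSConfigRenderer):

    from collections import OrderedDict

    class ConfigRenderer(object):
        # Tiny stand-in for charm-helpers' OSConfigRenderer in this sketch.
        def __init__(self):
            self.registered = {}

        def register(self, config_file, contexts):
            self.registered[config_file] = contexts

    def resource_map():
        # In the charm this is built dynamically (see the diff below).
        return OrderedDict([
            ('/etc/cinder/cinder.conf', {'contexts': ['db', 'amqp'],
                                         'services': ['cinder-api']}),
            ('/etc/haproxy/haproxy.cfg', {'contexts': ['haproxy'],
                                          'services': ['haproxy']}),
        ])

    def register_configs():
        configs = ConfigRenderer()
        for cfg, rscs in resource_map().items():
            configs.register(cfg, rscs['contexts'])
        return configs

    def restart_map():
        # Config files with no enabled services are dropped from the map.
        return OrderedDict([(cfg, v['services'])
                            for cfg, v in resource_map().items()
                            if v['services']])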

Closes-Bug: 1563340
Change-Id: I2fd46e96e60dd60462a4ffa89f7f84e9fd31d234
Liam Young 2016-03-29 14:01:56 +00:00
parent 9969922c88
commit f434b40b04
2 changed files with 261 additions and 146 deletions

cinder_utils.py

@@ -3,6 +3,7 @@ import shutil
 import subprocess
 import uuid
 
+from copy import deepcopy
 from collections import OrderedDict
 from copy import copy
@@ -182,57 +183,57 @@ def ceph_config_file():
 # Map config files to hook contexts and services that will be associated
 # with file in restart_on_changes()'s service map.
-CONFIG_FILES = OrderedDict([
+BASE_RESOURCE_MAP = OrderedDict([
     (CINDER_CONF, {
-        'hook_contexts': [context.SharedDBContext(ssl_dir=CINDER_CONF_DIR),
+        'contexts': [context.SharedDBContext(ssl_dir=CINDER_CONF_DIR),
                      context.PostgresqlDBContext(),
                      context.AMQPContext(ssl_dir=CINDER_CONF_DIR),
                      context.ImageServiceContext(),
                      context.OSConfigFlagContext(),
                      context.SyslogContext(),
                      cinder_contexts.CephContext(),
                      cinder_contexts.HAProxyContext(),
                      cinder_contexts.ImageServiceContext(),
                      cinder_contexts.CinderSubordinateConfigContext(
                          interface=['storage-backend', 'backup-backend'],
                          service='cinder',
                          config_file=CINDER_CONF),
                      cinder_contexts.StorageBackendContext(),
                      cinder_contexts.LoggingConfigContext(),
                      context.IdentityServiceContext(
                          service='cinder',
                          service_user='cinder'),
                      context.BindHostContext(),
                      context.WorkerConfigContext(),
                      cinder_contexts.RegionContext()],
-        'services': ['cinder-api', 'cinder-volume', 'cinder-backup',
-                     'cinder-scheduler', 'haproxy']
+        'services': ['cinder-api', 'cinder-volume', 'cinder-scheduler',
+                     'haproxy']
     }),
     (CINDER_API_CONF, {
-        'hook_contexts': [context.IdentityServiceContext()],
+        'contexts': [context.IdentityServiceContext()],
         'services': ['cinder-api'],
     }),
     (ceph_config_file(), {
-        'hook_contexts': [context.CephContext()],
-        'services': ['cinder-volume', 'cinder-backup']
+        'contexts': [context.CephContext()],
+        'services': ['cinder-volume']
     }),
     (HAPROXY_CONF, {
-        'hook_contexts': [context.HAProxyContext(singlenode_mode=True),
+        'contexts': [context.HAProxyContext(singlenode_mode=True),
                      cinder_contexts.HAProxyContext()],
         'services': ['haproxy'],
     }),
     (APACHE_SITE_CONF, {
-        'hook_contexts': [cinder_contexts.ApacheSSLContext()],
+        'contexts': [cinder_contexts.ApacheSSLContext()],
         'services': ['apache2'],
     }),
     (APACHE_SITE_24_CONF, {
-        'hook_contexts': [cinder_contexts.ApacheSSLContext()],
+        'contexts': [cinder_contexts.ApacheSSLContext()],
         'services': ['apache2'],
     }),
 ])
 
 
-def register_configs():
+def register_configs(release=None):
     """Register config files with their respective contexts.
     Regstration of some configs may not be required depending on
     existing of certain relations.
@@ -240,18 +241,29 @@ def register_configs():
     # if called without anything installed (eg during install hook)
     # just default to earliest supported release. configs dont get touched
    # till post-install, anyway.
-    release = os_release('cinder-common', base='folsom')
+    release = release or os_release('cinder-common', base='icehouse')
     configs = templating.OSConfigRenderer(templates_dir=TEMPLATES,
                                           openstack_release=release)
+    for cfg, rscs in resource_map().iteritems():
+        configs.register(cfg, rscs['contexts'])
+    return configs
 
-    confs = [CINDER_API_CONF,
-             CINDER_CONF,
-             HAPROXY_CONF]
 
+def resource_map(release=None):
+    """
+    Dynamically generate a map of resources that will be managed for a single
+    hook execution.
+    """
+    resource_map = deepcopy(BASE_RESOURCE_MAP)
+    if relation_ids('backup-backend'):
+        resource_map[CINDER_CONF]['services'].append('cinder-backup')
+        resource_map[ceph_config_file()]['services'].append('cinder-backup')
     if relation_ids('ceph'):
         # need to create this early, new peers will have a relation during
         # registration # before they've run the ceph hooks to create the
         # directory.
+        # !!! FIX: These side effects seem inappropriate for this method
         mkdir(os.path.dirname(CEPH_CONF))
         mkdir(os.path.dirname(ceph_config_file()))
@@ -263,18 +275,31 @@ def register_configs():
             open(ceph_config_file(), 'w').close()
         install_alternative(os.path.basename(CEPH_CONF),
                             CEPH_CONF, ceph_config_file())
-        confs.append(ceph_config_file())
-
-    for conf in confs:
-        configs.register(conf, CONFIG_FILES[conf]['hook_contexts'])
+    else:
+        resource_map.pop(ceph_config_file())
 
     if os.path.exists('/etc/apache2/conf-available'):
-        configs.register(APACHE_SITE_24_CONF,
-                         CONFIG_FILES[APACHE_SITE_24_CONF]['hook_contexts'])
+        resource_map.pop(APACHE_SITE_CONF)
     else:
-        configs.register(APACHE_SITE_CONF,
-                         CONFIG_FILES[APACHE_SITE_CONF]['hook_contexts'])
+        resource_map.pop(APACHE_SITE_24_CONF)
 
-    return configs
+    # Remove services from map which are not enabled by user config
+    for cfg in resource_map.keys():
+        resource_map[cfg]['services'] = \
+            filter_services(resource_map[cfg]['services'])
+    return resource_map
+
+
+def filter_services(svcs):
+    '''Remove services not enabled by user config from a list of services
+    @param svcs: List of services
+    @returns : List of enabled services
+    '''
+    return [s for s in svcs
+            if service_enabled(s.lstrip('cinder-')) or
+            not s.startswith('cinder')]
 
 
 def juju_log(msg):
@@ -324,18 +349,9 @@ def restart_map():
     :returns: dict: A dictionary mapping config file to lists of services
                     that should be restarted when file changes.
     '''
-    _map = []
-    for f, ctxt in CONFIG_FILES.iteritems():
-        svcs = []
-        for svc in ctxt['services']:
-            if svc.startswith('cinder-'):
-                if service_enabled(svc.split('-')[1]):
-                    svcs.append(svc)
-            else:
-                svcs.append(svc)
-        if svcs:
-            _map.append((f, svcs))
-    return OrderedDict(_map)
+    return OrderedDict([(cfg, v['services'])
+                        for cfg, v in resource_map().iteritems()
+                        if v['services']])
 
 
 def enabled_services():

test_cinder_utils.py

@@ -90,6 +90,10 @@ class TestCinderUtils(CharmTestCase):
     def setUp(self):
         super(TestCinderUtils, self).setUp(cinder_utils, TO_PATCH)
         self.config.side_effect = self.test_config.get_all
+        self.apache24_conf_dir = '/etc/apache2/conf-available'
+        self.charm_ceph_conf = '/var/lib/charm/cinder/ceph.conf'
+        self.ceph_conf = '/etc/ceph/ceph.conf'
+        self.cinder_conf = '/etc/cinder/cinder.conf'
 
     def svc_enabled(self, svc):
         return svc in self.test_config.get('enabled-services')
@@ -149,58 +153,184 @@ class TestCinderUtils(CharmTestCase):
             sorted(common + cinder_utils.API_PACKAGES +
                    cinder_utils.SCHEDULER_PACKAGES))
 
-    def test_services(self):
-        self.assertEquals(cinder_utils.services(),
-                          ['haproxy', 'cinder-backup', 'cinder-api',
-                           'cinder-volume', 'apache2', 'cinder-scheduler'])
+    @patch('cinder_utils.restart_map')
+    def test_services(self, restart_map):
+        restart_map.return_value = OrderedDict([
+            ('test_conf1', ['svc1']),
+            ('test_conf2', ['svc2', 'svc3', 'svc1']),
+        ])
+        self.assertEquals(cinder_utils.services(), ['svc2', 'svc3', 'svc1'])
 
-    def test_creates_restart_map_all_enabled(self):
-        'It creates correct restart map when all services enabled'
+    @patch('cinder_utils.service_enabled')
+    @patch('os.path.exists')
+    def test_creates_resource_map_all_enabled(self, path_exists,
+                                              service_enabled):
+        service_enabled.return_value = True
+        path_exists.return_value = True
+        self.ceph_config_file.return_value = self.charm_ceph_conf
+        self.relation_ids.return_value = []
         ex_map = OrderedDict([
             ('/etc/cinder/cinder.conf', ['cinder-api', 'cinder-volume',
-                                         'cinder-backup', 'cinder-scheduler',
-                                         'haproxy']),
+                                         'cinder-scheduler', 'haproxy']),
             ('/etc/cinder/api-paste.ini', ['cinder-api']),
-            ('/var/lib/charm/cinder/ceph.conf', ['cinder-volume',
-                                                 'cinder-backup']),
             ('/etc/haproxy/haproxy.cfg', ['haproxy']),
-            ('/etc/apache2/sites-available/openstack_https_frontend',
-             ['apache2']),
             ('/etc/apache2/sites-available/openstack_https_frontend.conf',
              ['apache2']),
         ])
-        self.assertEquals(cinder_utils.restart_map(), ex_map)
+        for cfg in ex_map.keys():
+            self.assertEquals(cinder_utils.resource_map()[cfg]['services'],
+                              ex_map[cfg])
 
     @patch('cinder_utils.service_enabled')
-    def test_creates_restart_map_no_api(self, service_enabled):
-        'It creates correct restart map with api disabled'
+    @patch('os.path.exists')
+    def test_creates_resource_map_no_api(self, path_exists,
+                                         service_enabled):
         service_enabled.side_effect = self.svc_enabled
         self.test_config.set('enabled-services', 'scheduler,volume')
+        path_exists.return_value = True
+        self.ceph_config_file.return_value = self.charm_ceph_conf
+        self.relation_ids.return_value = []
         ex_map = OrderedDict([
             ('/etc/cinder/cinder.conf', ['cinder-volume', 'cinder-scheduler',
                                          'haproxy']),
-            ('/var/lib/charm/cinder/ceph.conf', ['cinder-volume']),
+            ('/etc/cinder/api-paste.ini', []),
             ('/etc/haproxy/haproxy.cfg', ['haproxy']),
-            ('/etc/apache2/sites-available/openstack_https_frontend',
-             ['apache2']),
             ('/etc/apache2/sites-available/openstack_https_frontend.conf',
              ['apache2']),
         ])
-        self.assertEquals(cinder_utils.restart_map(), ex_map)
+        for cfg in ex_map.keys():
+            self.assertEquals(cinder_utils.resource_map()[cfg]['services'],
+                              ex_map[cfg])
 
     @patch('cinder_utils.service_enabled')
-    def test_creates_restart_map_only_api(self, service_enabled):
-        'It creates correct restart map with only api enabled'
+    @patch('os.path.exists')
+    def test_creates_resource_map_backup_backend(self, path_exists,
+                                                 service_enabled):
+        service_enabled.return_value = True
+        path_exists.return_value = True
+        self.ceph_config_file.return_value = self.charm_ceph_conf
+        self.relation_ids.side_effect = lambda x: {
+            'storage-backend': [],
+            'backup-backend': ['rid1'],
+            'ceph': []}[x]
+        self.assertTrue(
+            'cinder-backup' in
+            cinder_utils.resource_map()[self.cinder_conf]['services'])
+
+    @patch('cinder_utils.service_enabled')
+    @patch('os.path.exists')
+    def test_creates_resource_map_no_backup(self, path_exists,
+                                            service_enabled):
+        service_enabled.return_value = True
+        path_exists.return_value = True
+        self.ceph_config_file.return_value = self.charm_ceph_conf
+        self.relation_ids.side_effect = lambda x: {
+            'storage-backend': [],
+            'backup-backend': [],
+            'ceph': []}[x]
+        self.assertFalse(
+            'cinder-backup' in
+            cinder_utils.resource_map()[self.cinder_conf]['services'])
+
+    @patch('cinder_utils.service_enabled')
+    @patch('os.path.exists')
+    def test_creates_resource_map_no_ceph_conf(self, path_exists,
+                                               service_enabled):
+        service_enabled.return_value = True
+        path_exists.return_value = True
+        self.ceph_config_file.return_value = self.charm_ceph_conf
+        self.relation_ids.side_effect = lambda x: {
+            'storage-backend': [],
+            'backup-backend': [],
+            'ceph': []}[x]
+        self.assertFalse(self.charm_ceph_conf in
+                         cinder_utils.resource_map().keys())
+
+    @patch('cinder_utils.service_enabled')
+    @patch('os.path.exists')
+    def test_creates_resource_map_ceph_conf(self, path_exists,
+                                            service_enabled):
+        service_enabled.return_value = True
+        path_exists.return_value = True
+        self.ceph_config_file.return_value = self.charm_ceph_conf
+        self.relation_ids.side_effect = lambda x: {
+            'storage-backend': [],
+            'backup-backend': [],
+            'ceph': ['rid1']}[x]
+        self.assertTrue(self.charm_ceph_conf in
+                        cinder_utils.resource_map().keys())
+        self.mkdir.assert_has_calls(
+            [call('/etc/ceph'),
+             call('/var/lib/charm/cinder')]
+        )
+        self.install_alternative.assert_called_with(
+            'ceph.conf',
+            '/etc/ceph/ceph.conf',
+            self.charm_ceph_conf)
+
+    @patch('cinder_utils.service_enabled')
+    @patch('os.path.exists')
+    def test_creates_resource_map_old_apache(self, path_exists,
+                                             service_enabled):
+        service_enabled.return_value = True
+        path_exists.side_effect = lambda x: x not in [self.apache24_conf_dir]
+        self.ceph_config_file.return_value = self.charm_ceph_conf
+        self.relation_ids.side_effect = lambda x: {
+            'storage-backend': [],
+            'backup-backend': [],
+            'ceph': []}[x]
+        self.assertTrue(
+            '/etc/apache2/sites-available/openstack_https_frontend' in
+            cinder_utils.resource_map().keys())
+
+    @patch('cinder_utils.service_enabled')
+    @patch('os.path.exists')
+    def test_creates_resource_map_apache24(self, path_exists, service_enabled):
+        service_enabled.return_value = True
+        path_exists.side_effect = lambda x: x in [self.apache24_conf_dir]
+        self.ceph_config_file.return_value = self.charm_ceph_conf
+        self.relation_ids.side_effect = lambda x: {
+            'storage-backend': [],
+            'backup-backend': [],
+            'ceph': []}[x]
+        self.assertTrue(
+            '/etc/apache2/sites-available/openstack_https_frontend.conf' in
+            cinder_utils.resource_map().keys())
+
+    @patch('cinder_utils.service_enabled')
+    def test_filter_services_selective(self, service_enabled):
         service_enabled.side_effect = self.svc_enabled
-        self.test_config.set('enabled-services', 'api')
+        self.test_config.set('enabled-services', 'scheduler,volume')
+        self.assertEqual(
+            cinder_utils.filter_services(['cinder-api', 'cinder-volume',
+                                          'haproxy']),
+            ['cinder-volume', 'haproxy']
+        )
+
+    @patch('cinder_utils.service_enabled')
+    def test_filter_services_all(self, service_enabled):
+        service_enabled.return_value = True
+        self.test_config.set('enabled-services', 'scheduler,volume')
+        self.assertEqual(
+            cinder_utils.filter_services(['cinder-api', 'cinder-volume',
+                                          'haproxy']),
+            ['cinder-api', 'cinder-volume', 'haproxy']
+        )
+
+    @patch('cinder_utils.resource_map')
+    def test_restart_map(self, resource_map):
+        resource_map.return_value = OrderedDict([
+            ('/etc/testfile1.conf', {
+                'hook_contexts': ['dummyctxt1', 'dummyctxt2'],
+                'services': ['svc1'],
+            }),
+            ('/etc/testfile2.conf', {
+                'hook_contexts': ['dummyctxt1', 'dummyctxt3'],
+                'services': [],
+            }),
+        ])
         ex_map = OrderedDict([
-            ('/etc/cinder/cinder.conf', ['cinder-api', 'haproxy']),
-            ('/etc/cinder/api-paste.ini', ['cinder-api']),
-            ('/etc/haproxy/haproxy.cfg', ['haproxy']),
-            ('/etc/apache2/sites-available/openstack_https_frontend',
-             ['apache2']),
-            ('/etc/apache2/sites-available/openstack_https_frontend.conf',
-             ['apache2']),
+            ('/etc/testfile1.conf', ['svc1']),
         ])
         self.assertEquals(cinder_utils.restart_map(), ex_map)
@@ -455,12 +585,14 @@ class TestCinderUtils(CharmTestCase):
         cinder_utils.extend_lvm_volume_group('test', '/dev/sdb')
         _call.assert_called_with(['vgextend', 'test', '/dev/sdb'])
 
+    @patch.object(cinder_utils, 'enabled_services')
     @patch.object(cinder_utils, 'local_unit', lambda *args: 'unit/0')
     @patch.object(cinder_utils, 'uuid')
-    def test_migrate_database(self, mock_uuid):
+    def test_migrate_database(self, mock_uuid, mock_enabled_services):
         'It migrates database with cinder-manage'
         uuid = 'a-great-uuid'
         mock_uuid.uuid4.return_value = uuid
+        mock_enabled_services.return_value = ['svc1']
         rid = 'cluster:0'
         self.relation_ids.return_value = [rid]
         args = {'cinder-db-initialised': "unit/0-%s" % uuid}
@@ -468,60 +600,26 @@ class TestCinderUtils(CharmTestCase):
             cinder_utils.migrate_database()
         check_call.assert_called_with(['cinder-manage', 'db', 'sync'])
         self.relation_set.assert_called_with(relation_id=rid, **args)
+        self.service_restart.assert_called_with('svc1')
 
-    @patch('os.path.exists')
-    def test_register_configs_apache(self, exists):
-        exists.return_value = False
-        self.os_release.return_value = 'grizzly'
-        self.relation_ids.return_value = False
+    @patch.object(cinder_utils, 'resource_map')
+    def test_register_configs(self, resource_map):
+        resource_map.return_value = OrderedDict([
+            ('/etc/testfile1.conf', {
+                'contexts': ['dummyctxt1', 'dummyctxt2'],
+                'services': ['svc1'],
+            }),
+            ('/etc/testfile2.conf', {
+                'contexts': ['dummyctxt1', 'dummyctxt3'],
+                'services': [],
+            }),
+        ])
         configs = cinder_utils.register_configs()
-        calls = []
-        for conf in [cinder_utils.CINDER_API_CONF,
-                     cinder_utils.CINDER_CONF,
-                     cinder_utils.APACHE_SITE_CONF,
-                     cinder_utils.HAPROXY_CONF]:
-            calls.append(
-                call(conf,
-                     cinder_utils.CONFIG_FILES[conf]['hook_contexts'])
-            )
-        configs.register.assert_has_calls(calls, any_order=True)
-
-    @patch('os.path.exists')
-    def test_register_configs_apache24(self, exists):
-        exists.return_value = True
-        self.os_release.return_value = 'grizzly'
-        self.relation_ids.return_value = False
-        configs = cinder_utils.register_configs()
-        calls = []
-        for conf in [cinder_utils.CINDER_API_CONF,
-                     cinder_utils.CINDER_CONF,
-                     cinder_utils.APACHE_SITE_24_CONF,
-                     cinder_utils.HAPROXY_CONF]:
-            calls.append(
-                call(conf,
-                     cinder_utils.CONFIG_FILES[conf]['hook_contexts'])
-            )
-        configs.register.assert_has_calls(calls, any_order=True)
-
-    @patch('os.path.isdir')
-    @patch('os.path.exists')
-    def test_register_configs_ceph(self, exists, isdir):
-        exists.return_value = True
-        isdir.return_value = False
-        self.os_release.return_value = 'grizzly'
-        self.relation_ids.return_value = ['ceph:0']
-        self.ceph_config_file.return_value = '/var/lib/charm/cinder/ceph.conf'
-        configs = cinder_utils.register_configs()
-        calls = []
-        for conf in [cinder_utils.CINDER_API_CONF,
-                     cinder_utils.CINDER_CONF,
-                     cinder_utils.HAPROXY_CONF,
-                     cinder_utils.ceph_config_file()]:
-            calls.append(
-                call(conf,
-                     cinder_utils.CONFIG_FILES[conf]['hook_contexts'])
-            )
-        configs.register.assert_has_calls(calls, any_order=True)
+        calls = [
+            call('/etc/testfile1.conf', ['dummyctxt1', 'dummyctxt2']),
+            call('/etc/testfile2.conf', ['dummyctxt1', 'dummyctxt3']),
+        ]
+        configs.register.assert_has_calls(calls)
 
     def test_set_ceph_kludge(self):
         pass
@@ -626,6 +724,7 @@ class TestCinderUtils(CharmTestCase):
         ]
         self.assertEquals(write_file.call_args_list, expected)
 
+    @patch.object(cinder_utils, 'services')
     @patch.object(cinder_utils, 'git_src_dir')
     @patch.object(cinder_utils, 'service_restart')
     @patch.object(cinder_utils, 'render')
@@ -641,7 +740,8 @@
     @patch('os.symlink')
     def test_git_post_install(self, symlink, chmod, chown, grp, pwd, rmtree,
                               copytree, exists, join, pip_install, render,
-                              service_restart, git_src_dir):
+                              service_restart, git_src_dir, services):
+        services.return_value = ['svc1']
         projects_yaml = openstack_origin_git
         join.return_value = 'joined-string'
         cinder_utils.git_post_install(projects_yaml)
@@ -724,11 +824,7 @@ class TestCinderUtils(CharmTestCase):
                    templates_dir='joined-string'),
         ]
         self.assertEquals(render.call_args_list, expected)
-        expected = [
-            call('tgtd'), call('haproxy'), call('cinder-backup'),
-            call('cinder-api'), call('cinder-volume'), call('apache2'),
-            call('cinder-scheduler'),
-        ]
+        expected = [call('tgtd'), call('svc1')]
         self.assertEquals(service_restart.call_args_list, expected)
 
     @patch.object(cinder_utils, 'local_unit', lambda *args: 'unit/0')
@@ -742,8 +838,10 @@ class TestCinderUtils(CharmTestCase):
         cinder_utils.check_db_initialised()
         self.assertFalse(self.relation_set.called)
 
+    @patch.object(cinder_utils, 'enabled_services')
     @patch.object(cinder_utils, 'local_unit', lambda *args: 'unit/0')
-    def test_check_db_initialised(self):
+    def test_check_db_initialised(self, enabled_services):
+        enabled_services.return_value = ['svc1']
         self.relation_get.return_value = {}
         cinder_utils.check_db_initialised()
         self.assertFalse(self.relation_set.called)
@@ -753,6 +851,7 @@ class TestCinderUtils(CharmTestCase):
         cinder_utils.check_db_initialised()
         calls = [call(**{'cinder-db-initialised-echo': 'unit/1-1234'})]
         self.relation_set.assert_has_calls(calls)
+        self.service_restart.assert_called_with('svc1')
 
     @patch('subprocess.check_output')
     def test_log_lvm_info(self, _check):