Tidy ceph backend configuration
Drop generation of the upstart override file and of /etc/environment, and scrub any existing charm configuration in these locations from an existing install. These were required way back in the dawn of time, when ceph support in cinder was alpha/beta.

Provide a backend-specific configuration file path, allowing multiple ceph clusters to be used with a single cinder application.

Change-Id: I7adba0d35fb7406afa40f047b79a9ab51a6a333d
Closes-Bug: 1769196
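The backend-specific path is derived from the Juju application (service) name, so each cinder application renders its own copy of ceph.conf instead of relying on CEPH_ARGS and the system-wide /etc/ceph/ceph.conf. A minimal sketch of the idea, mirroring the helper added in the diff below (service_name() comes from charmhelpers; the application names in the comment are hypothetical):

    from charmhelpers.core.hookenv import service_name

    CHARM_CEPH_CONF = '/var/lib/charm/{}/ceph.conf'

    def ceph_config_file():
        # One file per application: e.g. applications deployed as
        # 'cinder-ceph-a' and 'cinder-ceph-b' would use
        # /var/lib/charm/cinder-ceph-a/ceph.conf and
        # /var/lib/charm/cinder-ceph-b/ceph.conf respectively,
        # each pointing at its own ceph cluster.
        return CHARM_CEPH_CONF.format(service_name())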
parent d6e2e76060
commit 3451c1c498
@@ -38,6 +38,8 @@ from charmhelpers.contrib.hahelpers.cluster import (
    determine_api_port,
)

CHARM_CEPH_CONF = '/var/lib/charm/{}/ceph.conf'


def enable_lvm():
    """Check whether the LVM backend should be configured
@@ -47,6 +49,10 @@ def enable_lvm():
    return block_device.lower() != 'none'


def ceph_config_file():
    return CHARM_CEPH_CONF.format(service_name())


class ImageServiceContext(OSContextGenerator):
    interfaces = ['image-service']
@@ -81,7 +87,8 @@ class CephContext(OSContextGenerator):
            # ensure_ceph_pool() creates pool based on service name.
            'rbd_pool': service,
            'rbd_user': service,
            'host': service
            'host': service,
            'rbd_ceph_conf': ceph_config_file()
        }
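For illustration: assuming an application deployed under the name 'mycinder' (the name the unit tests below use), the Icehouse-era variant of this context would now come out roughly as:

    {
        'volume_driver': 'cinder.volume.drivers.rbd.RBDDriver',
        'rbd_pool': 'mycinder',
        'rbd_user': 'mycinder',
        'host': 'mycinder',
        'rbd_ceph_conf': '/var/lib/charm/mycinder/ceph.conf',
    }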
@@ -14,6 +14,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import sys
import uuid
@@ -35,17 +36,18 @@ from cinder_utils import (
    services,
    service_enabled,
    service_restart,
    set_ceph_env_variables,
    CLUSTER_RES,
    CINDER_CONF,
    CINDER_API_CONF,
    ceph_config_file,
    setup_ipv6,
    check_local_db_actions_complete,
    filesystem_mounted,
    assess_status,
    scrub_old_style_ceph,
)

from cinder_contexts import ceph_config_file

from charmhelpers.core.hookenv import (
    Hooks,
    UnregisteredHookError,
@@ -403,7 +405,6 @@ def ceph_changed(relation_id=None):

    if is_request_complete(get_ceph_request()):
        log('Request complete')
        set_ceph_env_variables(service=service)
        CONFIGS.write(CINDER_CONF)
        CONFIGS.write(ceph_config_file())
        # Ensure that cinder-volume is restarted since only now can we
@@ -577,6 +578,7 @@ def upgrade_charm():
    for rel_id in relation_ids('amqp'):
        amqp_joined(relation_id=rel_id)
    update_nrpe_config()
    scrub_old_style_ceph()


@hooks.hook('storage-backend-relation-changed')
@@ -12,13 +12,17 @@
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import os
import re
import subprocess
import uuid

from copy import deepcopy
from collections import OrderedDict
from copy import copy
from tempfile import NamedTemporaryFile

from charmhelpers.core.strutils import (
    bytes_from_string
@@ -33,7 +37,6 @@ from charmhelpers.core.hookenv import (
    related_units,
    log,
    DEBUG,
    service_name,
)

from charmhelpers.fetch import (
@@ -107,6 +110,8 @@ from charmhelpers.core.decorators import (

import cinder_contexts

from cinder_contexts import ceph_config_file

COMMON_PACKAGES = [
    'apache2',
    'cinder-common',
@@ -141,7 +146,6 @@ CINDER_CONF_DIR = "/etc/cinder"
CINDER_CONF = '%s/cinder.conf' % CINDER_CONF_DIR
CINDER_API_CONF = '%s/api-paste.ini' % CINDER_CONF_DIR
CEPH_CONF = '/etc/ceph/ceph.conf'
CHARM_CEPH_CONF = '/var/lib/charm/{}/ceph.conf'

HAPROXY_CONF = '/etc/haproxy/haproxy.cfg'
APACHE_SITE_CONF = '/etc/apache2/sites-available/openstack_https_frontend'
@@ -175,9 +179,6 @@ def required_interfaces():
    return _interfaces


def ceph_config_file():
    return CHARM_CEPH_CONF.format(service_name())

# Map config files to hook contexts and services that will be associated
# with file in restart_on_changes()'s service map.
BASE_RESOURCE_MAP = OrderedDict([
@@ -692,17 +693,6 @@ def migrate_database(upgrade=False):
            relation_set(relation_id=r_id, **{CINDER_DB_INIT_RKEY: id})


def set_ceph_env_variables(service):
    # XXX: Horrid kludge to make cinder-volume use
    # a different ceph username than admin
    env = open('/etc/environment', 'r').read()
    if 'CEPH_ARGS' not in env:
        with open('/etc/environment', 'a') as out:
            out.write('CEPH_ARGS="--id %s"\n' % service)
    with open('/etc/init/cinder-volume.override', 'w') as out:
        out.write('env CEPH_ARGS="--id %s"\n' % service)


def do_openstack_upgrade(configs=None):
    """Perform an uprade of cinder. Takes care of upgrading
    packages, rewriting configs + database migration and
@@ -893,3 +883,21 @@ def disable_package_apache_site():
    """
    if os.path.exists(PACKAGE_CINDER_API_CONF):
        subprocess.check_call(['a2disconf', 'cinder-wsgi'])


def scrub_old_style_ceph():
    """Purge any legacy ceph configuration from install"""
    # NOTE: purge old override file - no longer needed
    if os.path.exists('/etc/init/cinder-volume.override'):
        os.remove('/etc/init/cinder-volume.override')
    # NOTE: purge any CEPH_ARGS data from /etc/environment
    env_file = '/etc/environment'
    ceph_match = re.compile("^CEPH_ARGS.*").search
    with open(env_file, 'r') as input_file:
        with NamedTemporaryFile(mode='w',
                                delete=False,
                                dir=os.path.dirname(env_file)) as outfile:
            for line in input_file:
                if not ceph_match(line):
                    print(line, end='', file=outfile)
    os.rename(outfile.name, input_file.name)
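scrub_old_style_ceph() removes the old cinder-volume upstart override and rewrites /etc/environment via a temporary file in the same directory, dropping only lines that begin with CEPH_ARGS before renaming the result into place. A rough before/after sketch (the PATH line is just an example of unrelated content that must survive):

    # /etc/environment as left by old charm versions
    PATH="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
    CEPH_ARGS="--id cinder"

    # /etc/environment after scrub_old_style_ceph()
    PATH="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"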
@@ -32,5 +32,6 @@ rbd_pool = {{ rbd_pool }}
host = {{ host }}
rbd_user = {{ rbd_user }}
volume_driver = {{ ceph_volume_driver }}
rbd_ceph_conf = {{ rbd_ceph_conf }}
{% endif %}
{% endif %}
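Rendered for a hypothetical application named 'mycinder', the ceph section of cinder.conf would now carry the per-backend path alongside the existing options, roughly:

    volume_driver = cinder.volume.drivers.rbd.RBDDriver
    rbd_pool = mycinder
    rbd_user = mycinder
    host = mycinder
    rbd_ceph_conf = /var/lib/charm/mycinder/ceph.conf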
@@ -78,6 +78,7 @@ class TestCinderContext(CharmTestCase):
            {'volume_driver': 'cinder.volume.driver.RBDDriver',
             'rbd_pool': service,
             'rbd_user': service,
             'rbd_ceph_conf': '/var/lib/charm/mycinder/ceph.conf',
             'host': service})

    def test_ceph_related_icehouse(self):
@@ -90,6 +91,7 @@ class TestCinderContext(CharmTestCase):
            {'volume_driver': 'cinder.volume.drivers.rbd.RBDDriver',
             'rbd_pool': service,
             'rbd_user': service,
             'rbd_ceph_conf': '/var/lib/charm/mycinder/ceph.conf',
             'host': service})

    def test_ceph_related_ocata(self):
@@ -102,6 +104,7 @@ class TestCinderContext(CharmTestCase):
            {'ceph_volume_driver': 'cinder.volume.drivers.rbd.RBDDriver',
             'rbd_pool': service,
             'rbd_user': service,
             'rbd_ceph_conf': '/var/lib/charm/mycinder/ceph.conf',
             'host': service})

    @patch.object(utils, 'service_enabled')
@@ -53,7 +53,6 @@ TO_PATCH = [
    'register_configs',
    'restart_map',
    'service_enabled',
    'set_ceph_env_variables',
    'CONFIGS',
    'CLUSTER_RES',
    'ceph_config_file',
@@ -115,17 +114,21 @@ class TestChangedHooks(CharmTestCase):
        super(TestChangedHooks, self).setUp(hooks, TO_PATCH)
        self.config.side_effect = self.test_config.get

    @patch.object(hooks, 'scrub_old_style_ceph')
    @patch.object(hooks, 'amqp_joined')
    def test_upgrade_charm_no_amqp(self, _joined):
    def test_upgrade_charm_no_amqp(self, _joined, _scrub_old_style_ceph):
        self.relation_ids.return_value = []
        hooks.hooks.execute(['hooks/upgrade-charm'])
        _joined.assert_not_called()
        _scrub_old_style_ceph.assert_called_once_with()

    @patch.object(hooks, 'scrub_old_style_ceph')
    @patch.object(hooks, 'amqp_joined')
    def test_upgrade_charm_with_amqp(self, _joined):
    def test_upgrade_charm_with_amqp(self, _joined, _scrub_old_style_ceph):
        self.relation_ids.return_value = ['amqp:1']
        hooks.hooks.execute(['hooks/upgrade-charm'])
        _joined.assert_called_with(relation_id='amqp:1')
        _scrub_old_style_ceph.assert_called_once_with()

    @patch.object(hooks, 'configure_https')
    @patch.object(hooks, 'config_value_changed')
@@ -509,7 +512,6 @@ class TestJoinedHooks(CharmTestCase):
        for c in [call('/var/lib/charm/cinder/ceph.conf'),
                  call('/etc/cinder/cinder.conf')]:
            self.assertNotIn(c, self.CONFIGS.write.call_args_list)
        self.assertFalse(self.set_ceph_env_variables.called)

    @patch('charmhelpers.core.host.service')
    @patch("cinder_hooks.relation_get", autospec=True)
@@ -529,7 +531,6 @@ class TestJoinedHooks(CharmTestCase):
        for c in [call('/var/lib/charm/cinder/ceph.conf'),
                  call('/etc/cinder/cinder.conf')]:
            self.assertIn(c, self.CONFIGS.write.call_args_list)
        self.set_ceph_env_variables.assert_called_with(service='cinder')
        self.service_restart.assert_called_with('cinder-volume')

    def test_ceph_changed_broker_nonzero_rc(self):
@@ -545,7 +546,6 @@ class TestJoinedHooks(CharmTestCase):
        for c in [call('/var/lib/charm/cinder/ceph.conf'),
                  call('/etc/cinder/cinder.conf')]:
            self.assertNotIn(c, self.CONFIGS.write.call_args_list)
        self.assertFalse(self.set_ceph_env_variables.called)

    def test_ceph_changed_no_keys(self):
        'It ensures ceph assets created on ceph changed'
@@ -47,7 +47,6 @@ TO_PATCH = [
    'configure_lvm_storage',
    'register_configs',
    'service_enabled',
    'set_ceph_env_variables',
    'CONFIGS',
    'CLUSTER_RES',
    # charmhelpers.core.hookenv