[hopem] Added support for ceph-client logging to syslog
commit 6163ae5093
@@ -40,8 +40,7 @@ options:
     type: boolean
     default: False
     description: |
-      By default, all services will log into their corresponding log files.
-      Setting this to True will force all services to log to the syslog.
+      If set to True, supporting services will log to syslog.
   # HA configuration settings
   vip:
     type: string
@@ -29,6 +29,7 @@ from charmhelpers.contrib.hahelpers.cluster import (
     determine_apache_port,
     determine_api_port,
     https,
+    is_clustered
 )

 from charmhelpers.contrib.hahelpers.apache import (
@@ -198,6 +199,7 @@ class AMQPContext(OSContextGenerator):

         ctxt = {}
         for rid in relation_ids('amqp'):
+            ha_vip_only = False
             for unit in related_units(rid):
                 if relation_get('clustered', rid=rid, unit=unit):
                     ctxt['clustered'] = True
@@ -212,16 +214,18 @@ class AMQPContext(OSContextGenerator):
                                                       unit=unit),
                     'rabbitmq_virtual_host': vhost,
                 })
+                if relation_get('ha_queues', rid=rid, unit=unit) is not None:
+                    ctxt['rabbitmq_ha_queues'] = True
+
+                ha_vip_only = relation_get('ha-vip-only',
+                                           rid=rid, unit=unit) is not None
+
                 if context_complete(ctxt):
                     # Sufficient information found = break out!
                     break
             # Used for active/active rabbitmq >= grizzly
-            if ('clustered' not in ctxt or relation_get('ha-vip-only') == 'True') and \
-                    len(related_units(rid)) > 1:
-                if relation_get('ha_queues'):
-                    ctxt['rabbitmq_ha_queues'] = relation_get('ha_queues')
-                else:
-                    ctxt['rabbitmq_ha_queues'] = False
+            if ('clustered' not in ctxt or ha_vip_only) \
+                    and len(related_units(rid)) > 1:
                 rabbitmq_hosts = []
                 for unit in related_units(rid):
                     rabbitmq_hosts.append(relation_get('private-address',
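Note: the ha_vip_only flag introduced above is driven purely by the presence of the 'ha-vip-only' key on the relation, not by its value. A minimal standalone sketch of that pattern (the plain dict stands in for what relation_get() would return for a unit):

    # Sketch: presence-vs-value test, mirroring
    # `relation_get('ha-vip-only', rid=rid, unit=unit) is not None`.
    def ha_vip_only_set(settings):
        return settings.get('ha-vip-only') is not None

    print(ha_vip_only_set({'ha-vip-only': 'False'}))  # True  - key present
    print(ha_vip_only_set({}))                        # False - key absent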
@@ -240,10 +244,13 @@ class CephContext(OSContextGenerator):
         '''This generates context for /etc/ceph/ceph.conf templates'''
         if not relation_ids('ceph'):
             return {}
+
         log('Generating template context for ceph')
+
         mon_hosts = []
         auth = None
         key = None
+        use_syslog = str(config('use-syslog')).lower()
         for rid in relation_ids('ceph'):
             for unit in related_units(rid):
                 mon_hosts.append(relation_get('private-address', rid=rid,
@@ -255,6 +262,7 @@ class CephContext(OSContextGenerator):
             'mon_hosts': ' '.join(mon_hosts),
             'auth': auth,
             'key': key,
+            'use_syslog': use_syslog
         }

         if not os.path.isdir('/etc/ceph'):
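Note: use-syslog is a boolean charm option, but ceph.conf expects the lowercase strings 'true'/'false', which is why the context stores str(config('use-syslog')).lower(). For example:

    # The boolean option becomes a lowercase string for the template.
    for value in (True, False):
        print(str(value).lower())   # -> 'true', 'false'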
@@ -391,7 +399,7 @@ class ApacheSSLContext(OSContextGenerator):
         return ctxt


-class NeutronContext(object):
+class NeutronContext(OSContextGenerator):
     interfaces = []

     @property
@@ -452,6 +460,22 @@ class NeutronContext(object):

         return nvp_ctxt

+    def neutron_ctxt(self):
+        if https():
+            proto = 'https'
+        else:
+            proto = 'http'
+        if is_clustered():
+            host = config('vip')
+        else:
+            host = unit_get('private-address')
+        url = '%s://%s:%s' % (proto, host, '9696')
+        ctxt = {
+            'network_manager': self.network_manager,
+            'neutron_url': url,
+        }
+        return ctxt
+
     def __call__(self):
         self._ensure_packages()

@@ -461,7 +485,7 @@ class NeutronContext(object):
         if not self.plugin:
             return {}

-        ctxt = {'network_manager': self.network_manager}
+        ctxt = self.neutron_ctxt()

         if self.plugin == 'ovs':
             ctxt.update(self.ovs_ctxt())
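Note: the new neutron_ctxt() centralises how the neutron API endpoint is built: the VIP when the service is clustered, otherwise the unit's own address, over https when https() reports TLS is in use. A standalone sketch of the same selection logic (the parameters stand in for https(), is_clustered(), config('vip') and unit_get('private-address')):

    # Sketch of the endpoint selection done in NeutronContext.neutron_ctxt().
    def neutron_url(https_enabled, clustered, vip, unit_address, port='9696'):
        proto = 'https' if https_enabled else 'http'
        host = vip if clustered else unit_address
        return '%s://%s:%s' % (proto, host, port)

    print(neutron_url(False, False, None, '10.0.0.5'))        # http://10.0.0.5:9696
    print(neutron_url(True, True, '10.0.0.100', '10.0.0.5'))  # https://10.0.0.100:9696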
@@ -493,41 +517,7 @@ class OSConfigFlagContext(OSContextGenerator):
         if not config_flags:
             return {}

-        if config_flags.find('==') >= 0:
-            log("config_flags is not in expected format (key=value)",
-                level=ERROR)
-            raise OSContextError
-
-        # strip the following from each value.
-        post_strippers = ' ,'
-        # we strip any leading/trailing '=' or ' ' from the string then
-        # split on '='.
-        split = config_flags.strip(' =').split('=')
-        limit = len(split)
-        flags = {}
-        for i in xrange(0, limit - 1):
-            current = split[i]
-            next = split[i + 1]
-            vindex = next.rfind(',')
-            if (i == limit - 2) or (vindex < 0):
-                value = next
-            else:
-                value = next[:vindex]
-
-            if i == 0:
-                key = current
-            else:
-                # if this not the first entry, expect an embedded key.
-                index = current.rfind(',')
-                if index < 0:
-                    log("invalid config value(s) at index %s" % (i),
-                        level=ERROR)
-                    raise OSContextError
-                key = current[index + 1:]
-
-            # Add to collection.
-            flags[key.strip(post_strippers)] = value.rstrip(post_strippers)
-
+        flags = config_flags_parser(config_flags)
         return {'user_config_flags': flags}

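Note: the hand-rolled parser above is replaced by a call to config_flags_parser(). Judging from the removed code, the input is a comma-separated list of key=value pairs; the simplified stand-in below (not the charm-helpers implementation, and ignoring values that themselves contain commas) shows the expected mapping:

    # Simplified stand-in for config_flags_parser().
    def parse_config_flags(config_flags):
        flags = {}
        for pair in config_flags.split(','):
            if '=' not in pair:
                continue
            key, value = pair.split('=', 1)
            flags[key.strip()] = value.strip()
        return flags

    print(parse_config_flags('ldap_timeout=30,ldap_user=admin'))
    # -> {'ldap_timeout': '30', 'ldap_user': 'admin'}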
@@ -18,6 +18,22 @@ def headers_package():
     return 'linux-headers-%s' % kver


+def kernel_version():
+    """ Retrieve the current major kernel version as a tuple e.g. (3, 13) """
+    kver = check_output(['uname', '-r']).strip()
+    kver = kver.split('.')
+    return (int(kver[0]), int(kver[1]))
+
+
+def determine_dkms_package():
+    """ Determine which DKMS package should be used based on kernel version """
+    # NOTE: 3.13 kernels have support for GRE and VXLAN native
+    if kernel_version() >= (3, 13):
+        return []
+    else:
+        return ['openvswitch-datapath-dkms']
+
+
 # legacy
 def quantum_plugins():
     from charmhelpers.contrib.openstack import context
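Note: kernel_version() and determine_dkms_package() let the plugin package lists below drop the unconditional openvswitch-datapath-dkms dependency on kernels that already ship GRE/VXLAN support natively. A rough standalone illustration (the kernel tuples and header package names here are made-up examples):

    # On >= 3.13 kernels the DKMS module simply drops out of the list.
    def dkms_packages(kernel):
        return [] if kernel >= (3, 13) else ['openvswitch-datapath-dkms']

    print(['linux-headers-3.13.0-24-generic'] + dkms_packages((3, 13)))  # headers only
    print(['linux-headers-3.2.0-60-generic'] + dkms_packages((3, 2)))    # headers + dkms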
@@ -32,7 +48,7 @@ def quantum_plugins():
                                         database=config('neutron-database'),
                                         relation_prefix='neutron')],
             'services': ['quantum-plugin-openvswitch-agent'],
-            'packages': [[headers_package(), 'openvswitch-datapath-dkms'],
+            'packages': [[headers_package()] + determine_dkms_package(),
                          ['quantum-plugin-openvswitch-agent']],
             'server_packages': ['quantum-server',
                                 'quantum-plugin-openvswitch'],
@@ -57,7 +73,8 @@ def quantum_plugins():

 def neutron_plugins():
     from charmhelpers.contrib.openstack import context
-    return {
+    release = os_release('nova-common')
+    plugins = {
         'ovs': {
             'config': '/etc/neutron/plugins/openvswitch/'
                       'ovs_neutron_plugin.ini',
@@ -68,8 +85,8 @@ def neutron_plugins():
                                         database=config('neutron-database'),
                                         relation_prefix='neutron')],
             'services': ['neutron-plugin-openvswitch-agent'],
-            'packages': [[headers_package(), 'openvswitch-datapath-dkms'],
-                         ['quantum-plugin-openvswitch-agent']],
+            'packages': [[headers_package()] + determine_dkms_package(),
+                         ['neutron-plugin-openvswitch-agent']],
             'server_packages': ['neutron-server',
                                 'neutron-plugin-openvswitch'],
             'server_services': ['neutron-server']
@@ -89,6 +106,13 @@ def neutron_plugins():
             'server_services': ['neutron-server']
         }
     }
+    # NOTE: patch in ml2 plugin for icehouse onwards
+    if release >= 'icehouse':
+        plugins['ovs']['config'] = '/etc/neutron/plugins/ml2/ml2_conf.ini'
+        plugins['ovs']['driver'] = 'neutron.plugins.ml2.plugin.Ml2Plugin'
+        plugins['ovs']['server_packages'] = ['neutron-server',
+                                             'neutron-plugin-ml2']
+    return plugins


 def neutron_plugin_attribute(plugin, attr, net_manager=None):
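Note: the release >= 'icehouse' test works because OpenStack codenames happen to sort alphabetically in release order (essex, folsom, grizzly, havana, icehouse, ...), so a plain string comparison is enough to gate the ml2 switch-over:

    for release in ('grizzly', 'havana', 'icehouse', 'juno'):
        print('%s %s' % (release, release >= 'icehouse'))
    # grizzly False, havana False, icehouse True, juno True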
@@ -1,13 +0,0 @@
-###############################################################################
-# [ WARNING ]
-# cinder configuration file maintained by Juju
-# local changes may be overwritten.
-###############################################################################
-{% if auth -%}
-[global]
-auth_supported = {{ auth }}
-keyring = /etc/ceph/$cluster.$name.keyring
-mon host = {{ mon_hosts }}
-{% endif -%}
-
-
@@ -65,6 +65,9 @@ SWIFT_CODENAMES = OrderedDict([
     ('1.10.0', 'havana'),
     ('1.9.1', 'havana'),
     ('1.9.0', 'havana'),
+    ('1.13.0', 'icehouse'),
+    ('1.12.0', 'icehouse'),
+    ('1.11.0', 'icehouse'),
 ])

 DEFAULT_LOOPBACK_SIZE = '5G'
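Note: with these entries, swift package versions 1.11.0 through 1.13.0 resolve to icehouse when the installed version is mapped back to a codename. An abridged lookup example using only values shown in the hunk above:

    from collections import OrderedDict

    SWIFT_CODENAMES = OrderedDict([
        ('1.10.0', 'havana'),
        ('1.13.0', 'icehouse'),
        ('1.12.0', 'icehouse'),
        ('1.11.0', 'icehouse'),
    ])

    print(SWIFT_CODENAMES.get('1.13.0'))  # icehouse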
@@ -420,19 +423,19 @@ def get_hostname(address, fqdn=True):
     Resolves hostname for given IP, or returns the input
     if it is already a hostname.
     """
-    if not is_ip(address):
-        return address
-
-    try:
-        import dns.reversename
-    except ImportError:
-        apt_install('python-dnspython')
-        import dns.reversename
-
-    rev = dns.reversename.from_address(address)
-    result = ns_query(rev)
-    if not result:
-        return None
+    if is_ip(address):
+        try:
+            import dns.reversename
+        except ImportError:
+            apt_install('python-dnspython')
+            import dns.reversename
+
+        rev = dns.reversename.from_address(address)
+        result = ns_query(rev)
+        if not result:
+            return None
+    else:
+        result = address

     if fqdn:
         # strip trailing .
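Note: the reworked get_hostname() no longer returns early for values that are already hostnames; both branches now feed the shared fqdn handling at the bottom of the function. A minimal sketch of the new branch behaviour (is_ip and the reverse lookup are passed in as stand-ins for charm-helpers' is_ip() and the dnspython query):

    # IPs go through reverse DNS; hostnames fall through unchanged.
    def resolve_result(address, is_ip, resolve_ptr):
        if is_ip(address):
            result = resolve_ptr(address)
            if not result:
                return None        # IP with no PTR record
        else:
            result = address       # already a hostname
        return result

    print(resolve_result('myhost.example.com', lambda a: False, lambda a: None))
    print(resolve_result('10.0.0.5', lambda a: True, lambda a: None))  # None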
@@ -49,6 +49,9 @@ CEPH_CONF = """[global]
 auth supported = {auth}
 keyring = {keyring}
 mon host = {mon_hosts}
+log to syslog = {use_syslog}
+err to syslog = {use_syslog}
+clog to syslog = {use_syslog}
 """

@@ -194,7 +197,7 @@ def get_ceph_nodes():
     return hosts


-def configure(service, key, auth):
+def configure(service, key, auth, use_syslog):
     ''' Perform basic configuration of Ceph '''
     create_keyring(service, key)
     create_key_file(service, key)
@@ -202,7 +205,8 @@ def configure(service, key, auth):
     with open('/etc/ceph/ceph.conf', 'w') as ceph_conf:
         ceph_conf.write(CEPH_CONF.format(auth=auth,
                                          keyring=_keyring_path(service),
-                                         mon_hosts=",".join(map(str, hosts))))
+                                         mon_hosts=",".join(map(str, hosts)),
+                                         use_syslog=use_syslog))
     modprobe('rbd')

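Note: with the three syslog keys added to CEPH_CONF and the extra use_syslog argument threaded into configure(), the rendered /etc/ceph/ceph.conf now carries the log/err/clog switches. A quick render with sample values (the keyring path and monitor addresses below are illustrative, not what _keyring_path() or the relation necessarily return):

    CEPH_CONF = """[global]
    auth supported = {auth}
    keyring = {keyring}
    mon host = {mon_hosts}
    log to syslog = {use_syslog}
    err to syslog = {use_syslog}
    clog to syslog = {use_syslog}
    """

    print(CEPH_CONF.format(auth='cephx',
                           keyring='/etc/ceph/ceph.myservice.keyring',
                           mon_hosts='10.0.0.1,10.0.0.2',
                           use_syslog='true'))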
@@ -97,6 +97,29 @@ def apt_install(packages, options=None, fatal=False):
         subprocess.call(cmd, env=env)


+def apt_upgrade(options=None, fatal=False, dist=False):
+    """Upgrade all packages"""
+    if options is None:
+        options = ['--option=Dpkg::Options::=--force-confold']
+
+    cmd = ['apt-get', '--assume-yes']
+    cmd.extend(options)
+    if dist:
+        cmd.append('dist-upgrade')
+    else:
+        cmd.append('upgrade')
+    log("Upgrading with options: {}".format(options))
+
+    env = os.environ.copy()
+    if 'DEBIAN_FRONTEND' not in env:
+        env['DEBIAN_FRONTEND'] = 'noninteractive'
+
+    if fatal:
+        subprocess.check_call(cmd, env=env)
+    else:
+        subprocess.call(cmd, env=env)
+
+
 def apt_update(fatal=False):
     """Update local apt cache"""
     cmd = ['apt-get', 'update']
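Note: apt_upgrade() follows the same conventions as apt_install() above: non-interactive, --force-confold unless other options are supplied, and dist=True switches to dist-upgrade. A typical call from an upgrade hook might look like this (import path assumed to be the same charmhelpers.fetch module these helpers live in):

    from charmhelpers.fetch import apt_update, apt_upgrade

    apt_update(fatal=True)
    apt_upgrade(fatal=True, dist=True)   # runs 'apt-get --assume-yes ... dist-upgrade'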
@@ -135,6 +158,10 @@ def apt_hold(packages, fatal=False):


 def add_source(source, key=None):
+    if source is None:
+        log('Source is not present. Skipping')
+        return
+
     if (source.startswith('ppa:') or
             source.startswith('http') or
             source.startswith('deb ') or
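Note: the new guard turns add_source(None) into a logged no-op instead of an AttributeError when the string methods are called on None. A sketch of the effect:

    # Sketch of the guard's behaviour for a missing source.
    def add_source_sketch(source, key=None):
        if source is None:
            print('Source is not present. Skipping')
            return
        if source.startswith('ppa:'):
            print('would add PPA: %s' % source)

    add_source_sketch(None)                  # skipped, no exception
    add_source_sketch('ppa:example/stable')  # hypothetical PPA name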
@@ -1,5 +1,7 @@
 import os
 import urllib2
+import urlparse
+
 from charmhelpers.fetch import (
     BaseFetchHandler,
     UnhandledSource
@@ -24,6 +26,19 @@ class ArchiveUrlFetchHandler(BaseFetchHandler):
     def download(self, source, dest):
         # propogate all exceptions
         # URLError, OSError, etc
+        proto, netloc, path, params, query, fragment = urlparse.urlparse(source)
+        if proto in ('http', 'https'):
+            auth, barehost = urllib2.splituser(netloc)
+            if auth is not None:
+                source = urlparse.urlunparse((proto, barehost, path, params, query, fragment))
+                username, password = urllib2.splitpasswd(auth)
+                passman = urllib2.HTTPPasswordMgrWithDefaultRealm()
+                # Realm is set to None in add_password to force the username and password
+                # to be used whatever the realm
+                passman.add_password(None, source, username, password)
+                authhandler = urllib2.HTTPBasicAuthHandler(passman)
+                opener = urllib2.build_opener(authhandler)
+                urllib2.install_opener(opener)
         response = urllib2.urlopen(source)
         try:
             with open(dest, 'w') as dest_file:
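Note: download() now accepts URLs with credentials embedded in the netloc (user:password@host); they are stripped out of the URL and installed as an HTTP basic-auth handler. The splitting step, demonstrated with the same Python 2 urlparse/urllib2 calls and an example URL:

    import urllib2
    import urlparse

    source = 'https://user:secret@archive.example.com/files/payload.tgz'
    proto, netloc, path, params, query, fragment = urlparse.urlparse(source)
    auth, barehost = urllib2.splituser(netloc)       # 'user:secret', 'archive.example.com'
    username, password = urllib2.splitpasswd(auth)   # 'user', 'secret'
    print(urlparse.urlunparse((proto, barehost, path, params, query, fragment)))
    # -> https://archive.example.com/files/payload.tgz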
@@ -3,10 +3,14 @@
 # glance configuration file maintained by Juju
 # local changes may be overwritten.
 ###############################################################################
-{% if auth %}
 [global]
+{% if auth -%}
 auth_supported = {{ auth }}
 #keyring = /etc/ceph/$cluster.$name.keyring
 keyring = /etc/ceph/ceph.$name.keyring
 mon host = {{ mon_hosts }}
-{% endif %}
+{% endif -%}
+log to syslog = {{ use_syslog }}
+err to syslog = {{ use_syslog }}
+clog to syslog = {{ use_syslog }}
+
@@ -12,7 +12,6 @@ sql_connection = mysql://{{ database_user }}:{{ database_password }}@{{ database
 sql_idle_timeout = 3600
 api_limit_max = 1000
 limit_param_default = 25
-use_syslog = False

 {% if auth_host %}
 [paste_deploy]