Liberty changes

1. Synced charmhelpers
2. Removed neutron-api-plugin relation
3. Added subordinate relation with neutron-api
4. Added relation with keystone
5. Added plumgrid.ini

parent 973df51202
commit 127e2a07a2
@@ -51,7 +51,8 @@ class AmuletDeployment(object):
         if 'units' not in this_service:
             this_service['units'] = 1

-        self.d.add(this_service['name'], units=this_service['units'])
+        self.d.add(this_service['name'], units=this_service['units'],
+                   constraints=this_service.get('constraints'))

         for svc in other_services:
             if 'location' in svc:
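
Example — a minimal sketch of how a test's service dicts could carry the new
optional 'constraints' key, which _add_services() now passes straight through
to amulet's Deployment.add() (service names and constraint values here are
hypothetical, not taken from this charm's tests):

    # Hypothetical amulet test setup; 'constraints' is simply forwarded.
    this_service = {'name': 'neutron-api-plumgrid',
                    'constraints': {'mem': '4G'}}
    other_services = [
        {'name': 'neutron-api'},            # no constraints: passes None
        {'name': 'keystone', 'units': 1},
    ]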
@@ -64,7 +65,8 @@ class AmuletDeployment(object):
             if 'units' not in svc:
                 svc['units'] = 1

-            self.d.add(svc['name'], charm=branch_location, units=svc['units'])
+            self.d.add(svc['name'], charm=branch_location, units=svc['units'],
+                       constraints=svc.get('constraints'))

     def _add_relations(self, relations):
         """Add all of the relations for the services."""
@@ -14,17 +14,25 @@
 # You should have received a copy of the GNU Lesser General Public License
 # along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.

-import amulet
-import ConfigParser
-import distro_info
 import io
+import json
 import logging
 import os
 import re
-import six
+import socket
+import subprocess
 import sys
 import time
-import urlparse
+import uuid
+
+import amulet
+import distro_info
+import six
+from six.moves import configparser
+if six.PY3:
+    from urllib import parse as urlparse
+else:
+    import urlparse


 class AmuletUtils(object):
@@ -108,7 +116,7 @@ class AmuletUtils(object):
         # /!\ DEPRECATION WARNING (beisner):
         # New and existing tests should be rewritten to use
         # validate_services_by_name() as it is aware of init systems.
-        self.log.warn('/!\\ DEPRECATION WARNING: use '
+        self.log.warn('DEPRECATION WARNING: use '
                       'validate_services_by_name instead of validate_services '
                       'due to init system differences.')

@@ -142,19 +150,23 @@ class AmuletUtils(object):

         for service_name in services_list:
             if (self.ubuntu_releases.index(release) >= systemd_switch or
-                    service_name == "rabbitmq-server"):
-                # init is systemd
+                    service_name in ['rabbitmq-server', 'apache2']):
+                # init is systemd (or regular sysv)
                 cmd = 'sudo service {} status'.format(service_name)
+                output, code = sentry_unit.run(cmd)
+                service_running = code == 0
             elif self.ubuntu_releases.index(release) < systemd_switch:
                 # init is upstart
                 cmd = 'sudo status {}'.format(service_name)
+                output, code = sentry_unit.run(cmd)
+                service_running = code == 0 and "start/running" in output

-            output, code = sentry_unit.run(cmd)
             self.log.debug('{} `{}` returned '
                            '{}'.format(sentry_unit.info['unit_name'],
                                        cmd, code))
-            if code != 0:
-                return "command `{}` returned {}".format(cmd, str(code))
+            if not service_running:
+                return u"command `{}` returned {} {}".format(
+                    cmd, output, str(code))
         return None

     def _get_config(self, unit, filename):
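
Example — the same per-init-system decision the loop above makes, sketched as
a standalone helper (a hedged illustration, not code from this sync;
'systemd_switch' is the index of the first systemd release in
self.ubuntu_releases):

    def is_running(release_index, systemd_switch, service, sentry_unit):
        # systemd (or sysv-managed rabbitmq-server/apache2): exit code only.
        if (release_index >= systemd_switch or
                service in ['rabbitmq-server', 'apache2']):
            output, code = sentry_unit.run(
                'sudo service {} status'.format(service))
            return code == 0
        # upstart: the exit code alone is not enough; check the output too.
        output, code = sentry_unit.run('sudo status {}'.format(service))
        return code == 0 and 'start/running' in output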
@@ -164,7 +176,7 @@ class AmuletUtils(object):
         # NOTE(beisner):  by default, ConfigParser does not handle options
         # with no value, such as the flags used in the mysql my.cnf file.
         # https://bugs.python.org/issue7005
-        config = ConfigParser.ConfigParser(allow_no_value=True)
+        config = configparser.ConfigParser(allow_no_value=True)
         config.readfp(io.StringIO(file_contents))
         return config

@@ -259,33 +271,52 @@ class AmuletUtils(object):
         """Get last modification time of directory."""
         return sentry_unit.directory_stat(directory)['mtime']

-    def _get_proc_start_time(self, sentry_unit, service, pgrep_full=False):
-        """Get process' start time.
+    def _get_proc_start_time(self, sentry_unit, service, pgrep_full=None):
+        """Get start time of a process based on the last modification time
+           of the /proc/pid directory.

-           Determine start time of the process based on the last modification
-           time of the /proc/pid directory. If pgrep_full is True, the process
-           name is matched against the full command line.
-        """
-        if pgrep_full:
-            cmd = 'pgrep -o -f {}'.format(service)
-        else:
-            cmd = 'pgrep -o {}'.format(service)
-        cmd = cmd + ' | grep -v pgrep || exit 0'
-        cmd_out = sentry_unit.run(cmd)
-        self.log.debug('CMDout: ' + str(cmd_out))
-        if cmd_out[0]:
-            self.log.debug('Pid for %s %s' % (service, str(cmd_out[0])))
-            proc_dir = '/proc/{}'.format(cmd_out[0].strip())
-            return self._get_dir_mtime(sentry_unit, proc_dir)
+        :sentry_unit: The sentry unit to check for the service on
+        :service: service name to look for in process table
+        :pgrep_full: [Deprecated] Use full command line search mode with pgrep
+        :returns: epoch time of service process start
+        :param commands: list of bash commands
+        :param sentry_units: list of sentry unit pointers
+        :returns: None if successful; Failure message otherwise
+        """
+        if pgrep_full is not None:
+            # /!\ DEPRECATION WARNING (beisner):
+            # No longer implemented, as pidof is now used instead of pgrep.
+            # https://bugs.launchpad.net/charm-helpers/+bug/1474030
+            self.log.warn('DEPRECATION WARNING: pgrep_full bool is no '
+                          'longer implemented re: lp 1474030.')
+
+        pid_list = self.get_process_id_list(sentry_unit, service)
+        pid = pid_list[0]
+        proc_dir = '/proc/{}'.format(pid)
+        self.log.debug('Pid for {} on {}: {}'.format(
+            service, sentry_unit.info['unit_name'], pid))
+
+        return self._get_dir_mtime(sentry_unit, proc_dir)

     def service_restarted(self, sentry_unit, service, filename,
-                          pgrep_full=False, sleep_time=20):
+                          pgrep_full=None, sleep_time=20):
         """Check if service was restarted.

         Compare a service's start time vs a file's last modification time
         (such as a config file for that service) to determine if the service
         has been restarted.
         """
+        # /!\ DEPRECATION WARNING (beisner):
+        # This method is prone to races in that no before-time is known.
+        # Use validate_service_config_changed instead.
+
+        # NOTE(beisner) pgrep_full is no longer implemented, as pidof is now
+        # used instead of pgrep.  pgrep_full is still passed through to ensure
+        # deprecation WARNS.  lp1474030
+        self.log.warn('DEPRECATION WARNING: use '
+                      'validate_service_config_changed instead of '
+                      'service_restarted due to known races.')
+
         time.sleep(sleep_time)
         if (self._get_proc_start_time(sentry_unit, service, pgrep_full) >=
                 self._get_file_mtime(sentry_unit, filename)):
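
Note — with pgrep replaced, a missing process is no longer a quiet None:
get_process_id_list() raises via amulet when pidof finds nothing, and the
/proc/<pid> stat can raise IOError if the process exits mid-check. A hedged
sketch of how a caller might cope (unit and service names are hypothetical):

    try:
        start = u._get_proc_start_time(unit, 'neutron-server')
    except IOError:
        # proc dir raced away; retry, as service_restarted_since() now does
        start = None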
@@ -294,78 +325,122 @@ class AmuletUtils(object):
         return False

     def service_restarted_since(self, sentry_unit, mtime, service,
-                                pgrep_full=False, sleep_time=20,
-                                retry_count=2):
+                                pgrep_full=None, sleep_time=20,
+                                retry_count=30, retry_sleep_time=10):
         """Check if service was been started after a given time.

         Args:
           sentry_unit (sentry): The sentry unit to check for the service on
           mtime (float): The epoch time to check against
           service (string): service name to look for in process table
-          pgrep_full (boolean): Use full command line search mode with pgrep
-          sleep_time (int): Seconds to sleep before looking for process
-          retry_count (int): If service is not found, how many times to retry
+          pgrep_full: [Deprecated] Use full command line search mode with pgrep
+          sleep_time (int): Initial sleep time (s) before looking for file
+          retry_sleep_time (int): Time (s) to sleep between retries
+          retry_count (int): If file is not found, how many times to retry

         Returns:
           bool: True if service found and its start time it newer than mtime,
                 False if service is older than mtime or if service was
                 not found.
         """
-        self.log.debug('Checking %s restarted since %s' % (service, mtime))
+        # NOTE(beisner) pgrep_full is no longer implemented, as pidof is now
+        # used instead of pgrep.  pgrep_full is still passed through to ensure
+        # deprecation WARNS.  lp1474030
+
+        unit_name = sentry_unit.info['unit_name']
+        self.log.debug('Checking that %s service restarted since %s on '
+                       '%s' % (service, mtime, unit_name))
         time.sleep(sleep_time)
-        proc_start_time = self._get_proc_start_time(sentry_unit, service,
-                                                    pgrep_full)
-        while retry_count > 0 and not proc_start_time:
-            self.log.debug('No pid file found for service %s, will retry %i '
-                           'more times' % (service, retry_count))
-            time.sleep(30)
-            proc_start_time = self._get_proc_start_time(sentry_unit, service,
-                                                        pgrep_full)
-            retry_count = retry_count - 1
+        proc_start_time = None
+        tries = 0
+        while tries <= retry_count and not proc_start_time:
+            try:
+                proc_start_time = self._get_proc_start_time(sentry_unit,
+                                                            service,
+                                                            pgrep_full)
+                self.log.debug('Attempt {} to get {} proc start time on {} '
+                               'OK'.format(tries, service, unit_name))
+            except IOError as e:
+                # NOTE(beisner) - race avoidance, proc may not exist yet.
+                # https://bugs.launchpad.net/charm-helpers/+bug/1474030
+                self.log.debug('Attempt {} to get {} proc start time on {} '
+                               'failed\n{}'.format(tries, service,
+                                                   unit_name, e))
+                time.sleep(retry_sleep_time)
+                tries += 1

         if not proc_start_time:
             self.log.warn('No proc start time found, assuming service did '
                           'not start')
             return False
         if proc_start_time >= mtime:
-            self.log.debug('proc start time is newer than provided mtime'
-                           '(%s >= %s)' % (proc_start_time, mtime))
+            self.log.debug('Proc start time is newer than provided mtime'
+                           '(%s >= %s) on %s (OK)' % (proc_start_time,
+                                                      mtime, unit_name))
             return True
         else:
-            self.log.warn('proc start time (%s) is older than provided mtime '
-                          '(%s), service did not restart' % (proc_start_time,
-                                                             mtime))
+            self.log.warn('Proc start time (%s) is older than provided mtime '
+                          '(%s) on %s, service did not '
+                          'restart' % (proc_start_time, mtime, unit_name))
             return False

     def config_updated_since(self, sentry_unit, filename, mtime,
-                             sleep_time=20):
+                             sleep_time=20, retry_count=30,
+                             retry_sleep_time=10):
         """Check if file was modified after a given time.

         Args:
           sentry_unit (sentry): The sentry unit to check the file mtime on
           filename (string): The file to check mtime of
           mtime (float): The epoch time to check against
-          sleep_time (int): Seconds to sleep before looking for process
+          sleep_time (int): Initial sleep time (s) before looking for file
+          retry_sleep_time (int): Time (s) to sleep between retries
+          retry_count (int): If file is not found, how many times to retry

         Returns:
           bool: True if file was modified more recently than mtime, False if
-                file was modified before mtime,
+                file was modified before mtime, or if file not found.
         """
-        self.log.debug('Checking %s updated since %s' % (filename, mtime))
+        unit_name = sentry_unit.info['unit_name']
+        self.log.debug('Checking that %s updated since %s on '
+                       '%s' % (filename, mtime, unit_name))
         time.sleep(sleep_time)
-        file_mtime = self._get_file_mtime(sentry_unit, filename)
+        file_mtime = None
+        tries = 0
+        while tries <= retry_count and not file_mtime:
+            try:
+                file_mtime = self._get_file_mtime(sentry_unit, filename)
+                self.log.debug('Attempt {} to get {} file mtime on {} '
+                               'OK'.format(tries, filename, unit_name))
+            except IOError as e:
+                # NOTE(beisner) - race avoidance, file may not exist yet.
+                # https://bugs.launchpad.net/charm-helpers/+bug/1474030
+                self.log.debug('Attempt {} to get {} file mtime on {} '
+                               'failed\n{}'.format(tries, filename,
+                                                   unit_name, e))
+                time.sleep(retry_sleep_time)
+                tries += 1
+
+        if not file_mtime:
+            self.log.warn('Could not determine file mtime, assuming '
+                          'file does not exist')
+            return False
+
         if file_mtime >= mtime:
             self.log.debug('File mtime is newer than provided mtime '
-                           '(%s >= %s)' % (file_mtime, mtime))
+                           '(%s >= %s) on %s (OK)' % (file_mtime,
+                                                      mtime, unit_name))
             return True
         else:
-            self.log.warn('File mtime %s is older than provided mtime %s'
-                          % (file_mtime, mtime))
+            self.log.warn('File mtime is older than provided mtime'
+                          '(%s < on %s) on %s' % (file_mtime,
+                                                  mtime, unit_name))
             return False

     def validate_service_config_changed(self, sentry_unit, mtime, service,
-                                        filename, pgrep_full=False,
-                                        sleep_time=20, retry_count=2):
+                                        filename, pgrep_full=None,
+                                        sleep_time=20, retry_count=30,
+                                        retry_sleep_time=10):
         """Check service and file were updated after mtime

         Args:
@@ -373,9 +448,10 @@ class AmuletUtils(object):
           mtime (float): The epoch time to check against
           service (string): service name to look for in process table
           filename (string): The file to check mtime of
-          pgrep_full (boolean): Use full command line search mode with pgrep
-          sleep_time (int): Seconds to sleep before looking for process
+          pgrep_full: [Deprecated] Use full command line search mode with pgrep
+          sleep_time (int): Initial sleep in seconds to pass to test helpers
           retry_count (int): If service is not found, how many times to retry
+          retry_sleep_time (int): Time in seconds to wait between retries

         Typical Usage:
             u = OpenStackAmuletUtils(ERROR)
|
|||||||
mtime, False if service is older than mtime or if service was
|
mtime, False if service is older than mtime or if service was
|
||||||
not found or if filename was modified before mtime.
|
not found or if filename was modified before mtime.
|
||||||
"""
|
"""
|
||||||
self.log.debug('Checking %s restarted since %s' % (service, mtime))
|
|
||||||
time.sleep(sleep_time)
|
# NOTE(beisner) pgrep_full is no longer implemented, as pidof is now
|
||||||
service_restart = self.service_restarted_since(sentry_unit, mtime,
|
# used instead of pgrep. pgrep_full is still passed through to ensure
|
||||||
service,
|
# deprecation WARNS. lp1474030
|
||||||
pgrep_full=pgrep_full,
|
|
||||||
sleep_time=0,
|
service_restart = self.service_restarted_since(
|
||||||
retry_count=retry_count)
|
sentry_unit, mtime,
|
||||||
config_update = self.config_updated_since(sentry_unit, filename, mtime,
|
service,
|
||||||
sleep_time=0)
|
pgrep_full=pgrep_full,
|
||||||
|
sleep_time=sleep_time,
|
||||||
|
retry_count=retry_count,
|
||||||
|
retry_sleep_time=retry_sleep_time)
|
||||||
|
|
||||||
|
config_update = self.config_updated_since(
|
||||||
|
sentry_unit,
|
||||||
|
filename,
|
||||||
|
mtime,
|
||||||
|
sleep_time=sleep_time,
|
||||||
|
retry_count=retry_count,
|
||||||
|
retry_sleep_time=retry_sleep_time)
|
||||||
|
|
||||||
return service_restart and config_update
|
return service_restart and config_update
|
||||||
|
|
||||||
def get_sentry_time(self, sentry_unit):
|
def get_sentry_time(self, sentry_unit):
|
||||||
@ -418,7 +506,6 @@ class AmuletUtils(object):
|
|||||||
"""Return a list of all Ubuntu releases in order of release."""
|
"""Return a list of all Ubuntu releases in order of release."""
|
||||||
_d = distro_info.UbuntuDistroInfo()
|
_d = distro_info.UbuntuDistroInfo()
|
||||||
_release_list = _d.all
|
_release_list = _d.all
|
||||||
self.log.debug('Ubuntu release list: {}'.format(_release_list))
|
|
||||||
return _release_list
|
return _release_list
|
||||||
|
|
||||||
def file_to_url(self, file_rel_path):
|
def file_to_url(self, file_rel_path):
|
||||||
@ -450,15 +537,20 @@ class AmuletUtils(object):
|
|||||||
cmd, code, output))
|
cmd, code, output))
|
||||||
return None
|
return None
|
||||||
|
|
||||||
def get_process_id_list(self, sentry_unit, process_name):
|
def get_process_id_list(self, sentry_unit, process_name,
|
||||||
|
expect_success=True):
|
||||||
"""Get a list of process ID(s) from a single sentry juju unit
|
"""Get a list of process ID(s) from a single sentry juju unit
|
||||||
for a single process name.
|
for a single process name.
|
||||||
|
|
||||||
:param sentry_unit: Pointer to amulet sentry instance (juju unit)
|
:param sentry_unit: Amulet sentry instance (juju unit)
|
||||||
:param process_name: Process name
|
:param process_name: Process name
|
||||||
|
:param expect_success: If False, expect the PID to be missing,
|
||||||
|
raise if it is present.
|
||||||
:returns: List of process IDs
|
:returns: List of process IDs
|
||||||
"""
|
"""
|
||||||
cmd = 'pidof {}'.format(process_name)
|
cmd = 'pidof -x {}'.format(process_name)
|
||||||
|
if not expect_success:
|
||||||
|
cmd += " || exit 0 && exit 1"
|
||||||
output, code = sentry_unit.run(cmd)
|
output, code = sentry_unit.run(cmd)
|
||||||
if code != 0:
|
if code != 0:
|
||||||
msg = ('{} `{}` returned {} '
|
msg = ('{} `{}` returned {} '
|
||||||
@ -467,14 +559,23 @@ class AmuletUtils(object):
|
|||||||
amulet.raise_status(amulet.FAIL, msg=msg)
|
amulet.raise_status(amulet.FAIL, msg=msg)
|
||||||
return str(output).split()
|
return str(output).split()
|
||||||
|
|
||||||
def get_unit_process_ids(self, unit_processes):
|
def get_unit_process_ids(self, unit_processes, expect_success=True):
|
||||||
"""Construct a dict containing unit sentries, process names, and
|
"""Construct a dict containing unit sentries, process names, and
|
||||||
process IDs."""
|
process IDs.
|
||||||
|
|
||||||
|
:param unit_processes: A dictionary of Amulet sentry instance
|
||||||
|
to list of process names.
|
||||||
|
:param expect_success: if False expect the processes to not be
|
||||||
|
running, raise if they are.
|
||||||
|
:returns: Dictionary of Amulet sentry instance to dictionary
|
||||||
|
of process names to PIDs.
|
||||||
|
"""
|
||||||
pid_dict = {}
|
pid_dict = {}
|
||||||
for sentry_unit, process_list in unit_processes.iteritems():
|
for sentry_unit, process_list in six.iteritems(unit_processes):
|
||||||
pid_dict[sentry_unit] = {}
|
pid_dict[sentry_unit] = {}
|
||||||
for process in process_list:
|
for process in process_list:
|
||||||
pids = self.get_process_id_list(sentry_unit, process)
|
pids = self.get_process_id_list(
|
||||||
|
sentry_unit, process, expect_success=expect_success)
|
||||||
pid_dict[sentry_unit].update({process: pids})
|
pid_dict[sentry_unit].update({process: pids})
|
||||||
return pid_dict
|
return pid_dict
|
||||||
|
|
||||||
@ -488,7 +589,7 @@ class AmuletUtils(object):
|
|||||||
return ('Unit count mismatch. expected, actual: {}, '
|
return ('Unit count mismatch. expected, actual: {}, '
|
||||||
'{} '.format(len(expected), len(actual)))
|
'{} '.format(len(expected), len(actual)))
|
||||||
|
|
||||||
for (e_sentry, e_proc_names) in expected.iteritems():
|
for (e_sentry, e_proc_names) in six.iteritems(expected):
|
||||||
e_sentry_name = e_sentry.info['unit_name']
|
e_sentry_name = e_sentry.info['unit_name']
|
||||||
if e_sentry in actual.keys():
|
if e_sentry in actual.keys():
|
||||||
a_proc_names = actual[e_sentry]
|
a_proc_names = actual[e_sentry]
|
||||||
@ -507,11 +608,23 @@ class AmuletUtils(object):
|
|||||||
'{}'.format(e_proc_name, a_proc_name))
|
'{}'.format(e_proc_name, a_proc_name))
|
||||||
|
|
||||||
a_pids_length = len(a_pids)
|
a_pids_length = len(a_pids)
|
||||||
if e_pids_length != a_pids_length:
|
fail_msg = ('PID count mismatch. {} ({}) expected, actual: '
|
||||||
return ('PID count mismatch. {} ({}) expected, actual: '
|
|
||||||
'{}, {} ({})'.format(e_sentry_name, e_proc_name,
|
'{}, {} ({})'.format(e_sentry_name, e_proc_name,
|
||||||
e_pids_length, a_pids_length,
|
e_pids_length, a_pids_length,
|
||||||
a_pids))
|
a_pids))
|
||||||
|
|
||||||
|
# If expected is not bool, ensure PID quantities match
|
||||||
|
if not isinstance(e_pids_length, bool) and \
|
||||||
|
a_pids_length != e_pids_length:
|
||||||
|
return fail_msg
|
||||||
|
# If expected is bool True, ensure 1 or more PIDs exist
|
||||||
|
elif isinstance(e_pids_length, bool) and \
|
||||||
|
e_pids_length is True and a_pids_length < 1:
|
||||||
|
return fail_msg
|
||||||
|
# If expected is bool False, ensure 0 PIDs exist
|
||||||
|
elif isinstance(e_pids_length, bool) and \
|
||||||
|
e_pids_length is False and a_pids_length != 0:
|
||||||
|
return fail_msg
|
||||||
else:
|
else:
|
||||||
self.log.debug('PID check OK: {} {} {}: '
|
self.log.debug('PID check OK: {} {} {}: '
|
||||||
'{}'.format(e_sentry_name, e_proc_name,
|
'{}'.format(e_sentry_name, e_proc_name,
|
||||||
@ -531,3 +644,175 @@ class AmuletUtils(object):
|
|||||||
return 'Dicts within list are not identical'
|
return 'Dicts within list are not identical'
|
||||||
|
|
||||||
return None
|
return None
|
||||||
|
|
||||||
|
def validate_sectionless_conf(self, file_contents, expected):
|
||||||
|
"""A crude conf parser. Useful to inspect configuration files which
|
||||||
|
do not have section headers (as would be necessary in order to use
|
||||||
|
the configparser). Such as openstack-dashboard or rabbitmq confs."""
|
||||||
|
for line in file_contents.split('\n'):
|
||||||
|
if '=' in line:
|
||||||
|
args = line.split('=')
|
||||||
|
if len(args) <= 1:
|
||||||
|
continue
|
||||||
|
key = args[0].strip()
|
||||||
|
value = args[1].strip()
|
||||||
|
if key in expected.keys():
|
||||||
|
if expected[key] != value:
|
||||||
|
msg = ('Config mismatch. Expected, actual: {}, '
|
||||||
|
'{}'.format(expected[key], value))
|
||||||
|
amulet.raise_status(amulet.FAIL, msg=msg)
|
||||||
|
|
||||||
|
def get_unit_hostnames(self, units):
|
||||||
|
"""Return a dict of juju unit names to hostnames."""
|
||||||
|
host_names = {}
|
||||||
|
for unit in units:
|
||||||
|
host_names[unit.info['unit_name']] = \
|
||||||
|
str(unit.file_contents('/etc/hostname').strip())
|
||||||
|
self.log.debug('Unit host names: {}'.format(host_names))
|
||||||
|
return host_names
|
||||||
|
|
||||||
|
def run_cmd_unit(self, sentry_unit, cmd):
|
||||||
|
"""Run a command on a unit, return the output and exit code."""
|
||||||
|
output, code = sentry_unit.run(cmd)
|
||||||
|
if code == 0:
|
||||||
|
self.log.debug('{} `{}` command returned {} '
|
||||||
|
'(OK)'.format(sentry_unit.info['unit_name'],
|
||||||
|
cmd, code))
|
||||||
|
else:
|
||||||
|
msg = ('{} `{}` command returned {} '
|
||||||
|
'{}'.format(sentry_unit.info['unit_name'],
|
||||||
|
cmd, code, output))
|
||||||
|
amulet.raise_status(amulet.FAIL, msg=msg)
|
||||||
|
return str(output), code
|
||||||
|
|
||||||
|
def file_exists_on_unit(self, sentry_unit, file_name):
|
||||||
|
"""Check if a file exists on a unit."""
|
||||||
|
try:
|
||||||
|
sentry_unit.file_stat(file_name)
|
||||||
|
return True
|
||||||
|
except IOError:
|
||||||
|
return False
|
||||||
|
except Exception as e:
|
||||||
|
msg = 'Error checking file {}: {}'.format(file_name, e)
|
||||||
|
amulet.raise_status(amulet.FAIL, msg=msg)
|
||||||
|
|
||||||
|
def file_contents_safe(self, sentry_unit, file_name,
|
||||||
|
max_wait=60, fatal=False):
|
||||||
|
"""Get file contents from a sentry unit. Wrap amulet file_contents
|
||||||
|
with retry logic to address races where a file checks as existing,
|
||||||
|
but no longer exists by the time file_contents is called.
|
||||||
|
Return None if file not found. Optionally raise if fatal is True."""
|
||||||
|
unit_name = sentry_unit.info['unit_name']
|
||||||
|
file_contents = False
|
||||||
|
tries = 0
|
||||||
|
while not file_contents and tries < (max_wait / 4):
|
||||||
|
try:
|
||||||
|
file_contents = sentry_unit.file_contents(file_name)
|
||||||
|
except IOError:
|
||||||
|
self.log.debug('Attempt {} to open file {} from {} '
|
||||||
|
'failed'.format(tries, file_name,
|
||||||
|
unit_name))
|
||||||
|
time.sleep(4)
|
||||||
|
tries += 1
|
||||||
|
|
||||||
|
if file_contents:
|
||||||
|
return file_contents
|
||||||
|
elif not fatal:
|
||||||
|
return None
|
||||||
|
elif fatal:
|
||||||
|
msg = 'Failed to get file contents from unit.'
|
||||||
|
amulet.raise_status(amulet.FAIL, msg)
|
||||||
|
|
||||||
|
def port_knock_tcp(self, host="localhost", port=22, timeout=15):
|
||||||
|
"""Open a TCP socket to check for a listening sevice on a host.
|
||||||
|
|
||||||
|
:param host: host name or IP address, default to localhost
|
||||||
|
:param port: TCP port number, default to 22
|
||||||
|
:param timeout: Connect timeout, default to 15 seconds
|
||||||
|
:returns: True if successful, False if connect failed
|
||||||
|
"""
|
||||||
|
|
||||||
|
# Resolve host name if possible
|
||||||
|
try:
|
||||||
|
connect_host = socket.gethostbyname(host)
|
||||||
|
host_human = "{} ({})".format(connect_host, host)
|
||||||
|
except socket.error as e:
|
||||||
|
self.log.warn('Unable to resolve address: '
|
||||||
|
'{} ({}) Trying anyway!'.format(host, e))
|
||||||
|
connect_host = host
|
||||||
|
host_human = connect_host
|
||||||
|
|
||||||
|
# Attempt socket connection
|
||||||
|
try:
|
||||||
|
knock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
|
||||||
|
knock.settimeout(timeout)
|
||||||
|
knock.connect((connect_host, port))
|
||||||
|
knock.close()
|
||||||
|
self.log.debug('Socket connect OK for host '
|
||||||
|
'{} on port {}.'.format(host_human, port))
|
||||||
|
return True
|
||||||
|
except socket.error as e:
|
||||||
|
self.log.debug('Socket connect FAIL for'
|
||||||
|
' {} port {} ({})'.format(host_human, port, e))
|
||||||
|
return False
|
||||||
|
|
||||||
|
def port_knock_units(self, sentry_units, port=22,
|
||||||
|
timeout=15, expect_success=True):
|
||||||
|
"""Open a TCP socket to check for a listening sevice on each
|
||||||
|
listed juju unit.
|
||||||
|
|
||||||
|
:param sentry_units: list of sentry unit pointers
|
||||||
|
:param port: TCP port number, default to 22
|
||||||
|
:param timeout: Connect timeout, default to 15 seconds
|
||||||
|
:expect_success: True by default, set False to invert logic
|
||||||
|
:returns: None if successful, Failure message otherwise
|
||||||
|
"""
|
||||||
|
for unit in sentry_units:
|
||||||
|
host = unit.info['public-address']
|
||||||
|
connected = self.port_knock_tcp(host, port, timeout)
|
||||||
|
if not connected and expect_success:
|
||||||
|
return 'Socket connect failed.'
|
||||||
|
elif connected and not expect_success:
|
||||||
|
return 'Socket connected unexpectedly.'
|
||||||
|
|
||||||
|
def get_uuid_epoch_stamp(self):
|
||||||
|
"""Returns a stamp string based on uuid4 and epoch time. Useful in
|
||||||
|
generating test messages which need to be unique-ish."""
|
||||||
|
return '[{}-{}]'.format(uuid.uuid4(), time.time())
|
||||||
|
|
||||||
|
# amulet juju action helpers:
|
||||||
|
def run_action(self, unit_sentry, action,
|
||||||
|
_check_output=subprocess.check_output):
|
||||||
|
"""Run the named action on a given unit sentry.
|
||||||
|
|
||||||
|
_check_output parameter is used for dependency injection.
|
||||||
|
|
||||||
|
@return action_id.
|
||||||
|
"""
|
||||||
|
unit_id = unit_sentry.info["unit_name"]
|
||||||
|
command = ["juju", "action", "do", "--format=json", unit_id, action]
|
||||||
|
self.log.info("Running command: %s\n" % " ".join(command))
|
||||||
|
output = _check_output(command, universal_newlines=True)
|
||||||
|
data = json.loads(output)
|
||||||
|
action_id = data[u'Action queued with id']
|
||||||
|
return action_id
|
||||||
|
|
||||||
|
def wait_on_action(self, action_id, _check_output=subprocess.check_output):
|
||||||
|
"""Wait for a given action, returning if it completed or not.
|
||||||
|
|
||||||
|
_check_output parameter is used for dependency injection.
|
||||||
|
"""
|
||||||
|
command = ["juju", "action", "fetch", "--format=json", "--wait=0",
|
||||||
|
action_id]
|
||||||
|
output = _check_output(command, universal_newlines=True)
|
||||||
|
data = json.loads(output)
|
||||||
|
return data.get(u"status") == "completed"
|
||||||
|
|
||||||
|
def status_get(self, unit):
|
||||||
|
"""Return the current service status of this unit."""
|
||||||
|
raw_status, return_code = unit.run(
|
||||||
|
"status-get --format=json --include-data")
|
||||||
|
if return_code != 0:
|
||||||
|
return ("unknown", "")
|
||||||
|
status = json.loads(raw_status)
|
||||||
|
return (status["status"], status["message"])
|
||||||
|
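
Example — driving a juju action end-to-end with the new helpers (the action
name 'pause' is hypothetical and charm-dependent):

    action_id = u.run_action(self.pgrid_sentry, 'pause')
    assert u.wait_on_action(action_id), 'pause action did not complete'
    status, message = u.status_get(self.pgrid_sentry)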
@@ -148,6 +148,13 @@ define service {{
         self.description = description
         self.check_cmd = self._locate_cmd(check_cmd)

+    def _get_check_filename(self):
+        return os.path.join(NRPE.nrpe_confdir, '{}.cfg'.format(self.command))
+
+    def _get_service_filename(self, hostname):
+        return os.path.join(NRPE.nagios_exportdir,
+                            'service__{}_{}.cfg'.format(hostname, self.command))
+
     def _locate_cmd(self, check_cmd):
         search_path = (
             '/usr/lib/nagios/plugins',
|
|||||||
log('Check command not found: {}'.format(parts[0]))
|
log('Check command not found: {}'.format(parts[0]))
|
||||||
return ''
|
return ''
|
||||||
|
|
||||||
|
def _remove_service_files(self):
|
||||||
|
if not os.path.exists(NRPE.nagios_exportdir):
|
||||||
|
return
|
||||||
|
for f in os.listdir(NRPE.nagios_exportdir):
|
||||||
|
if f.endswith('_{}.cfg'.format(self.command)):
|
||||||
|
os.remove(os.path.join(NRPE.nagios_exportdir, f))
|
||||||
|
|
||||||
|
def remove(self, hostname):
|
||||||
|
nrpe_check_file = self._get_check_filename()
|
||||||
|
if os.path.exists(nrpe_check_file):
|
||||||
|
os.remove(nrpe_check_file)
|
||||||
|
self._remove_service_files()
|
||||||
|
|
||||||
def write(self, nagios_context, hostname, nagios_servicegroups):
|
def write(self, nagios_context, hostname, nagios_servicegroups):
|
||||||
nrpe_check_file = '/etc/nagios/nrpe.d/{}.cfg'.format(
|
nrpe_check_file = self._get_check_filename()
|
||||||
self.command)
|
|
||||||
with open(nrpe_check_file, 'w') as nrpe_check_config:
|
with open(nrpe_check_file, 'w') as nrpe_check_config:
|
||||||
nrpe_check_config.write("# check {}\n".format(self.shortname))
|
nrpe_check_config.write("# check {}\n".format(self.shortname))
|
||||||
nrpe_check_config.write("command[{}]={}\n".format(
|
nrpe_check_config.write("command[{}]={}\n".format(
|
||||||
@ -180,9 +199,7 @@ define service {{
|
|||||||
|
|
||||||
def write_service_config(self, nagios_context, hostname,
|
def write_service_config(self, nagios_context, hostname,
|
||||||
nagios_servicegroups):
|
nagios_servicegroups):
|
||||||
for f in os.listdir(NRPE.nagios_exportdir):
|
self._remove_service_files()
|
||||||
if re.search('.*{}.cfg'.format(self.command), f):
|
|
||||||
os.remove(os.path.join(NRPE.nagios_exportdir, f))
|
|
||||||
|
|
||||||
templ_vars = {
|
templ_vars = {
|
||||||
'nagios_hostname': hostname,
|
'nagios_hostname': hostname,
|
||||||
@ -192,8 +209,7 @@ define service {{
|
|||||||
'command': self.command,
|
'command': self.command,
|
||||||
}
|
}
|
||||||
nrpe_service_text = Check.service_template.format(**templ_vars)
|
nrpe_service_text = Check.service_template.format(**templ_vars)
|
||||||
nrpe_service_file = '{}/service__{}_{}.cfg'.format(
|
nrpe_service_file = self._get_service_filename(hostname)
|
||||||
NRPE.nagios_exportdir, hostname, self.command)
|
|
||||||
with open(nrpe_service_file, 'w') as nrpe_service_config:
|
with open(nrpe_service_file, 'w') as nrpe_service_config:
|
||||||
nrpe_service_config.write(str(nrpe_service_text))
|
nrpe_service_config.write(str(nrpe_service_text))
|
||||||
|
|
||||||
@ -218,12 +234,32 @@ class NRPE(object):
|
|||||||
if hostname:
|
if hostname:
|
||||||
self.hostname = hostname
|
self.hostname = hostname
|
||||||
else:
|
else:
|
||||||
self.hostname = "{}-{}".format(self.nagios_context, self.unit_name)
|
nagios_hostname = get_nagios_hostname()
|
||||||
|
if nagios_hostname:
|
||||||
|
self.hostname = nagios_hostname
|
||||||
|
else:
|
||||||
|
self.hostname = "{}-{}".format(self.nagios_context, self.unit_name)
|
||||||
self.checks = []
|
self.checks = []
|
||||||
|
|
||||||
def add_check(self, *args, **kwargs):
|
def add_check(self, *args, **kwargs):
|
||||||
self.checks.append(Check(*args, **kwargs))
|
self.checks.append(Check(*args, **kwargs))
|
||||||
|
|
||||||
|
def remove_check(self, *args, **kwargs):
|
||||||
|
if kwargs.get('shortname') is None:
|
||||||
|
raise ValueError('shortname of check must be specified')
|
||||||
|
|
||||||
|
# Use sensible defaults if they're not specified - these are not
|
||||||
|
# actually used during removal, but they're required for constructing
|
||||||
|
# the Check object; check_disk is chosen because it's part of the
|
||||||
|
# nagios-plugins-basic package.
|
||||||
|
if kwargs.get('check_cmd') is None:
|
||||||
|
kwargs['check_cmd'] = 'check_disk'
|
||||||
|
if kwargs.get('description') is None:
|
||||||
|
kwargs['description'] = ''
|
||||||
|
|
||||||
|
check = Check(*args, **kwargs)
|
||||||
|
check.remove(self.hostname)
|
||||||
|
|
||||||
def write(self):
|
def write(self):
|
||||||
try:
|
try:
|
||||||
nagios_uid = pwd.getpwnam('nagios').pw_uid
|
nagios_uid = pwd.getpwnam('nagios').pw_uid
|
||||||
@ -260,7 +296,7 @@ def get_nagios_hostcontext(relation_name='nrpe-external-master'):
|
|||||||
:param str relation_name: Name of relation nrpe sub joined to
|
:param str relation_name: Name of relation nrpe sub joined to
|
||||||
"""
|
"""
|
||||||
for rel in relations_of_type(relation_name):
|
for rel in relations_of_type(relation_name):
|
||||||
if 'nagios_hostname' in rel:
|
if 'nagios_host_context' in rel:
|
||||||
return rel['nagios_host_context']
|
return rel['nagios_host_context']
|
||||||
|
|
||||||
|
|
||||||
@ -301,11 +337,13 @@ def add_init_service_checks(nrpe, services, unit_name):
|
|||||||
upstart_init = '/etc/init/%s.conf' % svc
|
upstart_init = '/etc/init/%s.conf' % svc
|
||||||
sysv_init = '/etc/init.d/%s' % svc
|
sysv_init = '/etc/init.d/%s' % svc
|
||||||
if os.path.exists(upstart_init):
|
if os.path.exists(upstart_init):
|
||||||
nrpe.add_check(
|
# Don't add a check for these services from neutron-gateway
|
||||||
shortname=svc,
|
if svc not in ['ext-port', 'os-charm-phy-nic-mtu']:
|
||||||
description='process check {%s}' % unit_name,
|
nrpe.add_check(
|
||||||
check_cmd='check_upstart_job %s' % svc
|
shortname=svc,
|
||||||
)
|
description='process check {%s}' % unit_name,
|
||||||
|
check_cmd='check_upstart_job %s' % svc
|
||||||
|
)
|
||||||
elif os.path.exists(sysv_init):
|
elif os.path.exists(sysv_init):
|
||||||
cronpath = '/etc/cron.d/nagios-service-check-%s' % svc
|
cronpath = '/etc/cron.d/nagios-service-check-%s' % svc
|
||||||
cron_file = ('*/5 * * * * root '
|
cron_file = ('*/5 * * * * root '
|
||||||
|
@@ -23,7 +23,7 @@ import socket
 from functools import partial

 from charmhelpers.core.hookenv import unit_get
-from charmhelpers.fetch import apt_install
+from charmhelpers.fetch import apt_install, apt_update
 from charmhelpers.core.hookenv import (
     log,
     WARNING,
|
|||||||
try:
|
try:
|
||||||
import netifaces
|
import netifaces
|
||||||
except ImportError:
|
except ImportError:
|
||||||
apt_install('python-netifaces')
|
apt_update(fatal=True)
|
||||||
|
apt_install('python-netifaces', fatal=True)
|
||||||
import netifaces
|
import netifaces
|
||||||
|
|
||||||
try:
|
try:
|
||||||
import netaddr
|
import netaddr
|
||||||
except ImportError:
|
except ImportError:
|
||||||
apt_install('python-netaddr')
|
apt_update(fatal=True)
|
||||||
|
apt_install('python-netaddr', fatal=True)
|
||||||
import netaddr
|
import netaddr
|
||||||
|
|
||||||
|
|
||||||
@@ -51,7 +53,7 @@ def _validate_cidr(network):


 def no_ip_found_error_out(network):
-    errmsg = ("No IP address found in network: %s" % network)
+    errmsg = ("No IP address found in network(s): %s" % network)
     raise ValueError(errmsg)


@@ -59,7 +61,7 @@ def get_address_in_network(network, fallback=None, fatal=False):
     """Get an IPv4 or IPv6 address within the network from the host.

     :param network (str): CIDR presentation format. For example,
-        '192.168.1.0/24'.
+        '192.168.1.0/24'. Supports multiple networks as a space-delimited list.
     :param fallback (str): If no address is found, return fallback.
     :param fatal (boolean): If no address is found, fallback is not
         set and fatal is True then exit(1).
|
|||||||
else:
|
else:
|
||||||
return None
|
return None
|
||||||
|
|
||||||
_validate_cidr(network)
|
networks = network.split() or [network]
|
||||||
network = netaddr.IPNetwork(network)
|
for network in networks:
|
||||||
for iface in netifaces.interfaces():
|
_validate_cidr(network)
|
||||||
addresses = netifaces.ifaddresses(iface)
|
network = netaddr.IPNetwork(network)
|
||||||
if network.version == 4 and netifaces.AF_INET in addresses:
|
for iface in netifaces.interfaces():
|
||||||
addr = addresses[netifaces.AF_INET][0]['addr']
|
addresses = netifaces.ifaddresses(iface)
|
||||||
netmask = addresses[netifaces.AF_INET][0]['netmask']
|
if network.version == 4 and netifaces.AF_INET in addresses:
|
||||||
cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask))
|
addr = addresses[netifaces.AF_INET][0]['addr']
|
||||||
if cidr in network:
|
netmask = addresses[netifaces.AF_INET][0]['netmask']
|
||||||
return str(cidr.ip)
|
cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask))
|
||||||
|
if cidr in network:
|
||||||
|
return str(cidr.ip)
|
||||||
|
|
||||||
if network.version == 6 and netifaces.AF_INET6 in addresses:
|
if network.version == 6 and netifaces.AF_INET6 in addresses:
|
||||||
for addr in addresses[netifaces.AF_INET6]:
|
for addr in addresses[netifaces.AF_INET6]:
|
||||||
if not addr['addr'].startswith('fe80'):
|
if not addr['addr'].startswith('fe80'):
|
||||||
cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'],
|
cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'],
|
||||||
addr['netmask']))
|
addr['netmask']))
|
||||||
if cidr in network:
|
if cidr in network:
|
||||||
return str(cidr.ip)
|
return str(cidr.ip)
|
||||||
|
|
||||||
if fallback is not None:
|
if fallback is not None:
|
||||||
return fallback
|
return fallback
|
||||||
@ -435,8 +439,12 @@ def get_hostname(address, fqdn=True):
|
|||||||
|
|
||||||
rev = dns.reversename.from_address(address)
|
rev = dns.reversename.from_address(address)
|
||||||
result = ns_query(rev)
|
result = ns_query(rev)
|
||||||
|
|
||||||
if not result:
|
if not result:
|
||||||
return None
|
try:
|
||||||
|
result = socket.gethostbyaddr(address)[0]
|
||||||
|
except:
|
||||||
|
return None
|
||||||
else:
|
else:
|
||||||
result = address
|
result = address
|
||||||
|
|
||||||
@ -448,3 +456,18 @@ def get_hostname(address, fqdn=True):
|
|||||||
return result
|
return result
|
||||||
else:
|
else:
|
||||||
return result.split('.')[0]
|
return result.split('.')[0]
|
||||||
|
|
||||||
|
|
||||||
|
def port_has_listener(address, port):
|
||||||
|
"""
|
||||||
|
Returns True if the address:port is open and being listened to,
|
||||||
|
else False.
|
||||||
|
|
||||||
|
@param address: an IP address or hostname
|
||||||
|
@param port: integer port
|
||||||
|
|
||||||
|
Note calls 'zc' via a subprocess shell
|
||||||
|
"""
|
||||||
|
cmd = ['nc', '-z', address, str(port)]
|
||||||
|
result = subprocess.call(cmd)
|
||||||
|
return not(bool(result))
|
||||||
|
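
Example — checking for a listener with the new helper; it shells out to
`nc -z`, so netcat must be present on the host (the port shown is the
conventional neutron-server API port, used here only as an illustration):

    if port_has_listener('127.0.0.1', 9696):
        log('neutron-server is listening')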
@@ -40,7 +40,9 @@ Examples:
 import re
 import os
 import subprocess

 from charmhelpers.core import hookenv
+from charmhelpers.core.kernel import modprobe, is_module_loaded

 __author__ = "Felipe Reyes <felipe.reyes@canonical.com>"

@@ -82,14 +84,11 @@ def is_ipv6_ok(soft_fail=False):
     # do we have IPv6 in the machine?
     if os.path.isdir('/proc/sys/net/ipv6'):
         # is ip6tables kernel module loaded?
-        lsmod = subprocess.check_output(['lsmod'], universal_newlines=True)
-        matches = re.findall('^ip6_tables[ ]+', lsmod, re.M)
-        if len(matches) == 0:
+        if not is_module_loaded('ip6_tables'):
             # ip6tables support isn't complete, let's try to load it
             try:
-                subprocess.check_output(['modprobe', 'ip6_tables'],
-                                        universal_newlines=True)
-                # great, we could load the module
+                modprobe('ip6_tables')
+                # great, we can load the module
                 return True
             except subprocess.CalledProcessError as ex:
                 hookenv.log("Couldn't load ip6_tables module: %s" % ex.output,
@@ -14,12 +14,18 @@
 # You should have received a copy of the GNU Lesser General Public License
 # along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.

+import logging
+import re
+import sys
+
 import six
 from collections import OrderedDict
 from charmhelpers.contrib.amulet.deployment import (
     AmuletDeployment
 )

+DEBUG = logging.DEBUG
+ERROR = logging.ERROR
+

 class OpenStackAmuletDeployment(AmuletDeployment):
     """OpenStack amulet deployment.
@@ -28,9 +34,12 @@ class OpenStackAmuletDeployment(AmuletDeployment):
     that is specifically for use by OpenStack charms.
     """

-    def __init__(self, series=None, openstack=None, source=None, stable=True):
+    def __init__(self, series=None, openstack=None, source=None,
+                 stable=True, log_level=DEBUG):
         """Initialize the deployment environment."""
         super(OpenStackAmuletDeployment, self).__init__(series)
+        self.log = self.get_logger(level=log_level)
+        self.log.info('OpenStackAmuletDeployment:  init')
         self.openstack = openstack
         self.source = source
         self.stable = stable
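
Example — a quieter amulet run via the new log_level argument (a sketch; the
module now exports DEBUG and ERROR as seen in the imports hunk above):

    from charmhelpers.contrib.openstack.amulet.deployment import (
        OpenStackAmuletDeployment, ERROR)

    d = OpenStackAmuletDeployment(series='trusty', log_level=ERROR)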
@@ -38,26 +47,55 @@ class OpenStackAmuletDeployment(AmuletDeployment):
         # out.
         self.current_next = "trusty"

+    def get_logger(self, name="deployment-logger", level=logging.DEBUG):
+        """Get a logger object that will log to stdout."""
+        log = logging
+        logger = log.getLogger(name)
+        fmt = log.Formatter("%(asctime)s %(funcName)s "
+                            "%(levelname)s: %(message)s")
+
+        handler = log.StreamHandler(stream=sys.stdout)
+        handler.setLevel(level)
+        handler.setFormatter(fmt)
+
+        logger.addHandler(handler)
+        logger.setLevel(level)
+
+        return logger
+
     def _determine_branch_locations(self, other_services):
         """Determine the branch locations for the other services.

         Determine if the local branch being tested is derived from its
         stable or next (dev) branch, and based on this, use the corresonding
         stable or next branches for the other_services."""
-        base_charms = ['mysql', 'mongodb']
+
+        self.log.info('OpenStackAmuletDeployment:  determine branch locations')
+
+        # Charms outside the lp:~openstack-charmers namespace
+        base_charms = ['mysql', 'mongodb', 'nrpe']
+
+        # Force these charms to current series even when using an older series.
+        # ie. Use trusty/nrpe even when series is precise, as the P charm
+        # does not possess the necessary external master config and hooks.
+        force_series_current = ['nrpe']
+
         if self.series in ['precise', 'trusty']:
             base_series = self.series
         else:
             base_series = self.current_next

-        if self.stable:
-            for svc in other_services:
+        for svc in other_services:
+            if svc['name'] in force_series_current:
+                base_series = self.current_next
+
+            # If a location has been explicitly set, use it
+            if svc.get('location'):
+                continue
+            if self.stable:
                 temp = 'lp:charms/{}/{}'
                 svc['location'] = temp.format(base_series,
                                               svc['name'])
-        else:
-            for svc in other_services:
+            else:
                 if svc['name'] in base_charms:
                     temp = 'lp:charms/{}/{}'
                     svc['location'] = temp.format(base_series,
|
|||||||
temp = 'lp:~openstack-charmers/charms/{}/{}/next'
|
temp = 'lp:~openstack-charmers/charms/{}/{}/next'
|
||||||
svc['location'] = temp.format(self.current_next,
|
svc['location'] = temp.format(self.current_next,
|
||||||
svc['name'])
|
svc['name'])
|
||||||
|
|
||||||
return other_services
|
return other_services
|
||||||
|
|
||||||
def _add_services(self, this_service, other_services):
|
def _add_services(self, this_service, other_services):
|
||||||
"""Add services to the deployment and set openstack-origin/source."""
|
"""Add services to the deployment and set openstack-origin/source."""
|
||||||
|
self.log.info('OpenStackAmuletDeployment: adding services')
|
||||||
|
|
||||||
other_services = self._determine_branch_locations(other_services)
|
other_services = self._determine_branch_locations(other_services)
|
||||||
|
|
||||||
super(OpenStackAmuletDeployment, self)._add_services(this_service,
|
super(OpenStackAmuletDeployment, self)._add_services(this_service,
|
||||||
@ -77,29 +118,103 @@ class OpenStackAmuletDeployment(AmuletDeployment):
|
|||||||
|
|
||||||
services = other_services
|
services = other_services
|
||||||
services.append(this_service)
|
services.append(this_service)
|
||||||
|
|
||||||
|
# Charms which should use the source config option
|
||||||
use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph',
|
use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph',
|
||||||
'ceph-osd', 'ceph-radosgw']
|
'ceph-osd', 'ceph-radosgw', 'ceph-mon']
|
||||||
# Most OpenStack subordinate charms do not expose an origin option
|
|
||||||
-        # as that is controlled by the principle.
-        ignore = ['cinder-ceph', 'hacluster', 'neutron-openvswitch']
+        # Charms which can not use openstack-origin, ie. many subordinates
+        no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe',
+                     'openvswitch-odl', 'neutron-api-odl', 'odl-controller',
+                     'cinder-backup']
 
         if self.openstack:
             for svc in services:
-                if svc['name'] not in use_source + ignore:
+                if svc['name'] not in use_source + no_origin:
                     config = {'openstack-origin': self.openstack}
                     self.d.configure(svc['name'], config)
 
         if self.source:
             for svc in services:
-                if svc['name'] in use_source and svc['name'] not in ignore:
+                if svc['name'] in use_source and svc['name'] not in no_origin:
                     config = {'source': self.source}
                     self.d.configure(svc['name'], config)
 
     def _configure_services(self, configs):
         """Configure all of the services."""
+        self.log.info('OpenStackAmuletDeployment:  configure services')
         for service, config in six.iteritems(configs):
             self.d.configure(service, config)
 
+    def _auto_wait_for_status(self, message=None, exclude_services=None,
+                              include_only=None, timeout=1800):
+        """Wait for all units to have a specific extended status, except
+        for any defined as excluded.  Unless specified via message, any
+        status containing any case of 'ready' will be considered a match.
+
+        Examples of message usage:
+
+          Wait for all unit status to CONTAIN any case of 'ready' or 'ok':
+              message = re.compile('.*ready.*|.*ok.*', re.IGNORECASE)
+
+          Wait for all units to reach this status (exact match):
+              message = re.compile('^Unit is ready and clustered$')
+
+          Wait for all units to reach any one of these (exact match):
+              message = re.compile('Unit is ready|OK|Ready')
+
+          Wait for at least one unit to reach this status (exact match):
+              message = {'ready'}
+
+        See Amulet's sentry.wait_for_messages() for message usage detail.
+        https://github.com/juju/amulet/blob/master/amulet/sentry.py
+
+        :param message: Expected status match
+        :param exclude_services: List of juju service names to ignore,
+            not to be used in conjunction with include_only.
+        :param include_only: List of juju service names to exclusively check,
+            not to be used in conjunction with exclude_services.
+        :param timeout: Maximum time in seconds to wait for status match
+        :returns: None.  Raises if timeout is hit.
+        """
+        self.log.info('Waiting for extended status on units...')
+
+        all_services = self.d.services.keys()
+
+        if exclude_services and include_only:
+            raise ValueError('exclude_services can not be used '
+                             'with include_only')
+
+        if message:
+            if isinstance(message, re._pattern_type):
+                match = message.pattern
+            else:
+                match = message
+
+            self.log.debug('Custom extended status wait match: '
+                           '{}'.format(match))
+        else:
+            self.log.debug('Default extended status wait match:  contains '
+                           'READY (case-insensitive)')
+            message = re.compile('.*ready.*', re.IGNORECASE)
+
+        if exclude_services:
+            self.log.debug('Excluding services from extended status match: '
+                           '{}'.format(exclude_services))
+        else:
+            exclude_services = []
+
+        if include_only:
+            services = include_only
+        else:
+            services = list(set(all_services) - set(exclude_services))
+
+        self.log.debug('Waiting up to {}s for extended status on services: '
+                       '{}'.format(timeout, services))
+        service_messages = {service: message for service in services}
+        self.d.sentry.wait_for_messages(service_messages, timeout=timeout)
+        self.log.info('OK')
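For charm authors consuming this sync, a minimal sketch of driving the new wait helper from a test. The service name excluded below is illustrative only and not part of this commit:

import re

def wait_until_ready(deployment):
    # Match any status containing 'ready', on everything except one
    # hypothetical subordinate that reports no workload status.
    deployment._auto_wait_for_status(
        message=re.compile('.*ready.*', re.IGNORECASE),
        exclude_services=['neutron-api-plumgrid'],  # hypothetical name
        timeout=900)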
     def _get_openstack_release(self):
         """Get openstack release.
 
@@ -111,7 +226,8 @@ class OpenStackAmuletDeployment(AmuletDeployment):
            self.precise_havana, self.precise_icehouse,
            self.trusty_icehouse, self.trusty_juno, self.utopic_juno,
            self.trusty_kilo, self.vivid_kilo, self.trusty_liberty,
-           self.wily_liberty) = range(12)
+           self.wily_liberty, self.trusty_mitaka,
+           self.xenial_mitaka) = range(14)
 
         releases = {
             ('precise', None): self.precise_essex,
@@ -123,9 +239,11 @@ class OpenStackAmuletDeployment(AmuletDeployment):
             ('trusty', 'cloud:trusty-juno'): self.trusty_juno,
             ('trusty', 'cloud:trusty-kilo'): self.trusty_kilo,
             ('trusty', 'cloud:trusty-liberty'): self.trusty_liberty,
+            ('trusty', 'cloud:trusty-mitaka'): self.trusty_mitaka,
             ('utopic', None): self.utopic_juno,
             ('vivid', None): self.vivid_kilo,
-            ('wily', None): self.wily_liberty}
+            ('wily', None): self.wily_liberty,
+            ('xenial', None): self.xenial_mitaka}
         return releases[(self.series, self.openstack)]
 
     def _get_openstack_release_string(self):
@@ -142,6 +260,7 @@ class OpenStackAmuletDeployment(AmuletDeployment):
             ('utopic', 'juno'),
             ('vivid', 'kilo'),
             ('wily', 'liberty'),
+            ('xenial', 'mitaka'),
         ])
         if self.openstack:
             os_origin = self.openstack.split(':')[1]
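Read literally, the tuple assignment above is just an enumeration by position. A standalone illustration of the lookup it feeds, showing only the entries visible in this hunk and assuming indices follow the tuple order:

# Positional indices per the range(14) unpacking above; subset only.
releases = {
    ('trusty', 'cloud:trusty-mitaka'): 12,  # trusty_mitaka
    ('xenial', None): 13,                   # xenial_mitaka
}
assert releases[('xenial', None)] == 13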
@@ -18,6 +18,7 @@ import amulet
 import json
 import logging
 import os
+import re
 import six
 import time
 import urllib
@@ -27,6 +28,7 @@ import glanceclient.v1.client as glance_client
 import heatclient.v1.client as heat_client
 import keystoneclient.v2_0 as keystone_client
 import novaclient.v1_1.client as nova_client
+import pika
 import swiftclient
 
 from charmhelpers.contrib.amulet.utils import (
@@ -602,3 +604,382 @@ class OpenStackAmuletUtils(AmuletUtils):
             self.log.debug('Ceph {} samples (OK): '
                            '{}'.format(sample_type, samples))
             return None
+
+    # rabbitmq/amqp specific helpers:
+
+    def rmq_wait_for_cluster(self, deployment, init_sleep=15, timeout=1200):
+        """Wait for rmq units extended status to show cluster readiness,
+        after an optional initial sleep period.  Initial sleep is likely
+        necessary to be effective following a config change, as status
+        message may not instantly update to non-ready."""
+
+        if init_sleep:
+            time.sleep(init_sleep)
+
+        message = re.compile('^Unit is ready and clustered$')
+        deployment._auto_wait_for_status(message=message,
+                                         timeout=timeout,
+                                         include_only=['rabbitmq-server'])
+
+    def add_rmq_test_user(self, sentry_units,
+                          username="testuser1", password="changeme"):
+        """Add a test user via the first rmq juju unit, check connection as
+        the new user against all sentry units.
+
+        :param sentry_units: list of sentry unit pointers
+        :param username: amqp user name, default to testuser1
+        :param password: amqp user password
+        :returns: None if successful.  Raise on error.
+        """
+        self.log.debug('Adding rmq user ({})...'.format(username))
+
+        # Check that user does not already exist
+        cmd_user_list = 'rabbitmqctl list_users'
+        output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list)
+        if username in output:
+            self.log.warning('User ({}) already exists, returning '
+                             'gracefully.'.format(username))
+            return
+
+        perms = '".*" ".*" ".*"'
+        cmds = ['rabbitmqctl add_user {} {}'.format(username, password),
+                'rabbitmqctl set_permissions {} {}'.format(username, perms)]
+
+        # Add user via first unit
+        for cmd in cmds:
+            output, _ = self.run_cmd_unit(sentry_units[0], cmd)
+
+        # Check connection against the other sentry_units
+        self.log.debug('Checking user connect against units...')
+        for sentry_unit in sentry_units:
+            connection = self.connect_amqp_by_unit(sentry_unit, ssl=False,
+                                                   username=username,
+                                                   password=password)
+            connection.close()
+
+    def delete_rmq_test_user(self, sentry_units, username="testuser1"):
+        """Delete a rabbitmq user via the first rmq juju unit.
+
+        :param sentry_units: list of sentry unit pointers
+        :param username: amqp user name, default to testuser1
+        :param password: amqp user password
+        :returns: None if successful or no such user.
+        """
+        self.log.debug('Deleting rmq user ({})...'.format(username))
+
+        # Check that the user exists
+        cmd_user_list = 'rabbitmqctl list_users'
+        output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list)
+
+        if username not in output:
+            self.log.warning('User ({}) does not exist, returning '
+                             'gracefully.'.format(username))
+            return
+
+        # Delete the user
+        cmd_user_del = 'rabbitmqctl delete_user {}'.format(username)
+        output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_del)
+
+    def get_rmq_cluster_status(self, sentry_unit):
+        """Execute rabbitmq cluster status command on a unit and return
+        the full output.
+
+        :param unit: sentry unit
+        :returns: String containing console output of cluster status command
+        """
+        cmd = 'rabbitmqctl cluster_status'
+        output, _ = self.run_cmd_unit(sentry_unit, cmd)
+        self.log.debug('{} cluster_status:\n{}'.format(
+            sentry_unit.info['unit_name'], output))
+        return str(output)
+
+    def get_rmq_cluster_running_nodes(self, sentry_unit):
+        """Parse rabbitmqctl cluster_status output string, return list of
+        running rabbitmq cluster nodes.
+
+        :param unit: sentry unit
+        :returns: List containing node names of running nodes
+        """
+        # NOTE(beisner): rabbitmqctl cluster_status output is not
+        # json-parsable, do string chop foo, then json.loads that.
+        str_stat = self.get_rmq_cluster_status(sentry_unit)
+        if 'running_nodes' in str_stat:
+            pos_start = str_stat.find("{running_nodes,") + 15
+            pos_end = str_stat.find("]},", pos_start) + 1
+            str_run_nodes = str_stat[pos_start:pos_end].replace("'", '"')
+            run_nodes = json.loads(str_run_nodes)
+            return run_nodes
+        else:
+            return []
+
+    def validate_rmq_cluster_running_nodes(self, sentry_units):
+        """Check that all rmq unit hostnames are represented in the
+        cluster_status output of all units.
+
+        :param host_names: dict of juju unit names to host names
+        :param units: list of sentry unit pointers (all rmq units)
+        :returns: None if successful, otherwise return error message
+        """
+        host_names = self.get_unit_hostnames(sentry_units)
+        errors = []
+
+        # Query every unit for cluster_status running nodes
+        for query_unit in sentry_units:
+            query_unit_name = query_unit.info['unit_name']
+            running_nodes = self.get_rmq_cluster_running_nodes(query_unit)
+
+            # Confirm that every unit is represented in the queried unit's
+            # cluster_status running nodes output.
+            for validate_unit in sentry_units:
+                val_host_name = host_names[validate_unit.info['unit_name']]
+                val_node_name = 'rabbit@{}'.format(val_host_name)
+
+                if val_node_name not in running_nodes:
+                    errors.append('Cluster member check failed on {}: {} not '
+                                  'in {}\n'.format(query_unit_name,
+                                                   val_node_name,
+                                                   running_nodes))
+        if errors:
+            return ''.join(errors)
+
+    def rmq_ssl_is_enabled_on_unit(self, sentry_unit, port=None):
+        """Check a single juju rmq unit for ssl and port in the config file."""
+        host = sentry_unit.info['public-address']
+        unit_name = sentry_unit.info['unit_name']
+
+        conf_file = '/etc/rabbitmq/rabbitmq.config'
+        conf_contents = str(self.file_contents_safe(sentry_unit,
+                                                    conf_file, max_wait=16))
+        # Checks
+        conf_ssl = 'ssl' in conf_contents
+        conf_port = str(port) in conf_contents
+
+        # Port explicitly checked in config
+        if port and conf_port and conf_ssl:
+            self.log.debug('SSL is enabled  @{}:{} '
+                           '({})'.format(host, port, unit_name))
+            return True
+        elif port and not conf_port and conf_ssl:
+            self.log.debug('SSL is enabled @{} but not on port {} '
+                           '({})'.format(host, port, unit_name))
+            return False
+        # Port not checked (useful when checking that ssl is disabled)
+        elif not port and conf_ssl:
+            self.log.debug('SSL is enabled  @{}:{} '
+                           '({})'.format(host, port, unit_name))
+            return True
+        elif not conf_ssl:
+            self.log.debug('SSL not enabled @{}:{} '
+                           '({})'.format(host, port, unit_name))
+            return False
+        else:
+            msg = ('Unknown condition when checking SSL status @{}:{} '
+                   '({})'.format(host, port, unit_name))
+            amulet.raise_status(amulet.FAIL, msg)
+
+    def validate_rmq_ssl_enabled_units(self, sentry_units, port=None):
+        """Check that ssl is enabled on rmq juju sentry units.
+
+        :param sentry_units: list of all rmq sentry units
+        :param port: optional ssl port override to validate
+        :returns: None if successful, otherwise return error message
+        """
+        for sentry_unit in sentry_units:
+            if not self.rmq_ssl_is_enabled_on_unit(sentry_unit, port=port):
+                return ('Unexpected condition:  ssl is disabled on unit '
+                        '({})'.format(sentry_unit.info['unit_name']))
+        return None
+
+    def validate_rmq_ssl_disabled_units(self, sentry_units):
+        """Check that ssl is disabled on listed rmq juju sentry units.
+
+        :param sentry_units: list of all rmq sentry units
+        :returns: True if successful.  Raise on error.
+        """
+        for sentry_unit in sentry_units:
+            if self.rmq_ssl_is_enabled_on_unit(sentry_unit):
+                return ('Unexpected condition:  ssl is enabled on unit '
+                        '({})'.format(sentry_unit.info['unit_name']))
+        return None
+
+    def configure_rmq_ssl_on(self, sentry_units, deployment,
+                             port=None, max_wait=60):
+        """Turn ssl charm config option on, with optional non-default
+        ssl port specification.  Confirm that it is enabled on every
+        unit.
+
+        :param sentry_units: list of sentry units
+        :param deployment: amulet deployment object pointer
+        :param port: amqp port, use defaults if None
+        :param max_wait: maximum time to wait in seconds to confirm
+        :returns: None if successful.  Raise on error.
+        """
+        self.log.debug('Setting ssl charm config option:  on')
+
+        # Enable RMQ SSL
+        config = {'ssl': 'on'}
+        if port:
+            config['ssl_port'] = port
+
+        deployment.d.configure('rabbitmq-server', config)
+
+        # Wait for unit status
+        self.rmq_wait_for_cluster(deployment)
+
+        # Confirm
+        tries = 0
+        ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port)
+        while ret and tries < (max_wait / 4):
+            time.sleep(4)
+            self.log.debug('Attempt {}: {}'.format(tries, ret))
+            ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port)
+            tries += 1
+
+        if ret:
+            amulet.raise_status(amulet.FAIL, ret)
+
+    def configure_rmq_ssl_off(self, sentry_units, deployment, max_wait=60):
+        """Turn ssl charm config option off, confirm that it is disabled
+        on every unit.
+
+        :param sentry_units: list of sentry units
+        :param deployment: amulet deployment object pointer
+        :param max_wait: maximum time to wait in seconds to confirm
+        :returns: None if successful.  Raise on error.
+        """
+        self.log.debug('Setting ssl charm config option:  off')
+
+        # Disable RMQ SSL
+        config = {'ssl': 'off'}
+        deployment.d.configure('rabbitmq-server', config)
+
+        # Wait for unit status
+        self.rmq_wait_for_cluster(deployment)
+
+        # Confirm
+        tries = 0
+        ret = self.validate_rmq_ssl_disabled_units(sentry_units)
+        while ret and tries < (max_wait / 4):
+            time.sleep(4)
+            self.log.debug('Attempt {}: {}'.format(tries, ret))
+            ret = self.validate_rmq_ssl_disabled_units(sentry_units)
+            tries += 1
+
+        if ret:
+            amulet.raise_status(amulet.FAIL, ret)
+
+    def connect_amqp_by_unit(self, sentry_unit, ssl=False,
+                             port=None, fatal=True,
+                             username="testuser1", password="changeme"):
+        """Establish and return a pika amqp connection to the rabbitmq service
+        running on a rmq juju unit.
+
+        :param sentry_unit: sentry unit pointer
+        :param ssl: boolean, default to False
+        :param port: amqp port, use defaults if None
+        :param fatal: boolean, default to True (raises on connect error)
+        :param username: amqp user name, default to testuser1
+        :param password: amqp user password
+        :returns: pika amqp connection pointer or None if failed and non-fatal
+        """
+        host = sentry_unit.info['public-address']
+        unit_name = sentry_unit.info['unit_name']
+
+        # Default port logic if port is not specified
+        if ssl and not port:
+            port = 5671
+        elif not ssl and not port:
+            port = 5672
+
+        self.log.debug('Connecting to amqp on {}:{} ({}) as '
+                       '{}...'.format(host, port, unit_name, username))
+
+        try:
+            credentials = pika.PlainCredentials(username, password)
+            parameters = pika.ConnectionParameters(host=host, port=port,
+                                                   credentials=credentials,
+                                                   ssl=ssl,
+                                                   connection_attempts=3,
+                                                   retry_delay=5,
+                                                   socket_timeout=1)
+            connection = pika.BlockingConnection(parameters)
+            assert connection.server_properties['product'] == 'RabbitMQ'
+            self.log.debug('Connect OK')
+            return connection
+        except Exception as e:
+            msg = ('amqp connection failed to {}:{} as '
+                   '{} ({})'.format(host, port, username, str(e)))
+            if fatal:
+                amulet.raise_status(amulet.FAIL, msg)
+            else:
+                self.log.warn(msg)
+                return None
+
+    def publish_amqp_message_by_unit(self, sentry_unit, message,
+                                     queue="test", ssl=False,
+                                     username="testuser1",
+                                     password="changeme",
+                                     port=None):
+        """Publish an amqp message to a rmq juju unit.
+
+        :param sentry_unit: sentry unit pointer
+        :param message: amqp message string
+        :param queue: message queue, default to test
+        :param username: amqp user name, default to testuser1
+        :param password: amqp user password
+        :param ssl: boolean, default to False
+        :param port: amqp port, use defaults if None
+        :returns: None.  Raises exception if publish failed.
+        """
+        self.log.debug('Publishing message to {} queue:\n{}'.format(queue,
+                                                                    message))
+        connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl,
+                                               port=port,
+                                               username=username,
+                                               password=password)
+
+        # NOTE(beisner): extra debug here re: pika hang potential:
+        #   https://github.com/pika/pika/issues/297
+        #   https://groups.google.com/forum/#!topic/rabbitmq-users/Ja0iyfF0Szw
+        self.log.debug('Defining channel...')
+        channel = connection.channel()
+        self.log.debug('Declaring queue...')
+        channel.queue_declare(queue=queue, auto_delete=False, durable=True)
+        self.log.debug('Publishing message...')
+        channel.basic_publish(exchange='', routing_key=queue, body=message)
+        self.log.debug('Closing channel...')
+        channel.close()
+        self.log.debug('Closing connection...')
+        connection.close()
+
+    def get_amqp_message_by_unit(self, sentry_unit, queue="test",
+                                 username="testuser1",
+                                 password="changeme",
+                                 ssl=False, port=None):
+        """Get an amqp message from a rmq juju unit.
+
+        :param sentry_unit: sentry unit pointer
+        :param queue: message queue, default to test
+        :param username: amqp user name, default to testuser1
+        :param password: amqp user password
+        :param ssl: boolean, default to False
+        :param port: amqp port, use defaults if None
+        :returns: amqp message body as string.  Raise if get fails.
+        """
+        connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl,
+                                               port=port,
+                                               username=username,
+                                               password=password)
+        channel = connection.channel()
+        method_frame, _, body = channel.basic_get(queue)
+
+        if method_frame:
+            self.log.debug('Retrieved message from {} queue:\n{}'.format(queue,
+                                                                         body))
+            channel.basic_ack(method_frame.delivery_tag)
+            channel.close()
+            connection.close()
+            return body
+        else:
+            msg = 'No message retrieved.'
+            amulet.raise_status(amulet.FAIL, msg)
|
|||||||
# You should have received a copy of the GNU Lesser General Public License
|
# You should have received a copy of the GNU Lesser General Public License
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
import glob
|
||||||
import json
|
import json
|
||||||
import os
|
import os
|
||||||
import re
|
import re
|
||||||
@ -50,10 +51,13 @@ from charmhelpers.core.sysctl import create as sysctl_create
|
|||||||
from charmhelpers.core.strutils import bool_from_string
|
from charmhelpers.core.strutils import bool_from_string
|
||||||
|
|
||||||
from charmhelpers.core.host import (
|
from charmhelpers.core.host import (
|
||||||
|
get_bond_master,
|
||||||
|
is_phy_iface,
|
||||||
list_nics,
|
list_nics,
|
||||||
get_nic_hwaddr,
|
get_nic_hwaddr,
|
||||||
mkdir,
|
mkdir,
|
||||||
write_file,
|
write_file,
|
||||||
|
pwgen,
|
||||||
)
|
)
|
||||||
from charmhelpers.contrib.hahelpers.cluster import (
|
from charmhelpers.contrib.hahelpers.cluster import (
|
||||||
determine_apache_port,
|
determine_apache_port,
|
||||||
@ -84,6 +88,14 @@ from charmhelpers.contrib.network.ip import (
|
|||||||
is_bridge_member,
|
is_bridge_member,
|
||||||
)
|
)
|
||||||
from charmhelpers.contrib.openstack.utils import get_host_ip
|
from charmhelpers.contrib.openstack.utils import get_host_ip
|
||||||
|
from charmhelpers.core.unitdata import kv
|
||||||
|
|
||||||
|
try:
|
||||||
|
import psutil
|
||||||
|
except ImportError:
|
||||||
|
apt_install('python-psutil', fatal=True)
|
||||||
|
import psutil
|
||||||
|
|
||||||
CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'
|
CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'
|
||||||
ADDRESS_TYPES = ['admin', 'internal', 'public']
|
ADDRESS_TYPES = ['admin', 'internal', 'public']
|
||||||
|
|
||||||
@@ -192,10 +204,50 @@ def config_flags_parser(config_flags):
 class OSContextGenerator(object):
     """Base class for all context generators."""
     interfaces = []
+    related = False
+    complete = False
+    missing_data = []
 
     def __call__(self):
         raise NotImplementedError
 
+    def context_complete(self, ctxt):
+        """Check for missing data for the required context data.
+        Set self.missing_data if it exists and return False.
+        Set self.complete if no missing data and return True.
+        """
+        # Fresh start
+        self.complete = False
+        self.missing_data = []
+        for k, v in six.iteritems(ctxt):
+            if v is None or v == '':
+                if k not in self.missing_data:
+                    self.missing_data.append(k)
+
+        if self.missing_data:
+            self.complete = False
+            log('Missing required data: %s' % ' '.join(self.missing_data), level=INFO)
+        else:
+            self.complete = True
+        return self.complete
+
+    def get_related(self):
+        """Check if any of the context interfaces have relation ids.
+        Set self.related and return True if one of the interfaces
+        has relation ids.
+        """
+        # Fresh start
+        self.related = False
+        try:
+            for interface in self.interfaces:
+                if relation_ids(interface):
+                    self.related = True
+            return self.related
+        except AttributeError as e:
+            log("{} {}"
+                "".format(self, e), 'INFO')
+            return self.related
+
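A minimal sketch of a context generator using the new bookkeeping; the class and interface names here are invented for illustration:

class ExampleContext(OSContextGenerator):
    interfaces = ['example-backend']  # hypothetical relation name

    def __call__(self):
        ctxt = {'backend_host': None}  # unset values land in missing_data
        if not self.context_complete(ctxt):
            # self.complete is False and missing_data names the gaps
            return {}
        return ctxt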
 class SharedDBContext(OSContextGenerator):
     interfaces = ['shared-db']
@@ -211,6 +263,7 @@ class SharedDBContext(OSContextGenerator):
         self.database = database
         self.user = user
         self.ssl_dir = ssl_dir
+        self.rel_name = self.interfaces[0]
 
     def __call__(self):
         self.database = self.database or config('database')
@@ -244,6 +297,7 @@ class SharedDBContext(OSContextGenerator):
             password_setting = self.relation_prefix + '_password'
 
         for rid in relation_ids(self.interfaces[0]):
+            self.related = True
             for unit in related_units(rid):
                 rdata = relation_get(rid=rid, unit=unit)
                 host = rdata.get('db_host')
@@ -255,7 +309,7 @@ class SharedDBContext(OSContextGenerator):
                     'database_password': rdata.get(password_setting),
                     'database_type': 'mysql'
                 }
-                if context_complete(ctxt):
+                if self.context_complete(ctxt):
                     db_ssl(rdata, ctxt, self.ssl_dir)
                     return ctxt
         return {}
@@ -276,6 +330,7 @@ class PostgresqlDBContext(OSContextGenerator):
 
         ctxt = {}
         for rid in relation_ids(self.interfaces[0]):
+            self.related = True
             for unit in related_units(rid):
                 rel_host = relation_get('host', rid=rid, unit=unit)
                 rel_user = relation_get('user', rid=rid, unit=unit)
@@ -285,7 +340,7 @@ class PostgresqlDBContext(OSContextGenerator):
                     'database_user': rel_user,
                     'database_password': rel_passwd,
                     'database_type': 'postgresql'}
-                if context_complete(ctxt):
+                if self.context_complete(ctxt):
                     return ctxt
 
         return {}
@@ -346,6 +401,7 @@ class IdentityServiceContext(OSContextGenerator):
             ctxt['signing_dir'] = cachedir
 
         for rid in relation_ids(self.rel_name):
+            self.related = True
             for unit in related_units(rid):
                 rdata = relation_get(rid=rid, unit=unit)
                 serv_host = rdata.get('service_host')
@@ -354,6 +410,7 @@ class IdentityServiceContext(OSContextGenerator):
                 auth_host = format_ipv6_addr(auth_host) or auth_host
                 svc_protocol = rdata.get('service_protocol') or 'http'
                 auth_protocol = rdata.get('auth_protocol') or 'http'
+                api_version = rdata.get('api_version') or '2.0'
                 ctxt.update({'service_port': rdata.get('service_port'),
                              'service_host': serv_host,
                              'auth_host': auth_host,
@@ -362,9 +419,10 @@ class IdentityServiceContext(OSContextGenerator):
                              'admin_user': rdata.get('service_username'),
                              'admin_password': rdata.get('service_password'),
                              'service_protocol': svc_protocol,
-                             'auth_protocol': auth_protocol})
+                             'auth_protocol': auth_protocol,
+                             'api_version': api_version})
 
-                if context_complete(ctxt):
+                if self.context_complete(ctxt):
                     # NOTE(jamespage) this is required for >= icehouse
                     # so a missing value just indicates keystone needs
                     # upgrading
@@ -403,6 +461,7 @@ class AMQPContext(OSContextGenerator):
         ctxt = {}
         for rid in relation_ids(self.rel_name):
             ha_vip_only = False
+            self.related = True
             for unit in related_units(rid):
                 if relation_get('clustered', rid=rid, unit=unit):
                     ctxt['clustered'] = True
@@ -435,7 +494,7 @@ class AMQPContext(OSContextGenerator):
                     ha_vip_only = relation_get('ha-vip-only',
                                                rid=rid, unit=unit) is not None
 
-                if context_complete(ctxt):
+                if self.context_complete(ctxt):
                     if 'rabbit_ssl_ca' in ctxt:
                         if not self.ssl_dir:
                             log("Charm not setup for ssl support but ssl ca "
@@ -467,7 +526,7 @@ class AMQPContext(OSContextGenerator):
             ctxt['oslo_messaging_flags'] = config_flags_parser(
                 oslo_messaging_flags)
 
-        if not context_complete(ctxt):
+        if not self.complete:
             return {}
 
         return ctxt
@@ -483,13 +542,15 @@ class CephContext(OSContextGenerator):
 
         log('Generating template context for ceph', level=DEBUG)
         mon_hosts = []
-        auth = None
-        key = None
-        use_syslog = str(config('use-syslog')).lower()
+        ctxt = {
+            'use_syslog': str(config('use-syslog')).lower()
+        }
         for rid in relation_ids('ceph'):
             for unit in related_units(rid):
-                auth = relation_get('auth', rid=rid, unit=unit)
-                key = relation_get('key', rid=rid, unit=unit)
+                if not ctxt.get('auth'):
+                    ctxt['auth'] = relation_get('auth', rid=rid, unit=unit)
+                if not ctxt.get('key'):
+                    ctxt['key'] = relation_get('key', rid=rid, unit=unit)
                 ceph_pub_addr = relation_get('ceph-public-address', rid=rid,
                                              unit=unit)
                 unit_priv_addr = relation_get('private-address', rid=rid,
@@ -498,15 +559,12 @@ class CephContext(OSContextGenerator):
                 ceph_addr = format_ipv6_addr(ceph_addr) or ceph_addr
                 mon_hosts.append(ceph_addr)
 
-        ctxt = {'mon_hosts': ' '.join(sorted(mon_hosts)),
-                'auth': auth,
-                'key': key,
-                'use_syslog': use_syslog}
+        ctxt['mon_hosts'] = ' '.join(sorted(mon_hosts))
 
         if not os.path.isdir('/etc/ceph'):
             os.mkdir('/etc/ceph')
 
-        if not context_complete(ctxt):
+        if not self.context_complete(ctxt):
             return {}
 
         ensure_packages(['ceph-common'])
@@ -579,15 +637,28 @@ class HAProxyContext(OSContextGenerator):
         if config('haproxy-client-timeout'):
             ctxt['haproxy_client_timeout'] = config('haproxy-client-timeout')
 
+        if config('haproxy-queue-timeout'):
+            ctxt['haproxy_queue_timeout'] = config('haproxy-queue-timeout')
+
+        if config('haproxy-connect-timeout'):
+            ctxt['haproxy_connect_timeout'] = config('haproxy-connect-timeout')
+
         if config('prefer-ipv6'):
             ctxt['ipv6'] = True
             ctxt['local_host'] = 'ip6-localhost'
             ctxt['haproxy_host'] = '::'
-            ctxt['stat_port'] = ':::8888'
         else:
             ctxt['local_host'] = '127.0.0.1'
             ctxt['haproxy_host'] = '0.0.0.0'
-            ctxt['stat_port'] = ':8888'
+
+        ctxt['stat_port'] = '8888'
+
+        db = kv()
+        ctxt['stat_password'] = db.get('stat-password')
+        if not ctxt['stat_password']:
+            ctxt['stat_password'] = db.set('stat-password',
+                                           pwgen(32))
+            db.flush()
 
         for frontend in cluster_hosts:
             if (len(cluster_hosts[frontend]['backends']) > 1 or
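The stats password handling above leans on the unit-local key/value store, so the generated value survives later hook invocations. In isolation the pattern looks like this (a sketch, not charm code from this commit):

from charmhelpers.core.host import pwgen
from charmhelpers.core.unitdata import kv

db = kv()
password = db.get('stat-password')
if not password:
    # set() stores and returns the value; flush() persists it to disk
    password = db.set('stat-password', pwgen(32))
    db.flush()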
@@ -878,19 +949,6 @@ class NeutronContext(OSContextGenerator):
 
         return calico_ctxt
 
-    def pg_ctxt(self):
-        driver = neutron_plugin_attribute(self.plugin, 'driver',
-                                          self.network_manager)
-        config = neutron_plugin_attribute(self.plugin, 'config',
-                                          self.network_manager)
-        ovs_ctxt = {'core_plugin': driver,
-                    'neutron_plugin': 'plumgrid',
-                    'neutron_security_groups': self.neutron_security_groups,
-                    'local_ip': unit_private_ip(),
-                    'config': config}
-
-        return ovs_ctxt
-
     def neutron_ctxt(self):
         if https():
             proto = 'https'
@@ -906,6 +964,31 @@ class NeutronContext(OSContextGenerator):
                 'neutron_url': '%s://%s:%s' % (proto, host, '9696')}
         return ctxt
 
+    def pg_ctxt(self):
+        driver = neutron_plugin_attribute(self.plugin, 'driver',
+                                          self.network_manager)
+        config = neutron_plugin_attribute(self.plugin, 'config',
+                                          self.network_manager)
+        ovs_ctxt = {'core_plugin': driver,
+                    'neutron_plugin': 'plumgrid',
+                    'neutron_security_groups': self.neutron_security_groups,
+                    'local_ip': unit_private_ip(),
+                    'config': config}
+        return ovs_ctxt
+
+    def midonet_ctxt(self):
+        driver = neutron_plugin_attribute(self.plugin, 'driver',
+                                          self.network_manager)
+        midonet_config = neutron_plugin_attribute(self.plugin, 'config',
+                                                  self.network_manager)
+        mido_ctxt = {'core_plugin': driver,
+                     'neutron_plugin': 'midonet',
+                     'neutron_security_groups': self.neutron_security_groups,
+                     'local_ip': unit_private_ip(),
+                     'config': midonet_config}
+
+        return mido_ctxt
+
     def __call__(self):
         if self.network_manager not in ['quantum', 'neutron']:
             return {}
@@ -927,6 +1010,8 @@ class NeutronContext(OSContextGenerator):
             ctxt.update(self.nuage_ctxt())
         elif self.plugin == 'plumgrid':
             ctxt.update(self.pg_ctxt())
+        elif self.plugin == 'midonet':
+            ctxt.update(self.midonet_ctxt())
 
         alchemy_flags = config('neutron-alchemy-flags')
         if alchemy_flags:
@@ -938,7 +1023,6 @@ class NeutronContext(OSContextGenerator):
 
 
 class NeutronPortContext(OSContextGenerator):
-    NIC_PREFIXES = ['eth', 'bond']
 
     def resolve_ports(self, ports):
         """Resolve NICs not yet bound to bridge(s)
@@ -950,7 +1034,18 @@ class NeutronPortContext(OSContextGenerator):
 
         hwaddr_to_nic = {}
         hwaddr_to_ip = {}
-        for nic in list_nics(self.NIC_PREFIXES):
+        for nic in list_nics():
+            # Ignore virtual interfaces (bond masters will be identified from
+            # their slaves)
+            if not is_phy_iface(nic):
+                continue
+
+            _nic = get_bond_master(nic)
+            if _nic:
+                log("Replacing iface '%s' with bond master '%s'" % (nic, _nic),
+                    level=DEBUG)
+                nic = _nic
+
             hwaddr = get_nic_hwaddr(nic)
             hwaddr_to_nic[hwaddr] = nic
             addresses = get_ipv4_addr(nic, fatal=False)
@@ -976,7 +1071,8 @@ class NeutronPortContext(OSContextGenerator):
                     # trust it to be the real external network).
                     resolved.append(entry)
 
-        return resolved
+        # Ensure no duplicates
+        return list(set(resolved))
 
 
 class OSConfigFlagContext(OSContextGenerator):
@@ -1016,6 +1112,20 @@ class OSConfigFlagContext(OSContextGenerator):
                 config_flags_parser(config_flags)}
 
 
+class LibvirtConfigFlagsContext(OSContextGenerator):
+    """
+    This context provides support for extending
+    the libvirt section through user-defined flags.
+    """
+    def __call__(self):
+        ctxt = {}
+        libvirt_flags = config('libvirt-flags')
+        if libvirt_flags:
+            ctxt['libvirt_flags'] = config_flags_parser(
+                libvirt_flags)
+        return ctxt
+
+
 class SubordinateConfigContext(OSContextGenerator):
 
     """
@@ -1048,7 +1158,7 @@ class SubordinateConfigContext(OSContextGenerator):
 
         ctxt = {
             ... other context ...
-            'subordinate_config': {
+            'subordinate_configuration': {
                 'DEFAULT': {
                     'key1': 'value1',
                 },
@@ -1066,13 +1176,22 @@ class SubordinateConfigContext(OSContextGenerator):
         :param config_file : Service's config file to query sections
         :param interface   : Subordinate interface to inspect
         """
-        self.service = service
         self.config_file = config_file
-        self.interface = interface
+        if isinstance(service, list):
+            self.services = service
+        else:
+            self.services = [service]
+        if isinstance(interface, list):
+            self.interfaces = interface
+        else:
+            self.interfaces = [interface]
 
     def __call__(self):
         ctxt = {'sections': {}}
-        for rid in relation_ids(self.interface):
+        rids = []
+        for interface in self.interfaces:
+            rids.extend(relation_ids(interface))
+        for rid in rids:
             for unit in related_units(rid):
                 sub_config = relation_get('subordinate_configuration',
                                           rid=rid, unit=unit)
@@ -1080,33 +1199,37 @@ class SubordinateConfigContext(OSContextGenerator):
                 try:
                     sub_config = json.loads(sub_config)
                 except:
-                    log('Could not parse JSON from subordinate_config '
-                        'setting from %s' % rid, level=ERROR)
+                    log('Could not parse JSON from '
+                        'subordinate_configuration setting from %s'
+                        % rid, level=ERROR)
                     continue
 
-                if self.service not in sub_config:
-                    log('Found subordinate_config on %s but it contained'
-                        'nothing for %s service' % (rid, self.service),
-                        level=INFO)
-                    continue
-
-                sub_config = sub_config[self.service]
-                if self.config_file not in sub_config:
-                    log('Found subordinate_config on %s but it contained'
-                        'nothing for %s' % (rid, self.config_file),
-                        level=INFO)
-                    continue
-
-                sub_config = sub_config[self.config_file]
-                for k, v in six.iteritems(sub_config):
-                    if k == 'sections':
-                        for section, config_dict in six.iteritems(v):
-                            log("adding section '%s'" % (section),
-                                level=DEBUG)
-                            ctxt[k][section] = config_dict
-                    else:
-                        ctxt[k] = v
+                for service in self.services:
+                    if service not in sub_config:
+                        log('Found subordinate_configuration on %s but it '
+                            'contained nothing for %s service'
+                            % (rid, service), level=INFO)
+                        continue
+
+                    sub_config = sub_config[service]
+                    if self.config_file not in sub_config:
+                        log('Found subordinate_configuration on %s but it '
+                            'contained nothing for %s'
+                            % (rid, self.config_file), level=INFO)
+                        continue
+
+                    sub_config = sub_config[self.config_file]
+                    for k, v in six.iteritems(sub_config):
+                        if k == 'sections':
+                            for section, config_list in six.iteritems(v):
+                                log("adding section '%s'" % (section),
+                                    level=DEBUG)
+                                if ctxt[k].get(section):
+                                    ctxt[k][section].extend(config_list)
+                                else:
+                                    ctxt[k][section] = config_list
+                        else:
+                            ctxt[k] = v
         log("%d section(s) found" % (len(ctxt['sections'])), level=DEBUG)
         return ctxt
 
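With the constructor now accepting lists, one context can merge subordinate configuration for several principal services across several interfaces. A hedged usage sketch; the service, interface, and path names below are illustrative:

ctxt_gen = SubordinateConfigContext(
    service=['neutron-api', 'neutron-plugin-api'],
    config_file='/etc/neutron/neutron.conf',
    interface=['neutron-plugin-api-subordinate'])
sections = ctxt_gen().get('sections', {})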
@@ -1143,13 +1266,11 @@ class WorkerConfigContext(OSContextGenerator):
 
     @property
     def num_cpus(self):
-        try:
-            from psutil import NUM_CPUS
-        except ImportError:
-            apt_install('python-psutil', fatal=True)
-            from psutil import NUM_CPUS
-
-        return NUM_CPUS
+        # NOTE: use cpu_count if present (16.04 support)
+        if hasattr(psutil, 'cpu_count'):
+            return psutil.cpu_count()
+        else:
+            return psutil.NUM_CPUS
 
     def __call__(self):
         multiplier = config('worker-multiplier') or 0
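The num_cpus change is a plain API-compatibility shim: psutil 2.x replaced the NUM_CPUS constant with cpu_count(), and 16.04 ships the newer psutil. Standalone, the guard reads:

import psutil

# Prefer the modern accessor, fall back to the pre-2.0 constant.
if hasattr(psutil, 'cpu_count'):
    cpus = psutil.cpu_count()
else:
    cpus = psutil.NUM_CPUS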
@@ -1283,15 +1404,19 @@ class DataPortContext(NeutronPortContext):
     def __call__(self):
         ports = config('data-port')
         if ports:
+            # Map of {port/mac:bridge}
             portmap = parse_data_port_mappings(ports)
-            ports = portmap.values()
+            ports = portmap.keys()
+            # Resolve provided ports or mac addresses and filter out those
+            # already attached to a bridge.
             resolved = self.resolve_ports(ports)
+            # FIXME: is this necessary?
             normalized = {get_nic_hwaddr(port): port for port in resolved
                           if port not in ports}
             normalized.update({port: port for port in resolved
                                if port in ports})
             if resolved:
-                return {bridge: normalized[port] for bridge, port in
+                return {normalized[port]: bridge for port, bridge in
                         six.iteritems(portmap) if port in normalized.keys()}
 
         return None
@@ -1302,12 +1427,22 @@ class PhyNICMTUContext(DataPortContext):
     def __call__(self):
         ctxt = {}
         mappings = super(PhyNICMTUContext, self).__call__()
-        if mappings and mappings.values():
-            ports = mappings.values()
+        if mappings and mappings.keys():
+            ports = sorted(mappings.keys())
             napi_settings = NeutronAPIContext()()
             mtu = napi_settings.get('network_device_mtu')
+            all_ports = set()
+            # If any of ports is a vlan device, its underlying device must have
+            # mtu applied first.
+            for port in ports:
+                for lport in glob.glob("/sys/class/net/%s/lower_*" % port):
+                    lport = os.path.basename(lport)
+                    all_ports.add(lport.split('_')[1])
+
+            all_ports = list(all_ports)
+            all_ports.extend(ports)
             if mtu:
-                ctxt["devs"] = '\\n'.join(ports)
+                ctxt["devs"] = '\\n'.join(all_ports)
                 ctxt['mtu'] = mtu
 
         return ctxt
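The sysfs walk above finds a vlan device's parent so the parent's MTU can be set first. The same probe in isolation, with a hypothetical device name:

import glob
import os

port = 'eth1.100'  # hypothetical vlan device
parents = set()
for lport in glob.glob("/sys/class/net/%s/lower_*" % port):
    # e.g. /sys/class/net/eth1.100/lower_eth1 -> 'eth1'
    parents.add(os.path.basename(lport).split('_')[1])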
@@ -1338,7 +1473,9 @@ class NetworkServiceContext(OSContextGenerator):
                     rdata.get('service_protocol') or 'http',
                     'auth_protocol':
                     rdata.get('auth_protocol') or 'http',
+                    'api_version':
+                    rdata.get('api_version') or '2.0',
                 }
-                if context_complete(ctxt):
+                if self.context_complete(ctxt):
                     return ctxt
 
         return {}
@@ -50,7 +50,7 @@ def determine_dkms_package():
     if kernel_version() >= (3, 13):
         return []
     else:
-        return ['openvswitch-datapath-dkms']
+        return [headers_package(), 'openvswitch-datapath-dkms']
 
 
 # legacy
@@ -70,7 +70,7 @@ def quantum_plugins():
                            relation_prefix='neutron',
                            ssl_dir=QUANTUM_CONF_DIR)],
             'services': ['quantum-plugin-openvswitch-agent'],
-            'packages': [[headers_package()] + determine_dkms_package(),
+            'packages': [determine_dkms_package(),
                          ['quantum-plugin-openvswitch-agent']],
             'server_packages': ['quantum-server',
                                 'quantum-plugin-openvswitch'],
@@ -111,7 +111,7 @@ def neutron_plugins():
                            relation_prefix='neutron',
                            ssl_dir=NEUTRON_CONF_DIR)],
             'services': ['neutron-plugin-openvswitch-agent'],
-            'packages': [[headers_package()] + determine_dkms_package(),
+            'packages': [determine_dkms_package(),
                          ['neutron-plugin-openvswitch-agent']],
             'server_packages': ['neutron-server',
                                 'neutron-plugin-openvswitch'],
@@ -155,7 +155,7 @@ def neutron_plugins():
                            relation_prefix='neutron',
                            ssl_dir=NEUTRON_CONF_DIR)],
             'services': [],
-            'packages': [[headers_package()] + determine_dkms_package(),
+            'packages': [determine_dkms_package(),
                          ['neutron-plugin-cisco']],
             'server_packages': ['neutron-server',
                                 'neutron-plugin-cisco'],
@@ -174,7 +174,7 @@ def neutron_plugins():
                          'neutron-dhcp-agent',
                          'nova-api-metadata',
                          'etcd'],
-            'packages': [[headers_package()] + determine_dkms_package(),
+            'packages': [determine_dkms_package(),
                          ['calico-compute',
                           'bird',
                           'neutron-dhcp-agent',
@@ -204,11 +204,25 @@ def neutron_plugins():
                            database=config('database'),
                            ssl_dir=NEUTRON_CONF_DIR)],
             'services': [],
-            'packages': [['plumgrid-lxc'],
-                         ['iovisor-dkms']],
+            'packages': ['plumgrid-lxc',
+                         'iovisor-dkms'],
             'server_packages': ['neutron-server',
                                 'neutron-plugin-plumgrid'],
             'server_services': ['neutron-server']
+        },
+        'midonet': {
+            'config': '/etc/neutron/plugins/midonet/midonet.ini',
+            'driver': 'midonet.neutron.plugin.MidonetPluginV2',
+            'contexts': [
+                context.SharedDBContext(user=config('neutron-database-user'),
+                                        database=config('neutron-database'),
+                                        relation_prefix='neutron',
+                                        ssl_dir=NEUTRON_CONF_DIR)],
+            'services': [],
+            'packages': [determine_dkms_package()],
+            'server_packages': ['neutron-server',
+                                'python-neutron-plugin-midonet'],
+            'server_services': ['neutron-server']
         }
     }
     if release >= 'icehouse':
@@ -219,6 +233,16 @@ def neutron_plugins():
                                       'neutron-plugin-ml2']
         # NOTE: patch in vmware renames nvp->nsx for icehouse onwards
         plugins['nvp'] = plugins['nsx']
+    if release >= 'kilo':
+        plugins['midonet']['driver'] = (
+            'neutron.plugins.midonet.plugin.MidonetPluginV2')
+    if release >= 'liberty':
+        plugins['midonet']['driver'] = (
+            'midonet.neutron.plugin_v1.MidonetPluginV2')
+        plugins['midonet']['server_packages'].remove(
+            'python-neutron-plugin-midonet')
+        plugins['midonet']['server_packages'].append(
+            'python-networking-midonet')
     return plugins
@@ -269,17 +293,30 @@ def network_manager():
         return 'neutron'
 
 
-def parse_mappings(mappings):
+def parse_mappings(mappings, key_rvalue=False):
+    """By default mappings are lvalue keyed.
+
+    If key_rvalue is True, the mapping will be reversed to allow multiple
+    configs for the same lvalue.
+    """
     parsed = {}
     if mappings:
         mappings = mappings.split()
         for m in mappings:
             p = m.partition(':')
-            key = p[0].strip()
-            if p[1]:
-                parsed[key] = p[2].strip()
+
+            if key_rvalue:
+                key_index = 2
+                val_index = 0
+                # if there is no rvalue skip to next
+                if not p[1]:
+                    continue
             else:
-                parsed[key] = ''
+                key_index = 0
+                val_index = 2
+
+            key = p[key_index].strip()
+            parsed[key] = p[val_index].strip()
 
     return parsed
 
@@ -297,25 +334,25 @@ def parse_bridge_mappings(mappings):
 def parse_data_port_mappings(mappings, default_bridge='br-data'):
     """Parse data port mappings.
 
-    Mappings must be a space-delimited list of bridge:port mappings.
+    Mappings must be a space-delimited list of bridge:port.
 
-    Returns dict of the form {bridge:port}.
+    Returns dict of the form {port:bridge} where ports may be mac addresses or
+    interface names.
     """
-    _mappings = parse_mappings(mappings)
+
+    # NOTE(dosaboy): we use rvalue for key to allow multiple values to be
+    # proposed for <port> since it may be a mac address which will differ
+    # across units, thus allowing first-known-good to be chosen.
+    _mappings = parse_mappings(mappings, key_rvalue=True)
     if not _mappings or list(_mappings.values()) == ['']:
         if not mappings:
            return {}
 
         # For backwards-compatibility we need to support port-only provided in
         # config.
-        _mappings = {default_bridge: mappings.split()[0]}
+        _mappings = {mappings.split()[0]: default_bridge}
 
-    bridges = _mappings.keys()
-    ports = _mappings.values()
-    if len(set(bridges)) != len(bridges):
-        raise Exception("It is not allowed to have more than one port "
-                        "configured on the same bridge")
-
+    ports = _mappings.keys()
     if len(set(ports)) != len(ports):
         raise Exception("It is not allowed to have the same port configured "
                         "on more than one bridge")
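The key reversal matters because the same bridge may legitimately appear against a different MAC on every unit. Assuming the synced module is importable, the new orientation behaves like this:

from charmhelpers.contrib.openstack.neutron import parse_data_port_mappings

# Ports (interface names or MAC addresses) become the keys,
# bridges the values.
print(parse_data_port_mappings('br-data:eth1'))
# {'eth1': 'br-data'}
print(parse_data_port_mappings('br-ex:aa:bb:cc:dd:ee:01 br-data:eth1'))
# {'aa:bb:cc:dd:ee:01': 'br-ex', 'eth1': 'br-data'}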
@ -18,7 +18,7 @@ import os
|
|||||||
|
|
||||||
import six
|
import six
|
||||||
|
|
||||||
from charmhelpers.fetch import apt_install
|
from charmhelpers.fetch import apt_install, apt_update
|
||||||
from charmhelpers.core.hookenv import (
|
from charmhelpers.core.hookenv import (
|
||||||
log,
|
log,
|
||||||
ERROR,
|
ERROR,
|
||||||
@ -29,6 +29,7 @@ from charmhelpers.contrib.openstack.utils import OPENSTACK_CODENAMES
|
|||||||
try:
|
try:
|
||||||
from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions
|
from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions
|
||||||
except ImportError:
|
except ImportError:
|
||||||
|
apt_update(fatal=True)
|
||||||
apt_install('python-jinja2', fatal=True)
|
apt_install('python-jinja2', fatal=True)
|
||||||
from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions
|
from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions
|
||||||
|
|
||||||
@@ -112,7 +113,7 @@ class OSConfigTemplate(object):
 
     def complete_contexts(self):
         '''
-        Return a list of interfaces that have atisfied contexts.
+        Return a list of interfaces that have satisfied contexts.
         '''
         if self._complete_contexts:
             return self._complete_contexts
@@ -293,3 +294,30 @@ class OSConfigRenderer(object):
         [interfaces.extend(i.complete_contexts())
          for i in six.itervalues(self.templates)]
         return interfaces
+
+    def get_incomplete_context_data(self, interfaces):
+        '''
+        Return dictionary of relation status of interfaces and any missing
+        required context data. Example:
+            {'amqp': {'missing_data': ['rabbitmq_password'], 'related': True},
+             'zeromq-configuration': {'related': False}}
+        '''
+        incomplete_context_data = {}
+
+        for i in six.itervalues(self.templates):
+            for context in i.contexts:
+                for interface in interfaces:
+                    related = False
+                    if interface in context.interfaces:
+                        related = context.get_related()
+                        missing_data = context.missing_data
+                        if missing_data:
+                            incomplete_context_data[interface] = {'missing_data': missing_data}
+                        if related:
+                            if incomplete_context_data.get(interface):
+                                incomplete_context_data[interface].update({'related': True})
+                            else:
+                                incomplete_context_data[interface] = {'related': True}
+                        else:
+                            incomplete_context_data[interface] = {'related': False}
+        return incomplete_context_data
@@ -1,5 +1,3 @@
-#!/usr/bin/python
-
 # Copyright 2014-2015 Canonical Limited.
 #
 # This file is part of charm-helpers.
@@ -24,8 +22,13 @@ import subprocess
 import json
 import os
 import sys
+import re
+import itertools
 
 import six
+import tempfile
+import traceback
+import uuid
 import yaml
 
 from charmhelpers.contrib.network import ip
@@ -35,12 +38,18 @@ from charmhelpers.core import (
 )
 
 from charmhelpers.core.hookenv import (
+    action_fail,
+    action_set,
     config,
     log as juju_log,
     charm_dir,
+    DEBUG,
     INFO,
+    related_units,
     relation_ids,
-    relation_set
+    relation_set,
+    status_set,
+    hook_name
 )
 
 from charmhelpers.contrib.storage.linux.lvm import (
@@ -50,7 +59,9 @@ from charmhelpers.contrib.storage.linux.lvm import (
 )
 
 from charmhelpers.contrib.network.ip import (
-    get_ipv6_addr
+    get_ipv6_addr,
+    is_ipv6,
+    port_has_listener,
 )
 
 from charmhelpers.contrib.python.packages import (
@@ -58,7 +69,7 @@ from charmhelpers.contrib.python.packages import (
     pip_install,
 )
 
-from charmhelpers.core.host import lsb_release, mounts, umount
+from charmhelpers.core.host import lsb_release, mounts, umount, service_running
 from charmhelpers.fetch import apt_install, apt_cache, install_remote
 from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk
 from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device
@@ -69,7 +80,6 @@ CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA'
 DISTRO_PROPOSED = ('deb http://archive.ubuntu.com/ubuntu/ %s-proposed '
                    'restricted main multiverse universe')
 
-
 UBUNTU_OPENSTACK_RELEASE = OrderedDict([
     ('oneiric', 'diablo'),
     ('precise', 'essex'),
@@ -80,6 +90,7 @@ UBUNTU_OPENSTACK_RELEASE = OrderedDict([
     ('utopic', 'juno'),
     ('vivid', 'kilo'),
     ('wily', 'liberty'),
+    ('xenial', 'mitaka'),
 ])
@@ -93,31 +104,73 @@ OPENSTACK_CODENAMES = OrderedDict([
     ('2014.2', 'juno'),
     ('2015.1', 'kilo'),
     ('2015.2', 'liberty'),
+    ('2016.1', 'mitaka'),
 ])
 
-# The ugly duckling
+# The ugly duckling - must list releases oldest to newest
 SWIFT_CODENAMES = OrderedDict([
-    ('1.4.3', 'diablo'),
-    ('1.4.8', 'essex'),
-    ('1.7.4', 'folsom'),
-    ('1.8.0', 'grizzly'),
-    ('1.7.7', 'grizzly'),
-    ('1.7.6', 'grizzly'),
-    ('1.10.0', 'havana'),
-    ('1.9.1', 'havana'),
-    ('1.9.0', 'havana'),
-    ('1.13.1', 'icehouse'),
-    ('1.13.0', 'icehouse'),
-    ('1.12.0', 'icehouse'),
-    ('1.11.0', 'icehouse'),
-    ('2.0.0', 'juno'),
-    ('2.1.0', 'juno'),
-    ('2.2.0', 'juno'),
-    ('2.2.1', 'kilo'),
-    ('2.2.2', 'kilo'),
-    ('2.3.0', 'liberty'),
+    ('diablo',
+     ['1.4.3']),
+    ('essex',
+     ['1.4.8']),
+    ('folsom',
+     ['1.7.4']),
+    ('grizzly',
+     ['1.7.6', '1.7.7', '1.8.0']),
+    ('havana',
+     ['1.9.0', '1.9.1', '1.10.0']),
+    ('icehouse',
+     ['1.11.0', '1.12.0', '1.13.0', '1.13.1']),
+    ('juno',
+     ['2.0.0', '2.1.0', '2.2.0']),
+    ('kilo',
+     ['2.2.1', '2.2.2']),
+    ('liberty',
+     ['2.3.0', '2.4.0', '2.5.0']),
+    ('mitaka',
+     ['2.5.0']),
 ])
 
+# >= Liberty version->codename mapping
+PACKAGE_CODENAMES = {
+    'nova-common': OrderedDict([
+        ('12.0', 'liberty'),
+        ('13.0', 'mitaka'),
+    ]),
+    'neutron-common': OrderedDict([
+        ('7.0', 'liberty'),
+        ('8.0', 'mitaka'),
+    ]),
+    'cinder-common': OrderedDict([
+        ('7.0', 'liberty'),
+        ('8.0', 'mitaka'),
+    ]),
+    'keystone': OrderedDict([
+        ('8.0', 'liberty'),
+        ('9.0', 'mitaka'),
+    ]),
+    'horizon-common': OrderedDict([
+        ('8.0', 'liberty'),
+        ('9.0', 'mitaka'),
+    ]),
+    'ceilometer-common': OrderedDict([
+        ('5.0', 'liberty'),
+        ('6.0', 'mitaka'),
+    ]),
+    'heat-common': OrderedDict([
+        ('5.0', 'liberty'),
+        ('6.0', 'mitaka'),
+    ]),
+    'glance-common': OrderedDict([
+        ('11.0', 'liberty'),
+        ('12.0', 'mitaka'),
+    ]),
+    'openstack-dashboard': OrderedDict([
+        ('8.0', 'liberty'),
+        ('9.0', 'mitaka'),
+    ]),
+}
+
 DEFAULT_LOOPBACK_SIZE = '5G'
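Lookup sketch (illustrative): each swift release now maps to a list of
versions, newest last, while PACKAGE_CODENAMES keys per-package major
versions directly:

    SWIFT_CODENAMES['liberty'][-1]            # => '2.5.0' (newest for release)
    PACKAGE_CODENAMES['nova-common']['12.0']  # => 'liberty'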
@@ -167,9 +220,9 @@ def get_os_codename_version(vers):
     error_out(e)
 
 
-def get_os_version_codename(codename):
+def get_os_version_codename(codename, version_map=OPENSTACK_CODENAMES):
     '''Determine OpenStack version number from codename.'''
-    for k, v in six.iteritems(OPENSTACK_CODENAMES):
+    for k, v in six.iteritems(version_map):
         if v == codename:
             return k
     e = 'Could not derive OpenStack version for '\
@@ -177,6 +230,33 @@ def get_os_version_codename(codename):
     error_out(e)
 
 
+def get_os_version_codename_swift(codename):
+    '''Determine OpenStack version number of swift from codename.'''
+    for k, v in six.iteritems(SWIFT_CODENAMES):
+        if k == codename:
+            return v[-1]
+    e = 'Could not derive swift version for '\
+        'codename: %s' % codename
+    error_out(e)
+
+
+def get_swift_codename(version):
+    '''Determine OpenStack codename that corresponds to swift version.'''
+    codenames = [k for k, v in six.iteritems(SWIFT_CODENAMES) if version in v]
+    if len(codenames) > 1:
+        # If more than one release codename contains this version we determine
+        # the actual codename based on the highest available install source.
+        for codename in reversed(codenames):
+            releases = UBUNTU_OPENSTACK_RELEASE
+            release = [k for k, v in six.iteritems(releases) if codename in v]
+            ret = subprocess.check_output(['apt-cache', 'policy', 'swift'])
+            if codename in ret or release[0] in ret:
+                return codename
+    elif len(codenames) == 1:
+        return codenames[0]
+    return None
+
+
 def get_os_codename_package(package, fatal=True):
     '''Derive OpenStack release codename from an installed package.'''
     import apt_pkg as apt
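Resolution sketch (illustrative): a version that appears under only one
release resolves directly; an ambiguous one such as '2.5.0' (listed under
both liberty and mitaka) is decided by inspecting `apt-cache policy swift`
for the highest visible install source:

    get_swift_codename('2.3.0')   # => 'liberty' (single match)
    get_swift_codename('2.5.0')   # => 'liberty' or 'mitaka', per apt sources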
@@ -201,20 +281,33 @@ def get_os_codename_package(package, fatal=True):
         error_out(e)
 
     vers = apt.upstream_version(pkg.current_ver.ver_str)
+    if 'swift' in pkg.name:
+        # Fully x.y.z match for swift versions
+        match = re.match('^(\d+)\.(\d+)\.(\d+)', vers)
+    else:
+        # x.y match only for 20XX.X
+        # and ignore patch level for other packages
+        match = re.match('^(\d+)\.(\d+)', vers)
 
-    try:
-        if 'swift' in pkg.name:
-            swift_vers = vers[:5]
-            if swift_vers not in SWIFT_CODENAMES:
-                # Deal with 1.10.0 upward
-                swift_vers = vers[:6]
-            return SWIFT_CODENAMES[swift_vers]
-        else:
-            vers = vers[:6]
-            return OPENSTACK_CODENAMES[vers]
-    except KeyError:
-        e = 'Could not determine OpenStack codename for version %s' % vers
-        error_out(e)
+    if match:
+        vers = match.group(0)
+
+    # >= Liberty independent project versions
+    if (package in PACKAGE_CODENAMES and
+            vers in PACKAGE_CODENAMES[package]):
+        return PACKAGE_CODENAMES[package][vers]
+    else:
+        # < Liberty co-ordinated project versions
+        try:
+            if 'swift' in pkg.name:
+                return get_swift_codename(vers)
+            else:
+                return OPENSTACK_CODENAMES[vers]
+        except KeyError:
+            if not fatal:
+                return None
+            e = 'Could not determine OpenStack codename for version %s' % vers
+            error_out(e)
 
 
 def get_os_version_package(pkg, fatal=True):
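Matching sketch (illustrative values): the regexes trim the upstream version
before the table lookups, e.g. for a hypothetical nova-common 12.0.0 package:

    re.match('^(\d+)\.(\d+)', '12.0.0').group(0)        # => '12.0'
    # PACKAGE_CODENAMES['nova-common']['12.0']          # => 'liberty'
    re.match('^(\d+)\.(\d+)\.(\d+)', '2.5.0').group(0)  # => '2.5.0' (swift)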
@@ -226,12 +319,14 @@ def get_os_version_package(pkg, fatal=True):
 
     if 'swift' in pkg:
         vers_map = SWIFT_CODENAMES
+        for cname, version in six.iteritems(vers_map):
+            if cname == codename:
+                return version[-1]
     else:
         vers_map = OPENSTACK_CODENAMES
-
-    for version, cname in six.iteritems(vers_map):
-        if cname == codename:
-            return version
+        for version, cname in six.iteritems(vers_map):
+            if cname == codename:
+                return version
     # e = "Could not determine OpenStack version for package: %s" % pkg
     # error_out(e)
@@ -256,12 +351,42 @@ def os_release(package, base='essex'):
 
 
 def import_key(keyid):
-    cmd = "apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 " \
-          "--recv-keys %s" % keyid
-    try:
-        subprocess.check_call(cmd.split(' '))
-    except subprocess.CalledProcessError:
-        error_out("Error importing repo key %s" % keyid)
+    key = keyid.strip()
+    if (key.startswith('-----BEGIN PGP PUBLIC KEY BLOCK-----') and
+            key.endswith('-----END PGP PUBLIC KEY BLOCK-----')):
+        juju_log("PGP key found (looks like ASCII Armor format)", level=DEBUG)
+        juju_log("Importing ASCII Armor PGP key", level=DEBUG)
+        with tempfile.NamedTemporaryFile() as keyfile:
+            with open(keyfile.name, 'w') as fd:
+                fd.write(key)
+                fd.write("\n")
+
+            cmd = ['apt-key', 'add', keyfile.name]
+            try:
+                subprocess.check_call(cmd)
+            except subprocess.CalledProcessError:
+                error_out("Error importing PGP key '%s'" % key)
+    else:
+        juju_log("PGP key found (looks like Radix64 format)", level=DEBUG)
+        juju_log("Importing PGP key from keyserver", level=DEBUG)
+        cmd = ['apt-key', 'adv', '--keyserver',
+               'hkp://keyserver.ubuntu.com:80', '--recv-keys', key]
+        try:
+            subprocess.check_call(cmd)
+        except subprocess.CalledProcessError:
+            error_out("Error importing PGP key '%s'" % key)
+
+
+def get_source_and_pgp_key(input):
+    """Look for a pgp key ID or ascii-armor key in the given input."""
+    index = input.strip()
+    index = input.rfind('|')
+    if index < 0:
+        return input, None
+
+    key = input[index + 1:].strip('|')
+    source = input[:index]
+    return source, key
 
 
 def configure_installation_source(rel):
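Splitting sketch (source strings are illustrative): the helper splits on the
last '|' and returns the embedded key, or None when there is no key portion:

    get_source_and_pgp_key('deb http://host/repo trusty main|KEYID')
    # => ('deb http://host/repo trusty main', 'KEYID')
    get_source_and_pgp_key('ppa:example/ppa')
    # => ('ppa:example/ppa', None)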
@@ -273,16 +398,16 @@ def configure_installation_source(rel):
         with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f:
             f.write(DISTRO_PROPOSED % ubuntu_rel)
     elif rel[:4] == "ppa:":
-        src = rel
+        src, key = get_source_and_pgp_key(rel)
+        if key:
+            import_key(key)
+
         subprocess.check_call(["add-apt-repository", "-y", src])
     elif rel[:3] == "deb":
-        l = len(rel.split('|'))
-        if l == 2:
-            src, key = rel.split('|')
-            juju_log("Importing PPA key from keyserver for %s" % src)
-            import_key(key)
-        elif l == 1:
-            src = rel
+        src, key = get_source_and_pgp_key(rel)
+        if key:
+            import_key(key)
+
         with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f:
             f.write(src)
     elif rel[:6] == 'cloud:':
@@ -327,6 +452,9 @@ def configure_installation_source(rel):
         'liberty': 'trusty-updates/liberty',
         'liberty/updates': 'trusty-updates/liberty',
         'liberty/proposed': 'trusty-proposed/liberty',
+        'mitaka': 'trusty-updates/mitaka',
+        'mitaka/updates': 'trusty-updates/mitaka',
+        'mitaka/proposed': 'trusty-proposed/mitaka',
     }
 
     try:
@@ -392,9 +520,18 @@ def openstack_upgrade_available(package):
     import apt_pkg as apt
     src = config('openstack-origin')
     cur_vers = get_os_version_package(package)
-    available_vers = get_os_version_install_source(src)
+    if "swift" in package:
+        codename = get_os_codename_install_source(src)
+        avail_vers = get_os_version_codename_swift(codename)
+    else:
+        avail_vers = get_os_version_install_source(src)
     apt.init()
-    return apt.version_compare(available_vers, cur_vers) == 1
+    if "swift" in package:
+        major_cur_vers = cur_vers.split('.', 1)[0]
+        major_avail_vers = avail_vers.split('.', 1)[0]
+        major_diff = apt.version_compare(major_avail_vers, major_cur_vers)
+        return avail_vers > cur_vers and (major_diff == 1 or major_diff == 0)
+    return apt.version_compare(avail_vers, cur_vers) == 1
 
 
 def ensure_block_device(block_device):
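Comparison sketch (illustrative): for swift the check only reports an upgrade
within the same or the next major version, e.g. kilo (2.2.2) to liberty
(2.5.0):

    # cur_vers='2.2.2', avail_vers='2.5.0'
    # major versions: '2' vs '2'  => major_diff == 0
    # '2.5.0' > '2.2.2'           => upgrade reported as available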
@@ -469,6 +606,12 @@ def sync_db_with_multi_ipv6_addresses(database, database_user,
                                       relation_prefix=None):
     hosts = get_ipv6_addr(dynamic_only=False)
 
+    if config('vip'):
+        vips = config('vip').split()
+        for vip in vips:
+            if vip and is_ipv6(vip):
+                hosts.append(vip)
+
     kwargs = {'database': database,
               'username': database_user,
               'hostname': json.dumps(hosts)}
@@ -517,7 +660,7 @@ def _git_yaml_load(projects_yaml):
     return yaml.load(projects_yaml)
 
 
-def git_clone_and_install(projects_yaml, core_project, depth=1):
+def git_clone_and_install(projects_yaml, core_project):
     """
     Clone/install all specified OpenStack repositories.
 
@@ -567,6 +710,9 @@ def git_clone_and_install(projects_yaml, core_project):
     for p in projects['repositories']:
         repo = p['repository']
         branch = p['branch']
+        depth = '1'
+        if 'depth' in p.keys():
+            depth = p['depth']
         if p['name'] == 'requirements':
             repo_dir = _git_clone_and_install_single(repo, branch, depth,
                                                      parent_dir, http_proxy,
@@ -611,19 +757,13 @@ def _git_clone_and_install_single(repo, branch, depth, parent_dir, http_proxy,
     """
     Clone and install a single git repository.
     """
-    dest_dir = os.path.join(parent_dir, os.path.basename(repo))
-
     if not os.path.exists(parent_dir):
         juju_log('Directory already exists at {}. '
                  'No need to create directory.'.format(parent_dir))
         os.mkdir(parent_dir)
 
-    if not os.path.exists(dest_dir):
-        juju_log('Cloning git repo: {}, branch: {}'.format(repo, branch))
-        repo_dir = install_remote(repo, dest=parent_dir, branch=branch,
-                                  depth=depth)
-    else:
-        repo_dir = dest_dir
+    juju_log('Cloning git repo: {}, branch: {}'.format(repo, branch))
+    repo_dir = install_remote(repo, dest=parent_dir, branch=branch, depth=depth)
 
     venv = os.path.join(parent_dir, 'venv')
@@ -704,3 +844,304 @@ def git_yaml_value(projects_yaml, key):
         return projects[key]
 
     return None
+
+
+def os_workload_status(configs, required_interfaces, charm_func=None):
+    """
+    Decorator to set workload status based on complete contexts
+    """
+    def wrap(f):
+        @wraps(f)
+        def wrapped_f(*args, **kwargs):
+            # Run the original function first
+            f(*args, **kwargs)
+            # Set workload status now that contexts have been
+            # acted on
+            set_os_workload_status(configs, required_interfaces, charm_func)
+        return wrapped_f
+    return wrap
+
+
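Usage sketch (charm-side names such as CONFIGS, REQUIRED_INTERFACES and
check_optional_relations are illustrative):

    @os_workload_status(CONFIGS, REQUIRED_INTERFACES,
                        charm_func=check_optional_relations)
    def config_changed():
        ...  # contexts are rendered first, then workload status is set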
+def set_os_workload_status(configs, required_interfaces, charm_func=None, services=None, ports=None):
+    """
+    Set workload status based on complete contexts.
+    status-set missing or incomplete contexts
+    and juju-log details of missing required data.
+    charm_func is a charm specific function to run checking
+    for charm specific requirements such as a VIP setting.
+
+    This function also checks for whether the services defined are ACTUALLY
+    running and that the ports they advertise are open and being listened to.
+
+    @param services - OPTIONAL: a [{'service': <string>, 'ports': [<int>]}]
+                      The ports are optional.
+                      If services is a [<string>] then ports are ignored.
+    @param ports - OPTIONAL: an [<int>] representing ports that should be
+                   open.
+    @returns None
+    """
+    incomplete_rel_data = incomplete_relation_data(configs, required_interfaces)
+    state = 'active'
+    missing_relations = []
+    incomplete_relations = []
+    message = None
+    charm_state = None
+    charm_message = None
+
+    for generic_interface in incomplete_rel_data.keys():
+        related_interface = None
+        missing_data = {}
+        # Related or not?
+        for interface in incomplete_rel_data[generic_interface]:
+            if incomplete_rel_data[generic_interface][interface].get('related'):
+                related_interface = interface
+                missing_data = incomplete_rel_data[generic_interface][interface].get('missing_data')
+        # No relation ID for the generic_interface
+        if not related_interface:
+            juju_log("{} relation is missing and must be related for "
+                     "functionality. ".format(generic_interface), 'WARN')
+            state = 'blocked'
+            if generic_interface not in missing_relations:
+                missing_relations.append(generic_interface)
+        else:
+            # Relation ID exists but no related unit
+            if not missing_data:
+                # Edge case - relation ID exists but departing
+                if ('departed' in hook_name() or 'broken' in hook_name()) \
+                        and related_interface in hook_name():
+                    state = 'blocked'
+                    if generic_interface not in missing_relations:
+                        missing_relations.append(generic_interface)
+                    juju_log("{} relation's interface, {}, "
+                             "relationship is departed or broken "
+                             "and is required for functionality."
+                             "".format(generic_interface, related_interface), "WARN")
+                # Normal case - relation ID exists but no related unit
+                # (joining)
+                else:
+                    juju_log("{} relation's interface, {}, is related but has "
+                             "no units in the relation."
+                             "".format(generic_interface, related_interface), "INFO")
+            # Related unit exists and data missing on the relation
+            else:
+                juju_log("{} relation's interface, {}, is related awaiting "
+                         "the following data from the relationship: {}. "
+                         "".format(generic_interface, related_interface,
+                                   ", ".join(missing_data)), "INFO")
+            if state != 'blocked':
+                state = 'waiting'
+            if generic_interface not in incomplete_relations \
+                    and generic_interface not in missing_relations:
+                incomplete_relations.append(generic_interface)
+
+    if missing_relations:
+        message = "Missing relations: {}".format(", ".join(missing_relations))
+        if incomplete_relations:
+            message += "; incomplete relations: {}" \
+                       "".format(", ".join(incomplete_relations))
+        state = 'blocked'
+    elif incomplete_relations:
+        message = "Incomplete relations: {}" \
+                  "".format(", ".join(incomplete_relations))
+        state = 'waiting'
+
+    # Run charm specific checks
+    if charm_func:
+        charm_state, charm_message = charm_func(configs)
+        if charm_state != 'active' and charm_state != 'unknown':
+            state = workload_state_compare(state, charm_state)
+            if message:
+                charm_message = charm_message.replace("Incomplete relations: ",
+                                                      "")
+                message = "{}, {}".format(message, charm_message)
+            else:
+                message = charm_message
+
+    # If the charm thinks the unit is active, check that the actual services
+    # really are active.
+    if services is not None and state == 'active':
+        # if we're passed the dict() then just grab the values as a list.
+        if isinstance(services, dict):
+            services = services.values()
+        # either extract the list of services from the dictionary, or if
+        # it is a simple string, use that. i.e. works with mixed lists.
+        _s = []
+        for s in services:
+            if isinstance(s, dict) and 'service' in s:
+                _s.append(s['service'])
+            if isinstance(s, str):
+                _s.append(s)
+        services_running = [service_running(s) for s in _s]
+        if not all(services_running):
+            not_running = [s for s, running in zip(_s, services_running)
+                           if not running]
+            message = ("Services not running that should be: {}"
+                       .format(", ".join(not_running)))
+            state = 'blocked'
+        # also verify that the ports that should be open are open
+        # NB, that ServiceManager objects only OPTIONALLY have ports
+        port_map = OrderedDict([(s['service'], s['ports'])
+                                for s in services if 'ports' in s])
+        if state == 'active' and port_map:
+            all_ports = list(itertools.chain(*port_map.values()))
+            ports_open = [port_has_listener('0.0.0.0', p)
+                          for p in all_ports]
+            if not all(ports_open):
+                not_opened = [p for p, opened in zip(all_ports, ports_open)
+                              if not opened]
+                map_not_open = OrderedDict()
+                for service, ports in port_map.items():
+                    closed_ports = set(ports).intersection(not_opened)
+                    if closed_ports:
+                        map_not_open[service] = closed_ports
+                # find which service has missing ports. They are in service
+                # order which makes it a bit easier.
+                message = (
+                    "Services with ports not open that should be: {}"
+                    .format(
+                        ", ".join([
+                            "{}: [{}]".format(
+                                service,
+                                ", ".join([str(v) for v in ports]))
+                            for service, ports in map_not_open.items()])))
+                state = 'blocked'
+
+    if ports is not None and state == 'active':
+        # and we can also check ports which we don't know the service for
+        ports_open = [port_has_listener('0.0.0.0', p) for p in ports]
+        if not all(ports_open):
+            message = (
+                "Ports which should be open, but are not: {}"
+                .format(", ".join([str(p) for p, v in zip(ports, ports_open)
+                                   if not v])))
+            state = 'blocked'
+
+    # Set to active if all requirements have been met
+    if state == 'active':
+        message = "Unit is ready"
+        juju_log(message, "INFO")
+
+    status_set(state, message)
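Invocation sketch (interface, service and port names are illustrative): a
charm with messaging and identity requirements, one managed service and one
advertised port might call:

    set_os_workload_status(
        CONFIGS,
        {'messaging': ['amqp'], 'identity': ['identity-service']},
        services=[{'service': 'neutron-server', 'ports': [9696]}],
        ports=[9696])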
+
+
+def workload_state_compare(current_workload_state, workload_state):
+    """ Return highest priority of two states"""
+    hierarchy = {'unknown': -1,
+                 'active': 0,
+                 'maintenance': 1,
+                 'waiting': 2,
+                 'blocked': 3,
+                 }
+
+    if hierarchy.get(workload_state) is None:
+        workload_state = 'unknown'
+    if hierarchy.get(current_workload_state) is None:
+        current_workload_state = 'unknown'
+
+    # Set workload_state based on hierarchy of statuses
+    if hierarchy.get(current_workload_state) > hierarchy.get(workload_state):
+        return current_workload_state
+    else:
+        return workload_state
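Comparison sketch: unrecognised states collapse to 'unknown' and the
higher-priority state wins:

    workload_state_compare('waiting', 'blocked')  # => 'blocked'
    workload_state_compare('active', 'bogus')     # => 'active'
    # ('bogus' is treated as 'unknown', which ranks below 'active')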
+
+
+def incomplete_relation_data(configs, required_interfaces):
+    """
+    Check complete contexts against required_interfaces
+    Return dictionary of incomplete relation data.
+
+    configs is an OSConfigRenderer object with configs registered
+
+    required_interfaces is a dictionary of required general interfaces
+    with dictionary values of possible specific interfaces.
+    Example:
+    required_interfaces = {'database': ['shared-db', 'pgsql-db']}
+
+    The interface is said to be satisfied if any one of the interfaces in
+    the list has a complete context.
+
+    Return dictionary of incomplete or missing required contexts with relation
+    status of interfaces and any missing data points. Example:
+        {'message':
+             {'amqp': {'missing_data': ['rabbitmq_password'], 'related': True},
+              'zeromq-configuration': {'related': False}},
+         'identity':
+             {'identity-service': {'related': False}},
+         'database':
+             {'pgsql-db': {'related': False},
+              'shared-db': {'related': True}}}
+    """
+    complete_ctxts = configs.complete_contexts()
+    incomplete_relations = []
+    for svc_type in required_interfaces.keys():
+        # Avoid duplicates
+        found_ctxt = False
+        for interface in required_interfaces[svc_type]:
+            if interface in complete_ctxts:
+                found_ctxt = True
+        if not found_ctxt:
+            incomplete_relations.append(svc_type)
+    incomplete_context_data = {}
+    for i in incomplete_relations:
+        incomplete_context_data[i] = configs.get_incomplete_context_data(required_interfaces[i])
+    return incomplete_context_data
+
+
+def do_action_openstack_upgrade(package, upgrade_callback, configs):
+    """Perform action-managed OpenStack upgrade.
+
+    Upgrades packages to the configured openstack-origin version and sets
+    the corresponding action status as a result.
+
+    If the charm was installed from source we cannot upgrade it.
+    For backwards compatibility a config flag (action-managed-upgrade) must
+    be set for this code to run, otherwise a full service level upgrade will
+    fire on config-changed.
+
+    @param package: package name for determining if upgrade available
+    @param upgrade_callback: function callback to charm's upgrade function
+    @param configs: templating object derived from OSConfigRenderer class
+
+    @return: True if upgrade successful; False if upgrade failed or skipped
+    """
+    ret = False
+
+    if git_install_requested():
+        action_set({'outcome': 'installed from source, skipped upgrade.'})
+    else:
+        if openstack_upgrade_available(package):
+            if config('action-managed-upgrade'):
+                juju_log('Upgrading OpenStack release')
+
+                try:
+                    upgrade_callback(configs=configs)
+                    action_set({'outcome': 'success, upgrade completed.'})
+                    ret = True
+                except:
+                    action_set({'outcome': 'upgrade failed, see traceback.'})
+                    action_set({'traceback': traceback.format_exc()})
+                    action_fail('do_openstack_upgrade resulted in an '
+                                'unexpected error')
+            else:
+                action_set({'outcome': 'action-managed-upgrade config is '
+                                       'False, skipped upgrade.'})
+        else:
+            action_set({'outcome': 'no upgrade available.'})
+
+    return ret
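Wiring sketch (illustrative action code; do_openstack_upgrade and CONFIGS
are charm-side names):

    def openstack_upgrade():
        if do_action_openstack_upgrade('neutron-common',
                                       do_openstack_upgrade,
                                       CONFIGS):
            config_changed()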
+
+
+def remote_restart(rel_name, remote_service=None):
+    trigger = {
+        'restart-trigger': str(uuid.uuid4()),
+    }
+    if remote_service:
+        trigger['remote-service'] = remote_service
+    for rid in relation_ids(rel_name):
+        # This subordinate can be related to two separate services using
+        # different subordinate relations so only issue the restart if
+        # the principal is connected down the relation we think it is
+        if related_units(relid=rid):
+            relation_set(relation_id=rid,
+                         relation_settings=trigger,
+                         )
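Usage sketch (relation and service names are illustrative): a subordinate can
nudge its principal to restart a service by setting a fresh trigger value on
the relation:

    remote_restart('neutron-plugin-api-subordinate',
                   remote_service='neutron-server')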
@@ -59,7 +59,7 @@ def some_hook():
     """
 
 
-def leader_get(attribute=None):
+def leader_get(attribute=None, rid=None):
     """Wrapper to ensure that settings are migrated from the peer relation.
 
     This is to support upgrading an environment that does not support
|
|||||||
# If attribute not present in leader db, check if this unit has set
|
# If attribute not present in leader db, check if this unit has set
|
||||||
# the attribute in the peer relation
|
# the attribute in the peer relation
|
||||||
if not leader_settings:
|
if not leader_settings:
|
||||||
peer_setting = relation_get(attribute=attribute, unit=local_unit())
|
peer_setting = _relation_get(attribute=attribute, unit=local_unit(),
|
||||||
|
rid=rid)
|
||||||
if peer_setting:
|
if peer_setting:
|
||||||
leader_set(settings={attribute: peer_setting})
|
leader_set(settings={attribute: peer_setting})
|
||||||
leader_settings = peer_setting
|
leader_settings = peer_setting
|
||||||
@ -103,7 +104,7 @@ def leader_get(attribute=None):
|
|||||||
settings_migrated = True
|
settings_migrated = True
|
||||||
migrated.add(attribute)
|
migrated.add(attribute)
|
||||||
else:
|
else:
|
||||||
r_settings = relation_get(unit=local_unit())
|
r_settings = _relation_get(unit=local_unit(), rid=rid)
|
||||||
if r_settings:
|
if r_settings:
|
||||||
for key in set(r_settings.keys()).difference(migrated):
|
for key in set(r_settings.keys()).difference(migrated):
|
||||||
# Leader setting wins
|
# Leader setting wins
|
||||||
@ -151,7 +152,7 @@ def relation_get(attribute=None, unit=None, rid=None):
|
|||||||
"""
|
"""
|
||||||
try:
|
try:
|
||||||
if rid in relation_ids('cluster'):
|
if rid in relation_ids('cluster'):
|
||||||
return leader_get(attribute)
|
return leader_get(attribute, rid)
|
||||||
else:
|
else:
|
||||||
raise NotImplementedError
|
raise NotImplementedError
|
||||||
except NotImplementedError:
|
except NotImplementedError:
|
||||||
|
@@ -19,20 +19,35 @@
 
 import os
 import subprocess
+import sys
 
 from charmhelpers.fetch import apt_install, apt_update
 from charmhelpers.core.hookenv import charm_dir, log
 
-try:
-    from pip import main as pip_execute
-except ImportError:
-    apt_update()
-    apt_install('python-pip')
-    from pip import main as pip_execute
-
 __author__ = "Jorge Niedbalski <jorge.niedbalski@canonical.com>"
 
 
+def pip_execute(*args, **kwargs):
+    """Overridden pip_execute() to stop sys.path being changed.
+
+    The act of importing main from the pip module seems to add wheels
+    from /usr/share/python-wheels, which are installed by various tools.
+    This function ensures that sys.path remains the same after the call is
+    executed.
+    """
+    try:
+        _path = sys.path
+        try:
+            from pip import main as _pip_execute
+        except ImportError:
+            apt_update()
+            apt_install('python-pip')
+            from pip import main as _pip_execute
+        _pip_execute(*args, **kwargs)
+    finally:
+        sys.path = _path
+
+
 def parse_options(given, available):
     """Given a set of options, check if available"""
     for key, value in sorted(given.items()):
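Usage sketch: callers pass pip's argv just as they did to pip.main(), and the
wrapper rebinds the original sys.path afterwards:

    pip_execute(['install', '--upgrade', 'six'])
    # roughly equivalent to: pip install --upgrade six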
@@ -42,8 +57,12 @@ def parse_options(given, available):
         yield "--{0}={1}".format(key, value)
 
 
-def pip_install_requirements(requirements, **options):
-    """Install a requirements file """
+def pip_install_requirements(requirements, constraints=None, **options):
+    """Install a requirements file.
+
+    :param constraints: Path to pip constraints file.
+    http://pip.readthedocs.org/en/stable/user_guide/#constraints-files
+    """
     command = ["install"]
 
     available_options = ('proxy', 'src', 'log', )
@@ -51,8 +70,13 @@ def pip_install_requirements(requirements, constraints=None, **options):
         command.append(option)
 
     command.append("-r {0}".format(requirements))
-    log("Installing from file: {} with options: {}".format(requirements,
-                                                           command))
+    if constraints:
+        command.append("-c {0}".format(constraints))
+        log("Installing from file: {} with constraints {} "
+            "and options: {}".format(requirements, constraints, command))
+    else:
+        log("Installing from file: {} with options: {}".format(requirements,
+                                                               command))
     pip_execute(command)
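Usage sketch (file paths are illustrative):

    pip_install_requirements('requirements.txt',
                             constraints='constraints.txt',
                             proxy='http://proxy:3128')
    # builds roughly: install --proxy=http://proxy:3128
    #                 -r requirements.txt -c constraints.txt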
@@ -23,11 +23,14 @@
 # James Page <james.page@ubuntu.com>
 # Adam Gandelman <adamg@ubuntu.com>
 #
+import bisect
+import six
 
 import os
 import shutil
 import json
 import time
+import uuid
 
 from subprocess import (
     check_call,
@@ -35,8 +38,10 @@ from subprocess import (
     CalledProcessError,
 )
 from charmhelpers.core.hookenv import (
+    local_unit,
     relation_get,
     relation_ids,
+    relation_set,
     related_units,
     log,
     DEBUG,
@@ -56,6 +61,8 @@ from charmhelpers.fetch import (
     apt_install,
 )
 
+from charmhelpers.core.kernel import modprobe
+
 KEYRING = '/etc/ceph/ceph.client.{}.keyring'
 KEYFILE = '/etc/ceph/ceph.client.{}.key'
@@ -67,6 +74,420 @@ log to syslog = {use_syslog}
 err to syslog = {use_syslog}
 clog to syslog = {use_syslog}
 """
+# For 50 < osds < 240,000 OSDs (Roughly 1 Exabyte at 6T OSDs)
+powers_of_two = [8192, 16384, 32768, 65536, 131072, 262144, 524288, 1048576, 2097152, 4194304, 8388608]
+
+
+def validator(value, valid_type, valid_range=None):
+    """
+    Used to validate these: http://docs.ceph.com/docs/master/rados/operations/pools/#set-pool-values
+    Example input:
+        validator(value=1,
+                  valid_type=int,
+                  valid_range=[0, 2])
+    This says I'm testing value=1. It must be an int inclusive in [0,2]
+
+    :param value: The value to validate
+    :param valid_type: The type that value should be.
+    :param valid_range: A range of values that value can assume.
+    :return:
+    """
+    assert isinstance(value, valid_type), "{} is not a {}".format(
+        value,
+        valid_type)
+    if valid_range is not None:
+        assert isinstance(valid_range, list), \
+            "valid_range must be a list, was given {}".format(valid_range)
+        # If we're dealing with strings
+        if valid_type is six.string_types:
+            assert value in valid_range, \
+                "{} is not in the list {}".format(value, valid_range)
+        # Integer, float should have a min and max
+        else:
+            if len(valid_range) != 2:
+                raise ValueError(
+                    "Invalid valid_range list of {} for {}. "
+                    "List must be [min,max]".format(valid_range, value))
+            assert value >= valid_range[0], \
+                "{} is less than minimum allowed value of {}".format(
+                    value, valid_range[0])
+            assert value <= valid_range[1], \
+                "{} is greater than maximum allowed value of {}".format(
+                    value, valid_range[1])
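Validation sketch: strings are checked for membership, numerics against a
[min, max] pair, and failures raise AssertionError:

    validator(value=1, valid_type=int, valid_range=[0, 2])      # passes
    validator(value='writeback', valid_type=six.string_types,
              valid_range=['readonly', 'writeback'])            # passes
    validator(value=3, valid_type=int, valid_range=[0, 2])      # AssertionError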
+
+
+class PoolCreationError(Exception):
+    """
+    A custom error to inform the caller that a pool creation failed. Provides an error message
+    """
+
+    def __init__(self, message):
+        super(PoolCreationError, self).__init__(message)
+
+
+class Pool(object):
+    """
+    An object oriented approach to Ceph pool creation. This base class is inherited by ReplicatedPool and ErasurePool.
+    Do not call create() on this base class as it will not do anything. Instantiate a child class and call create().
+    """
+
+    def __init__(self, service, name):
+        self.service = service
+        self.name = name
+
+    # Create the pool if it doesn't exist already
+    # To be implemented by subclasses
+    def create(self):
+        pass
+
+    def add_cache_tier(self, cache_pool, mode):
+        """
+        Adds a new cache tier to an existing pool.
+        :param cache_pool: six.string_types. The cache tier pool name to add.
+        :param mode: six.string_types. The caching mode to use for this pool. valid range = ["readonly", "writeback"]
+        :return: None
+        """
+        # Check the input types and values
+        validator(value=cache_pool, valid_type=six.string_types)
+        validator(value=mode, valid_type=six.string_types, valid_range=["readonly", "writeback"])
+
+        check_call(['ceph', '--id', self.service, 'osd', 'tier', 'add', self.name, cache_pool])
+        check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, mode])
+        check_call(['ceph', '--id', self.service, 'osd', 'tier', 'set-overlay', self.name, cache_pool])
+        check_call(['ceph', '--id', self.service, 'osd', 'pool', 'set', cache_pool, 'hit_set_type', 'bloom'])
+
+    def remove_cache_tier(self, cache_pool):
+        """
+        Removes a cache tier from Ceph. Flushes all dirty objects from writeback pools and waits for that to complete.
+        :param cache_pool: six.string_types. The cache tier pool name to remove.
+        :return: None
+        """
+        # read-only is easy, writeback is much harder
+        mode = get_cache_mode(cache_pool)
+        if mode == 'readonly':
+            check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, 'none'])
+            check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool])
+
+        elif mode == 'writeback':
+            check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, 'forward'])
+            # Flush the cache and wait for it to return
+            check_call(['ceph', '--id', self.service, '-p', cache_pool, 'cache-flush-evict-all'])
+            check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove-overlay', self.name])
+            check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool])
+
+    def get_pgs(self, pool_size):
+        """
+        :param pool_size: int. pool_size is either the number of replicas for replicated pools or the K+M sum for
+            erasure coded pools
+        :return: int. The number of pgs to use.
+        """
+        validator(value=pool_size, valid_type=int)
+        osd_list = get_osds(self.service)
+        if not osd_list:
+            # NOTE(james-page): Default to 200 for older ceph versions
+            # which don't support OSD query from cli
+            return 200
+
+        osd_list_length = len(osd_list)
+        # Calculate based on Ceph best practices
+        if osd_list_length < 5:
+            return 128
+        elif 5 < osd_list_length < 10:
+            return 512
+        elif 10 < osd_list_length < 50:
+            return 4096
+        else:
+            estimate = (osd_list_length * 100) / pool_size
+            # Return the next nearest power of 2
+            index = bisect.bisect_right(powers_of_two, estimate)
+            return powers_of_two[index]
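Sizing sketch: small clusters use the fixed tiers; larger ones estimate
100 PGs per OSD divided by the pool size and round up to the table's next
power of two. For example, 120 OSDs with 3 replicas:

    # estimate = 120 * 100 / 3 = 4000
    # bisect into powers_of_two => 8192 placement groups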
+
+
+class ReplicatedPool(Pool):
+    def __init__(self, service, name, pg_num=None, replicas=2):
+        super(ReplicatedPool, self).__init__(service=service, name=name)
+        self.replicas = replicas
+        if pg_num is None:
+            self.pg_num = self.get_pgs(self.replicas)
+        else:
+            self.pg_num = pg_num
+
+    def create(self):
+        if not pool_exists(self.service, self.name):
+            # Create it
+            cmd = ['ceph', '--id', self.service, 'osd', 'pool', 'create',
+                   self.name, str(self.pg_num)]
+            try:
+                check_call(cmd)
+            except CalledProcessError:
+                raise
+
+
+# Default jerasure erasure coded pool
+class ErasurePool(Pool):
+    def __init__(self, service, name, erasure_code_profile="default"):
+        super(ErasurePool, self).__init__(service=service, name=name)
+        self.erasure_code_profile = erasure_code_profile
+
+    def create(self):
+        if not pool_exists(self.service, self.name):
+            # Try to find the erasure profile information so we can properly size the pgs
+            erasure_profile = get_erasure_profile(service=self.service, name=self.erasure_code_profile)
+
+            # Check for errors
+            if erasure_profile is None:
+                log(message='Failed to discover erasure_profile named={}'.format(self.erasure_code_profile),
+                    level=ERROR)
+                raise PoolCreationError(message='unable to find erasure profile {}'.format(self.erasure_code_profile))
+            if 'k' not in erasure_profile or 'm' not in erasure_profile:
+                # Error
+                log(message='Unable to find k (data chunks) or m (coding chunks) in {}'.format(erasure_profile),
+                    level=ERROR)
+                raise PoolCreationError(
+                    message='unable to find k (data chunks) or m (coding chunks) in {}'.format(erasure_profile))
+
+            pgs = self.get_pgs(int(erasure_profile['k']) + int(erasure_profile['m']))
+            # Create it
+            cmd = ['ceph', '--id', self.service, 'osd', 'pool', 'create', self.name, str(pgs), str(pgs),
+                   'erasure', self.erasure_code_profile]
+            try:
+                check_call(cmd)
+            except CalledProcessError:
+                raise
+
+    """Get an existing erasure code profile if it already exists.
+       Returns json formatted output"""
+
+
+def get_erasure_profile(service, name):
+    """
+    :param service: six.string_types. The Ceph user name to run the command under
+    :param name: six.string_types. The erasure profile name to look up.
+    :return: dict of profile settings, or None on error.
+    """
+    try:
+        out = check_output(['ceph', '--id', service,
+                            'osd', 'erasure-code-profile', 'get',
+                            name, '--format=json'])
+        return json.loads(out)
+    except (CalledProcessError, OSError, ValueError):
+        return None
+
+
+def pool_set(service, pool_name, key, value):
+    """
+    Sets a value for a RADOS pool in ceph.
+    :param service: six.string_types. The Ceph user name to run the command under
+    :param pool_name: six.string_types
+    :param key: six.string_types
+    :param value:
+    :return: None. Can raise CalledProcessError
+    """
+    cmd = ['ceph', '--id', service, 'osd', 'pool', 'set', pool_name, key, value]
+    try:
+        check_call(cmd)
+    except CalledProcessError:
+        raise
+
+
+def snapshot_pool(service, pool_name, snapshot_name):
+    """
+    Snapshots a RADOS pool in ceph.
+    :param service: six.string_types. The Ceph user name to run the command under
+    :param pool_name: six.string_types
+    :param snapshot_name: six.string_types
+    :return: None. Can raise CalledProcessError
+    """
+    cmd = ['ceph', '--id', service, 'osd', 'pool', 'mksnap', pool_name, snapshot_name]
+    try:
+        check_call(cmd)
+    except CalledProcessError:
+        raise
+
+
+def remove_pool_snapshot(service, pool_name, snapshot_name):
+    """
+    Remove a snapshot from a RADOS pool in ceph.
+    :param service: six.string_types. The Ceph user name to run the command under
+    :param pool_name: six.string_types
+    :param snapshot_name: six.string_types
+    :return: None. Can raise CalledProcessError
+    """
+    cmd = ['ceph', '--id', service, 'osd', 'pool', 'rmsnap', pool_name, snapshot_name]
+    try:
+        check_call(cmd)
+    except CalledProcessError:
+        raise
+
+
+# max_bytes should be an int or long
+def set_pool_quota(service, pool_name, max_bytes):
+    """
+    :param service: six.string_types. The Ceph user name to run the command under
+    :param pool_name: six.string_types
+    :param max_bytes: int or long
+    :return: None. Can raise CalledProcessError
+    """
+    # Set a byte quota on a RADOS pool in ceph.
+    cmd = ['ceph', '--id', service, 'osd', 'pool', 'set-quota', pool_name,
+           'max_bytes', str(max_bytes)]
+    try:
+        check_call(cmd)
+    except CalledProcessError:
+        raise
+
+
+def remove_pool_quota(service, pool_name):
+    """
+    Removes the byte quota on a RADOS pool in ceph by setting it to zero.
+    :param service: six.string_types. The Ceph user name to run the command under
+    :param pool_name: six.string_types
+    :return: None. Can raise CalledProcessError
+    """
+    cmd = ['ceph', '--id', service, 'osd', 'pool', 'set-quota', pool_name, 'max_bytes', '0']
+    try:
+        check_call(cmd)
+    except CalledProcessError:
+        raise
+
+
+def remove_erasure_profile(service, profile_name):
+    """
+    Removes an erasure code profile. Please see
+    http://docs.ceph.com/docs/master/rados/operations/erasure-code-profile/
+    for more details
+    :param service: six.string_types. The Ceph user name to run the command under
+    :param profile_name: six.string_types
+    :return: None. Can raise CalledProcessError
+    """
+    cmd = ['ceph', '--id', service, 'osd', 'erasure-code-profile', 'rm',
+           profile_name]
+    try:
+        check_call(cmd)
+    except CalledProcessError:
+        raise
+
+
+def create_erasure_profile(service, profile_name, erasure_plugin_name='jerasure',
+                           failure_domain='host',
+                           data_chunks=2, coding_chunks=1,
+                           locality=None, durability_estimator=None):
+    """
+    Create a new erasure code profile if one does not already exist for it. Updates
+    the profile if it exists. Please see http://docs.ceph.com/docs/master/rados/operations/erasure-code-profile/
+    for more details
+    :param service: six.string_types. The Ceph user name to run the command under
+    :param profile_name: six.string_types
+    :param erasure_plugin_name: six.string_types
+    :param failure_domain: six.string_types. One of ['chassis', 'datacenter', 'host', 'osd', 'pdu', 'pod', 'rack', 'region',
+        'room', 'root', 'row']
+    :param data_chunks: int
+    :param coding_chunks: int
+    :param locality: int
+    :param durability_estimator: int
+    :return: None. Can raise CalledProcessError
+    """
+    # Ensure this failure_domain is allowed by Ceph
+    validator(failure_domain, six.string_types,
+              ['chassis', 'datacenter', 'host', 'osd', 'pdu', 'pod', 'rack', 'region', 'room', 'root', 'row'])
+
+    cmd = ['ceph', '--id', service, 'osd', 'erasure-code-profile', 'set', profile_name,
+           'plugin=' + erasure_plugin_name, 'k=' + str(data_chunks), 'm=' + str(coding_chunks),
+           'ruleset_failure_domain=' + failure_domain]
+    if locality is not None and durability_estimator is not None:
+        raise ValueError("create_erasure_profile should be called with k, m and one of l or c but not both.")
+
+    # Add plugin specific information
+    if locality is not None:
+        # For local erasure codes
+        cmd.append('l=' + str(locality))
+    if durability_estimator is not None:
+        # For Shec erasure codes
+        cmd.append('c=' + str(durability_estimator))
+
+    if erasure_profile_exists(service, profile_name):
+        cmd.append('--force')
+
+    try:
+        check_call(cmd)
+    except CalledProcessError:
+        raise
+
+
+def rename_pool(service, old_name, new_name):
+    """
+    Rename a Ceph pool from old_name to new_name
+    :param service: six.string_types. The Ceph user name to run the command under
+    :param old_name: six.string_types
+    :param new_name: six.string_types
+    :return: None
+    """
+    validator(value=old_name, valid_type=six.string_types)
+    validator(value=new_name, valid_type=six.string_types)
+
+    cmd = ['ceph', '--id', service, 'osd', 'pool', 'rename', old_name, new_name]
+    check_call(cmd)
+
+
+def erasure_profile_exists(service, name):
+    """
+    Check to see if an Erasure code profile already exists.
+    :param service: six.string_types. The Ceph user name to run the command under
+    :param name: six.string_types
+    :return: bool
+    """
+    validator(value=name, valid_type=six.string_types)
+    try:
+        check_call(['ceph', '--id', service,
+                    'osd', 'erasure-code-profile', 'get',
+                    name])
+        return True
+    except CalledProcessError:
+        return False
+
+
+def get_cache_mode(service, pool_name):
+    """
+    Find the current caching mode of the pool_name given.
+    :param service: six.string_types. The Ceph user name to run the command under
+    :param pool_name: six.string_types
+    :return: cache mode string, or None if the pool is not found
+    """
+    validator(value=service, valid_type=six.string_types)
+    validator(value=pool_name, valid_type=six.string_types)
+    out = check_output(['ceph', '--id', service, 'osd', 'dump', '--format=json'])
+    try:
+        osd_json = json.loads(out)
+        for pool in osd_json['pools']:
+            if pool['pool_name'] == pool_name:
+                return pool['cache_mode']
+        return None
+    except ValueError:
+        raise
+
+
+def pool_exists(service, name):
+    """Check to see if a RADOS pool already exists."""
+    try:
+        out = check_output(['rados', '--id', service,
+                            'lspools']).decode('UTF-8')
+    except CalledProcessError:
+        return False
+
+    return name in out
+
+
+def get_osds(service):
+    """Return a list of all Ceph Object Storage Daemons currently in the
+    cluster.
+    """
+    version = ceph_version()
+    if version and version >= '0.56':
+        return json.loads(check_output(['ceph', '--id', service,
+                                        'osd', 'ls',
+                                        '--format=json']).decode('UTF-8'))
+
+    return None
+
+
 def install():
@@ -96,53 +517,37 @@ def create_rbd_image(service, pool, image, sizemb):
     check_call(cmd)
 
 
-def pool_exists(service, name):
-    """Check to see if a RADOS pool already exists."""
-    try:
-        out = check_output(['rados', '--id', service,
-                            'lspools']).decode('UTF-8')
-    except CalledProcessError:
-        return False
-
-    return name in out
-
-
-def get_osds(service):
-    """Return a list of all Ceph Object Storage Daemons currently in the
-    cluster.
-    """
-    version = ceph_version()
-    if version and version >= '0.56':
-        return json.loads(check_output(['ceph', '--id', service,
-                                        'osd', 'ls',
-                                        '--format=json']).decode('UTF-8'))
-
-    return None
-
-
-def create_pool(service, name, replicas=3):
+def update_pool(client, pool, settings):
+    cmd = ['ceph', '--id', client, 'osd', 'pool', 'set', pool]
+    for k, v in six.iteritems(settings):
+        cmd.append(k)
+        cmd.append(v)
+
+    check_call(cmd)
+
+
+def create_pool(service, name, replicas=3, pg_num=None):
     """Create a new RADOS pool."""
     if pool_exists(service, name):
         log("Ceph pool {} already exists, skipping creation".format(name),
            level=WARNING)
         return
 
-    # Calculate the number of placement groups based
-    # on upstream recommended best practices.
-    osds = get_osds(service)
-    if osds:
-        pgnum = (len(osds) * 100 // replicas)
-    else:
-        # NOTE(james-page): Default to 200 for older ceph versions
-        # which don't support OSD query from cli
-        pgnum = 200
+    if not pg_num:
+        # Calculate the number of placement groups based
+        # on upstream recommended best practices.
+        osds = get_osds(service)
+        if osds:
+            pg_num = (len(osds) * 100 // replicas)
+        else:
+            # NOTE(james-page): Default to 200 for older ceph versions
+            # which don't support OSD query from cli
+            pg_num = 200
 
-    cmd = ['ceph', '--id', service, 'osd', 'pool', 'create', name, str(pgnum)]
+    cmd = ['ceph', '--id', service, 'osd', 'pool', 'create', name, str(pg_num)]
     check_call(cmd)
 
-    cmd = ['ceph', '--id', service, 'osd', 'pool', 'set', name, 'size',
-           str(replicas)]
-    check_call(cmd)
+    update_pool(service, name, settings={'size': str(replicas)})
 
 
 def delete_pool(service, name):
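With the new pg_num argument a charm can pin placement groups explicitly instead of relying on the OSD-count heuristic. A rough sketch (pool name and sizes are illustrative only):

    # Sketch only: create a replicated pool with an explicit pg_num,
    # then adjust its size via the new update_pool() helper.
    from charmhelpers.contrib.storage.linux.ceph import create_pool, update_pool

    create_pool(service='admin', name='cinder', replicas=3, pg_num=128)
    update_pool(client='admin', pool='cinder', settings={'size': '3'})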
@@ -197,10 +602,10 @@ def create_key_file(service, key):
     log('Created new keyfile at %s.' % keyfile, level=INFO)
 
 
-def get_ceph_nodes():
-    """Query named relation 'ceph' to determine current nodes."""
+def get_ceph_nodes(relation='ceph'):
+    """Query named relation to determine current nodes."""
     hosts = []
-    for r_id in relation_ids('ceph'):
+    for r_id in relation_ids(relation):
         for unit in related_units(r_id):
             hosts.append(relation_get('private-address', unit=unit, rid=r_id))
 
@@ -288,17 +693,6 @@ def place_data_on_block_device(blk_device, data_src_dst):
     os.chown(data_src_dst, uid, gid)
 
 
-# TODO: re-use
-def modprobe(module):
-    """Load a kernel module and configure for auto-load on reboot."""
-    log('Loading kernel module', level=INFO)
-    cmd = ['modprobe', module]
-    check_call(cmd)
-    with open('/etc/modules', 'r+') as modules:
-        if module not in modules.read():
-            modules.write(module)
-
-
 def copy_files(src, dst, symlinks=False, ignore=None):
     """Copy files from src to dst."""
     for item in os.listdir(src):
@@ -363,14 +757,14 @@ def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point,
         service_start(svc)
 
 
-def ensure_ceph_keyring(service, user=None, group=None):
+def ensure_ceph_keyring(service, user=None, group=None, relation='ceph'):
     """Ensures a ceph keyring is created for a named service and optionally
     ensures user and group ownership.
 
     Returns False if no ceph key is available in relation state.
     """
     key = None
-    for rid in relation_ids('ceph'):
+    for rid in relation_ids(relation):
         for unit in related_units(rid):
             key = relation_get('key', rid=rid, unit=unit)
             if key:
@@ -411,17 +805,60 @@ class CephBrokerRq(object):
 
     The API is versioned and defaults to version 1.
     """
-    def __init__(self, api_version=1):
+
+    def __init__(self, api_version=1, request_id=None):
         self.api_version = api_version
+        if request_id:
+            self.request_id = request_id
+        else:
+            self.request_id = str(uuid.uuid1())
         self.ops = []
 
-    def add_op_create_pool(self, name, replica_count=3):
+    def add_op_create_pool(self, name, replica_count=3, pg_num=None):
+        """Adds an operation to create a pool.
+
+        @param pg_num setting: optional setting. If not provided, this value
+        will be calculated by the broker based on how many OSDs are in the
+        cluster at the time of creation. Note that, if provided, this value
+        will be capped at the current available maximum.
+        """
         self.ops.append({'op': 'create-pool', 'name': name,
-                         'replicas': replica_count})
+                         'replicas': replica_count, 'pg_num': pg_num})
+
+    def set_ops(self, ops):
+        """Set request ops to provided value.
+
+        Useful for injecting ops that come from a previous request
+        to allow comparisons to ensure validity.
+        """
+        self.ops = ops
 
     @property
     def request(self):
-        return json.dumps({'api-version': self.api_version, 'ops': self.ops})
+        return json.dumps({'api-version': self.api_version, 'ops': self.ops,
+                           'request-id': self.request_id})
+
+    def _ops_equal(self, other):
+        if len(self.ops) == len(other.ops):
+            for req_no in range(0, len(self.ops)):
+                for key in ['replicas', 'name', 'op', 'pg_num']:
+                    if self.ops[req_no].get(key) != other.ops[req_no].get(key):
+                        return False
+        else:
+            return False
+        return True
+
+    def __eq__(self, other):
+        if not isinstance(other, self.__class__):
+            return False
+        if self.api_version == other.api_version and \
+                self._ops_equal(other):
+            return True
+        else:
+            return False
+
+    def __ne__(self, other):
+        return not self.__eq__(other)
 
 
 class CephBrokerRsp(object):
@@ -431,10 +868,15 @@ class CephBrokerRsp(object):
 
     The API is versioned and defaults to version 1.
     """
+
     def __init__(self, encoded_rsp):
         self.api_version = None
         self.rsp = json.loads(encoded_rsp)
 
+    @property
+    def request_id(self):
+        return self.rsp.get('request-id')
+
     @property
     def exit_code(self):
         return self.rsp.get('exit-code')
@@ -442,3 +884,182 @@ class CephBrokerRsp(object):
     @property
     def exit_msg(self):
         return self.rsp.get('stderr')
+
+
+# Ceph Broker Conversation:
+# If a charm needs an action to be taken by ceph it can create a CephBrokerRq
+# and send that request to ceph via the ceph relation. The CephBrokerRq has a
+# unique id so that the client can identify which CephBrokerRsp is associated
+# with the request. Ceph will also respond to each client unit individually
+# creating a response key per client unit eg glance/0 will get a CephBrokerRsp
+# via key broker-rsp-glance-0
+#
+# To use this the charm can just do something like:
+#
+# from charmhelpers.contrib.storage.linux.ceph import (
+#     send_request_if_needed,
+#     is_request_complete,
+#     CephBrokerRq,
+# )
+#
+# @hooks.hook('ceph-relation-changed')
+# def ceph_changed():
+#     rq = CephBrokerRq()
+#     rq.add_op_create_pool(name='poolname', replica_count=3)
+#
+#     if is_request_complete(rq):
+#         <Request complete actions>
+#     else:
+#         send_request_if_needed(get_ceph_request())
+#
+# CephBrokerRq and CephBrokerRsp are serialized into JSON. Below is an example
+# of glance having sent a request to ceph which ceph has successfully processed
+#  'ceph:8': {
+#      'ceph/0': {
+#          'auth': 'cephx',
+#          'broker-rsp-glance-0': '{"request-id": "0bc7dc54", "exit-code": 0}',
+#          'broker_rsp': '{"request-id": "0da543b8", "exit-code": 0}',
+#          'ceph-public-address': '10.5.44.103',
+#          'key': 'AQCLDttVuHXINhAAvI144CB09dYchhHyTUY9BQ==',
+#          'private-address': '10.5.44.103',
+#      },
+#      'glance/0': {
+#          'broker_req': ('{"api-version": 1, "request-id": "0bc7dc54", '
+#                         '"ops": [{"replicas": 3, "name": "glance", '
+#                         '"op": "create-pool"}]}'),
+#          'private-address': '10.5.44.109',
+#      },
+#  }
+
+def get_previous_request(rid):
+    """Return the last ceph broker request sent on a given relation
+
+    @param rid: Relation id to query for request
+    """
+    request = None
+    broker_req = relation_get(attribute='broker_req', rid=rid,
+                              unit=local_unit())
+    if broker_req:
+        request_data = json.loads(broker_req)
+        request = CephBrokerRq(api_version=request_data['api-version'],
+                               request_id=request_data['request-id'])
+        request.set_ops(request_data['ops'])
+
+    return request
+
+
+def get_request_states(request, relation='ceph'):
+    """Return a dict of requests per relation id with their corresponding
+    completion state.
+
+    This allows a charm, which has a request for ceph, to see whether there is
+    an equivalent request already being processed and if so what state that
+    request is in.
+
+    @param request: A CephBrokerRq object
+    """
+    complete = []
+    requests = {}
+    for rid in relation_ids(relation):
+        complete = False
+        previous_request = get_previous_request(rid)
+        if request == previous_request:
+            sent = True
+            complete = is_request_complete_for_rid(previous_request, rid)
+        else:
+            sent = False
+            complete = False
+
+        requests[rid] = {
+            'sent': sent,
+            'complete': complete,
+        }
+
+    return requests
+
+
+def is_request_sent(request, relation='ceph'):
+    """Check to see if a functionally equivalent request has already been sent
+
+    Returns True if a similar request has been sent
+
+    @param request: A CephBrokerRq object
+    """
+    states = get_request_states(request, relation=relation)
+    for rid in states.keys():
+        if not states[rid]['sent']:
+            return False
+
+    return True
+
+
+def is_request_complete(request, relation='ceph'):
+    """Check to see if a functionally equivalent request has already been
+    completed
+
+    Returns True if a similar request has been completed
+
+    @param request: A CephBrokerRq object
+    """
+    states = get_request_states(request, relation=relation)
+    for rid in states.keys():
+        if not states[rid]['complete']:
+            return False
+
+    return True
+
+
+def is_request_complete_for_rid(request, rid):
+    """Check if a given request has been completed on the given relation
+
+    @param request: A CephBrokerRq object
+    @param rid: Relation ID
+    """
+    broker_key = get_broker_rsp_key()
+    for unit in related_units(rid):
+        rdata = relation_get(rid=rid, unit=unit)
+        if rdata.get(broker_key):
+            rsp = CephBrokerRsp(rdata.get(broker_key))
+            if rsp.request_id == request.request_id:
+                if not rsp.exit_code:
+                    return True
+        else:
+            # The remote unit sent no reply targeted at this unit so either the
+            # remote ceph cluster does not support unit targeted replies or it
+            # has not processed our request yet.
+            if rdata.get('broker_rsp'):
+                request_data = json.loads(rdata['broker_rsp'])
+                if request_data.get('request-id'):
+                    log('Ignoring legacy broker_rsp without unit key as remote '
+                        'service supports unit specific replies', level=DEBUG)
+                else:
+                    log('Using legacy broker_rsp as remote service does not '
+                        'support unit specific replies', level=DEBUG)
+                    rsp = CephBrokerRsp(rdata['broker_rsp'])
+                    if not rsp.exit_code:
+                        return True
+
+    return False
+
+
+def get_broker_rsp_key():
+    """Return broker response key for this unit
+
+    This is the key that ceph is going to use to pass request status
+    information back to this unit
+    """
+    return 'broker-rsp-' + local_unit().replace('/', '-')
+
+
+def send_request_if_needed(request, relation='ceph'):
+    """Send broker request if an equivalent request has not already been sent
+
+    @param request: A CephBrokerRq object
+    """
+    if is_request_sent(request, relation=relation):
+        log('Request already sent but not complete, not sending new request',
+            level=DEBUG)
+    else:
+        for rid in relation_ids(relation):
+            log('Sending request {}'.format(request.request_id), level=DEBUG)
+            relation_set(relation_id=rid, broker_req=request.request)
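The comment block above already shows the canonical flow; as a compact, hedged sketch of the same round trip (the pool name and the charm-side callback are illustrative):

    # Sketch only: idempotently request a pool and act once ceph confirms it.
    rq = CephBrokerRq()
    rq.add_op_create_pool(name='glance', replica_count=3, pg_num=None)
    if is_request_complete(rq):
        configure_storage()   # hypothetical charm-side callback
    else:
        send_request_if_needed(rq)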
@@ -76,3 +76,13 @@ def ensure_loopback_device(path, size):
     check_call(cmd)
 
     return create_loopback(path)
+
+
+def is_mapped_loopback_device(device):
+    """
+    Checks if a given device name is an existing/mapped loopback device.
+    :param device: str: Full path to the device (eg, /dev/loop1).
+    :returns: str: Path to the backing file if it is a loopback device;
+        empty string otherwise
+    """
+    return loopback_devices().get(device, "")
@@ -43,9 +43,10 @@ def zap_disk(block_device):
 
     :param block_device: str: Full path of block device to clean.
     '''
+    # https://github.com/ceph/ceph/commit/fdd7f8d83afa25c4e09aaedd90ab93f3b64a677b
     # sometimes sgdisk exits non-zero; this is OK, dd will clean up
-    call(['sgdisk', '--zap-all', '--mbrtogpt',
-          '--clear', block_device])
+    call(['sgdisk', '--zap-all', '--', block_device])
+    call(['sgdisk', '--clear', '--mbrtogpt', '--', block_device])
     dev_end = check_output(['blockdev', '--getsz',
                             block_device]).decode('UTF-8')
     gpt_end = int(dev_end.split()[0]) - 100
@@ -18,14 +18,15 @@
 Templating using the python-jinja2 package.
 """
 import six
-from charmhelpers.fetch import apt_install
+from charmhelpers.fetch import apt_install, apt_update
 try:
     import jinja2
 except ImportError:
+    apt_update(fatal=True)
     if six.PY3:
-        apt_install(["python3-jinja2"])
+        apt_install(["python3-jinja2"], fatal=True)
     else:
-        apt_install(["python-jinja2"])
+        apt_install(["python-jinja2"], fatal=True)
     import jinja2
@@ -74,6 +74,7 @@ def cached(func):
         res = func(*args, **kwargs)
         cache[key] = res
         return res
+    wrapper._wrapped = func
     return wrapper
@@ -173,9 +174,19 @@ def relation_type():
     return os.environ.get('JUJU_RELATION', None)
 
 
-def relation_id():
-    """The relation ID for the current relation hook"""
-    return os.environ.get('JUJU_RELATION_ID', None)
+@cached
+def relation_id(relation_name=None, service_or_unit=None):
+    """The relation ID for the current or a specified relation"""
+    if not relation_name and not service_or_unit:
+        return os.environ.get('JUJU_RELATION_ID', None)
+    elif relation_name and service_or_unit:
+        service_name = service_or_unit.split('/')[0]
+        for relid in relation_ids(relation_name):
+            remote_service = remote_service_name(relid)
+            if remote_service == service_name:
+                return relid
+    else:
+        raise ValueError('Must specify neither or both of relation_name and service_or_unit')
 
 
 def local_unit():
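Outside of a relation hook, the new keyword form lets a charm look up a relation id by name and counterpart. A hedged sketch (relation and unit names are examples only):

    # Sketch only: resolve the relation id shared with a named remote unit.
    rid = relation_id(relation_name='identity-service',
                      service_or_unit='keystone/0')
    if rid:
        relation_set(relation_id=rid, some_key='some-value')  # illustrative key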
@@ -193,9 +204,20 @@ def service_name():
     return local_unit().split('/')[0]
 
 
+@cached
+def remote_service_name(relid=None):
+    """The remote service name for a given relation-id (or the current relation)"""
+    if relid is None:
+        unit = remote_unit()
+    else:
+        units = related_units(relid)
+        unit = units[0] if units else None
+    return unit.split('/')[0] if unit else None
+
+
 def hook_name():
     """The name of the currently executing hook"""
-    return os.path.basename(sys.argv[0])
+    return os.environ.get('JUJU_HOOK_NAME', os.path.basename(sys.argv[0]))
 
 
 class Config(dict):
@@ -468,6 +490,76 @@ def relation_types():
     return rel_types
 
 
+@cached
+def peer_relation_id():
+    '''Get the peers relation id if a peers relation has been joined, else None.'''
+    md = metadata()
+    section = md.get('peers')
+    if section:
+        for key in section:
+            relids = relation_ids(key)
+            if relids:
+                return relids[0]
+    return None
+
+
+@cached
+def relation_to_interface(relation_name):
+    """
+    Given the name of a relation, return the interface that relation uses.
+
+    :returns: The interface name, or ``None``.
+    """
+    return relation_to_role_and_interface(relation_name)[1]
+
+
+@cached
+def relation_to_role_and_interface(relation_name):
+    """
+    Given the name of a relation, return the role and the name of the interface
+    that relation uses (where role is one of ``provides``, ``requires``, or ``peers``).
+
+    :returns: A tuple containing ``(role, interface)``, or ``(None, None)``.
+    """
+    _metadata = metadata()
+    for role in ('provides', 'requires', 'peers'):
+        interface = _metadata.get(role, {}).get(relation_name, {}).get('interface')
+        if interface:
+            return role, interface
+    return None, None
+
+
+@cached
+def role_and_interface_to_relations(role, interface_name):
+    """
+    Given a role and interface name, return a list of relation names for the
+    current charm that use that interface under that role (where role is one
+    of ``provides``, ``requires``, or ``peers``).
+
+    :returns: A list of relation names.
+    """
+    _metadata = metadata()
+    results = []
+    for relation_name, relation in _metadata.get(role, {}).items():
+        if relation['interface'] == interface_name:
+            results.append(relation_name)
+    return results
+
+
+@cached
+def interface_to_relations(interface_name):
+    """
+    Given an interface, return a list of relation names for the current
+    charm that use that interface.
+
+    :returns: A list of relation names.
+    """
+    results = []
+    for role in ('provides', 'requires', 'peers'):
+        results.extend(role_and_interface_to_relations(role, interface_name))
+    return results
+
+
 @cached
 def charm_name():
     """Get the name of the current charm as is specified on metadata.yaml"""
@@ -544,6 +636,38 @@ def unit_private_ip():
     return unit_get('private-address')
 
 
+@cached
+def storage_get(attribute=None, storage_id=None):
+    """Get storage attributes"""
+    _args = ['storage-get', '--format=json']
+    if storage_id:
+        _args.extend(('-s', storage_id))
+    if attribute:
+        _args.append(attribute)
+    try:
+        return json.loads(subprocess.check_output(_args).decode('UTF-8'))
+    except ValueError:
+        return None
+
+
+@cached
+def storage_list(storage_name=None):
+    """List the storage IDs for the unit"""
+    _args = ['storage-list', '--format=json']
+    if storage_name:
+        _args.append(storage_name)
+    try:
+        return json.loads(subprocess.check_output(_args).decode('UTF-8'))
+    except ValueError:
+        return None
+    except OSError as e:
+        import errno
+        if e.errno == errno.ENOENT:
+            # storage-list does not exist
+            return []
+        raise
+
+
 class UnregisteredHookError(Exception):
     """Raised when an undefined hook is called"""
     pass
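Together these wrap Juju's storage hook tools. A hedged sketch of how a storage-attached hook might use them (the storage name 'data' is an example):

    # Sketch only: find the mount location of each attached 'data' store.
    for sid in storage_list('data'):
        location = storage_get('location', sid)
        log('storage {} mounted at {}'.format(sid, location))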
@@ -644,6 +768,21 @@ def action_fail(message):
     subprocess.check_call(['action-fail', message])
 
 
+def action_name():
+    """Get the name of the currently executing action."""
+    return os.environ.get('JUJU_ACTION_NAME')
+
+
+def action_uuid():
+    """Get the UUID of the currently executing action."""
+    return os.environ.get('JUJU_ACTION_UUID')
+
+
+def action_tag():
+    """Get the tag for the currently executing action."""
+    return os.environ.get('JUJU_ACTION_TAG')
+
+
 def status_set(workload_state, message):
     """Set the workload state with a message
 
|
|||||||
|
|
||||||
|
|
||||||
def status_get():
|
def status_get():
|
||||||
"""Retrieve the previously set juju workload state
|
"""Retrieve the previously set juju workload state and message
|
||||||
|
|
||||||
|
If the status-get command is not found then assume this is juju < 1.23 and
|
||||||
|
return 'unknown', ""
|
||||||
|
|
||||||
If the status-set command is not found then assume this is juju < 1.23 and
|
|
||||||
return 'unknown'
|
|
||||||
"""
|
"""
|
||||||
cmd = ['status-get']
|
cmd = ['status-get', "--format=json", "--include-data"]
|
||||||
try:
|
try:
|
||||||
raw_status = subprocess.check_output(cmd, universal_newlines=True)
|
raw_status = subprocess.check_output(cmd)
|
||||||
status = raw_status.rstrip()
|
|
||||||
return status
|
|
||||||
except OSError as e:
|
except OSError as e:
|
||||||
if e.errno == errno.ENOENT:
|
if e.errno == errno.ENOENT:
|
||||||
return 'unknown'
|
return ('unknown', "")
|
||||||
else:
|
else:
|
||||||
raise
|
raise
|
||||||
|
else:
|
||||||
|
status = json.loads(raw_status.decode("UTF-8"))
|
||||||
|
return (status["status"], status["message"])
|
||||||
|
|
||||||
|
|
||||||
def translate_exc(from_exc, to_exc):
|
def translate_exc(from_exc, to_exc):
|
||||||
def inner_translate_exc1(f):
|
def inner_translate_exc1(f):
|
||||||
|
@wraps(f)
|
||||||
def inner_translate_exc2(*args, **kwargs):
|
def inner_translate_exc2(*args, **kwargs):
|
||||||
try:
|
try:
|
||||||
return f(*args, **kwargs)
|
return f(*args, **kwargs)
|
||||||
@ -736,6 +878,40 @@ def leader_set(settings=None, **kwargs):
|
|||||||
subprocess.check_call(cmd)
|
subprocess.check_call(cmd)
|
||||||
|
|
||||||
|
|
||||||
|
@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
|
||||||
|
def payload_register(ptype, klass, pid):
|
||||||
|
""" is used while a hook is running to let Juju know that a
|
||||||
|
payload has been started."""
|
||||||
|
cmd = ['payload-register']
|
||||||
|
for x in [ptype, klass, pid]:
|
||||||
|
cmd.append(x)
|
||||||
|
subprocess.check_call(cmd)
|
||||||
|
|
||||||
|
|
||||||
|
@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
|
||||||
|
def payload_unregister(klass, pid):
|
||||||
|
""" is used while a hook is running to let Juju know
|
||||||
|
that a payload has been manually stopped. The <class> and <id> provided
|
||||||
|
must match a payload that has been previously registered with juju using
|
||||||
|
payload-register."""
|
||||||
|
cmd = ['payload-unregister']
|
||||||
|
for x in [klass, pid]:
|
||||||
|
cmd.append(x)
|
||||||
|
subprocess.check_call(cmd)
|
||||||
|
|
||||||
|
|
||||||
|
@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
|
||||||
|
def payload_status_set(klass, pid, status):
|
||||||
|
"""is used to update the current status of a registered payload.
|
||||||
|
The <class> and <id> provided must match a payload that has been previously
|
||||||
|
registered with juju using payload-register. The <status> must be one of the
|
||||||
|
follow: starting, started, stopping, stopped"""
|
||||||
|
cmd = ['payload-status-set']
|
||||||
|
for x in [klass, pid, status]:
|
||||||
|
cmd.append(x)
|
||||||
|
subprocess.check_call(cmd)
|
||||||
|
|
||||||
|
|
||||||
@cached
|
@cached
|
||||||
def juju_version():
|
def juju_version():
|
||||||
"""Full version string (eg. '1.23.3.1-trusty-amd64')"""
|
"""Full version string (eg. '1.23.3.1-trusty-amd64')"""
|
||||||
|
@@ -63,55 +63,86 @@ def service_reload(service_name, restart_on_failure=False):
     return service_result
 
 
-def service_pause(service_name, init_dir=None):
+def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d"):
     """Pause a system service.
 
     Stop it, and prevent it from starting again at boot."""
-    if init_dir is None:
-        init_dir = "/etc/init"
-    stopped = service_stop(service_name)
-    # XXX: Support systemd too
-    override_path = os.path.join(
-        init_dir, '{}.conf.override'.format(service_name))
-    with open(override_path, 'w') as fh:
-        fh.write("manual\n")
+    stopped = True
+    if service_running(service_name):
+        stopped = service_stop(service_name)
+    upstart_file = os.path.join(init_dir, "{}.conf".format(service_name))
+    sysv_file = os.path.join(initd_dir, service_name)
+    if init_is_systemd():
+        service('disable', service_name)
+    elif os.path.exists(upstart_file):
+        override_path = os.path.join(
+            init_dir, '{}.override'.format(service_name))
+        with open(override_path, 'w') as fh:
+            fh.write("manual\n")
+    elif os.path.exists(sysv_file):
+        subprocess.check_call(["update-rc.d", service_name, "disable"])
+    else:
+        raise ValueError(
+            "Unable to detect {0} as SystemD, Upstart {1} or"
+            " SysV {2}".format(
+                service_name, upstart_file, sysv_file))
     return stopped
 
 
-def service_resume(service_name, init_dir=None):
+def service_resume(service_name, init_dir="/etc/init",
+                   initd_dir="/etc/init.d"):
     """Resume a system service.
 
     Reenable starting again at boot. Start the service"""
-    # XXX: Support systemd too
-    if init_dir is None:
-        init_dir = "/etc/init"
-    override_path = os.path.join(
-        init_dir, '{}.conf.override'.format(service_name))
-    if os.path.exists(override_path):
-        os.unlink(override_path)
-    started = service_start(service_name)
+    upstart_file = os.path.join(init_dir, "{}.conf".format(service_name))
+    sysv_file = os.path.join(initd_dir, service_name)
+    if init_is_systemd():
+        service('enable', service_name)
+    elif os.path.exists(upstart_file):
+        override_path = os.path.join(
+            init_dir, '{}.override'.format(service_name))
+        if os.path.exists(override_path):
+            os.unlink(override_path)
+    elif os.path.exists(sysv_file):
+        subprocess.check_call(["update-rc.d", service_name, "enable"])
+    else:
+        raise ValueError(
+            "Unable to detect {0} as SystemD, Upstart {1} or"
+            " SysV {2}".format(
+                service_name, upstart_file, sysv_file))
+
+    started = service_running(service_name)
+    if not started:
+        started = service_start(service_name)
     return started
 
 
 def service(action, service_name):
     """Control a system service"""
-    cmd = ['service', service_name, action]
+    if init_is_systemd():
+        cmd = ['systemctl', action, service_name]
+    else:
+        cmd = ['service', service_name, action]
     return subprocess.call(cmd) == 0
 
 
-def service_running(service):
+def service_running(service_name):
     """Determine whether a system service is running"""
-    try:
-        output = subprocess.check_output(
-            ['service', service, 'status'],
-            stderr=subprocess.STDOUT).decode('UTF-8')
-    except subprocess.CalledProcessError:
-        return False
+    if init_is_systemd():
+        return service('is-active', service_name)
     else:
-        if ("start/running" in output or "is running" in output):
-            return True
-        else:
-            return False
+        try:
+            output = subprocess.check_output(
+                ['service', service_name, 'status'],
+                stderr=subprocess.STDOUT).decode('UTF-8')
+        except subprocess.CalledProcessError:
+            return False
+        else:
+            if ("start/running" in output or "is running" in output or
+                    "up and running" in output):
+                return True
+            else:
+                return False
 
 
 def service_available(service_name):
|
|||||||
return True
|
return True
|
||||||
|
|
||||||
|
|
||||||
def adduser(username, password=None, shell='/bin/bash', system_user=False):
|
SYSTEMD_SYSTEM = '/run/systemd/system'
|
||||||
"""Add a user to the system"""
|
|
||||||
|
|
||||||
|
def init_is_systemd():
|
||||||
|
"""Return True if the host system uses systemd, False otherwise."""
|
||||||
|
return os.path.isdir(SYSTEMD_SYSTEM)
|
||||||
|
|
||||||
|
|
||||||
|
def adduser(username, password=None, shell='/bin/bash', system_user=False,
|
||||||
|
primary_group=None, secondary_groups=None):
|
||||||
|
"""Add a user to the system.
|
||||||
|
|
||||||
|
Will log but otherwise succeed if the user already exists.
|
||||||
|
|
||||||
|
:param str username: Username to create
|
||||||
|
:param str password: Password for user; if ``None``, create a system user
|
||||||
|
:param str shell: The default shell for the user
|
||||||
|
:param bool system_user: Whether to create a login or system user
|
||||||
|
:param str primary_group: Primary group for user; defaults to username
|
||||||
|
:param list secondary_groups: Optional list of additional groups
|
||||||
|
|
||||||
|
:returns: The password database entry struct, as returned by `pwd.getpwnam`
|
||||||
|
"""
|
||||||
try:
|
try:
|
||||||
user_info = pwd.getpwnam(username)
|
user_info = pwd.getpwnam(username)
|
||||||
log('user {0} already exists!'.format(username))
|
log('user {0} already exists!'.format(username))
|
||||||
@ -142,12 +194,32 @@ def adduser(username, password=None, shell='/bin/bash', system_user=False):
|
|||||||
'--shell', shell,
|
'--shell', shell,
|
||||||
'--password', password,
|
'--password', password,
|
||||||
])
|
])
|
||||||
|
if not primary_group:
|
||||||
|
try:
|
||||||
|
grp.getgrnam(username)
|
||||||
|
primary_group = username # avoid "group exists" error
|
||||||
|
except KeyError:
|
||||||
|
pass
|
||||||
|
if primary_group:
|
||||||
|
cmd.extend(['-g', primary_group])
|
||||||
|
if secondary_groups:
|
||||||
|
cmd.extend(['-G', ','.join(secondary_groups)])
|
||||||
cmd.append(username)
|
cmd.append(username)
|
||||||
subprocess.check_call(cmd)
|
subprocess.check_call(cmd)
|
||||||
user_info = pwd.getpwnam(username)
|
user_info = pwd.getpwnam(username)
|
||||||
return user_info
|
return user_info
|
||||||
|
|
||||||
|
|
||||||
|
def user_exists(username):
|
||||||
|
"""Check if a user exists"""
|
||||||
|
try:
|
||||||
|
pwd.getpwnam(username)
|
||||||
|
user_exists = True
|
||||||
|
except KeyError:
|
||||||
|
user_exists = False
|
||||||
|
return user_exists
|
||||||
|
|
||||||
|
|
||||||
def add_group(group_name, system_group=False):
|
def add_group(group_name, system_group=False):
|
||||||
"""Add a group to the system"""
|
"""Add a group to the system"""
|
||||||
try:
|
try:
|
||||||
@ -229,14 +301,12 @@ def write_file(path, content, owner='root', group='root', perms=0o444):
|
|||||||
|
|
||||||
|
|
||||||
def fstab_remove(mp):
|
def fstab_remove(mp):
|
||||||
"""Remove the given mountpoint entry from /etc/fstab
|
"""Remove the given mountpoint entry from /etc/fstab"""
|
||||||
"""
|
|
||||||
return Fstab.remove_by_mountpoint(mp)
|
return Fstab.remove_by_mountpoint(mp)
|
||||||
|
|
||||||
|
|
||||||
def fstab_add(dev, mp, fs, options=None):
|
def fstab_add(dev, mp, fs, options=None):
|
||||||
"""Adds the given device entry to the /etc/fstab file
|
"""Adds the given device entry to the /etc/fstab file"""
|
||||||
"""
|
|
||||||
return Fstab.add(dev, mp, fs, options=options)
|
return Fstab.add(dev, mp, fs, options=options)
|
||||||
|
|
||||||
|
|
||||||
@@ -280,9 +350,19 @@ def mounts():
     return system_mounts
 
 
+def fstab_mount(mountpoint):
+    """Mount filesystem using fstab"""
+    cmd_args = ['mount', mountpoint]
+    try:
+        subprocess.check_output(cmd_args)
+    except subprocess.CalledProcessError as e:
+        log('Error unmounting {}\n{}'.format(mountpoint, e.output))
+        return False
+    return True
+
+
 def file_hash(path, hash_type='md5'):
-    """
-    Generate a hash checksum of the contents of 'path' or None if not found.
+    """Generate a hash checksum of the contents of 'path' or None if not found.
 
     :param str hash_type: Any hash alrgorithm supported by :mod:`hashlib`,
         such as md5, sha1, sha256, sha512, etc.
@@ -297,10 +377,9 @@ def file_hash(path, hash_type='md5'):
 
 
 def path_hash(path):
-    """
-    Generate a hash checksum of all files matching 'path'. Standard wildcards
-    like '*' and '?' are supported, see documentation for the 'glob' module for
-    more information.
+    """Generate a hash checksum of all files matching 'path'. Standard
+    wildcards like '*' and '?' are supported, see documentation for the 'glob'
+    module for more information.
 
     :return: dict: A { filename: hash } dictionary for all matched files.
         Empty if none found.
@@ -312,8 +391,7 @@ def path_hash(path):
 
 
 def check_hash(path, checksum, hash_type='md5'):
-    """
-    Validate a file using a cryptographic checksum.
+    """Validate a file using a cryptographic checksum.
 
     :param str checksum: Value of the checksum used to validate the file.
     :param str hash_type: Hash algorithm used to generate `checksum`.
@@ -328,6 +406,7 @@ def check_hash(path, checksum, hash_type='md5'):
 
 
 class ChecksumError(ValueError):
+    """A class derived from Value error to indicate the checksum failed."""
     pass
@@ -396,36 +475,92 @@ def pwgen(length=None):
     return(''.join(random_chars))
 
 
-def list_nics(nic_type):
-    '''Return a list of nics of given type(s)'''
+def is_phy_iface(interface):
+    """Returns True if interface is not virtual, otherwise False."""
+    if interface:
+        sys_net = '/sys/class/net'
+        if os.path.isdir(sys_net):
+            for iface in glob.glob(os.path.join(sys_net, '*')):
+                if '/virtual/' in os.path.realpath(iface):
+                    continue
+
+                if interface == os.path.basename(iface):
+                    return True
+
+    return False
+
+
+def get_bond_master(interface):
+    """Returns bond master if interface is bond slave otherwise None.
+
+    NOTE: the provided interface is expected to be physical
+    """
+    if interface:
+        iface_path = '/sys/class/net/%s' % (interface)
+        if os.path.exists(iface_path):
+            if '/virtual/' in os.path.realpath(iface_path):
+                return None
+
+            master = os.path.join(iface_path, 'master')
+            if os.path.exists(master):
+                master = os.path.realpath(master)
+                # make sure it is a bond master
+                if os.path.exists(os.path.join(master, 'bonding')):
+                    return os.path.basename(master)
+
+    return None
+
+
+def list_nics(nic_type=None):
+    """Return a list of nics of given type(s)"""
     if isinstance(nic_type, six.string_types):
         int_types = [nic_type]
     else:
         int_types = nic_type
 
     interfaces = []
-    for int_type in int_types:
-        cmd = ['ip', 'addr', 'show', 'label', int_type + '*']
-        ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
-        ip_output = (line for line in ip_output if line)
-        for line in ip_output:
-            if line.split()[1].startswith(int_type):
-                matched = re.search('.*: (' + int_type + r'[0-9]+\.[0-9]+)@.*', line)
-                if matched:
-                    interface = matched.groups()[0]
-                else:
-                    interface = line.split()[1].replace(":", "")
-                interfaces.append(interface)
+    if nic_type:
+        for int_type in int_types:
+            cmd = ['ip', 'addr', 'show', 'label', int_type + '*']
+            ip_output = subprocess.check_output(cmd).decode('UTF-8')
+            ip_output = ip_output.split('\n')
+            ip_output = (line for line in ip_output if line)
+            for line in ip_output:
+                if line.split()[1].startswith(int_type):
+                    matched = re.search('.*: (' + int_type +
+                                        r'[0-9]+\.[0-9]+)@.*', line)
+                    if matched:
+                        iface = matched.groups()[0]
+                    else:
+                        iface = line.split()[1].replace(":", "")
+
+                    if iface not in interfaces:
+                        interfaces.append(iface)
+    else:
+        cmd = ['ip', 'a']
+        ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
+        ip_output = (line.strip() for line in ip_output if line)
+
+        key = re.compile('^[0-9]+:\s+(.+):')
+        for line in ip_output:
+            matched = re.search(key, line)
+            if matched:
+                iface = matched.group(1)
+                iface = iface.partition("@")[0]
+                if iface not in interfaces:
+                    interfaces.append(iface)
 
     return interfaces
 
 
 def set_nic_mtu(nic, mtu):
-    '''Set MTU on a network interface'''
+    """Set the Maximum Transmission Unit (MTU) on a network interface."""
     cmd = ['ip', 'link', 'set', nic, 'mtu', mtu]
     subprocess.check_call(cmd)
 
 
 def get_nic_mtu(nic):
+    """Return the Maximum Transmission Unit (MTU) for a network interface."""
     cmd = ['ip', 'addr', 'show', nic]
     ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
     mtu = ""
|
|||||||
|
|
||||||
|
|
||||||
def get_nic_hwaddr(nic):
|
def get_nic_hwaddr(nic):
|
||||||
|
"""Return the Media Access Control (MAC) for a network interface."""
|
||||||
cmd = ['ip', '-o', '-0', 'addr', 'show', nic]
|
cmd = ['ip', '-o', '-0', 'addr', 'show', nic]
|
||||||
ip_output = subprocess.check_output(cmd).decode('UTF-8')
|
ip_output = subprocess.check_output(cmd).decode('UTF-8')
|
||||||
hwaddr = ""
|
hwaddr = ""
|
||||||
@@ -447,7 +583,7 @@ def get_nic_hwaddr(nic):
 
 
 def cmp_pkgrevno(package, revno, pkgcache=None):
-    '''Compare supplied revno with the revno of the installed package
+    """Compare supplied revno with the revno of the installed package
 
     * 1 => Installed revno is greater than supplied arg
     * 0 => Installed revno is the same as supplied arg
@@ -456,7 +592,7 @@ def cmp_pkgrevno(package, revno, pkgcache=None):
     This function imports apt_cache function from charmhelpers.fetch if
     the pkgcache argument is None. Be sure to add charmhelpers.fetch if
     you call this function, or pass an apt_pkg.Cache() instance.
-    '''
+    """
     import apt_pkg
     if not pkgcache:
         from charmhelpers.fetch import apt_cache
@@ -466,15 +602,30 @@ def cmp_pkgrevno(package, revno, pkgcache=None):
 
 
 @contextmanager
-def chdir(d):
+def chdir(directory):
+    """Change the current working directory to a different directory for a code
+    block and return the previous directory after the block exits. Useful to
+    run commands from a specified directory.
+
+    :param str directory: The directory path to change to for this context.
+    """
     cur = os.getcwd()
     try:
-        yield os.chdir(d)
+        yield os.chdir(directory)
     finally:
         os.chdir(cur)
 
 
-def chownr(path, owner, group, follow_links=True):
+def chownr(path, owner, group, follow_links=True, chowntopdir=False):
+    """Recursively change user and group ownership of files and directories
+    in given path. Doesn't chown path itself by default, only its children.
+
+    :param str path: The string path to start changing ownership.
+    :param str owner: The owner string to use when looking up the uid.
+    :param str group: The group string to use when looking up the gid.
+    :param bool follow_links: Also Chown links if True
+    :param bool chowntopdir: Also chown path itself if True
+    """
     uid = pwd.getpwnam(owner).pw_uid
     gid = grp.getgrnam(group).gr_gid
     if follow_links:
@@ -482,6 +633,10 @@ def chownr(path, owner, group, follow_links=True):
     else:
         chown = os.lchown
 
+    if chowntopdir:
+        broken_symlink = os.path.lexists(path) and not os.path.exists(path)
+        if not broken_symlink:
+            chown(path, uid, gid)
     for root, dirs, files in os.walk(path):
         for name in dirs + files:
             full = os.path.join(root, name)
@@ -491,4 +646,28 @@ def chownr(path, owner, group, follow_links=True):
 
 
 def lchownr(path, owner, group):
+    """Recursively change user and group ownership of files and directories
+    in a given path, not following symbolic links. See the documentation for
+    'os.lchown' for more information.
+
+    :param str path: The string path to start changing ownership.
+    :param str owner: The owner string to use when looking up the uid.
+    :param str group: The group string to use when looking up the gid.
+    """
     chownr(path, owner, group, follow_links=False)
+
+
+def get_total_ram():
+    """The total amount of system RAM in bytes.
+
+    This is what is reported by the OS, and may be overcommitted when
+    there are multiple containers hosted on the same machine.
+    """
+    with open('/proc/meminfo', 'r') as f:
+        for line in f.readlines():
+            if line:
+                key, value, unit = line.split()
+                if key == 'MemTotal:':
+                    assert unit == 'kB', 'Unknown unit'
+                    return int(value) * 1024  # Classic, not KiB.
+        raise NotImplementedError()
@@ -16,7 +16,9 @@
 
 import os
 import yaml
+
 from charmhelpers.core import hookenv
+from charmhelpers.core import host
 from charmhelpers.core import templating
 
 from charmhelpers.core.services.base import ManagerCallback
@@ -240,27 +242,50 @@ class TemplateCallback(ManagerCallback):
 
     :param str source: The template source file, relative to
         `$CHARM_DIR/templates`
-    :param str target: The target to write the rendered template to
+    :param str target: The target to write the rendered template to (or None)
     :param str owner: The owner of the rendered file
     :param str group: The group of the rendered file
     :param int perms: The permissions of the rendered file
+    :param partial on_change_action: functools partial to be executed when
+                                     rendered file changes
+    :param jinja2 loader template_loader: A jinja2 template loader
+
+    :return str: The rendered template
     """
     def __init__(self, source, target,
-                 owner='root', group='root', perms=0o444):
+                 owner='root', group='root', perms=0o444,
+                 on_change_action=None, template_loader=None):
         self.source = source
         self.target = target
         self.owner = owner
         self.group = group
         self.perms = perms
+        self.on_change_action = on_change_action
+        self.template_loader = template_loader
 
     def __call__(self, manager, service_name, event_name):
+        pre_checksum = ''
+        if self.on_change_action and os.path.isfile(self.target):
+            pre_checksum = host.file_hash(self.target)
         service = manager.get_service(service_name)
-        context = {}
+        context = {'ctx': {}}
         for ctx in service.get('required_data', []):
             context.update(ctx)
-        templating.render(self.source, self.target, context,
-                          self.owner, self.group, self.perms)
+            context['ctx'].update(ctx)
+
+        result = templating.render(self.source, self.target, context,
+                                   self.owner, self.group, self.perms,
+                                   template_loader=self.template_loader)
+        if self.on_change_action:
+            if pre_checksum == host.file_hash(self.target):
+                hookenv.log(
+                    'No change detected: {}'.format(self.target),
+                    hookenv.DEBUG)
+            else:
+                self.on_change_action()
+
+        return result
 
 
 # Convenience aliases for templates
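The on_change_action hook makes template rendering event-driven. A hedged sketch of wiring it up in a services-framework definition (file names and service name are illustrative):

    # Sketch only: restart a service whenever its rendered config changes.
    from functools import partial
    render_config = TemplateCallback(
        source='plumgrid.conf', target='/etc/plumgrid/plumgrid.conf',
        on_change_action=partial(service_restart, 'plumgrid'))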
@@ -18,6 +18,7 @@
 # along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.
 
 import six
+import re
 
 
 def bool_from_string(value):
@@ -40,3 +41,32 @@ def bool_from_string(value):
 
     msg = "Unable to interpret string value '%s' as boolean" % (value)
     raise ValueError(msg)
+
+
+def bytes_from_string(value):
+    """Interpret human readable string value as bytes.
+
+    Returns int
+    """
+    BYTE_POWER = {
+        'K': 1,
+        'KB': 1,
+        'M': 2,
+        'MB': 2,
+        'G': 3,
+        'GB': 3,
+        'T': 4,
+        'TB': 4,
+        'P': 5,
+        'PB': 5,
+    }
+    if isinstance(value, six.string_types):
+        value = six.text_type(value)
+    else:
+        msg = "Unable to interpret non-string value '%s' as bytes" % (value)
+        raise ValueError(msg)
+    matches = re.match("([0-9]+)([a-zA-Z]+)", value)
+    if not matches:
+        msg = "Unable to interpret string value '%s' as bytes" % (value)
+        raise ValueError(msg)
+    return int(matches.group(1)) * (1024 ** BYTE_POWER[matches.group(2)])
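bytes_from_string() treats each suffix as a power of 1024, so 'K' and 'KB' both mean kibibytes. A hedged sketch:

    # Sketch only: both suffix forms resolve to the same byte count.
    assert bytes_from_string('1MB') == 1024 ** 2
    assert bytes_from_string('2G') == 2 * 1024 ** 3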
@@ -21,13 +21,14 @@ from charmhelpers.core import hookenv


 def render(source, target, context, owner='root', group='root',
-           perms=0o444, templates_dir=None, encoding='UTF-8'):
+           perms=0o444, templates_dir=None, encoding='UTF-8', template_loader=None):
     """
     Render a template.

     The `source` path, if not absolute, is relative to the `templates_dir`.

-    The `target` path should be absolute.
+    The `target` path should be absolute. It can also be `None`, in which
+    case no file will be written.

     The context should be a dict containing the values to be replaced in the
     template.
@@ -36,6 +37,9 @@ def render(source, target, context, owner='root', group='root',

     If omitted, `templates_dir` defaults to the `templates` folder in the charm.

+    The rendered template will be written to the file as well as being returned
+    as a string.
+
     Note: Using this requires python-jinja2; if it is not installed, calling
     this will attempt to use charmhelpers.fetch.apt_install to install it.
     """
@@ -52,17 +56,26 @@ def render(source, target, context, owner='root', group='root',
         apt_install('python-jinja2', fatal=True)
     from jinja2 import FileSystemLoader, Environment, exceptions

-    if templates_dir is None:
-        templates_dir = os.path.join(hookenv.charm_dir(), 'templates')
-    loader = Environment(loader=FileSystemLoader(templates_dir))
+    if template_loader:
+        template_env = Environment(loader=template_loader)
+    else:
+        if templates_dir is None:
+            templates_dir = os.path.join(hookenv.charm_dir(), 'templates')
+        template_env = Environment(loader=FileSystemLoader(templates_dir))
     try:
         source = source
-        template = loader.get_template(source)
+        template = template_env.get_template(source)
     except exceptions.TemplateNotFound as e:
         hookenv.log('Could not load template %s from %s.' %
                     (source, templates_dir),
                     level=hookenv.ERROR)
         raise e
     content = template.render(context)
-    host.mkdir(os.path.dirname(target), owner, group, perms=0o755)
-    host.write_file(target, content.encode(encoding), owner, group, perms)
+    if target is not None:
+        target_dir = os.path.dirname(target)
+        if not os.path.exists(target_dir):
+            # This is a terrible default directory permission, as the file
+            # or its siblings will often contain secrets.
+            host.mkdir(os.path.dirname(target), owner, group, perms=0o755)
+        host.write_file(target, content.encode(encoding), owner, group, perms)
+    return content
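A sketch of the two behaviours added to render() (assumes jinja2 and charmhelpers are installed): a custom template_loader replaces the charm templates_dir lookup, and target=None returns the rendered text without writing a file:

    from jinja2 import DictLoader
    from charmhelpers.core.templating import render

    loader = DictLoader({'pg.conf': 'director = {{ host }}'})
    # No file is written when target is None; the rendered string is returned.
    content = render('pg.conf', None, {'host': '10.0.0.1'},
                     template_loader=loader)
    assert content == 'director = 10.0.0.1'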
@@ -152,6 +152,7 @@ associated to the hookname.
 import collections
 import contextlib
 import datetime
+import itertools
 import json
 import os
 import pprint
@@ -164,8 +165,7 @@ __author__ = 'Kapil Thangavelu <kapil.foss@gmail.com>'
 class Storage(object):
     """Simple key value database for local unit state within charms.

-    Modifications are automatically committed at hook exit. That's
-    currently regardless of exit code.
+    Modifications are not persisted unless :meth:`flush` is called.

     To support dicts, lists, integer, floats, and booleans values
     are automatically json encoded/decoded.
@@ -173,8 +173,11 @@ class Storage(object):
     def __init__(self, path=None):
         self.db_path = path
         if path is None:
-            self.db_path = os.path.join(
-                os.environ.get('CHARM_DIR', ''), '.unit-state.db')
+            if 'UNIT_STATE_DB' in os.environ:
+                self.db_path = os.environ['UNIT_STATE_DB']
+            else:
+                self.db_path = os.path.join(
+                    os.environ.get('CHARM_DIR', ''), '.unit-state.db')
         self.conn = sqlite3.connect('%s' % self.db_path)
         self.cursor = self.conn.cursor()
         self.revision = None
@@ -189,15 +192,8 @@ class Storage(object):
         self.conn.close()
         self._closed = True

-    def _scoped_query(self, stmt, params=None):
-        if params is None:
-            params = []
-        return stmt, params
-
     def get(self, key, default=None, record=False):
-        self.cursor.execute(
-            *self._scoped_query(
-                'select data from kv where key=?', [key]))
+        self.cursor.execute('select data from kv where key=?', [key])
         result = self.cursor.fetchone()
         if not result:
             return default
@@ -206,33 +202,81 @@ class Storage(object):
         return json.loads(result[0])

     def getrange(self, key_prefix, strip=False):
-        stmt = "select key, data from kv where key like '%s%%'" % key_prefix
-        self.cursor.execute(*self._scoped_query(stmt))
+        """
+        Get a range of keys starting with a common prefix as a mapping of
+        keys to values.
+
+        :param str key_prefix: Common prefix among all keys
+        :param bool strip: Optionally strip the common prefix from the key
+            names in the returned dict
+        :return dict: A (possibly empty) dict of key-value mappings
+        """
+        self.cursor.execute("select key, data from kv where key like ?",
+                            ['%s%%' % key_prefix])
         result = self.cursor.fetchall()

         if not result:
-            return None
+            return {}
         if not strip:
             key_prefix = ''
         return dict([
             (k[len(key_prefix):], json.loads(v)) for k, v in result])

     def update(self, mapping, prefix=""):
+        """
+        Set the values of multiple keys at once.
+
+        :param dict mapping: Mapping of keys to values
+        :param str prefix: Optional prefix to apply to all keys in `mapping`
+            before setting
+        """
         for k, v in mapping.items():
             self.set("%s%s" % (prefix, k), v)

     def unset(self, key):
+        """
+        Remove a key from the database entirely.
+        """
         self.cursor.execute('delete from kv where key=?', [key])
         if self.revision and self.cursor.rowcount:
             self.cursor.execute(
                 'insert into kv_revisions values (?, ?, ?)',
                 [key, self.revision, json.dumps('DELETED')])

+    def unsetrange(self, keys=None, prefix=""):
+        """
+        Remove a range of keys starting with a common prefix, from the database
+        entirely.
+
+        :param list keys: List of keys to remove.
+        :param str prefix: Optional prefix to apply to all keys in ``keys``
+            before removing.
+        """
+        if keys is not None:
+            keys = ['%s%s' % (prefix, key) for key in keys]
+            self.cursor.execute('delete from kv where key in (%s)' % ','.join(['?'] * len(keys)), keys)
+            if self.revision and self.cursor.rowcount:
+                self.cursor.execute(
+                    'insert into kv_revisions values %s' % ','.join(['(?, ?, ?)'] * len(keys)),
+                    list(itertools.chain.from_iterable((key, self.revision, json.dumps('DELETED')) for key in keys)))
+        else:
+            self.cursor.execute('delete from kv where key like ?',
+                                ['%s%%' % prefix])
+            if self.revision and self.cursor.rowcount:
+                self.cursor.execute(
+                    'insert into kv_revisions values (?, ?, ?)',
+                    ['%s%%' % prefix, self.revision, json.dumps('DELETED')])
+
     def set(self, key, value):
+        """
+        Set a value in the database.
+
+        :param str key: Key to set the value for
+        :param value: Any JSON-serializable value to be set
+        """
         serialized = json.dumps(value)

-        self.cursor.execute(
-            'select data from kv where key=?', [key])
+        self.cursor.execute('select data from kv where key=?', [key])
         exists = self.cursor.fetchone()

         # Skip mutations to the same value
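A sketch exercising the Storage changes above (assumes charmhelpers; the UNIT_STATE_DB path and the 'pg.' prefix are arbitrary):

    import os
    os.environ['UNIT_STATE_DB'] = '/tmp/demo-unit-state.db'  # new override hook

    from charmhelpers.core.unitdata import Storage

    db = Storage()
    db.update({'a': 1, 'b': 2}, prefix='pg.')
    assert db.getrange('pg.', strip=True) == {'a': 1, 'b': 2}
    db.unsetrange(['a', 'b'], prefix='pg.')
    assert db.getrange('pg.') == {}  # now an empty dict rather than None
    db.flush()
    db.close()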
@@ -90,6 +90,22 @@ CLOUD_ARCHIVE_POCKETS = {
     'kilo/proposed': 'trusty-proposed/kilo',
     'trusty-kilo/proposed': 'trusty-proposed/kilo',
     'trusty-proposed/kilo': 'trusty-proposed/kilo',
+    # Liberty
+    'liberty': 'trusty-updates/liberty',
+    'trusty-liberty': 'trusty-updates/liberty',
+    'trusty-liberty/updates': 'trusty-updates/liberty',
+    'trusty-updates/liberty': 'trusty-updates/liberty',
+    'liberty/proposed': 'trusty-proposed/liberty',
+    'trusty-liberty/proposed': 'trusty-proposed/liberty',
+    'trusty-proposed/liberty': 'trusty-proposed/liberty',
+    # Mitaka
+    'mitaka': 'trusty-updates/mitaka',
+    'trusty-mitaka': 'trusty-updates/mitaka',
+    'trusty-mitaka/updates': 'trusty-updates/mitaka',
+    'trusty-updates/mitaka': 'trusty-updates/mitaka',
+    'mitaka/proposed': 'trusty-proposed/mitaka',
+    'trusty-mitaka/proposed': 'trusty-proposed/mitaka',
+    'trusty-proposed/mitaka': 'trusty-proposed/mitaka',
 }

 # The order of this list is very important. Handlers should be listed in from
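These keys are consumed by charmhelpers' add_source(), so charms can now resolve the Liberty and Mitaka cloud-archive pockets; a sketch (needs root on an Ubuntu host):

    from charmhelpers.fetch import add_source

    # Looked up in CLOUD_ARCHIVE_POCKETS, resolving to trusty-updates/liberty.
    add_source('cloud:trusty-liberty')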
@@ -217,12 +233,12 @@ def apt_purge(packages, fatal=False):

 def apt_mark(packages, mark, fatal=False):
     """Flag one or more packages using apt-mark"""
+    log("Marking {} as {}".format(packages, mark))
     cmd = ['apt-mark', mark]
     if isinstance(packages, six.string_types):
         cmd.append(packages)
     else:
         cmd.extend(packages)
-    log("Holding {}".format(packages))

     if fatal:
         subprocess.check_call(cmd, universal_newlines=True)
@@ -403,7 +419,7 @@ def plugins(fetch_handlers=None):
                 importlib.import_module(package),
                 classname)
             plugin_list.append(handler_class())
-        except (ImportError, AttributeError):
+        except NotImplementedError:
             # Skip missing plugins so that they can be ommitted from
             # installation if desired
             log("FetchHandler {} not found, skipping plugin".format(
@@ -108,7 +108,7 @@ class ArchiveUrlFetchHandler(BaseFetchHandler):
         install_opener(opener)
         response = urlopen(source)
         try:
-            with open(dest, 'w') as dest_file:
+            with open(dest, 'wb') as dest_file:
                 dest_file.write(response.read())
         except Exception as e:
             if os.path.isfile(dest):
@@ -15,60 +15,50 @@
 # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.

 import os
+from subprocess import check_call
 from charmhelpers.fetch import (
     BaseFetchHandler,
-    UnhandledSource
+    UnhandledSource,
+    filter_installed_packages,
+    apt_install,
 )
 from charmhelpers.core.host import mkdir

-import six
-if six.PY3:
-    raise ImportError('bzrlib does not support Python3')
-
-try:
-    from bzrlib.branch import Branch
-    from bzrlib import bzrdir, workingtree, errors
-except ImportError:
-    from charmhelpers.fetch import apt_install
-    apt_install("python-bzrlib")
-    from bzrlib.branch import Branch
-    from bzrlib import bzrdir, workingtree, errors
+if filter_installed_packages(['bzr']) != []:
+    apt_install(['bzr'])
+    if filter_installed_packages(['bzr']) != []:
+        raise NotImplementedError('Unable to install bzr')


 class BzrUrlFetchHandler(BaseFetchHandler):
     """Handler for bazaar branches via generic and lp URLs"""
     def can_handle(self, source):
         url_parts = self.parse_url(source)
-        if url_parts.scheme not in ('bzr+ssh', 'lp'):
+        if url_parts.scheme not in ('bzr+ssh', 'lp', ''):
             return False
+        elif not url_parts.scheme:
+            return os.path.exists(os.path.join(source, '.bzr'))
         else:
             return True

     def branch(self, source, dest):
-        url_parts = self.parse_url(source)
-        # If we use lp:branchname scheme we need to load plugins
         if not self.can_handle(source):
             raise UnhandledSource("Cannot handle {}".format(source))
-        if url_parts.scheme == "lp":
-            from bzrlib.plugin import load_plugins
-            load_plugins()
-        try:
-            local_branch = bzrdir.BzrDir.create_branch_convenience(dest)
-        except errors.AlreadyControlDirError:
-            local_branch = Branch.open(dest)
-        try:
-            remote_branch = Branch.open(source)
-            remote_branch.push(local_branch)
-            tree = workingtree.WorkingTree.open(dest)
-            tree.update()
-        except Exception as e:
-            raise e
+        if os.path.exists(dest):
+            check_call(['bzr', 'pull', '--overwrite', '-d', dest, source])
+        else:
+            check_call(['bzr', 'branch', source, dest])

-    def install(self, source):
+    def install(self, source, dest=None):
         url_parts = self.parse_url(source)
         branch_name = url_parts.path.strip("/").split("/")[-1]
-        dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
-                                branch_name)
+        if dest:
+            dest_dir = os.path.join(dest, branch_name)
+        else:
+            dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
+                                    branch_name)
+
         if not os.path.exists(dest_dir):
             mkdir(dest_dir, perms=0o755)
         try:
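A usage sketch of the now CLI-backed bzr handler (assumes this charmhelpers sync on an Ubuntu host with network access; the Launchpad URL is illustrative):

    from charmhelpers.fetch.bzrurl import BzrUrlFetchHandler

    handler = BzrUrlFetchHandler()
    if handler.can_handle('lp:charm-helpers'):
        # Branches on first run; pulls with --overwrite on later runs.
        handler.branch('lp:charm-helpers', '/tmp/charm-helpers')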
@@ -15,24 +15,18 @@
 # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.

 import os
+from subprocess import check_call, CalledProcessError
 from charmhelpers.fetch import (
     BaseFetchHandler,
-    UnhandledSource
+    UnhandledSource,
+    filter_installed_packages,
+    apt_install,
 )
-from charmhelpers.core.host import mkdir
-
-import six
-if six.PY3:
-    raise ImportError('GitPython does not support Python 3')
-
-try:
-    from git import Repo
-except ImportError:
-    from charmhelpers.fetch import apt_install
-    apt_install("python-git")
-    from git import Repo
-
-from git.exc import GitCommandError  # noqa E402
+
+if filter_installed_packages(['git']) != []:
+    apt_install(['git'])
+    if filter_installed_packages(['git']) != []:
+        raise NotImplementedError('Unable to install git')


 class GitUrlFetchHandler(BaseFetchHandler):
@@ -40,19 +34,24 @@ class GitUrlFetchHandler(BaseFetchHandler):
     def can_handle(self, source):
         url_parts = self.parse_url(source)
         # TODO (mattyw) no support for ssh git@ yet
-        if url_parts.scheme not in ('http', 'https', 'git'):
+        if url_parts.scheme not in ('http', 'https', 'git', ''):
             return False
+        elif not url_parts.scheme:
+            return os.path.exists(os.path.join(source, '.git'))
         else:
             return True

-    def clone(self, source, dest, branch, depth=None):
+    def clone(self, source, dest, branch="master", depth=None):
         if not self.can_handle(source):
             raise UnhandledSource("Cannot handle {}".format(source))

-        if depth:
-            Repo.clone_from(source, dest, branch=branch, depth=depth)
+        if os.path.exists(dest):
+            cmd = ['git', '-C', dest, 'pull', source, branch]
         else:
-            Repo.clone_from(source, dest, branch=branch)
+            cmd = ['git', 'clone', source, dest, '--branch', branch]
+            if depth:
+                cmd.extend(['--depth', depth])
+        check_call(cmd)

     def install(self, source, branch="master", dest=None, depth=None):
         url_parts = self.parse_url(source)
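The equivalent sketch for the git handler (same assumptions; the GitHub URL is illustrative). One thing worth double-checking in the new code: cmd.extend(['--depth', depth]) passes depth through unconverted, so an integer depth may need str(depth) before subprocess will accept it.

    from charmhelpers.fetch.giturl import GitUrlFetchHandler

    handler = GitUrlFetchHandler()
    if handler.can_handle('https://github.com/juju/charm-helpers'):
        # Clones fresh, or pulls if dest already exists.
        handler.clone('https://github.com/juju/charm-helpers',
                      '/tmp/charm-helpers', branch='master')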
@@ -62,11 +61,9 @@ class GitUrlFetchHandler(BaseFetchHandler):
         else:
             dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
                                     branch_name)
-        if not os.path.exists(dest_dir):
-            mkdir(dest_dir, perms=0o755)
         try:
             self.clone(source, dest_dir, branch, depth)
-        except GitCommandError as e:
+        except CalledProcessError as e:
             raise UnhandledSource(e)
         except OSError as e:
             raise UnhandledSource(e.strerror)
@@ -117,12 +117,12 @@ class NeutronPGPluginContext(context.NeutronContext):
         else:
             pg_ctxt['nova_metadata_proxy_secret'] = 'plumgrid'

-        neutron_api_settings = _container_settings()
-        pg_ctxt['admin_user'] = neutron_api_settings['service_username']
-        pg_ctxt['admin_password'] = neutron_api_settings['service_password']
-        pg_ctxt['admin_tenant_name'] = neutron_api_settings['service_tenant']
-        pg_ctxt['service_protocol'] = neutron_api_settings['auth_protocol']
-        pg_ctxt['auth_port'] = neutron_api_settings['auth_port']
-        pg_ctxt['auth_host'] = neutron_api_settings['auth_host']
+        #neutron_api_settings = _container_settings()
+        #pg_ctxt['admin_user'] = neutron_api_settings['service_username']
+        #pg_ctxt['admin_password'] = neutron_api_settings['service_password']
+        #pg_ctxt['admin_tenant_name'] = neutron_api_settings['service_tenant']
+        #pg_ctxt['service_protocol'] = neutron_api_settings['auth_protocol']
+        #pg_ctxt['auth_port'] = neutron_api_settings['auth_port']
+        #pg_ctxt['auth_host'] = neutron_api_settings['auth_host']

         return pg_ctxt
@@ -12,6 +12,7 @@ from charmhelpers.core.hookenv import (
     Hooks,
     UnregisteredHookError,
     log,
+    relation_set
 )

 from charmhelpers.core.host import (
@@ -32,6 +33,10 @@ from neutron_plumgrid_utils import (
     ensure_files,
 )

+from charmhelpers.contrib.openstack.utils import (
+    os_release,
+)
+
 hooks = Hooks()
 CONFIGS = register_configs()
@@ -78,6 +83,23 @@ def relation_changed():
     CONFIGS.write_all()


+@hooks.hook("neutron-plugin-api-subordinate-relation-joined")
+def neutron_plugin_joined():
+    # create plugin config
+    release = os_release('neutron-server', base='kilo')
+    print "#############"
+    print release
+    plugin = "neutron.plugins.plumgrid.plumgrid_plugin.plumgrid_plugin.NeutronPluginPLUMgridV2" \
+        if release == 'kilo'\
+        else "networking_plumgrid.neutron.plugins.plugin.NeutronPluginPLUMgridV2"
+    settings = { "neutron-plugin": "plumgrid",
+                 "core-plugin": plugin,
+                 "neutron-plugin-config": "/etc/neutron/plugins/plumgrid/plumgrid.ini",
+                 "service-plugins": " ",
+                 "quota-driver": " "}
+    relation_set(relation_settings=settings)
+
+
 @hooks.hook('stop')
 def stop():
     '''
@@ -24,11 +24,13 @@ TEMPLATES = 'templates/'

 PG_PACKAGES = [
     'plumgrid-pythonlib',
+    'neutron-plugin-plumgrid'
 ]

 NEUTRON_CONF_DIR = "/etc/neutron"

 SU_FILE = '/etc/sudoers.d/neutron_sudoers'
+PLUMGRID_CONF = '%s/plugins/plumgrid/plumgrid.ini' % NEUTRON_CONF_DIR
 PGLIB_CONF = '%s/plugins/plumgrid/plumlib.ini' % NEUTRON_CONF_DIR

 BASE_RESOURCE_MAP = OrderedDict([
@@ -36,6 +38,10 @@ BASE_RESOURCE_MAP = OrderedDict([
         'services': [],
         'contexts': [neutron_plumgrid_context.NeutronPGPluginContext()],
     }),
+    (PLUMGRID_CONF, {
+        'services': ['neutron-server'],
+        'contexts': [neutron_plumgrid_context.NeutronPGPluginContext()],
+    }),
     (PGLIB_CONF, {
         'services': ['neutron-server'],
         'contexts': [neutron_plumgrid_context.NeutronPGPluginContext()],
@@ -18,9 +18,14 @@ tags:
   - openstack
 subordinate: true
 provides:
+  neutron-plugin-api-subordinate:
+    interface: neutron-plugin-api-subordinate
+    scope: container
   plumgrid-plugin:
     interface: plumgrid-plugin
 requires:
   container:
-    interface: neutron-plugin-api
+    interface: juju-info
     scope: container
+  identity-admin:
+    interface: keystone-admin