Sync charm-helpers
Change-Id: I2bc8e741519b9ebbeacc790d19db8f8eea903781
This commit is contained in:
parent
dfc0e8bfe2
commit
e62a646a4f
@ -1,57 +0,0 @@
|
||||
==========
|
||||
Commandant
|
||||
==========
|
||||
|
||||
-----------------------------------------------------
|
||||
Automatic command-line interfaces to Python functions
|
||||
-----------------------------------------------------
|
||||
|
||||
One of the benefits of ``libvirt`` is the uniformity of the interface: the C API (as well as the bindings in other languages) is a set of functions that accept parameters that are nearly identical to the command-line arguments. If you run ``virsh``, you get an interactive command prompt that supports all of the same commands that your shell scripts use as ``virsh`` subcommands.
|
||||
|
||||
Command execution and stdio manipulation is the greatest common factor across all development systems in the POSIX environment. By exposing your functions as commands that manipulate streams of text, you can make life easier for all the Ruby and Erlang and Go programmers in your life.
|
||||
|
||||
Goals
|
||||
=====
|
||||
|
||||
* Single decorator to expose a function as a command.
|
||||
* now two decorators - one "automatic" and one that allows authors to manipulate the arguments for fine-grained control.(MW)
|
||||
* Automatic analysis of function signature through ``inspect.getargspec()``
|
||||
* Command argument parser built automatically with ``argparse``
|
||||
* Interactive interpreter loop object made with ``Cmd``
|
||||
* Options to output structured return value data via ``pprint``, ``yaml`` or ``json`` dumps.
|
||||
|
||||
Other Important Features that need writing
|
||||
------------------------------------------
|
||||
|
||||
* Help and Usage documentation can be automatically generated, but it will be important to let users override this behaviour
|
||||
* The decorator should allow specifying further parameters to the parser's add_argument() calls, to specify types or to make arguments behave as boolean flags, etc.
|
||||
- Filename arguments are important, as good practice is for functions to accept file objects as parameters.
|
||||
- choices arguments help to limit bad input before the function is called
|
||||
* Some automatic behaviour could make for better defaults, once the user can override them.
|
||||
- We could automatically detect arguments that default to False or True, and automatically support --no-foo for foo=True.
|
||||
- We could automatically support hyphens as alternates for underscores
|
||||
- Arguments defaulting to sequence types could support the ``append`` action.
|
||||
|
||||
|
||||
-----------------------------------------------------
|
||||
Implementing subcommands
|
||||
-----------------------------------------------------
|
||||
|
||||
(WIP)
|
||||
|
||||
So as to avoid dependencies on the cli module, subcommands should be defined separately from their implementations. The recommmendation would be to place definitions into separate modules near the implementations which they expose.
|
||||
|
||||
Some examples::
|
||||
|
||||
from charmhelpers.cli import CommandLine
|
||||
from charmhelpers.payload import execd
|
||||
from charmhelpers.foo import bar
|
||||
|
||||
cli = CommandLine()
|
||||
|
||||
cli.subcommand(execd.execd_run)
|
||||
|
||||
@cli.subcommand_builder("bar", help="Bar baz qux")
|
||||
def barcmd_builder(subparser):
|
||||
subparser.add_argument('argument1', help="yackety")
|
||||
return bar
|
@ -1,205 +0,0 @@
|
||||
# Copyright 2015 Canonical Limited.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
'''
|
||||
A Pythonic API to interact with the charm hook environment.
|
||||
|
||||
:author: Stuart Bishop <stuart.bishop@canonical.com>
|
||||
'''
|
||||
|
||||
import six
|
||||
|
||||
from charmhelpers.core import hookenv
|
||||
|
||||
from collections import OrderedDict
|
||||
if six.PY3:
|
||||
from collections import UserDict # pragma: nocover
|
||||
else:
|
||||
from UserDict import IterableUserDict as UserDict # pragma: nocover
|
||||
|
||||
|
||||
class Relations(OrderedDict):
|
||||
'''Mapping relation name -> relation id -> Relation.
|
||||
|
||||
>>> rels = Relations()
|
||||
>>> rels['sprog']['sprog:12']['client/6']['widget']
|
||||
'remote widget'
|
||||
>>> rels['sprog']['sprog:12'].local['widget'] = 'local widget'
|
||||
>>> rels['sprog']['sprog:12'].local['widget']
|
||||
'local widget'
|
||||
>>> rels.peer.local['widget']
|
||||
'local widget on the peer relation'
|
||||
'''
|
||||
def __init__(self):
|
||||
super(Relations, self).__init__()
|
||||
for relname in sorted(hookenv.relation_types()):
|
||||
self[relname] = OrderedDict()
|
||||
relids = hookenv.relation_ids(relname)
|
||||
relids.sort(key=lambda x: int(x.split(':', 1)[-1]))
|
||||
for relid in relids:
|
||||
self[relname][relid] = Relation(relid)
|
||||
|
||||
@property
|
||||
def peer(self):
|
||||
peer_relid = hookenv.peer_relation_id()
|
||||
for rels in self.values():
|
||||
if peer_relid in rels:
|
||||
return rels[peer_relid]
|
||||
|
||||
|
||||
class Relation(OrderedDict):
|
||||
'''Mapping of unit -> remote RelationInfo for a relation.
|
||||
|
||||
This is an OrderedDict mapping, ordered numerically by
|
||||
by unit number.
|
||||
|
||||
Also provides access to the local RelationInfo, and peer RelationInfo
|
||||
instances by the 'local' and 'peers' attributes.
|
||||
|
||||
>>> r = Relation('sprog:12')
|
||||
>>> r.keys()
|
||||
['client/9', 'client/10'] # Ordered numerically
|
||||
>>> r['client/10']['widget'] # A remote RelationInfo setting
|
||||
'remote widget'
|
||||
>>> r.local['widget'] # The local RelationInfo setting
|
||||
'local widget'
|
||||
'''
|
||||
relid = None # The relation id.
|
||||
relname = None # The relation name (also known as relation type).
|
||||
service = None # The remote service name, if known.
|
||||
local = None # The local end's RelationInfo.
|
||||
peers = None # Map of peer -> RelationInfo. None if no peer relation.
|
||||
|
||||
def __init__(self, relid):
|
||||
remote_units = hookenv.related_units(relid)
|
||||
remote_units.sort(key=lambda u: int(u.split('/', 1)[-1]))
|
||||
super(Relation, self).__init__((unit, RelationInfo(relid, unit))
|
||||
for unit in remote_units)
|
||||
|
||||
self.relname = relid.split(':', 1)[0]
|
||||
self.relid = relid
|
||||
self.local = RelationInfo(relid, hookenv.local_unit())
|
||||
|
||||
for relinfo in self.values():
|
||||
self.service = relinfo.service
|
||||
break
|
||||
|
||||
# If we have peers, and they have joined both the provided peer
|
||||
# relation and this relation, we can peek at their data too.
|
||||
# This is useful for creating consensus without leadership.
|
||||
peer_relid = hookenv.peer_relation_id()
|
||||
if peer_relid and peer_relid != relid:
|
||||
peers = hookenv.related_units(peer_relid)
|
||||
if peers:
|
||||
peers.sort(key=lambda u: int(u.split('/', 1)[-1]))
|
||||
self.peers = OrderedDict((peer, RelationInfo(relid, peer))
|
||||
for peer in peers)
|
||||
else:
|
||||
self.peers = OrderedDict()
|
||||
else:
|
||||
self.peers = None
|
||||
|
||||
def __str__(self):
|
||||
return '{} ({})'.format(self.relid, self.service)
|
||||
|
||||
|
||||
class RelationInfo(UserDict):
|
||||
'''The bag of data at an end of a relation.
|
||||
|
||||
Every unit participating in a relation has a single bag of
|
||||
data associated with that relation. This is that bag.
|
||||
|
||||
The bag of data for the local unit may be updated. Remote data
|
||||
is immutable and will remain static for the duration of the hook.
|
||||
|
||||
Changes made to the local units relation data only become visible
|
||||
to other units after the hook completes successfully. If the hook
|
||||
does not complete successfully, the changes are rolled back.
|
||||
|
||||
Unlike standard Python mappings, setting an item to None is the
|
||||
same as deleting it.
|
||||
|
||||
>>> relinfo = RelationInfo('db:12') # Default is the local unit.
|
||||
>>> relinfo['user'] = 'fred'
|
||||
>>> relinfo['user']
|
||||
'fred'
|
||||
>>> relinfo['user'] = None
|
||||
>>> 'fred' in relinfo
|
||||
False
|
||||
|
||||
This class wraps hookenv.relation_get and hookenv.relation_set.
|
||||
All caching is left up to these two methods to avoid synchronization
|
||||
issues. Data is only loaded on demand.
|
||||
'''
|
||||
relid = None # The relation id.
|
||||
relname = None # The relation name (also know as the relation type).
|
||||
unit = None # The unit id.
|
||||
number = None # The unit number (integer).
|
||||
service = None # The service name.
|
||||
|
||||
def __init__(self, relid, unit):
|
||||
self.relname = relid.split(':', 1)[0]
|
||||
self.relid = relid
|
||||
self.unit = unit
|
||||
self.service, num = self.unit.split('/', 1)
|
||||
self.number = int(num)
|
||||
|
||||
def __str__(self):
|
||||
return '{} ({})'.format(self.relid, self.unit)
|
||||
|
||||
@property
|
||||
def data(self):
|
||||
return hookenv.relation_get(rid=self.relid, unit=self.unit)
|
||||
|
||||
def __setitem__(self, key, value):
|
||||
if self.unit != hookenv.local_unit():
|
||||
raise TypeError('Attempting to set {} on remote unit {}'
|
||||
''.format(key, self.unit))
|
||||
if value is not None and not isinstance(value, six.string_types):
|
||||
# We don't do implicit casting. This would cause simple
|
||||
# types like integers to be read back as strings in subsequent
|
||||
# hooks, and mutable types would require a lot of wrapping
|
||||
# to ensure relation-set gets called when they are mutated.
|
||||
raise ValueError('Only string values allowed')
|
||||
hookenv.relation_set(self.relid, {key: value})
|
||||
|
||||
def __delitem__(self, key):
|
||||
# Deleting a key and setting it to null is the same thing in
|
||||
# Juju relations.
|
||||
self[key] = None
|
||||
|
||||
|
||||
class Leader(UserDict):
|
||||
def __init__(self):
|
||||
pass # Don't call superclass initializer, as it will nuke self.data
|
||||
|
||||
@property
|
||||
def data(self):
|
||||
return hookenv.leader_get()
|
||||
|
||||
def __setitem__(self, key, value):
|
||||
if not hookenv.is_leader():
|
||||
raise TypeError('Not the leader. Cannot change leader settings.')
|
||||
if value is not None and not isinstance(value, six.string_types):
|
||||
# We don't do implicit casting. This would cause simple
|
||||
# types like integers to be read back as strings in subsequent
|
||||
# hooks, and mutable types would require a lot of wrapping
|
||||
# to ensure leader-set gets called when they are mutated.
|
||||
raise ValueError('Only string values allowed')
|
||||
hookenv.leader_set({key: value})
|
||||
|
||||
def __delitem__(self, key):
|
||||
# Deleting a key and setting it to null is the same thing in
|
||||
# Juju leadership settings.
|
||||
self[key] = None
|
@ -1,13 +0,0 @@
|
||||
# Copyright 2014-2015 Canonical Limited.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
@ -1,99 +0,0 @@
|
||||
# Copyright 2014-2015 Canonical Limited.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import amulet
|
||||
import os
|
||||
import six
|
||||
|
||||
|
||||
class AmuletDeployment(object):
|
||||
"""Amulet deployment.
|
||||
|
||||
This class provides generic Amulet deployment and test runner
|
||||
methods.
|
||||
"""
|
||||
|
||||
def __init__(self, series=None):
|
||||
"""Initialize the deployment environment."""
|
||||
self.series = None
|
||||
|
||||
if series:
|
||||
self.series = series
|
||||
self.d = amulet.Deployment(series=self.series)
|
||||
else:
|
||||
self.d = amulet.Deployment()
|
||||
|
||||
def _add_services(self, this_service, other_services):
|
||||
"""Add services.
|
||||
|
||||
Add services to the deployment where this_service is the local charm
|
||||
that we're testing and other_services are the other services that
|
||||
are being used in the local amulet tests.
|
||||
"""
|
||||
if this_service['name'] != os.path.basename(os.getcwd()):
|
||||
s = this_service['name']
|
||||
msg = "The charm's root directory name needs to be {}".format(s)
|
||||
amulet.raise_status(amulet.FAIL, msg=msg)
|
||||
|
||||
if 'units' not in this_service:
|
||||
this_service['units'] = 1
|
||||
|
||||
self.d.add(this_service['name'], units=this_service['units'],
|
||||
constraints=this_service.get('constraints'),
|
||||
storage=this_service.get('storage'))
|
||||
|
||||
for svc in other_services:
|
||||
if 'location' in svc:
|
||||
branch_location = svc['location']
|
||||
elif self.series:
|
||||
branch_location = 'cs:{}/{}'.format(self.series, svc['name']),
|
||||
else:
|
||||
branch_location = None
|
||||
|
||||
if 'units' not in svc:
|
||||
svc['units'] = 1
|
||||
|
||||
self.d.add(svc['name'], charm=branch_location, units=svc['units'],
|
||||
constraints=svc.get('constraints'),
|
||||
storage=svc.get('storage'))
|
||||
|
||||
def _add_relations(self, relations):
|
||||
"""Add all of the relations for the services."""
|
||||
for k, v in six.iteritems(relations):
|
||||
self.d.relate(k, v)
|
||||
|
||||
def _configure_services(self, configs):
|
||||
"""Configure all of the services."""
|
||||
for service, config in six.iteritems(configs):
|
||||
self.d.configure(service, config)
|
||||
|
||||
def _deploy(self):
|
||||
"""Deploy environment and wait for all hooks to finish executing."""
|
||||
timeout = int(os.environ.get('AMULET_SETUP_TIMEOUT', 900))
|
||||
try:
|
||||
self.d.setup(timeout=timeout)
|
||||
self.d.sentry.wait(timeout=timeout)
|
||||
except amulet.helpers.TimeoutError:
|
||||
amulet.raise_status(
|
||||
amulet.FAIL,
|
||||
msg="Deployment timed out ({}s)".format(timeout)
|
||||
)
|
||||
except Exception:
|
||||
raise
|
||||
|
||||
def run_tests(self):
|
||||
"""Run all of the methods that are prefixed with 'test_'."""
|
||||
for test in dir(self):
|
||||
if test.startswith('test_'):
|
||||
getattr(self, test)()
|
@ -1,820 +0,0 @@
|
||||
# Copyright 2014-2015 Canonical Limited.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import io
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import re
|
||||
import socket
|
||||
import subprocess
|
||||
import sys
|
||||
import time
|
||||
import uuid
|
||||
|
||||
import amulet
|
||||
import distro_info
|
||||
import six
|
||||
from six.moves import configparser
|
||||
if six.PY3:
|
||||
from urllib import parse as urlparse
|
||||
else:
|
||||
import urlparse
|
||||
|
||||
|
||||
class AmuletUtils(object):
|
||||
"""Amulet utilities.
|
||||
|
||||
This class provides common utility functions that are used by Amulet
|
||||
tests.
|
||||
"""
|
||||
|
||||
def __init__(self, log_level=logging.ERROR):
|
||||
self.log = self.get_logger(level=log_level)
|
||||
self.ubuntu_releases = self.get_ubuntu_releases()
|
||||
|
||||
def get_logger(self, name="amulet-logger", level=logging.DEBUG):
|
||||
"""Get a logger object that will log to stdout."""
|
||||
log = logging
|
||||
logger = log.getLogger(name)
|
||||
fmt = log.Formatter("%(asctime)s %(funcName)s "
|
||||
"%(levelname)s: %(message)s")
|
||||
|
||||
handler = log.StreamHandler(stream=sys.stdout)
|
||||
handler.setLevel(level)
|
||||
handler.setFormatter(fmt)
|
||||
|
||||
logger.addHandler(handler)
|
||||
logger.setLevel(level)
|
||||
|
||||
return logger
|
||||
|
||||
def valid_ip(self, ip):
|
||||
if re.match(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$", ip):
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
|
||||
def valid_url(self, url):
|
||||
p = re.compile(
|
||||
r'^(?:http|ftp)s?://'
|
||||
r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' # noqa
|
||||
r'localhost|'
|
||||
r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})'
|
||||
r'(?::\d+)?'
|
||||
r'(?:/?|[/?]\S+)$',
|
||||
re.IGNORECASE)
|
||||
if p.match(url):
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
|
||||
def get_ubuntu_release_from_sentry(self, sentry_unit):
|
||||
"""Get Ubuntu release codename from sentry unit.
|
||||
|
||||
:param sentry_unit: amulet sentry/service unit pointer
|
||||
:returns: list of strings - release codename, failure message
|
||||
"""
|
||||
msg = None
|
||||
cmd = 'lsb_release -cs'
|
||||
release, code = sentry_unit.run(cmd)
|
||||
if code == 0:
|
||||
self.log.debug('{} lsb_release: {}'.format(
|
||||
sentry_unit.info['unit_name'], release))
|
||||
else:
|
||||
msg = ('{} `{}` returned {} '
|
||||
'{}'.format(sentry_unit.info['unit_name'],
|
||||
cmd, release, code))
|
||||
if release not in self.ubuntu_releases:
|
||||
msg = ("Release ({}) not found in Ubuntu releases "
|
||||
"({})".format(release, self.ubuntu_releases))
|
||||
return release, msg
|
||||
|
||||
def validate_services(self, commands):
|
||||
"""Validate that lists of commands succeed on service units. Can be
|
||||
used to verify system services are running on the corresponding
|
||||
service units.
|
||||
|
||||
:param commands: dict with sentry keys and arbitrary command list vals
|
||||
:returns: None if successful, Failure string message otherwise
|
||||
"""
|
||||
self.log.debug('Checking status of system services...')
|
||||
|
||||
# /!\ DEPRECATION WARNING (beisner):
|
||||
# New and existing tests should be rewritten to use
|
||||
# validate_services_by_name() as it is aware of init systems.
|
||||
self.log.warn('DEPRECATION WARNING: use '
|
||||
'validate_services_by_name instead of validate_services '
|
||||
'due to init system differences.')
|
||||
|
||||
for k, v in six.iteritems(commands):
|
||||
for cmd in v:
|
||||
output, code = k.run(cmd)
|
||||
self.log.debug('{} `{}` returned '
|
||||
'{}'.format(k.info['unit_name'],
|
||||
cmd, code))
|
||||
if code != 0:
|
||||
return "command `{}` returned {}".format(cmd, str(code))
|
||||
return None
|
||||
|
||||
def validate_services_by_name(self, sentry_services):
|
||||
"""Validate system service status by service name, automatically
|
||||
detecting init system based on Ubuntu release codename.
|
||||
|
||||
:param sentry_services: dict with sentry keys and svc list values
|
||||
:returns: None if successful, Failure string message otherwise
|
||||
"""
|
||||
self.log.debug('Checking status of system services...')
|
||||
|
||||
# Point at which systemd became a thing
|
||||
systemd_switch = self.ubuntu_releases.index('vivid')
|
||||
|
||||
for sentry_unit, services_list in six.iteritems(sentry_services):
|
||||
# Get lsb_release codename from unit
|
||||
release, ret = self.get_ubuntu_release_from_sentry(sentry_unit)
|
||||
if ret:
|
||||
return ret
|
||||
|
||||
for service_name in services_list:
|
||||
if (self.ubuntu_releases.index(release) >= systemd_switch or
|
||||
service_name in ['rabbitmq-server', 'apache2',
|
||||
'memcached']):
|
||||
# init is systemd (or regular sysv)
|
||||
cmd = 'sudo service {} status'.format(service_name)
|
||||
output, code = sentry_unit.run(cmd)
|
||||
service_running = code == 0
|
||||
elif self.ubuntu_releases.index(release) < systemd_switch:
|
||||
# init is upstart
|
||||
cmd = 'sudo status {}'.format(service_name)
|
||||
output, code = sentry_unit.run(cmd)
|
||||
service_running = code == 0 and "start/running" in output
|
||||
|
||||
self.log.debug('{} `{}` returned '
|
||||
'{}'.format(sentry_unit.info['unit_name'],
|
||||
cmd, code))
|
||||
if not service_running:
|
||||
return u"command `{}` returned {} {}".format(
|
||||
cmd, output, str(code))
|
||||
return None
|
||||
|
||||
def _get_config(self, unit, filename):
|
||||
"""Get a ConfigParser object for parsing a unit's config file."""
|
||||
file_contents = unit.file_contents(filename)
|
||||
|
||||
# NOTE(beisner): by default, ConfigParser does not handle options
|
||||
# with no value, such as the flags used in the mysql my.cnf file.
|
||||
# https://bugs.python.org/issue7005
|
||||
config = configparser.ConfigParser(allow_no_value=True)
|
||||
config.readfp(io.StringIO(file_contents))
|
||||
return config
|
||||
|
||||
def validate_config_data(self, sentry_unit, config_file, section,
|
||||
expected):
|
||||
"""Validate config file data.
|
||||
|
||||
Verify that the specified section of the config file contains
|
||||
the expected option key:value pairs.
|
||||
|
||||
Compare expected dictionary data vs actual dictionary data.
|
||||
The values in the 'expected' dictionary can be strings, bools, ints,
|
||||
longs, or can be a function that evaluates a variable and returns a
|
||||
bool.
|
||||
"""
|
||||
self.log.debug('Validating config file data ({} in {} on {})'
|
||||
'...'.format(section, config_file,
|
||||
sentry_unit.info['unit_name']))
|
||||
config = self._get_config(sentry_unit, config_file)
|
||||
|
||||
if section != 'DEFAULT' and not config.has_section(section):
|
||||
return "section [{}] does not exist".format(section)
|
||||
|
||||
for k in expected.keys():
|
||||
if not config.has_option(section, k):
|
||||
return "section [{}] is missing option {}".format(section, k)
|
||||
|
||||
actual = config.get(section, k)
|
||||
v = expected[k]
|
||||
if (isinstance(v, six.string_types) or
|
||||
isinstance(v, bool) or
|
||||
isinstance(v, six.integer_types)):
|
||||
# handle explicit values
|
||||
if actual != v:
|
||||
return "section [{}] {}:{} != expected {}:{}".format(
|
||||
section, k, actual, k, expected[k])
|
||||
# handle function pointers, such as not_null or valid_ip
|
||||
elif not v(actual):
|
||||
return "section [{}] {}:{} != expected {}:{}".format(
|
||||
section, k, actual, k, expected[k])
|
||||
return None
|
||||
|
||||
def _validate_dict_data(self, expected, actual):
|
||||
"""Validate dictionary data.
|
||||
|
||||
Compare expected dictionary data vs actual dictionary data.
|
||||
The values in the 'expected' dictionary can be strings, bools, ints,
|
||||
longs, or can be a function that evaluates a variable and returns a
|
||||
bool.
|
||||
"""
|
||||
self.log.debug('actual: {}'.format(repr(actual)))
|
||||
self.log.debug('expected: {}'.format(repr(expected)))
|
||||
|
||||
for k, v in six.iteritems(expected):
|
||||
if k in actual:
|
||||
if (isinstance(v, six.string_types) or
|
||||
isinstance(v, bool) or
|
||||
isinstance(v, six.integer_types)):
|
||||
# handle explicit values
|
||||
if v != actual[k]:
|
||||
return "{}:{}".format(k, actual[k])
|
||||
# handle function pointers, such as not_null or valid_ip
|
||||
elif not v(actual[k]):
|
||||
return "{}:{}".format(k, actual[k])
|
||||
else:
|
||||
return "key '{}' does not exist".format(k)
|
||||
return None
|
||||
|
||||
def validate_relation_data(self, sentry_unit, relation, expected):
|
||||
"""Validate actual relation data based on expected relation data."""
|
||||
actual = sentry_unit.relation(relation[0], relation[1])
|
||||
return self._validate_dict_data(expected, actual)
|
||||
|
||||
def _validate_list_data(self, expected, actual):
|
||||
"""Compare expected list vs actual list data."""
|
||||
for e in expected:
|
||||
if e not in actual:
|
||||
return "expected item {} not found in actual list".format(e)
|
||||
return None
|
||||
|
||||
def not_null(self, string):
|
||||
if string is not None:
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
|
||||
def _get_file_mtime(self, sentry_unit, filename):
|
||||
"""Get last modification time of file."""
|
||||
return sentry_unit.file_stat(filename)['mtime']
|
||||
|
||||
def _get_dir_mtime(self, sentry_unit, directory):
|
||||
"""Get last modification time of directory."""
|
||||
return sentry_unit.directory_stat(directory)['mtime']
|
||||
|
||||
def _get_proc_start_time(self, sentry_unit, service, pgrep_full=None):
|
||||
"""Get start time of a process based on the last modification time
|
||||
of the /proc/pid directory.
|
||||
|
||||
:sentry_unit: The sentry unit to check for the service on
|
||||
:service: service name to look for in process table
|
||||
:pgrep_full: [Deprecated] Use full command line search mode with pgrep
|
||||
:returns: epoch time of service process start
|
||||
:param commands: list of bash commands
|
||||
:param sentry_units: list of sentry unit pointers
|
||||
:returns: None if successful; Failure message otherwise
|
||||
"""
|
||||
pid_list = self.get_process_id_list(
|
||||
sentry_unit, service, pgrep_full=pgrep_full)
|
||||
pid = pid_list[0]
|
||||
proc_dir = '/proc/{}'.format(pid)
|
||||
self.log.debug('Pid for {} on {}: {}'.format(
|
||||
service, sentry_unit.info['unit_name'], pid))
|
||||
|
||||
return self._get_dir_mtime(sentry_unit, proc_dir)
|
||||
|
||||
def service_restarted(self, sentry_unit, service, filename,
|
||||
pgrep_full=None, sleep_time=20):
|
||||
"""Check if service was restarted.
|
||||
|
||||
Compare a service's start time vs a file's last modification time
|
||||
(such as a config file for that service) to determine if the service
|
||||
has been restarted.
|
||||
"""
|
||||
# /!\ DEPRECATION WARNING (beisner):
|
||||
# This method is prone to races in that no before-time is known.
|
||||
# Use validate_service_config_changed instead.
|
||||
|
||||
# NOTE(beisner) pgrep_full is no longer implemented, as pidof is now
|
||||
# used instead of pgrep. pgrep_full is still passed through to ensure
|
||||
# deprecation WARNS. lp1474030
|
||||
self.log.warn('DEPRECATION WARNING: use '
|
||||
'validate_service_config_changed instead of '
|
||||
'service_restarted due to known races.')
|
||||
|
||||
time.sleep(sleep_time)
|
||||
if (self._get_proc_start_time(sentry_unit, service, pgrep_full) >=
|
||||
self._get_file_mtime(sentry_unit, filename)):
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
|
||||
def service_restarted_since(self, sentry_unit, mtime, service,
|
||||
pgrep_full=None, sleep_time=20,
|
||||
retry_count=30, retry_sleep_time=10):
|
||||
"""Check if service was been started after a given time.
|
||||
|
||||
Args:
|
||||
sentry_unit (sentry): The sentry unit to check for the service on
|
||||
mtime (float): The epoch time to check against
|
||||
service (string): service name to look for in process table
|
||||
pgrep_full: [Deprecated] Use full command line search mode with pgrep
|
||||
sleep_time (int): Initial sleep time (s) before looking for file
|
||||
retry_sleep_time (int): Time (s) to sleep between retries
|
||||
retry_count (int): If file is not found, how many times to retry
|
||||
|
||||
Returns:
|
||||
bool: True if service found and its start time it newer than mtime,
|
||||
False if service is older than mtime or if service was
|
||||
not found.
|
||||
"""
|
||||
# NOTE(beisner) pgrep_full is no longer implemented, as pidof is now
|
||||
# used instead of pgrep. pgrep_full is still passed through to ensure
|
||||
# deprecation WARNS. lp1474030
|
||||
|
||||
unit_name = sentry_unit.info['unit_name']
|
||||
self.log.debug('Checking that %s service restarted since %s on '
|
||||
'%s' % (service, mtime, unit_name))
|
||||
time.sleep(sleep_time)
|
||||
proc_start_time = None
|
||||
tries = 0
|
||||
while tries <= retry_count and not proc_start_time:
|
||||
try:
|
||||
proc_start_time = self._get_proc_start_time(sentry_unit,
|
||||
service,
|
||||
pgrep_full)
|
||||
self.log.debug('Attempt {} to get {} proc start time on {} '
|
||||
'OK'.format(tries, service, unit_name))
|
||||
except IOError as e:
|
||||
# NOTE(beisner) - race avoidance, proc may not exist yet.
|
||||
# https://bugs.launchpad.net/charm-helpers/+bug/1474030
|
||||
self.log.debug('Attempt {} to get {} proc start time on {} '
|
||||
'failed\n{}'.format(tries, service,
|
||||
unit_name, e))
|
||||
time.sleep(retry_sleep_time)
|
||||
tries += 1
|
||||
|
||||
if not proc_start_time:
|
||||
self.log.warn('No proc start time found, assuming service did '
|
||||
'not start')
|
||||
return False
|
||||
if proc_start_time >= mtime:
|
||||
self.log.debug('Proc start time is newer than provided mtime'
|
||||
'(%s >= %s) on %s (OK)' % (proc_start_time,
|
||||
mtime, unit_name))
|
||||
return True
|
||||
else:
|
||||
self.log.warn('Proc start time (%s) is older than provided mtime '
|
||||
'(%s) on %s, service did not '
|
||||
'restart' % (proc_start_time, mtime, unit_name))
|
||||
return False
|
||||
|
||||
def config_updated_since(self, sentry_unit, filename, mtime,
|
||||
sleep_time=20, retry_count=30,
|
||||
retry_sleep_time=10):
|
||||
"""Check if file was modified after a given time.
|
||||
|
||||
Args:
|
||||
sentry_unit (sentry): The sentry unit to check the file mtime on
|
||||
filename (string): The file to check mtime of
|
||||
mtime (float): The epoch time to check against
|
||||
sleep_time (int): Initial sleep time (s) before looking for file
|
||||
retry_sleep_time (int): Time (s) to sleep between retries
|
||||
retry_count (int): If file is not found, how many times to retry
|
||||
|
||||
Returns:
|
||||
bool: True if file was modified more recently than mtime, False if
|
||||
file was modified before mtime, or if file not found.
|
||||
"""
|
||||
unit_name = sentry_unit.info['unit_name']
|
||||
self.log.debug('Checking that %s updated since %s on '
|
||||
'%s' % (filename, mtime, unit_name))
|
||||
time.sleep(sleep_time)
|
||||
file_mtime = None
|
||||
tries = 0
|
||||
while tries <= retry_count and not file_mtime:
|
||||
try:
|
||||
file_mtime = self._get_file_mtime(sentry_unit, filename)
|
||||
self.log.debug('Attempt {} to get {} file mtime on {} '
|
||||
'OK'.format(tries, filename, unit_name))
|
||||
except IOError as e:
|
||||
# NOTE(beisner) - race avoidance, file may not exist yet.
|
||||
# https://bugs.launchpad.net/charm-helpers/+bug/1474030
|
||||
self.log.debug('Attempt {} to get {} file mtime on {} '
|
||||
'failed\n{}'.format(tries, filename,
|
||||
unit_name, e))
|
||||
time.sleep(retry_sleep_time)
|
||||
tries += 1
|
||||
|
||||
if not file_mtime:
|
||||
self.log.warn('Could not determine file mtime, assuming '
|
||||
'file does not exist')
|
||||
return False
|
||||
|
||||
if file_mtime >= mtime:
|
||||
self.log.debug('File mtime is newer than provided mtime '
|
||||
'(%s >= %s) on %s (OK)' % (file_mtime,
|
||||
mtime, unit_name))
|
||||
return True
|
||||
else:
|
||||
self.log.warn('File mtime is older than provided mtime'
|
||||
'(%s < on %s) on %s' % (file_mtime,
|
||||
mtime, unit_name))
|
||||
return False
|
||||
|
||||
def validate_service_config_changed(self, sentry_unit, mtime, service,
                                    filename, pgrep_full=None,
                                    sleep_time=20, retry_count=30,
                                    retry_sleep_time=10):
    """Check that a service and a config file were both updated after mtime.

    Args:
      sentry_unit (sentry): The sentry unit to check for the service on
      mtime (float): The epoch time to check against
      service (string): service name to look for in process table
      filename (string): The file to check mtime of
      pgrep_full: [Deprecated] Use full command line search mode with pgrep
      sleep_time (int): Initial sleep in seconds to pass to test helpers
      retry_count (int): If service is not found, how many times to retry
      retry_sleep_time (int): Time in seconds to wait between retries

    Typical Usage:
      u = OpenStackAmuletUtils(ERROR)
      ...
      mtime = u.get_sentry_time(self.cinder_sentry)
      self.d.configure('cinder', {'verbose': 'True', 'debug': 'True'})
      if not u.validate_service_config_changed(self.cinder_sentry,
                                               mtime,
                                               'cinder-api',
                                               '/etc/cinder/cinder.conf')
          amulet.raise_status(amulet.FAIL, msg='update failed')

    Returns:
      bool: True if both service and file were updated/restarted after
      mtime, False if service is older than mtime or if service was
      not found or if filename was modified before mtime.
    """
    # NOTE(beisner) pgrep_full is no longer implemented, as pidof is now
    # used instead of pgrep.  pgrep_full is still passed through to ensure
    # deprecation WARNS.  lp1474030
    retry_kwargs = {
        'sleep_time': sleep_time,
        'retry_count': retry_count,
        'retry_sleep_time': retry_sleep_time,
    }

    # Both checks are always performed; their results are combined after.
    service_restarted = self.service_restarted_since(
        sentry_unit, mtime, service, pgrep_full=pgrep_full, **retry_kwargs)
    config_updated = self.config_updated_since(
        sentry_unit, filename, mtime, **retry_kwargs)

    return service_restarted and config_updated
|
||||
|
||||
def get_sentry_time(self, sentry_unit):
    """Return current epoch time on a sentry"""
    # `run` returns (output, exit_code); only the output is needed here.
    epoch_output = sentry_unit.run("date +'%s'")[0]
    return float(epoch_output)
|
||||
|
||||
def relation_error(self, name, data):
    """Return a standard failure message for unexpected relation data."""
    template = 'unexpected relation data in {} - {}'
    return template.format(name, data)
|
||||
|
||||
def endpoint_error(self, name, data):
    """Return a standard failure message for unexpected endpoint data."""
    template = 'unexpected endpoint data in {} - {}'
    return template.format(name, data)
|
||||
|
||||
def get_ubuntu_releases(self):
    """Return a list of all Ubuntu releases in order of release."""
    # distro_info ships the canonical, ordered release list.
    return distro_info.UbuntuDistroInfo().all
|
||||
|
||||
def file_to_url(self, file_rel_path):
    """Convert a relative file path to a file URL."""
    absolute_path = os.path.abspath(file_rel_path)
    return urlparse.urlparse(absolute_path, scheme='file').geturl()
|
||||
|
||||
def check_commands_on_units(self, commands, sentry_units):
    """Check that all commands in a list exit zero on all
    sentry units in a list.

    :param commands: list of bash commands
    :param sentry_units: list of sentry unit pointers
    :returns: None if successful; Failure message otherwise
    """
    self.log.debug('Checking exit codes for {} commands on {} '
                   'sentry units...'.format(len(commands),
                                            len(sentry_units)))
    for unit in sentry_units:
        unit_name = unit.info['unit_name']
        for command in commands:
            output, code = unit.run(command)
            if code != 0:
                # Short-circuit on the first failing command.
                return ('{} `{}` returned {} '
                        '{}'.format(unit_name, command, code, output))
            self.log.debug('{} `{}` returned {} '
                           '(OK)'.format(unit_name, command, code))
    return None
|
||||
|
||||
def get_process_id_list(self, sentry_unit, process_name,
                        expect_success=True, pgrep_full=False):
    """Get a list of process ID(s) from a single sentry juju unit
    for a single process name.

    :param sentry_unit: Amulet sentry instance (juju unit)
    :param process_name: Process name
    :param expect_success: If False, expect the PID to be missing,
        raise if it is present.
    :param pgrep_full: Match the full command line (`pgrep -f`) rather
        than the executable name (`pidof -x`).
    :returns: List of process IDs
    """
    lookup_fmt = 'pgrep -f "{}"' if pgrep_full else 'pidof -x "{}"'
    lookup = lookup_fmt.format(process_name)
    if not expect_success:
        # Invert the shell exit status: succeed only when nothing found.
        lookup += " || exit 0 && exit 1"
    output, code = sentry_unit.run(lookup)
    if code != 0:
        msg = ('{} `{}` returned {} '
               '{}'.format(sentry_unit.info['unit_name'],
                           lookup, code, output))
        amulet.raise_status(amulet.FAIL, msg=msg)
    return str(output).split()
|
||||
|
||||
def get_unit_process_ids(
        self, unit_processes, expect_success=True, pgrep_full=False):
    """Construct a dict containing unit sentries, process names, and
    process IDs.

    :param unit_processes: A dictionary of Amulet sentry instance
        to list of process names.
    :param expect_success: if False expect the processes to not be
        running, raise if they are.
    :returns: Dictionary of Amulet sentry instance to dictionary
        of process names to PIDs.
    """
    return {
        sentry_unit: {
            name: self.get_process_id_list(
                sentry_unit, name,
                expect_success=expect_success,
                pgrep_full=pgrep_full)
            for name in process_names
        }
        for sentry_unit, process_names in unit_processes.items()
    }
|
||||
|
||||
def validate_unit_process_ids(self, expected, actual):
    """Validate process id quantities for services on units.

    :param expected: dict of sentry unit to dict of process name to an
        expected PID quantity.  The quantity may be an int (exact count),
        a list of ints (any listed count matches), True (one or more
        PIDs must exist) or False (no PIDs may exist).
    :param actual: dict of sentry unit to dict of process name to a
        list of actual PIDs (as returned by get_unit_process_ids).
    :returns: None if all checks pass; a failure message string otherwise.
    """
    self.log.debug('Checking units for running processes...')
    self.log.debug('Expected PIDs: {}'.format(expected))
    self.log.debug('Actual PIDs: {}'.format(actual))

    if len(actual) != len(expected):
        return ('Unit count mismatch.  expected, actual: {}, '
                '{} '.format(len(expected), len(actual)))

    for (e_sentry, e_proc_names) in six.iteritems(expected):
        e_sentry_name = e_sentry.info['unit_name']
        if e_sentry in actual.keys():
            a_proc_names = actual[e_sentry]
        else:
            return ('Expected sentry ({}) not found in actual dict data.'
                    '{}'.format(e_sentry_name, e_sentry))

        if len(e_proc_names.keys()) != len(a_proc_names.keys()):
            return ('Process name count mismatch.  expected, actual: {}, '
                    '{}'.format(len(expected), len(actual)))

        # NOTE(review): zip of .items() assumes both dicts iterate in
        # the same key order -- confirm for the Python versions in use.
        for (e_proc_name, e_pids), (a_proc_name, a_pids) in \
                zip(e_proc_names.items(), a_proc_names.items()):
            if e_proc_name != a_proc_name:
                return ('Process name mismatch.  expected, actual: {}, '
                        '{}'.format(e_proc_name, a_proc_name))

            a_pids_length = len(a_pids)
            fail_msg = ('PID count mismatch.  {} ({}) expected, actual: '
                        '{}, {} ({})'.format(e_sentry_name, e_proc_name,
                                             e_pids, a_pids_length,
                                             a_pids))

            # If expected is a list, ensure at least one PID quantity match
            if isinstance(e_pids, list) and \
                    a_pids_length not in e_pids:
                return fail_msg
            # If expected is not bool and not list,
            # ensure PID quantities match
            elif not isinstance(e_pids, bool) and \
                    not isinstance(e_pids, list) and \
                    a_pids_length != e_pids:
                return fail_msg
            # If expected is bool True, ensure 1 or more PIDs exist
            elif isinstance(e_pids, bool) and \
                    e_pids is True and a_pids_length < 1:
                return fail_msg
            # If expected is bool False, ensure 0 PIDs exist
            elif isinstance(e_pids, bool) and \
                    e_pids is False and a_pids_length != 0:
                return fail_msg
            else:
                self.log.debug('PID check OK: {} {} {}: '
                               '{}'.format(e_sentry_name, e_proc_name,
                                           e_pids, a_pids))
    return None
|
||||
|
||||
def validate_list_of_identical_dicts(self, list_of_dicts):
    """Check that all dicts within a list are identical."""
    # frozenset of items is hashable, so identical dicts hash the same.
    hashes = [hash(frozenset(d.items())) for d in list_of_dicts]

    self.log.debug('Hashes: {}'.format(hashes))
    if len(set(hashes)) != 1:
        return 'Dicts within list are not identical'
    self.log.debug('Dicts within list are identical')
    return None
|
||||
|
||||
def validate_sectionless_conf(self, file_contents, expected):
    """A crude conf parser.  Useful to inspect configuration files which
    do not have section headers (as would be necessary in order to use
    the configparser).  Such as openstack-dashboard or rabbitmq confs.

    :param file_contents: full text of the config file
    :param expected: dict of config key to expected (string) value;
        raises an amulet failure on the first mismatching key.
    """
    for line in file_contents.split('\n'):
        if '=' not in line:
            continue
        # Split on the FIRST '=' only, so values that themselves contain
        # '=' (connection strings, URLs) are compared in full.  The old
        # line.split('=')[1] silently truncated such values.
        key, _, value = line.partition('=')
        key = key.strip()
        value = value.strip()
        if key in expected:
            if expected[key] != value:
                msg = ('Config mismatch.  Expected, actual: {}, '
                       '{}'.format(expected[key], value))
                amulet.raise_status(amulet.FAIL, msg=msg)
|
||||
|
||||
def get_unit_hostnames(self, units):
    """Return a dict of juju unit names to hostnames."""
    host_names = {
        unit.info['unit_name']:
            str(unit.file_contents('/etc/hostname').strip())
        for unit in units
    }
    self.log.debug('Unit host names: {}'.format(host_names))
    return host_names
|
||||
|
||||
def run_cmd_unit(self, sentry_unit, cmd):
    """Run a command on a unit, return the output and exit code."""
    output, code = sentry_unit.run(cmd)
    unit_name = sentry_unit.info['unit_name']
    if code != 0:
        # Non-zero exit is fatal for this helper.
        msg = ('{} `{}` command returned {} '
               '{}'.format(unit_name, cmd, code, output))
        amulet.raise_status(amulet.FAIL, msg=msg)
    self.log.debug('{} `{}` command returned {} '
                   '(OK)'.format(unit_name, cmd, code))
    return str(output), code
|
||||
|
||||
def file_exists_on_unit(self, sentry_unit, file_name):
    """Check if a file exists on a unit."""
    try:
        sentry_unit.file_stat(file_name)
    except IOError:
        # Missing file is the expected "does not exist" signal.
        return False
    except Exception as err:
        msg = 'Error checking file {}: {}'.format(file_name, err)
        amulet.raise_status(amulet.FAIL, msg=msg)
    else:
        return True
|
||||
|
||||
def file_contents_safe(self, sentry_unit, file_name,
                       max_wait=60, fatal=False):
    """Get file contents from a sentry unit.  Wrap amulet file_contents
    with retry logic to address races where a file checks as existing,
    but no longer exists by the time file_contents is called.
    Return None if file not found. Optionally raise if fatal is True.

    :param sentry_unit: Amulet sentry instance (juju unit)
    :param file_name: path of the file to read on the unit
    :param max_wait: approximate retry budget in seconds; retries sleep
        4s each, so up to max_wait / 4 attempts are made
    :param fatal: when True, raise an amulet failure instead of
        returning None when the file could not be read
    """
    unit_name = sentry_unit.info['unit_name']
    # False doubles as both "not read yet" sentinel and loop guard.
    file_contents = False
    tries = 0
    while not file_contents and tries < (max_wait / 4):
        try:
            file_contents = sentry_unit.file_contents(file_name)
        except IOError:
            self.log.debug('Attempt {} to open file {} from {} '
                           'failed'.format(tries, file_name,
                                           unit_name))
            # Only failed attempts sleep and consume the retry budget.
            time.sleep(4)
            tries += 1

    if file_contents:
        return file_contents
    elif not fatal:
        return None
    elif fatal:
        msg = 'Failed to get file contents from unit.'
        amulet.raise_status(amulet.FAIL, msg)
|
||||
|
||||
def port_knock_tcp(self, host="localhost", port=22, timeout=15):
    """Open a TCP socket to check for a listening service on a host.

    :param host: host name or IP address, default to localhost
    :param port: TCP port number, default to 22
    :param timeout: Connect timeout, default to 15 seconds
    :returns: True if successful, False if connect failed
    """

    # Resolve host name if possible
    try:
        connect_host = socket.gethostbyname(host)
        host_human = "{} ({})".format(connect_host, host)
    except socket.error as e:
        self.log.warn('Unable to resolve address: '
                      '{} ({}) Trying anyway!'.format(host, e))
        connect_host = host
        host_human = connect_host

    # Attempt socket connection
    knock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        knock.settimeout(timeout)
        knock.connect((connect_host, port))
        self.log.debug('Socket connect OK for host '
                       '{} on port {}.'.format(host_human, port))
        return True
    except socket.error as e:
        self.log.debug('Socket connect FAIL for'
                       ' {} port {} ({})'.format(host_human, port, e))
        return False
    finally:
        # Always release the socket; previously it leaked whenever the
        # connect attempt failed (close() was only on the success path).
        knock.close()
|
||||
|
||||
def port_knock_units(self, sentry_units, port=22,
                     timeout=15, expect_success=True):
    """Open a TCP socket to check for a listening service on each
    listed juju unit.

    :param sentry_units: list of sentry unit pointers
    :param port: TCP port number, default to 22
    :param timeout: Connect timeout, default to 15 seconds
    :param expect_success: True by default, set False to invert logic
    :returns: None if successful, Failure message otherwise
    """
    for sentry_unit in sentry_units:
        address = sentry_unit.info['public-address']
        is_open = self.port_knock_tcp(address, port, timeout)
        if expect_success and not is_open:
            return 'Socket connect failed.'
        if is_open and not expect_success:
            return 'Socket connected unexpectedly.'
|
||||
|
||||
def get_uuid_epoch_stamp(self):
    """Returns a stamp string based on uuid4 and epoch time.  Useful in
    generating test messages which need to be unique-ish."""
    unique_part = uuid.uuid4()
    epoch_part = time.time()
    return '[{}-{}]'.format(unique_part, epoch_part)
|
||||
|
||||
# amulet juju action helpers:
|
||||
def run_action(self, unit_sentry, action,
               _check_output=subprocess.check_output,
               params=None):
    """Translate to amulet's built in run_action(). Deprecated.

    Run the named action on a given unit sentry.

    :param unit_sentry: Amulet sentry instance (juju unit)
    :param action: name of the action to run
    :param _check_output: no longer used; kept for interface
        compatibility
    :param params: dict of parameters to pass to the action
    :returns: action_id
    """
    self.log.warn('charmhelpers.contrib.amulet.utils.run_action has '
                  'been deprecated for amulet.run_action')
    return unit_sentry.run_action(action, action_args=params)
|
||||
|
||||
def wait_on_action(self, action_id, _check_output=subprocess.check_output):
    """Wait for a given action, returning if it completed or not.

    :param action_id: a string action uuid
    :param _check_output: no longer used; kept for interface
        compatibility
    :returns: True when the action finished with status "completed"
    """
    result = amulet.actions.get_action_output(action_id, full_output=True)
    return result.get(u"status") == "completed"
|
||||
|
||||
def status_get(self, unit):
    """Return the current service status of this unit."""
    raw_status, exit_code = unit.run(
        "status-get --format=json --include-data")
    if exit_code == 0:
        parsed = json.loads(raw_status)
        return (parsed["status"], parsed["message"])
    # status-get itself failed; report an unknown state.
    return ("unknown", "")
|
@ -1,252 +0,0 @@
|
||||
# Copyright 2014-2015 Canonical Limited.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# Copyright 2013 Canonical Ltd.
|
||||
#
|
||||
# Authors:
|
||||
# Charm Helpers Developers <juju@lists.ubuntu.com>
|
||||
"""Charm Helpers ansible - declare the state of your machines.
|
||||
|
||||
This helper enables you to declare your machine state, rather than
|
||||
program it procedurally (and have to test each change to your procedures).
|
||||
Your install hook can be as simple as::
|
||||
|
||||
{{{
|
||||
import charmhelpers.contrib.ansible
|
||||
|
||||
|
||||
def install():
|
||||
charmhelpers.contrib.ansible.install_ansible_support()
|
||||
charmhelpers.contrib.ansible.apply_playbook('playbooks/install.yaml')
|
||||
}}}
|
||||
|
||||
and won't need to change (nor will its tests) when you change the machine
|
||||
state.
|
||||
|
||||
All of your juju config and relation-data are available as template
|
||||
variables within your playbooks and templates. An install playbook looks
|
||||
something like::
|
||||
|
||||
{{{
|
||||
---
|
||||
- hosts: localhost
|
||||
user: root
|
||||
|
||||
tasks:
|
||||
- name: Add private repositories.
|
||||
template:
|
||||
src: ../templates/private-repositories.list.jinja2
|
||||
dest: /etc/apt/sources.list.d/private.list
|
||||
|
||||
- name: Update the cache.
|
||||
apt: update_cache=yes
|
||||
|
||||
- name: Install dependencies.
|
||||
apt: pkg={{ item }}
|
||||
with_items:
|
||||
- python-mimeparse
|
||||
- python-webob
|
||||
- sunburnt
|
||||
|
||||
- name: Setup groups.
|
||||
group: name={{ item.name }} gid={{ item.gid }}
|
||||
with_items:
|
||||
- { name: 'deploy_user', gid: 1800 }
|
||||
- { name: 'service_user', gid: 1500 }
|
||||
|
||||
...
|
||||
}}}
|
||||
|
||||
Read more online about `playbooks`_ and standard ansible `modules`_.
|
||||
|
||||
.. _playbooks: http://www.ansibleworks.com/docs/playbooks.html
|
||||
.. _modules: http://www.ansibleworks.com/docs/modules.html
|
||||
|
||||
A further feature of the ansible hooks is to provide a light weight "action"
|
||||
scripting tool. This is a decorator that you apply to a function, and that
|
||||
function can now receive cli args, and can pass extra args to the playbook.
|
||||
|
||||
e.g.
|
||||
|
||||
|
||||
@hooks.action()
|
||||
def some_action(amount, force="False"):
|
||||
"Usage: some-action AMOUNT [force=True]" # <-- shown on error
|
||||
# process the arguments
|
||||
# do some calls
|
||||
# return extra-vars to be passed to ansible-playbook
|
||||
return {
|
||||
'amount': int(amount),
|
||||
'type': force,
|
||||
}
|
||||
|
||||
You can now create a symlink to hooks.py that can be invoked like a hook, but
|
||||
with cli params:
|
||||
|
||||
# link actions/some-action to hooks/hooks.py
|
||||
|
||||
actions/some-action amount=10 force=true
|
||||
|
||||
"""
|
||||
import os
|
||||
import stat
|
||||
import subprocess
|
||||
import functools
|
||||
|
||||
import charmhelpers.contrib.templating.contexts
|
||||
import charmhelpers.core.host
|
||||
import charmhelpers.core.hookenv
|
||||
import charmhelpers.fetch
|
||||
|
||||
|
||||
charm_dir = os.environ.get('CHARM_DIR', '')
|
||||
ansible_hosts_path = '/etc/ansible/hosts'
|
||||
# Ansible will automatically include any vars in the following
|
||||
# file in its inventory when run locally.
|
||||
ansible_vars_path = '/etc/ansible/host_vars/localhost'
|
||||
|
||||
|
||||
def install_ansible_support(from_ppa=True, ppa_location='ppa:rquillo/ansible'):
    """Installs the ansible package.

    By default it is installed from the `PPA`_ linked from
    the ansible `website`_ or from a ppa specified by a charm config..

    .. _PPA: https://launchpad.net/~rquillo/+archive/ansible
    .. _website: http://docs.ansible.com/intro_installation.html#latest-releases-via-apt-ubuntu

    If from_ppa is empty, you must ensure that the package is available
    from a configured repository.
    """
    if from_ppa:
        charmhelpers.fetch.add_source(ppa_location)
        charmhelpers.fetch.apt_update(fatal=True)
    charmhelpers.fetch.apt_install('ansible')
    # Point ansible at localhost so playbooks run in-place on the unit,
    # over the 'local' connection rather than SSH.
    with open(ansible_hosts_path, 'w+') as hosts_file:
        hosts_file.write('localhost ansible_connection=local ansible_remote_tmp=/root/.ansible/tmp')
|
||||
|
||||
|
||||
def apply_playbook(playbook, tags=None, extra_vars=None):
    """Run ansible-playbook locally against the given playbook.

    :param playbook: path of the playbook to run
    :param tags: optional list of tags; only tasks with these tags run
    :param extra_vars: optional dict passed through as --extra-vars

    Juju config and relation data are first dumped to the local ansible
    vars file so they are available as template variables.
    """
    tags = tags or []
    tags = ",".join(tags)
    charmhelpers.contrib.templating.contexts.juju_state_to_yaml(
        ansible_vars_path, namespace_separator='__',
        allow_hyphens_in_keys=False, mode=(stat.S_IRUSR | stat.S_IWUSR))

    # we want ansible's log output to be unbuffered
    env = os.environ.copy()
    env['PYTHONUNBUFFERED'] = "1"
    call = [
        'ansible-playbook',
        '-c',
        'local',
        playbook,
    ]
    if tags:
        call.extend(['--tags', '{}'.format(tags)])
    if extra_vars:
        # Flatten the dict into "key=value" tokens for --extra-vars.
        extra = ["%s=%s" % (k, v) for k, v in extra_vars.items()]
        call.extend(['--extra-vars', " ".join(extra)])
    subprocess.check_call(call, env=env)
|
||||
|
||||
|
||||
class AnsibleHooks(charmhelpers.core.hookenv.Hooks):
    """Run a playbook with the hook-name as the tag.

    This helper builds on the standard hookenv.Hooks helper,
    but additionally runs the playbook with the hook-name specified
    using --tags (ie. running all the tasks tagged with the hook-name).

    Example::

        hooks = AnsibleHooks(playbook_path='playbooks/my_machine_state.yaml')

        # All the tasks within my_machine_state.yaml tagged with 'install'
        # will be run automatically after do_custom_work()
        @hooks.hook()
        def install():
            do_custom_work()

        # For most of your hooks, you won't need to do anything other
        # than run the tagged tasks for the hook:
        @hooks.hook('config-changed', 'start', 'stop')
        def just_use_playbook():
            pass

        # As a convenience, you can avoid the above noop function by specifying
        # the hooks which are handled by ansible-only and they'll be registered
        # for you:
        # hooks = AnsibleHooks(
        #     'playbooks/my_machine_state.yaml',
        #     default_hooks=['config-changed', 'start', 'stop'])

        if __name__ == "__main__":
            # execute a hook based on the name the program is called by
            hooks.execute(sys.argv)

    """

    def __init__(self, playbook_path, default_hooks=None):
        """Register any hooks handled by ansible."""
        super(AnsibleHooks, self).__init__()

        # Maps action names to callables returning extra-vars dicts.
        self._actions = {}
        self.playbook_path = playbook_path

        default_hooks = default_hooks or []

        def noop(*args, **kwargs):
            # Placeholder python hook: the real work is done by the
            # playbook tasks tagged with the hook name (see execute()).
            pass

        for hook in default_hooks:
            self.register(hook, noop)

    def register_action(self, name, function):
        """Register a hook"""
        self._actions[name] = function

    def execute(self, args):
        """Execute the hook followed by the playbook using the hook as tag."""
        hook_name = os.path.basename(args[0])
        extra_vars = None
        # Actions consume their own CLI args and may return extra vars
        # for the playbook; regular hooks dispatch via hookenv.Hooks.
        if hook_name in self._actions:
            extra_vars = self._actions[hook_name](args[1:])
        else:
            super(AnsibleHooks, self).execute(args)

        # Always run the playbook, limited to the tasks tagged with the
        # hook/action name.
        charmhelpers.contrib.ansible.apply_playbook(
            self.playbook_path, tags=[hook_name], extra_vars=extra_vars)

    def action(self, *action_names):
        """Decorator, registering them as actions"""
        def action_wrapper(decorated):

            @functools.wraps(decorated)
            def wrapper(argv):
                # Parse "key=value" CLI arguments into keyword args.
                kwargs = dict(arg.split('=') for arg in argv)
                try:
                    return decorated(**kwargs)
                except TypeError as e:
                    # Surface the action's usage docstring on bad args.
                    if decorated.__doc__:
                        e.args += (decorated.__doc__,)
                    raise

            self.register_action(decorated.__name__, wrapper)
            # Also register a dashed alias to match juju action naming.
            if '_' in decorated.__name__:
                self.register_action(
                    decorated.__name__.replace('_', '-'), wrapper)

            return wrapper

        return action_wrapper
|
@ -1,124 +0,0 @@
|
||||
# Copyright 2014-2015 Canonical Limited.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import subprocess
|
||||
import time
|
||||
import os
|
||||
from distutils.spawn import find_executable
|
||||
|
||||
from charmhelpers.core.hookenv import (
|
||||
in_relation_hook,
|
||||
relation_ids,
|
||||
relation_set,
|
||||
relation_get,
|
||||
)
|
||||
|
||||
|
||||
def action_set(key, val):
    """Set an action result value via the juju `action-set` tool.

    Dict values are flattened recursively into dotted keys.  Returns
    True when action-set was available and invoked, False otherwise.
    """
    if not find_executable('action-set'):
        return False

    if isinstance(val, dict):
        for sub_key, sub_val in val.items():
            action_set('%s.%s' % (key, sub_key), sub_val)
        return True

    subprocess.check_call(['action-set', '%s=%s' % (key, val)])
    return True
|
||||
|
||||
|
||||
class Benchmark():
    """
    Helper class for the `benchmark` interface.

    :param list actions: Define the actions that are also benchmarks

    From inside the benchmark-relation-changed hook, you would
    Benchmark(['memory', 'cpu', 'disk', 'smoke', 'custom'])

    Examples:

        siege = Benchmark(['siege'])
        siege.start()
        [... run siege ...]
        # The higher the score, the better the benchmark
        siege.set_composite_score(16.70, 'trans/sec', 'desc')
        siege.finish()

    """

    BENCHMARK_CONF = '/etc/benchmark.conf'  # Replaced in testing

    # Relation keys that must ALL be present before the config file
    # is written; a single missing key skips the write entirely.
    required_keys = [
        'hostname',
        'port',
        'graphite_port',
        'graphite_endpoint',
        'api_port'
    ]

    def __init__(self, benchmarks=None):
        if in_relation_hook():
            if benchmarks is not None:
                # Advertise the available benchmarks on every
                # benchmark relation.
                for rid in sorted(relation_ids('benchmark')):
                    relation_set(relation_id=rid, relation_settings={
                        'benchmarks': ",".join(benchmarks)
                    })

            # Check the relation data
            config = {}
            for key in self.required_keys:
                val = relation_get(key)
                if val is not None:
                    config[key] = val
                else:
                    # We don't have all of the required keys
                    config = {}
                    break

            if len(config):
                with open(self.BENCHMARK_CONF, 'w') as f:
                    for key, val in iter(config.items()):
                        f.write("%s=%s\n" % (key, val))

    @staticmethod
    def start():
        """Record the benchmark start time via action-set."""
        action_set('meta.start', time.strftime('%Y-%m-%dT%H:%M:%SZ'))

        # If the collectd charm is also installed, tell it to send a
        # snapshot of the current profile data.  (This note was
        # previously a stray triple-quoted string expression -- a no-op
        # statement at runtime -- now a real comment.)
        COLLECT_PROFILE_DATA = '/usr/local/bin/collect-profile-data'
        if os.path.exists(COLLECT_PROFILE_DATA):
            subprocess.check_output([COLLECT_PROFILE_DATA])

    @staticmethod
    def finish():
        """Record the benchmark stop time via action-set."""
        action_set('meta.stop', time.strftime('%Y-%m-%dT%H:%M:%SZ'))

    @staticmethod
    def set_composite_score(value, units, direction='asc'):
        """
        Set the composite score for a benchmark run. This is a single number
        representative of the benchmark results. This could be the most
        important metric, or an amalgamation of metric scores.
        """
        return action_set(
            "meta.composite",
            {'value': value, 'units': units, 'direction': direction}
        )
|
@ -1,4 +0,0 @@
|
||||
Source lp:charm-tools/trunk
|
||||
|
||||
charm-tools/helpers/python/charmhelpers/__init__.py -> charmhelpers/charmhelpers/contrib/charmhelpers/__init__.py
|
||||
charm-tools/helpers/python/charmhelpers/tests/test_charmhelpers.py -> charmhelpers/tests/contrib/charmhelpers/test_charmhelpers.py
|
@ -1,203 +0,0 @@
|
||||
# Copyright 2014-2015 Canonical Limited.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import warnings
|
||||
warnings.warn("contrib.charmhelpers is deprecated", DeprecationWarning) # noqa
|
||||
|
||||
import operator
|
||||
import tempfile
|
||||
import time
|
||||
import yaml
|
||||
import subprocess
|
||||
|
||||
import six
|
||||
if six.PY3:
|
||||
from urllib.request import urlopen
|
||||
from urllib.error import (HTTPError, URLError)
|
||||
else:
|
||||
from urllib2 import (urlopen, HTTPError, URLError)
|
||||
|
||||
"""Helper functions for writing Juju charms in Python."""
|
||||
|
||||
__metaclass__ = type
|
||||
__all__ = [
|
||||
# 'get_config', # core.hookenv.config()
|
||||
# 'log', # core.hookenv.log()
|
||||
# 'log_entry', # core.hookenv.log()
|
||||
# 'log_exit', # core.hookenv.log()
|
||||
# 'relation_get', # core.hookenv.relation_get()
|
||||
# 'relation_set', # core.hookenv.relation_set()
|
||||
# 'relation_ids', # core.hookenv.relation_ids()
|
||||
# 'relation_list', # core.hookenv.relation_units()
|
||||
# 'config_get', # core.hookenv.config()
|
||||
# 'unit_get', # core.hookenv.unit_get()
|
||||
# 'open_port', # core.hookenv.open_port()
|
||||
# 'close_port', # core.hookenv.close_port()
|
||||
# 'service_control', # core.host.service()
|
||||
'unit_info', # client-side, NOT IMPLEMENTED
|
||||
'wait_for_machine', # client-side, NOT IMPLEMENTED
|
||||
'wait_for_page_contents', # client-side, NOT IMPLEMENTED
|
||||
'wait_for_relation', # client-side, NOT IMPLEMENTED
|
||||
'wait_for_unit', # client-side, NOT IMPLEMENTED
|
||||
]
|
||||
|
||||
|
||||
SLEEP_AMOUNT = 0.1
|
||||
|
||||
|
||||
# We create a juju_status Command here because it makes testing much,
|
||||
# much easier.
|
||||
def juju_status():
    """Return the output of `juju status` as a string.

    Callers in this module (unit_info, get_machine_data) parse the
    return value with yaml.safe_load, so the command's stdout must be
    returned; the previous subprocess.check_call returned only the
    exit status (None/0), making those callers fail.
    """
    return subprocess.check_output(['juju', 'status']).decode('utf-8')
|
||||
|
||||
# re-implemented as charmhelpers.fetch.configure_sources()
|
||||
# def configure_source(update=False):
|
||||
# source = config_get('source')
|
||||
# if ((source.startswith('ppa:') or
|
||||
# source.startswith('cloud:') or
|
||||
# source.startswith('http:'))):
|
||||
# run('add-apt-repository', source)
|
||||
# if source.startswith("http:"):
|
||||
# run('apt-key', 'import', config_get('key'))
|
||||
# if update:
|
||||
# run('apt-get', 'update')
|
||||
|
||||
|
||||
# DEPRECATED: client-side only
|
||||
def make_charm_config_file(charm_config):
    """Dump charm_config as YAML into a NamedTemporaryFile and return it.

    The NamedTemporaryFile instance is returned instead of just the name
    because we want to take advantage of garbage collection-triggered
    deletion of the temp file when it goes out of scope in the caller.
    """
    config_file = tempfile.NamedTemporaryFile(mode='w+')
    config_file.write(yaml.dump(charm_config))
    config_file.flush()
    return config_file
|
||||
|
||||
|
||||
# DEPRECATED: client-side only
|
||||
def unit_info(service_name, item_name, data=None, unit=None):
    """Look up *item_name* for one unit of *service_name* in juju status.

    ``data`` may be a pre-parsed ``juju status`` dict; otherwise the
    status is fetched and parsed here.  When ``unit`` is None the
    lowest-numbered unit of the service is used.  Returns '' when the
    service is not (yet) present in the status output.
    """
    if data is None:
        data = yaml.safe_load(juju_status())
    service = data['services'].get(service_name)
    if service is None:
        # XXX 2012-02-08 gmb:
        #     Cope with the race between deploying a service and it
        #     appearing in `juju status`.  Ideally this would fail more
        #     noisily after a while instead of returning ''.
        return ''
    units = service['units']
    if unit is None:
        # Sort the unit names so that, with no unit specified, the
        # lowest-numbered unit's data is the one returned.
        unit = sorted(units.keys())[0]
    return units[unit][item_name]
|
||||
|
||||
|
||||
# DEPRECATED: client-side only
|
||||
def get_machine_data():
    """Return the 'machines' mapping parsed from `juju status` output."""
    status = yaml.safe_load(juju_status())
    return status['machines']
|
||||
|
||||
|
||||
# DEPRECATED: client-side only
|
||||
def wait_for_machine(num_machines=1, timeout=300):
    """Wait `timeout` seconds for `num_machines` machines to come up.

    This wait_for... function can be called by other wait_for functions
    whose timeouts might be too short in situations where only a bare
    Juju setup has been bootstrapped.

    :return: A tuple of (num_machines, time_taken).  This is used for
             testing.
    """
    # You may think this is a hack, and you'd be right.  The easiest
    # way to tell what environment we're working in (LXC vs EC2) is to
    # check the dns-name of the first machine.  If it's localhost we're
    # in LXC and we can just return here.
    if get_machine_data()[0]['dns-name'] == 'localhost':
        return 1, 0
    started = time.time()
    while True:
        # Drop the first machine: it is the Zookeeper node, which we
        # never need to wait for.  This only works for EC2
        # environments, which is why we returned early above for LXC.
        machines = get_machine_data()
        workers = [machines[key] for key in list(machines.keys())[1:]]
        if len(workers) >= num_machines:
            if all(m.get('instance-state') == 'running' for m in workers):
                return num_machines, time.time() - started
        if time.time() - started >= timeout:
            raise RuntimeError('timeout waiting for service to start')
        time.sleep(SLEEP_AMOUNT)
|
||||
|
||||
|
||||
# DEPRECATED: client-side only
|
||||
def wait_for_unit(service_name, timeout=480):
    """Wait `timeout` seconds for a given service name to come up."""
    wait_for_machine(num_machines=1)
    started = time.time()
    while True:
        state = unit_info(service_name, 'agent-state')
        # Stop polling once the unit has started or has hit an error.
        if state == 'started' or 'error' in state:
            break
        if time.time() - started >= timeout:
            raise RuntimeError('timeout waiting for service to start')
        time.sleep(SLEEP_AMOUNT)
    if state != 'started':
        raise RuntimeError('unit did not start, agent-state: ' + state)
|
||||
|
||||
|
||||
# DEPRECATED: client-side only
|
||||
def wait_for_relation(service_name, relation_name, timeout=120):
    """Wait `timeout` seconds for a given relation to come up."""
    started = time.time()
    while True:
        rel = unit_info(service_name, 'relations').get(relation_name)
        if rel is not None and rel['state'] == 'up':
            return
        if time.time() - started >= timeout:
            raise RuntimeError('timeout waiting for relation to be up')
        time.sleep(SLEEP_AMOUNT)
|
||||
|
||||
|
||||
# DEPRECATED: client-side only
|
||||
def wait_for_page_contents(url, contents, timeout=120, validate=None):
    """Poll *url* until ``validate(page, contents)`` is true; return page.

    ``validate`` defaults to ``operator.contains``, i.e. wait until
    *contents* occurs in the fetched page.  Raises RuntimeError after
    *timeout* seconds.  NOTE(review): on Python 3 ``read()`` returns
    bytes, so *contents* must be bytes for the default check — confirm
    against callers.
    """
    validate = operator.contains if validate is None else validate
    started = time.time()
    while True:
        try:
            response = urlopen(url)
        except (HTTPError, URLError):
            # Not up yet (or a transient failure): keep polling.
            pass
        else:
            page = response.read()
            if validate(page, contents):
                return page
        if time.time() - started >= timeout:
            raise RuntimeError('timeout waiting for contents of ' + url)
        time.sleep(SLEEP_AMOUNT)
|
@ -1,14 +0,0 @@
|
||||
Source: lp:charmsupport/trunk
|
||||
|
||||
charmsupport/charmsupport/execd.py -> charm-helpers/charmhelpers/contrib/charmsupport/execd.py
|
||||
charmsupport/charmsupport/hookenv.py -> charm-helpers/charmhelpers/contrib/charmsupport/hookenv.py
|
||||
charmsupport/charmsupport/host.py -> charm-helpers/charmhelpers/contrib/charmsupport/host.py
|
||||
charmsupport/charmsupport/nrpe.py -> charm-helpers/charmhelpers/contrib/charmsupport/nrpe.py
|
||||
charmsupport/charmsupport/volumes.py -> charm-helpers/charmhelpers/contrib/charmsupport/volumes.py
|
||||
|
||||
charmsupport/tests/test_execd.py -> charm-helpers/tests/contrib/charmsupport/test_execd.py
|
||||
charmsupport/tests/test_hookenv.py -> charm-helpers/tests/contrib/charmsupport/test_hookenv.py
|
||||
charmsupport/tests/test_host.py -> charm-helpers/tests/contrib/charmsupport/test_host.py
|
||||
charmsupport/tests/test_nrpe.py -> charm-helpers/tests/contrib/charmsupport/test_nrpe.py
|
||||
|
||||
charmsupport/bin/charmsupport -> charm-helpers/bin/contrib/charmsupport/charmsupport
|
@ -33,6 +33,7 @@ from charmhelpers.core.hookenv import (
|
||||
hook_name,
|
||||
local_unit,
|
||||
log,
|
||||
relation_get,
|
||||
relation_ids,
|
||||
relation_set,
|
||||
relations_of_type,
|
||||
@ -260,11 +261,23 @@ class NRPE(object):
|
||||
relation = relation_ids('nrpe-external-master')
|
||||
if relation:
|
||||
log("Setting charm primary status {}".format(primary))
|
||||
for rid in relation_ids('nrpe-external-master'):
|
||||
for rid in relation:
|
||||
relation_set(relation_id=rid, relation_settings={'primary': self.primary})
|
||||
self.remove_check_queue = set()
|
||||
|
||||
def add_check(self, *args, **kwargs):
|
||||
shortname = None
|
||||
if kwargs.get('shortname') is None:
|
||||
if len(args) > 0:
|
||||
shortname = args[0]
|
||||
else:
|
||||
shortname = kwargs['shortname']
|
||||
|
||||
self.checks.append(Check(*args, **kwargs))
|
||||
try:
|
||||
self.remove_check_queue.remove(shortname)
|
||||
except KeyError:
|
||||
pass
|
||||
|
||||
def remove_check(self, *args, **kwargs):
|
||||
if kwargs.get('shortname') is None:
|
||||
@ -281,6 +294,7 @@ class NRPE(object):
|
||||
|
||||
check = Check(*args, **kwargs)
|
||||
check.remove(self.hostname)
|
||||
self.remove_check_queue.add(kwargs['shortname'])
|
||||
|
||||
def write(self):
|
||||
try:
|
||||
@ -313,8 +327,25 @@ class NRPE(object):
|
||||
monitor_ids = relation_ids("local-monitors") + \
|
||||
relation_ids("nrpe-external-master")
|
||||
for rid in monitor_ids:
|
||||
reldata = relation_get(unit=local_unit(), rid=rid)
|
||||
if 'monitors' in reldata:
|
||||
# update the existing set of monitors with the new data
|
||||
old_monitors = yaml.safe_load(reldata['monitors'])
|
||||
old_nrpe_monitors = old_monitors['monitors']['remote']['nrpe']
|
||||
# remove keys that are in the remove_check_queue
|
||||
old_nrpe_monitors = {k: v for k, v in old_nrpe_monitors.items()
|
||||
if k not in self.remove_check_queue}
|
||||
# update/add nrpe_monitors
|
||||
old_nrpe_monitors.update(nrpe_monitors)
|
||||
old_monitors['monitors']['remote']['nrpe'] = old_nrpe_monitors
|
||||
# write back to the relation
|
||||
relation_set(relation_id=rid, monitors=yaml.dump(old_monitors))
|
||||
else:
|
||||
# write a brand new set of monitors, as no existing ones.
|
||||
relation_set(relation_id=rid, monitors=yaml.dump(monitors))
|
||||
|
||||
self.remove_check_queue.clear()
|
||||
|
||||
|
||||
def get_nagios_hostcontext(relation_name='nrpe-external-master'):
|
||||
"""
|
||||
|
@ -1,11 +0,0 @@
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
@ -1,577 +0,0 @@
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Helper for working with a MySQL database"""
|
||||
import json
|
||||
import re
|
||||
import sys
|
||||
import platform
|
||||
import os
|
||||
import glob
|
||||
import six
|
||||
|
||||
# from string import upper
|
||||
|
||||
from charmhelpers.core.host import (
|
||||
CompareHostReleases,
|
||||
lsb_release,
|
||||
mkdir,
|
||||
pwgen,
|
||||
write_file
|
||||
)
|
||||
from charmhelpers.core.hookenv import (
|
||||
config as config_get,
|
||||
relation_get,
|
||||
related_units,
|
||||
unit_get,
|
||||
log,
|
||||
DEBUG,
|
||||
INFO,
|
||||
WARNING,
|
||||
leader_get,
|
||||
leader_set,
|
||||
is_leader,
|
||||
)
|
||||
from charmhelpers.fetch import (
|
||||
apt_install,
|
||||
apt_update,
|
||||
filter_installed_packages,
|
||||
)
|
||||
from charmhelpers.contrib.network.ip import get_host_ip
|
||||
|
||||
try:
    import MySQLdb
except ImportError:
    # The bindings are absent: install the package matching this Python
    # major version, then retry the import.
    apt_update(fatal=True)
    _mysqldb_pkg = 'python-mysqldb' if six.PY2 else 'python3-mysqldb'
    apt_install(filter_installed_packages([_mysqldb_pkg]), fatal=True)
    import MySQLdb
|
||||
|
||||
|
||||
class MySQLSetPasswordError(Exception):
    """Raised when updating a MySQL user's password fails."""
|
||||
|
||||
|
||||
class MySQLHelper(object):
|
||||
|
||||
def __init__(self, rpasswdf_template, upasswdf_template, host='localhost',
|
||||
migrate_passwd_to_leader_storage=True,
|
||||
delete_ondisk_passwd_file=True):
|
||||
self.host = host
|
||||
# Password file path templates
|
||||
self.root_passwd_file_template = rpasswdf_template
|
||||
self.user_passwd_file_template = upasswdf_template
|
||||
|
||||
self.migrate_passwd_to_leader_storage = migrate_passwd_to_leader_storage
|
||||
# If we migrate we have the option to delete local copy of root passwd
|
||||
self.delete_ondisk_passwd_file = delete_ondisk_passwd_file
|
||||
self.connection = None
|
||||
|
||||
def connect(self, user='root', password=None, host=None):
|
||||
if host is None:
|
||||
host = self.host
|
||||
log("Opening db connection for %s@%s" % (user, host), level=DEBUG)
|
||||
self.connection = MySQLdb.connect(user=user, host=host,
|
||||
passwd=password)
|
||||
|
||||
def database_exists(self, db_name):
|
||||
cursor = self.connection.cursor()
|
||||
try:
|
||||
cursor.execute("SHOW DATABASES")
|
||||
databases = [i[0] for i in cursor.fetchall()]
|
||||
finally:
|
||||
cursor.close()
|
||||
|
||||
return db_name in databases
|
||||
|
||||
def create_database(self, db_name):
|
||||
cursor = self.connection.cursor()
|
||||
try:
|
||||
cursor.execute("CREATE DATABASE `{}` CHARACTER SET UTF8"
|
||||
.format(db_name))
|
||||
finally:
|
||||
cursor.close()
|
||||
|
||||
def grant_exists(self, db_name, db_user, remote_ip):
|
||||
cursor = self.connection.cursor()
|
||||
priv_string = "GRANT ALL PRIVILEGES ON `{}`.* " \
|
||||
"TO '{}'@'{}'".format(db_name, db_user, remote_ip)
|
||||
try:
|
||||
cursor.execute("SHOW GRANTS for '{}'@'{}'".format(db_user,
|
||||
remote_ip))
|
||||
grants = [i[0] for i in cursor.fetchall()]
|
||||
except MySQLdb.OperationalError:
|
||||
return False
|
||||
finally:
|
||||
cursor.close()
|
||||
|
||||
# TODO: review for different grants
|
||||
return priv_string in grants
|
||||
|
||||
def create_grant(self, db_name, db_user, remote_ip, password):
|
||||
cursor = self.connection.cursor()
|
||||
try:
|
||||
# TODO: review for different grants
|
||||
cursor.execute("GRANT ALL PRIVILEGES ON `{}`.* TO '{}'@'{}' "
|
||||
"IDENTIFIED BY '{}'".format(db_name,
|
||||
db_user,
|
||||
remote_ip,
|
||||
password))
|
||||
finally:
|
||||
cursor.close()
|
||||
|
||||
def create_admin_grant(self, db_user, remote_ip, password):
|
||||
cursor = self.connection.cursor()
|
||||
try:
|
||||
cursor.execute("GRANT ALL PRIVILEGES ON *.* TO '{}'@'{}' "
|
||||
"IDENTIFIED BY '{}'".format(db_user,
|
||||
remote_ip,
|
||||
password))
|
||||
finally:
|
||||
cursor.close()
|
||||
|
||||
def cleanup_grant(self, db_user, remote_ip):
|
||||
cursor = self.connection.cursor()
|
||||
try:
|
||||
cursor.execute("DROP FROM mysql.user WHERE user='{}' "
|
||||
"AND HOST='{}'".format(db_user,
|
||||
remote_ip))
|
||||
finally:
|
||||
cursor.close()
|
||||
|
||||
def flush_priviledges(self):
|
||||
cursor = self.connection.cursor()
|
||||
try:
|
||||
cursor.execute("FLUSH PRIVILEGES")
|
||||
finally:
|
||||
cursor.close()
|
||||
|
||||
def execute(self, sql):
|
||||
"""Execute arbitary SQL against the database."""
|
||||
cursor = self.connection.cursor()
|
||||
try:
|
||||
cursor.execute(sql)
|
||||
finally:
|
||||
cursor.close()
|
||||
|
||||
def select(self, sql):
|
||||
"""
|
||||
Execute arbitrary SQL select query against the database
|
||||
and return the results.
|
||||
|
||||
:param sql: SQL select query to execute
|
||||
:type sql: string
|
||||
:returns: SQL select query result
|
||||
:rtype: list of lists
|
||||
:raises: MySQLdb.Error
|
||||
"""
|
||||
cursor = self.connection.cursor()
|
||||
try:
|
||||
cursor.execute(sql)
|
||||
results = [list(i) for i in cursor.fetchall()]
|
||||
finally:
|
||||
cursor.close()
|
||||
return results
|
||||
|
||||
def migrate_passwords_to_leader_storage(self, excludes=None):
|
||||
"""Migrate any passwords storage on disk to leader storage."""
|
||||
if not is_leader():
|
||||
log("Skipping password migration as not the lead unit",
|
||||
level=DEBUG)
|
||||
return
|
||||
dirname = os.path.dirname(self.root_passwd_file_template)
|
||||
path = os.path.join(dirname, '*.passwd')
|
||||
for f in glob.glob(path):
|
||||
if excludes and f in excludes:
|
||||
log("Excluding %s from leader storage migration" % (f),
|
||||
level=DEBUG)
|
||||
continue
|
||||
|
||||
key = os.path.basename(f)
|
||||
with open(f, 'r') as passwd:
|
||||
_value = passwd.read().strip()
|
||||
|
||||
try:
|
||||
leader_set(settings={key: _value})
|
||||
|
||||
if self.delete_ondisk_passwd_file:
|
||||
os.unlink(f)
|
||||
except ValueError:
|
||||
# NOTE cluster relation not yet ready - skip for now
|
||||
pass
|
||||
|
||||
def get_mysql_password_on_disk(self, username=None, password=None):
|
||||
"""Retrieve, generate or store a mysql password for the provided
|
||||
username on disk."""
|
||||
if username:
|
||||
template = self.user_passwd_file_template
|
||||
passwd_file = template.format(username)
|
||||
else:
|
||||
passwd_file = self.root_passwd_file_template
|
||||
|
||||
_password = None
|
||||
if os.path.exists(passwd_file):
|
||||
log("Using existing password file '%s'" % passwd_file, level=DEBUG)
|
||||
with open(passwd_file, 'r') as passwd:
|
||||
_password = passwd.read().strip()
|
||||
else:
|
||||
log("Generating new password file '%s'" % passwd_file, level=DEBUG)
|
||||
if not os.path.isdir(os.path.dirname(passwd_file)):
|
||||
# NOTE: need to ensure this is not mysql root dir (which needs
|
||||
# to be mysql readable)
|
||||
mkdir(os.path.dirname(passwd_file), owner='root', group='root',
|
||||
perms=0o770)
|
||||
# Force permissions - for some reason the chmod in makedirs
|
||||
# fails
|
||||
os.chmod(os.path.dirname(passwd_file), 0o770)
|
||||
|
||||
_password = password or pwgen(length=32)
|
||||
write_file(passwd_file, _password, owner='root', group='root',
|
||||
perms=0o660)
|
||||
|
||||
return _password
|
||||
|
||||
def passwd_keys(self, username):
|
||||
"""Generator to return keys used to store passwords in peer store.
|
||||
|
||||
NOTE: we support both legacy and new format to support mysql
|
||||
charm prior to refactor. This is necessary to avoid LP 1451890.
|
||||
"""
|
||||
keys = []
|
||||
if username == 'mysql':
|
||||
log("Bad username '%s'" % (username), level=WARNING)
|
||||
|
||||
if username:
|
||||
# IMPORTANT: *newer* format must be returned first
|
||||
keys.append('mysql-%s.passwd' % (username))
|
||||
keys.append('%s.passwd' % (username))
|
||||
else:
|
||||
keys.append('mysql.passwd')
|
||||
|
||||
for key in keys:
|
||||
yield key
|
||||
|
||||
def get_mysql_password(self, username=None, password=None):
|
||||
"""Retrieve, generate or store a mysql password for the provided
|
||||
username using peer relation cluster."""
|
||||
excludes = []
|
||||
|
||||
# First check peer relation.
|
||||
try:
|
||||
for key in self.passwd_keys(username):
|
||||
_password = leader_get(key)
|
||||
if _password:
|
||||
break
|
||||
|
||||
# If root password available don't update peer relation from local
|
||||
if _password and not username:
|
||||
excludes.append(self.root_passwd_file_template)
|
||||
|
||||
except ValueError:
|
||||
# cluster relation is not yet started; use on-disk
|
||||
_password = None
|
||||
|
||||
# If none available, generate new one
|
||||
if not _password:
|
||||
_password = self.get_mysql_password_on_disk(username, password)
|
||||
|
||||
# Put on wire if required
|
||||
if self.migrate_passwd_to_leader_storage:
|
||||
self.migrate_passwords_to_leader_storage(excludes=excludes)
|
||||
|
||||
return _password
|
||||
|
||||
def get_mysql_root_password(self, password=None):
|
||||
"""Retrieve or generate mysql root password for service units."""
|
||||
return self.get_mysql_password(username=None, password=password)
|
||||
|
||||
def set_mysql_password(self, username, password):
|
||||
"""Update a mysql password for the provided username changing the
|
||||
leader settings
|
||||
|
||||
To update root's password pass `None` in the username
|
||||
"""
|
||||
|
||||
if username is None:
|
||||
username = 'root'
|
||||
|
||||
# get root password via leader-get, it may be that in the past (when
|
||||
# changes to root-password were not supported) the user changed the
|
||||
# password, so leader-get is more reliable source than
|
||||
# config.previous('root-password').
|
||||
rel_username = None if username == 'root' else username
|
||||
cur_passwd = self.get_mysql_password(rel_username)
|
||||
|
||||
# password that needs to be set
|
||||
new_passwd = password
|
||||
|
||||
# update password for all users (e.g. root@localhost, root@::1, etc)
|
||||
try:
|
||||
self.connect(user=username, password=cur_passwd)
|
||||
cursor = self.connection.cursor()
|
||||
except MySQLdb.OperationalError as ex:
|
||||
raise MySQLSetPasswordError(('Cannot connect using password in '
|
||||
'leader settings (%s)') % ex, ex)
|
||||
|
||||
try:
|
||||
# NOTE(freyes): Due to skip-name-resolve root@$HOSTNAME account
|
||||
# fails when using SET PASSWORD so using UPDATE against the
|
||||
# mysql.user table is needed, but changes to this table are not
|
||||
# replicated across the cluster, so this update needs to run in
|
||||
# all the nodes. More info at
|
||||
# http://galeracluster.com/documentation-webpages/userchanges.html
|
||||
release = CompareHostReleases(lsb_release()['DISTRIB_CODENAME'])
|
||||
if release < 'bionic':
|
||||
SQL_UPDATE_PASSWD = ("UPDATE mysql.user SET password = "
|
||||
"PASSWORD( %s ) WHERE user = %s;")
|
||||
else:
|
||||
# PXC 5.7 (introduced in Bionic) uses authentication_string
|
||||
SQL_UPDATE_PASSWD = ("UPDATE mysql.user SET "
|
||||
"authentication_string = "
|
||||
"PASSWORD( %s ) WHERE user = %s;")
|
||||
cursor.execute(SQL_UPDATE_PASSWD, (new_passwd, username))
|
||||
cursor.execute('FLUSH PRIVILEGES;')
|
||||
self.connection.commit()
|
||||
except MySQLdb.OperationalError as ex:
|
||||
raise MySQLSetPasswordError('Cannot update password: %s' % str(ex),
|
||||
ex)
|
||||
finally:
|
||||
cursor.close()
|
||||
|
||||
# check the password was changed
|
||||
try:
|
||||
self.connect(user=username, password=new_passwd)
|
||||
self.execute('select 1;')
|
||||
except MySQLdb.OperationalError as ex:
|
||||
raise MySQLSetPasswordError(('Cannot connect using new password: '
|
||||
'%s') % str(ex), ex)
|
||||
|
||||
if not is_leader():
|
||||
log('Only the leader can set a new password in the relation',
|
||||
level=DEBUG)
|
||||
return
|
||||
|
||||
for key in self.passwd_keys(rel_username):
|
||||
_password = leader_get(key)
|
||||
if _password:
|
||||
log('Updating password for %s (%s)' % (key, rel_username),
|
||||
level=DEBUG)
|
||||
leader_set(settings={key: new_passwd})
|
||||
|
||||
def set_mysql_root_password(self, password):
|
||||
self.set_mysql_password('root', password)
|
||||
|
||||
def normalize_address(self, hostname):
|
||||
"""Ensure that address returned is an IP address (i.e. not fqdn)"""
|
||||
if config_get('prefer-ipv6'):
|
||||
# TODO: add support for ipv6 dns
|
||||
return hostname
|
||||
|
||||
if hostname != unit_get('private-address'):
|
||||
return get_host_ip(hostname, fallback=hostname)
|
||||
|
||||
# Otherwise assume localhost
|
||||
return '127.0.0.1'
|
||||
|
||||
def get_allowed_units(self, database, username, relation_id=None):
|
||||
"""Get list of units with access grants for database with username.
|
||||
|
||||
This is typically used to provide shared-db relations with a list of
|
||||
which units have been granted access to the given database.
|
||||
"""
|
||||
self.connect(password=self.get_mysql_root_password())
|
||||
allowed_units = set()
|
||||
for unit in related_units(relation_id):
|
||||
settings = relation_get(rid=relation_id, unit=unit)
|
||||
# First check for setting with prefix, then without
|
||||
for attr in ["%s_hostname" % (database), 'hostname']:
|
||||
hosts = settings.get(attr, None)
|
||||
if hosts:
|
||||
break
|
||||
|
||||
if hosts:
|
||||
# hostname can be json-encoded list of hostnames
|
||||
try:
|
||||
hosts = json.loads(hosts)
|
||||
except ValueError:
|
||||
hosts = [hosts]
|
||||
else:
|
||||
hosts = [settings['private-address']]
|
||||
|
||||
if hosts:
|
||||
for host in hosts:
|
||||
host = self.normalize_address(host)
|
||||
if self.grant_exists(database, username, host):
|
||||
log("Grant exists for host '%s' on db '%s'" %
|
||||
(host, database), level=DEBUG)
|
||||
if unit not in allowed_units:
|
||||
allowed_units.add(unit)
|
||||
else:
|
||||
log("Grant does NOT exist for host '%s' on db '%s'" %
|
||||
(host, database), level=DEBUG)
|
||||
else:
|
||||
log("No hosts found for grant check", level=INFO)
|
||||
|
||||
return allowed_units
|
||||
|
||||
def configure_db(self, hostname, database, username, admin=False):
|
||||
"""Configure access to database for username from hostname."""
|
||||
self.connect(password=self.get_mysql_root_password())
|
||||
if not self.database_exists(database):
|
||||
self.create_database(database)
|
||||
|
||||
remote_ip = self.normalize_address(hostname)
|
||||
password = self.get_mysql_password(username)
|
||||
if not self.grant_exists(database, username, remote_ip):
|
||||
if not admin:
|
||||
self.create_grant(database, username, remote_ip, password)
|
||||
else:
|
||||
self.create_admin_grant(username, remote_ip, password)
|
||||
self.flush_priviledges()
|
||||
|
||||
return password
|
||||
|
||||
|
||||
class PerconaClusterHelper(object):
    """Tuning helpers used to build Percona XtraDB Cluster configuration."""

    # Going for the biggest page size to avoid wasted bytes.
    # InnoDB page size is 16MB
    DEFAULT_PAGE_SIZE = 16 * 1024 * 1024
    DEFAULT_INNODB_BUFFER_FACTOR = 0.50
    DEFAULT_INNODB_BUFFER_SIZE_MAX = 512 * 1024 * 1024

    # Validation and lookups for InnoDB configuration
    INNODB_VALID_BUFFERING_VALUES = [
        'none',
        'inserts',
        'deletes',
        'changes',
        'purges',
        'all'
    ]
    INNODB_FLUSH_CONFIG_VALUES = {
        'fast': 2,
        'safest': 1,
        'unsafe': 0,
    }
|
||||
|
||||
def human_to_bytes(self, human):
|
||||
"""Convert human readable configuration options to bytes."""
|
||||
num_re = re.compile('^[0-9]+$')
|
||||
if num_re.match(human):
|
||||
return human
|
||||
|
||||
factors = {
|
||||
'K': 1024,
|
||||
'M': 1048576,
|
||||
'G': 1073741824,
|
||||
'T': 1099511627776
|
||||
}
|
||||
modifier = human[-1]
|
||||
if modifier in factors:
|
||||
return int(human[:-1]) * factors[modifier]
|
||||
|
||||
if modifier == '%':
|
||||
total_ram = self.human_to_bytes(self.get_mem_total())
|
||||
if self.is_32bit_system() and total_ram > self.sys_mem_limit():
|
||||
total_ram = self.sys_mem_limit()
|
||||
factor = int(human[:-1]) * 0.01
|
||||
pctram = total_ram * factor
|
||||
return int(pctram - (pctram % self.DEFAULT_PAGE_SIZE))
|
||||
|
||||
raise ValueError("Can only convert K,M,G, or T")
|
||||
|
||||
def is_32bit_system(self):
|
||||
"""Determine whether system is 32 or 64 bit."""
|
||||
try:
|
||||
return sys.maxsize < 2 ** 32
|
||||
except OverflowError:
|
||||
return False
|
||||
|
||||
def sys_mem_limit(self):
|
||||
"""Determine the default memory limit for the current service unit."""
|
||||
if platform.machine() in ['armv7l']:
|
||||
_mem_limit = self.human_to_bytes('2700M') # experimentally determined
|
||||
else:
|
||||
# Limit for x86 based 32bit systems
|
||||
_mem_limit = self.human_to_bytes('4G')
|
||||
|
||||
return _mem_limit
|
||||
|
||||
def get_mem_total(self):
|
||||
"""Calculate the total memory in the current service unit."""
|
||||
with open('/proc/meminfo') as meminfo_file:
|
||||
for line in meminfo_file:
|
||||
key, mem = line.split(':', 2)
|
||||
if key == 'MemTotal':
|
||||
mtot, modifier = mem.strip().split(' ')
|
||||
return '%s%s' % (mtot, modifier[0].upper())
|
||||
|
||||
def parse_config(self):
|
||||
"""Parse charm configuration and calculate values for config files."""
|
||||
config = config_get()
|
||||
mysql_config = {}
|
||||
if 'max-connections' in config:
|
||||
mysql_config['max_connections'] = config['max-connections']
|
||||
|
||||
if 'wait-timeout' in config:
|
||||
mysql_config['wait_timeout'] = config['wait-timeout']
|
||||
|
||||
if 'innodb-flush-log-at-trx-commit' in config:
|
||||
mysql_config['innodb_flush_log_at_trx_commit'] = \
|
||||
config['innodb-flush-log-at-trx-commit']
|
||||
elif 'tuning-level' in config:
|
||||
mysql_config['innodb_flush_log_at_trx_commit'] = \
|
||||
self.INNODB_FLUSH_CONFIG_VALUES.get(config['tuning-level'], 1)
|
||||
|
||||
if ('innodb-change-buffering' in config and
|
||||
config['innodb-change-buffering'] in self.INNODB_VALID_BUFFERING_VALUES):
|
||||
mysql_config['innodb_change_buffering'] = config['innodb-change-buffering']
|
||||
|
||||
if 'innodb-io-capacity' in config:
|
||||
mysql_config['innodb_io_capacity'] = config['innodb-io-capacity']
|
||||
|
||||
# Set a sane default key_buffer size
|
||||
mysql_config['key_buffer'] = self.human_to_bytes('32M')
|
||||
total_memory = self.human_to_bytes(self.get_mem_total())
|
||||
|
||||
dataset_bytes = config.get('dataset-size', None)
|
||||
innodb_buffer_pool_size = config.get('innodb-buffer-pool-size', None)
|
||||
|
||||
if innodb_buffer_pool_size:
|
||||
innodb_buffer_pool_size = self.human_to_bytes(
|
||||
innodb_buffer_pool_size)
|
||||
elif dataset_bytes:
|
||||
log("Option 'dataset-size' has been deprecated, please use"
|
||||
"innodb_buffer_pool_size option instead", level="WARN")
|
||||
innodb_buffer_pool_size = self.human_to_bytes(
|
||||
dataset_bytes)
|
||||
else:
|
||||
# NOTE(jamespage): pick the smallest of 50% of RAM or 512MB
|
||||
# to ensure that deployments in containers
|
||||
# without constraints don't try to consume
|
||||
# silly amounts of memory.
|
||||
innodb_buffer_pool_size = min(
|
||||
int(total_memory * self.DEFAULT_INNODB_BUFFER_FACTOR),
|
||||
self.DEFAULT_INNODB_BUFFER_SIZE_MAX
|
||||
)
|
||||
|
||||
if innodb_buffer_pool_size > total_memory:
|
||||
log("innodb_buffer_pool_size; {} is greater than system available memory:{}".format(
|
||||
innodb_buffer_pool_size,
|
||||
total_memory), level='WARN')
|
||||
|
||||
mysql_config['innodb_buffer_pool_size'] = innodb_buffer_pool_size
|
||||
return mysql_config
|
0
charmhelpers/contrib/hardening/defaults/__init__.py
Normal file
0
charmhelpers/contrib/hardening/defaults/__init__.py
Normal file
@ -1,13 +0,0 @@
|
||||
# Copyright 2016 Canonical Ltd
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
@ -1,153 +0,0 @@
|
||||
#!/usr/bin/env python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright 2014-2015 Canonical Limited.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
__author__ = "Jorge Niedbalski <jorge.niedbalski@canonical.com>"
|
||||
|
||||
import six
|
||||
|
||||
from charmhelpers.fetch import (
|
||||
apt_install,
|
||||
apt_update,
|
||||
)
|
||||
|
||||
from charmhelpers.core.hookenv import (
|
||||
log,
|
||||
INFO,
|
||||
)
|
||||
|
||||
try:
|
||||
from netifaces import interfaces as network_interfaces
|
||||
except ImportError:
|
||||
if six.PY2:
|
||||
apt_install('python-netifaces')
|
||||
else:
|
||||
apt_install('python3-netifaces')
|
||||
from netifaces import interfaces as network_interfaces
|
||||
|
||||
import os
|
||||
import re
|
||||
import subprocess
|
||||
|
||||
from charmhelpers.core.kernel import modprobe
|
||||
|
||||
REQUIRED_MODULES = (
    "mlx4_ib",
    "mlx4_en",
    "mlx4_core",
    "ib_ipath",
    "ib_mthca",
    "ib_srpt",
    "ib_srp",
    "ib_ucm",
    "ib_isert",
    "ib_iser",
    "ib_ipoib",
    "ib_cm",
    # Bug fix: a missing comma after "ib_uverbs" previously fused it
    # with "ib_umad" into the single bogus module name
    # "ib_uverbsib_umad", so neither module was ever modprobed.
    "ib_uverbs",
    "ib_umad",
    "ib_sa",
    "ib_mad",
    "ib_core",
    "ib_addr",
    "rdma_ucm",
)

REQUIRED_PACKAGES = (
    "ibutils",
    "infiniband-diags",
    "ibverbs-utils",
)

# Kernel drivers that indicate IP-over-InfiniBand capability.
IPOIB_DRIVERS = (
    "ib_ipoib",
)

# Presence of this sysfs file indicates the infiniband stack is loaded.
ABI_VERSION_FILE = "/sys/class/infiniband_mad/abi_version"
|
||||
|
||||
|
||||
class DeviceInfo(object):
    """Attribute bag populated by device_info() via setattr()."""
|
||||
|
||||
|
||||
def install_packages():
    """Refresh the apt cache, then install the infiniband packages."""
    apt_update()
    apt_install(REQUIRED_PACKAGES, fatal=True)
|
||||
|
||||
|
||||
def load_modules():
    """Load every required kernel module, persisting across reboots."""
    for mod in REQUIRED_MODULES:
        modprobe(mod, persist=True)
|
||||
|
||||
|
||||
def is_enabled():
    """Report whether the Infiniband MAD stack is loaded on this system."""
    # The abi_version sysfs node only exists once infiniband_mad is loaded.
    return os.path.exists(ABI_VERSION_FILE)
|
||||
|
||||
|
||||
def stat():
    """Run ``ibstat`` and return its full output."""
    cmd = ["ibstat"]
    return subprocess.check_output(cmd)
|
||||
|
||||
|
||||
def devices():
    """Return a list of IB enabled device names.

    ``check_output`` returns bytes under Python 3; decode so callers
    receive native strings on both Python 2 and Python 3.
    """
    out = subprocess.check_output(['ibstat', '-l'])
    if not isinstance(out, str):
        out = out.decode('UTF-8')
    return out.splitlines()
|
||||
|
||||
|
||||
def device_info(device):
    """Return a DeviceInfo object with the current device settings.

    :param device: IB device name as reported by ``ibstat -l``
    :returns: DeviceInfo carrying device_type, num_ports, fw_ver, hw_ver,
              node_guid and sys_guid attributes (only those found in the
              ibstat output are set)
    """
    out = subprocess.check_output(['ibstat', device, '-s'])
    if not isinstance(out, str):
        # py3: check_output yields bytes; decode so the str regexes match
        out = out.decode('UTF-8')

    regexes = {
        "CA type: (.*)": "device_type",
        "Number of ports: (.*)": "num_ports",
        "Firmware version: (.*)": "fw_ver",
        "Hardware version: (.*)": "hw_ver",
        "Node GUID: (.*)": "node_guid",
        "System image GUID: (.*)": "sys_guid",
    }

    # Use a distinct name: the original rebound the ``device`` argument.
    info = DeviceInfo()
    for line in out.splitlines():
        for expression, key in regexes.items():
            matches = re.search(expression, line)
            if matches:
                setattr(info, key, matches.group(1))

    return info
|
||||
|
||||
|
||||
def ipoib_interfaces():
    """Return a list of IPOIB capable ethernet interfaces."""
    interfaces = []

    for interface in network_interfaces():
        try:
            output = subprocess.check_output(['ethtool', '-i', interface])
            if not isinstance(output, str):
                # py3: decode bytes so the str regex below can match;
                # previously the TypeError was swallowed by the broad
                # except and every interface was silently skipped.
                output = output.decode('UTF-8')
            driver = re.search('^driver: (.+)$', output, re.M).group(1)

            if driver in IPOIB_DRIVERS:
                interfaces.append(interface)
        except Exception:
            # ethtool fails for virtual/unsupported interfaces; skip them.
            log("Skipping interface %s" % interface, level=INFO)
            continue

    return interfaces
|
@ -1,265 +0,0 @@
|
||||
# Copyright 2014-2015 Canonical Limited.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
''' Helpers for interacting with OpenvSwitch '''
|
||||
import hashlib
|
||||
import subprocess
|
||||
import os
|
||||
import six
|
||||
|
||||
from charmhelpers.fetch import apt_install
|
||||
|
||||
|
||||
from charmhelpers.core.hookenv import (
|
||||
log, WARNING, INFO, DEBUG
|
||||
)
|
||||
from charmhelpers.core.host import (
|
||||
service
|
||||
)
|
||||
|
||||
BRIDGE_TEMPLATE = """\
|
||||
# This veth pair is required when neutron data-port is mapped to an existing linux bridge. lp:1635067
|
||||
|
||||
auto {linuxbridge_port}
|
||||
iface {linuxbridge_port} inet manual
|
||||
pre-up ip link add name {linuxbridge_port} type veth peer name {ovsbridge_port}
|
||||
pre-up ip link set {ovsbridge_port} master {bridge}
|
||||
pre-up ip link set {ovsbridge_port} up
|
||||
up ip link set {linuxbridge_port} up
|
||||
down ip link del {linuxbridge_port}
|
||||
"""
|
||||
|
||||
MAX_KERNEL_INTERFACE_NAME_LEN = 15
|
||||
|
||||
|
||||
def add_bridge(name, datapath_type=None):
    """Ensure the named bridge exists in openvswitch.

    :param name: bridge name
    :param datapath_type: optional datapath_type to set on the bridge
    """
    log('Creating bridge {}'.format(name))
    cmd = ["ovs-vsctl", "--", "--may-exist", "add-br", name]
    if datapath_type is not None:
        cmd.extend(['--', 'set', 'bridge', name,
                    'datapath_type={}'.format(datapath_type)])
    subprocess.check_call(cmd)
|
||||
|
||||
|
||||
def del_bridge(name):
    """Remove the named bridge from openvswitch (no-op when absent)."""
    log('Deleting bridge {}'.format(name))
    cmd = ["ovs-vsctl", "--", "--if-exists", "del-br", name]
    subprocess.check_call(cmd)
|
||||
|
||||
|
||||
def add_bridge_port(name, port, promisc=False):
    """Attach ``port`` to ovs bridge ``name`` and bring the link up.

    :param promisc: when True enable promiscuous mode on the port,
                    otherwise explicitly disable it
    """
    log('Adding port {} to bridge {}'.format(port, name))
    subprocess.check_call(["ovs-vsctl", "--", "--may-exist", "add-port",
                           name, port])
    subprocess.check_call(["ip", "link", "set", port, "up"])
    promisc_mode = "on" if promisc else "off"
    subprocess.check_call(["ip", "link", "set", port, "promisc",
                           promisc_mode])
|
||||
|
||||
|
||||
def del_bridge_port(name, port):
    """Detach ``port`` from ovs bridge ``name`` and bring the link down."""
    log('Deleting port {} from bridge {}'.format(port, name))
    subprocess.check_call(["ovs-vsctl", "--", "--if-exists", "del-port",
                           name, port])
    # Bring the link down and always clear promiscuous mode.
    for args in (["down"], ["promisc", "off"]):
        subprocess.check_call(["ip", "link", "set", port] + args)
|
||||
|
||||
|
||||
def add_ovsbridge_linuxbridge(name, bridge):
    """Link a Linux bridge to the named openvswitch bridge via a veth pair.

    Writes an /etc/network/interfaces.d snippet describing the veth pair,
    brings it up with ``ifup`` and adds the ovs end as a port of the ovs
    bridge.  Returns early (without changes) when the Linux bridge is
    already directly plugged into an ovs bridge or when either veth end
    already exists.

    :param name: Name of ovs bridge to be added to Linux bridge
    :param bridge: Name of Linux bridge to be added to ovs bridge
    :returns: None.  (The previous docstring claimed a True/False result,
              but no value was ever returned; callers must not rely on one.)
    """
    try:
        import netifaces
    except ImportError:
        if six.PY2:
            apt_install('python-netifaces', fatal=True)
        else:
            apt_install('python3-netifaces', fatal=True)
        import netifaces

    # NOTE(jamespage):
    # Older code supported addition of a linuxbridge directly
    # to an OVS bridge; ensure we don't break uses on upgrade
    existing_ovs_bridge = port_to_br(bridge)
    if existing_ovs_bridge is not None:
        log('Linuxbridge {} is already directly in use'
            ' by OVS bridge {}'.format(bridge, existing_ovs_bridge),
            level=INFO)
        return

    # NOTE(jamespage):
    # preserve existing naming because interfaces may already exist.
    ovsbridge_port = "veth-" + name
    linuxbridge_port = "veth-" + bridge
    if (len(ovsbridge_port) > MAX_KERNEL_INTERFACE_NAME_LEN or
            len(linuxbridge_port) > MAX_KERNEL_INTERFACE_NAME_LEN):
        # NOTE(jamespage):
        # use parts of hashed bridgename (openstack style) when
        # a bridge name exceeds 15 chars
        hashed_bridge = hashlib.sha256(bridge.encode('UTF-8')).hexdigest()
        base = '{}-{}'.format(hashed_bridge[:8], hashed_bridge[-2:])
        ovsbridge_port = "cvo{}".format(base)
        linuxbridge_port = "cvb{}".format(base)

    interfaces = netifaces.interfaces()
    for interface in interfaces:
        if interface == ovsbridge_port or interface == linuxbridge_port:
            log('Interface {} already exists'.format(interface), level=INFO)
            return

    log('Adding linuxbridge {} to ovsbridge {}'.format(bridge, name),
        level=INFO)

    check_for_eni_source()

    with open('/etc/network/interfaces.d/{}.cfg'.format(
            linuxbridge_port), 'w') as config:
        config.write(BRIDGE_TEMPLATE.format(linuxbridge_port=linuxbridge_port,
                                            ovsbridge_port=ovsbridge_port,
                                            bridge=bridge))

    subprocess.check_call(["ifup", linuxbridge_port])
    add_bridge_port(name, linuxbridge_port)
|
||||
|
||||
|
||||
def is_linuxbridge_interface(port):
    """Check if the interface is a linuxbridge bridge.

    :param port: Name of an interface to check
    :returns: True if port is a Linux bridge
    """
    is_bridge = os.path.exists('/sys/class/net/' + port + '/bridge')
    if is_bridge:
        log('Interface {} is a Linux bridge'.format(port), level=DEBUG)
    else:
        log('Interface {} is not a Linux bridge'.format(port), level=DEBUG)
    return is_bridge
|
||||
|
||||
|
||||
def set_manager(manager):
    """Point the local openvswitch at the given manager over SSL."""
    log('Setting manager for local ovs to {}'.format(manager))
    cmd = ['ovs-vsctl', 'set-manager', 'ssl:{}'.format(manager)]
    subprocess.check_call(cmd)
|
||||
|
||||
|
||||
def set_Open_vSwitch_column_value(column_value):
    """Set ``column_value`` in the ovsdb Open_vSwitch table.

    :param column_value:
        See http://www.openvswitch.org//ovs-vswitchd.conf.db.5.pdf for
        details of the relevant values.
    :type str
    :raises CalledProcessException: possibly ovsdb-server is not running
    """
    log('Setting {} in the Open_vSwitch table'.format(column_value))
    cmd = ['ovs-vsctl', 'set', 'Open_vSwitch', '.', column_value]
    subprocess.check_call(cmd)
|
||||
|
||||
|
||||
CERT_PATH = '/etc/openvswitch/ovsclient-cert.pem'
|
||||
|
||||
|
||||
def get_certificate():
    """Read the openvswitch client certificate from disk.

    :returns: the PEM block including BEGIN/END markers, or None when the
              certificate file does not exist
    :raises RuntimeError: the file exists but lacks valid PEM markers
    """
    if not os.path.exists(CERT_PATH):
        log('Certificate not found', level=WARNING)
        return None

    log('Reading ovs certificate from {}'.format(CERT_PATH))
    with open(CERT_PATH, 'r') as cert:
        full_cert = cert.read()

    begin_marker = "-----BEGIN CERTIFICATE-----"
    end_marker = "-----END CERTIFICATE-----"
    begin_index = full_cert.find(begin_marker)
    end_index = full_cert.rfind(end_marker)
    if end_index == -1 or begin_index == -1:
        raise RuntimeError("Certificate does not contain valid begin"
                           " and end markers.")
    # Trim anything surrounding the PEM block itself.
    return full_cert[begin_index:(end_index + len(end_marker))]
|
||||
|
||||
|
||||
def check_for_eni_source():
    """Ensure /etc/network/interfaces sources interfaces.d.

    Juju removes the ``source`` line when setting up interfaces; re-add it
    if missing.
    """
    wanted = 'source /etc/network/interfaces.d/*'
    with open('/etc/network/interfaces', 'r') as eni:
        for line in eni:
            # Strip the trailing newline before comparing: the original
            # compared the raw line (which still carries '\n'), so the
            # match never fired and the source line was appended on every
            # call.
            if line.strip() == wanted:
                return
    with open('/etc/network/interfaces', 'a') as eni:
        eni.write('\n' + wanted)
|
||||
|
||||
|
||||
def full_restart():
    """Full restart and reload of openvswitch."""
    kmod_conf = '/etc/init/openvswitch-force-reload-kmod.conf'
    if os.path.exists(kmod_conf):
        # Upstart job available: use the dedicated force-reload job.
        service('start', 'openvswitch-force-reload-kmod')
    else:
        service('force-reload-kmod', 'openvswitch-switch')
|
||||
|
||||
|
||||
def enable_ipfix(bridge, target,
                 cache_active_timeout=60,
                 cache_max_flows=128,
                 sampling=64):
    """Enable IPFIX on bridge to target.

    :param bridge: Bridge to monitor
    :param target: IPFIX remote endpoint
    :param cache_active_timeout: The maximum period in seconds for
                                 which an IPFIX flow record is cached
                                 and aggregated before being sent
    :param cache_max_flows: The maximum number of IPFIX flow records
                            that can be cached at a time
    :param sampling: The rate at which packets should be sampled and
                     sent to each target collector
    """
    ipfix_settings = [
        'targets="{}"'.format(target),
        'sampling={}'.format(sampling),
        'cache_active_timeout={}'.format(cache_active_timeout),
        'cache_max_flows={}'.format(cache_max_flows),
    ]
    cmd = ['ovs-vsctl', 'set', 'Bridge', bridge, 'ipfix=@i', '--',
           '--id=@i', 'create', 'IPFIX'] + ipfix_settings
    log('Enabling IPfix on {}.'.format(bridge))
    subprocess.check_call(cmd)
|
||||
|
||||
|
||||
def disable_ipfix(bridge):
    """Disable IPFIX on the target bridge.

    :param bridge: Bridge to modify
    """
    subprocess.check_call(['ovs-vsctl', 'clear', 'Bridge', bridge, 'ipfix'])
|
||||
|
||||
|
||||
def port_to_br(port):
    """Determine the bridge that contains a port.

    :param port: Name of port to check for
    :returns str: OVS bridge containing port or None if not found
    """
    cmd = ['ovs-vsctl', 'port-to-br', port]
    try:
        output = subprocess.check_output(cmd)
    except subprocess.CalledProcessError:
        # ovs-vsctl exits non-zero when the port is on no bridge.
        return None
    return output.decode('UTF-8').strip()
|
@ -323,6 +323,23 @@ class OpenStackAmuletDeployment(AmuletDeployment):
|
||||
else:
|
||||
return releases[self.series]
|
||||
|
||||
def get_percona_service_entry(self, memory_constraint=None):
|
||||
"""Return a amulet service entry for percona cluster.
|
||||
|
||||
:param memory_constraint: Override the default memory constraint
|
||||
in the service entry.
|
||||
:type memory_constraint: str
|
||||
:returns: Amulet service entry.
|
||||
:rtype: dict
|
||||
"""
|
||||
memory_constraint = memory_constraint or '3072M'
|
||||
svc_entry = {
|
||||
'name': 'percona-cluster',
|
||||
'constraints': {'mem': memory_constraint}}
|
||||
if self._get_openstack_release() <= self.trusty_mitaka:
|
||||
svc_entry['location'] = 'cs:trusty/percona-cluster'
|
||||
return svc_entry
|
||||
|
||||
def get_ceph_expected_pools(self, radosgw=False):
|
||||
"""Return a list of expected ceph pools in a ceph + cinder + glance
|
||||
test scenario, based on OpenStack release and whether ceph radosgw
|
||||
|
@ -126,7 +126,11 @@ def _config_ini(path):
|
||||
:returns: Configuration contained in path
|
||||
:rtype: Dict
|
||||
"""
|
||||
conf = configparser.ConfigParser()
|
||||
# When strict is enabled, duplicate options are not allowed in the
|
||||
# parsed INI; however, Oslo allows duplicate values. This change
|
||||
# causes us to ignore the duplicate values which is acceptable as
|
||||
# long as we don't validate any multi-value options
|
||||
conf = configparser.ConfigParser(strict=False)
|
||||
conf.read(path)
|
||||
return dict(conf)
|
||||
|
||||
@ -204,7 +208,7 @@ def validate_file_ownership(config):
|
||||
"Invalid ownership configuration: {}".format(key))
|
||||
owner = options.get('owner', config.get('owner', 'root'))
|
||||
group = options.get('group', config.get('group', 'root'))
|
||||
optional = options.get('optional', config.get('optional', 'False'))
|
||||
optional = options.get('optional', config.get('optional', False))
|
||||
if '*' in file_name:
|
||||
for file in glob.glob(file_name):
|
||||
if file not in files.keys():
|
||||
@ -226,7 +230,7 @@ def validate_file_permissions(config):
|
||||
raise RuntimeError(
|
||||
"Invalid ownership configuration: {}".format(key))
|
||||
mode = options.get('mode', config.get('permissions', '600'))
|
||||
optional = options.get('optional', config.get('optional', 'False'))
|
||||
optional = options.get('optional', config.get('optional', False))
|
||||
if '*' in file_name:
|
||||
for file in glob.glob(file_name):
|
||||
if file not in files.keys():
|
||||
|
@ -258,7 +258,7 @@ class SharedDBContext(OSContextGenerator):
|
||||
'database_password': rdata.get(password_setting),
|
||||
'database_type': 'mysql+pymysql'
|
||||
}
|
||||
if CompareOpenStackReleases(rel) < 'stein':
|
||||
if CompareOpenStackReleases(rel) < 'queens':
|
||||
ctxt['database_type'] = 'mysql'
|
||||
if self.context_complete(ctxt):
|
||||
db_ssl(rdata, ctxt, self.ssl_dir)
|
||||
@ -443,8 +443,10 @@ class IdentityServiceContext(OSContextGenerator):
|
||||
'api_version': api_version})
|
||||
|
||||
if float(api_version) > 2:
|
||||
ctxt.update({'admin_domain_name':
|
||||
rdata.get('service_domain')})
|
||||
ctxt.update({
|
||||
'admin_domain_name': rdata.get('service_domain'),
|
||||
'service_project_id': rdata.get('service_tenant_id'),
|
||||
'service_domain_id': rdata.get('service_domain_id')})
|
||||
|
||||
# we keep all veriables in ctxt for compatibility and
|
||||
# add nested dictionary for keystone_authtoken generic
|
||||
|
@ -1 +0,0 @@
|
||||
openstack_https_frontend
|
@ -0,0 +1,29 @@
|
||||
{% if endpoints -%}
|
||||
{% for ext_port in ext_ports -%}
|
||||
Listen {{ ext_port }}
|
||||
{% endfor -%}
|
||||
{% for address, endpoint, ext, int in endpoints -%}
|
||||
<VirtualHost {{ address }}:{{ ext }}>
|
||||
ServerName {{ endpoint }}
|
||||
SSLEngine on
|
||||
SSLProtocol +TLSv1 +TLSv1.1 +TLSv1.2
|
||||
SSLCipherSuite HIGH:!RC4:!MD5:!aNULL:!eNULL:!EXP:!LOW:!MEDIUM
|
||||
SSLCertificateFile /etc/apache2/ssl/{{ namespace }}/cert_{{ endpoint }}
|
||||
# See LP 1484489 - this is to support <= 2.4.7 and >= 2.4.8
|
||||
SSLCertificateChainFile /etc/apache2/ssl/{{ namespace }}/cert_{{ endpoint }}
|
||||
SSLCertificateKeyFile /etc/apache2/ssl/{{ namespace }}/key_{{ endpoint }}
|
||||
ProxyPass / http://localhost:{{ int }}/
|
||||
ProxyPassReverse / http://localhost:{{ int }}/
|
||||
ProxyPreserveHost on
|
||||
RequestHeader set X-Forwarded-Proto "https"
|
||||
</VirtualHost>
|
||||
{% endfor -%}
|
||||
<Proxy *>
|
||||
Order deny,allow
|
||||
Allow from all
|
||||
</Proxy>
|
||||
<Location />
|
||||
Order allow,deny
|
||||
Allow from all
|
||||
</Location>
|
||||
{% endif -%}
|
@ -1 +0,0 @@
|
||||
wsgi-openstack-api.conf
|
@ -0,0 +1,91 @@
|
||||
# Configuration file maintained by Juju. Local changes may be overwritten.
|
||||
|
||||
{% if port -%}
|
||||
Listen {{ port }}
|
||||
{% endif -%}
|
||||
|
||||
{% if admin_port -%}
|
||||
Listen {{ admin_port }}
|
||||
{% endif -%}
|
||||
|
||||
{% if public_port -%}
|
||||
Listen {{ public_port }}
|
||||
{% endif -%}
|
||||
|
||||
{% if port -%}
|
||||
<VirtualHost *:{{ port }}>
|
||||
WSGIDaemonProcess {{ service_name }} processes={{ processes }} threads={{ threads }} user={{ user }} group={{ group }} \
|
||||
display-name=%{GROUP}
|
||||
WSGIProcessGroup {{ service_name }}
|
||||
WSGIScriptAlias / {{ script }}
|
||||
WSGIApplicationGroup %{GLOBAL}
|
||||
WSGIPassAuthorization On
|
||||
<IfVersion >= 2.4>
|
||||
ErrorLogFormat "%{cu}t %M"
|
||||
</IfVersion>
|
||||
ErrorLog /var/log/apache2/{{ service_name }}_error.log
|
||||
CustomLog /var/log/apache2/{{ service_name }}_access.log combined
|
||||
|
||||
<Directory /usr/bin>
|
||||
<IfVersion >= 2.4>
|
||||
Require all granted
|
||||
</IfVersion>
|
||||
<IfVersion < 2.4>
|
||||
Order allow,deny
|
||||
Allow from all
|
||||
</IfVersion>
|
||||
</Directory>
|
||||
</VirtualHost>
|
||||
{% endif -%}
|
||||
|
||||
{% if admin_port -%}
|
||||
<VirtualHost *:{{ admin_port }}>
|
||||
WSGIDaemonProcess {{ service_name }}-admin processes={{ admin_processes }} threads={{ threads }} user={{ user }} group={{ group }} \
|
||||
display-name=%{GROUP}
|
||||
WSGIProcessGroup {{ service_name }}-admin
|
||||
WSGIScriptAlias / {{ admin_script }}
|
||||
WSGIApplicationGroup %{GLOBAL}
|
||||
WSGIPassAuthorization On
|
||||
<IfVersion >= 2.4>
|
||||
ErrorLogFormat "%{cu}t %M"
|
||||
</IfVersion>
|
||||
ErrorLog /var/log/apache2/{{ service_name }}_error.log
|
||||
CustomLog /var/log/apache2/{{ service_name }}_access.log combined
|
||||
|
||||
<Directory /usr/bin>
|
||||
<IfVersion >= 2.4>
|
||||
Require all granted
|
||||
</IfVersion>
|
||||
<IfVersion < 2.4>
|
||||
Order allow,deny
|
||||
Allow from all
|
||||
</IfVersion>
|
||||
</Directory>
|
||||
</VirtualHost>
|
||||
{% endif -%}
|
||||
|
||||
{% if public_port -%}
|
||||
<VirtualHost *:{{ public_port }}>
|
||||
WSGIDaemonProcess {{ service_name }}-public processes={{ public_processes }} threads={{ threads }} user={{ user }} group={{ group }} \
|
||||
display-name=%{GROUP}
|
||||
WSGIProcessGroup {{ service_name }}-public
|
||||
WSGIScriptAlias / {{ public_script }}
|
||||
WSGIApplicationGroup %{GLOBAL}
|
||||
WSGIPassAuthorization On
|
||||
<IfVersion >= 2.4>
|
||||
ErrorLogFormat "%{cu}t %M"
|
||||
</IfVersion>
|
||||
ErrorLog /var/log/apache2/{{ service_name }}_error.log
|
||||
CustomLog /var/log/apache2/{{ service_name }}_access.log combined
|
||||
|
||||
<Directory /usr/bin>
|
||||
<IfVersion >= 2.4>
|
||||
Require all granted
|
||||
</IfVersion>
|
||||
<IfVersion < 2.4>
|
||||
Order allow,deny
|
||||
Allow from all
|
||||
</IfVersion>
|
||||
</Directory>
|
||||
</VirtualHost>
|
||||
{% endif -%}
|
@ -1,267 +0,0 @@
|
||||
# Copyright 2014-2015 Canonical Limited.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import json
|
||||
import six
|
||||
|
||||
from charmhelpers.core.hookenv import relation_id as current_relation_id
|
||||
from charmhelpers.core.hookenv import (
|
||||
is_relation_made,
|
||||
relation_ids,
|
||||
relation_get as _relation_get,
|
||||
local_unit,
|
||||
relation_set as _relation_set,
|
||||
leader_get as _leader_get,
|
||||
leader_set,
|
||||
is_leader,
|
||||
)
|
||||
|
||||
|
||||
"""
|
||||
This helper provides functions to support use of a peer relation
|
||||
for basic key/value storage, with the added benefit that all storage
|
||||
can be replicated across peer units.
|
||||
|
||||
Requirement to use:
|
||||
|
||||
To use this, the "peer_echo()" method has to be called form the peer
|
||||
relation's relation-changed hook:
|
||||
|
||||
@hooks.hook("cluster-relation-changed") # Adapt the to your peer relation name
|
||||
def cluster_relation_changed():
|
||||
peer_echo()
|
||||
|
||||
Once this is done, you can use peer storage from anywhere:
|
||||
|
||||
@hooks.hook("some-hook")
|
||||
def some_hook():
|
||||
# You can store and retrieve key/values this way:
|
||||
if is_relation_made("cluster"): # from charmhelpers.core.hookenv
|
||||
# There are peers available so we can work with peer storage
|
||||
peer_store("mykey", "myvalue")
|
||||
value = peer_retrieve("mykey")
|
||||
print value
|
||||
else:
|
||||
print "No peers joined the relation, cannot share key/values :("
|
||||
"""
|
||||
|
||||
|
||||
def leader_get(attribute=None, rid=None):
    """Wrapper to ensure that settings are migrated from the peer relation.

    This is to support upgrading an environment that does not support
    Juju leadership election to one that does.

    If a setting is not extant in the leader-get but is on the relation-get
    peer rel, it is migrated and marked as such so that it is not re-migrated.
    """
    migration_key = '__leader_get_migrated_settings__'
    if not is_leader():
        return _leader_get(attribute=attribute)

    settings_migrated = False
    leader_settings = _leader_get(attribute=attribute)
    previously_migrated = _leader_get(attribute=migration_key)

    # Keys already copied from the peer relation into the leader db.
    if previously_migrated:
        migrated = set(json.loads(previously_migrated))
    else:
        migrated = set()

    try:
        if migration_key in leader_settings:
            del leader_settings[migration_key]
    except TypeError:
        # leader_settings is a scalar (single-attribute lookup); no
        # bookkeeping key to prune.
        pass

    if attribute:
        if attribute in migrated:
            return leader_settings

        # If attribute not present in leader db, check if this unit has set
        # the attribute in the peer relation
        if not leader_settings:
            peer_setting = _relation_get(attribute=attribute,
                                         unit=local_unit(), rid=rid)
            if peer_setting:
                leader_set(settings={attribute: peer_setting})
                leader_settings = peer_setting

        if leader_settings:
            settings_migrated = True
            migrated.add(attribute)
    else:
        r_settings = _relation_get(unit=local_unit(), rid=rid)
        if r_settings:
            for key in set(r_settings.keys()).difference(migrated):
                # Leader setting wins
                if not leader_settings.get(key):
                    leader_settings[key] = r_settings[key]

                settings_migrated = True
                migrated.add(key)

    if settings_migrated:
        leader_set(**leader_settings)

    if migrated and settings_migrated:
        leader_set(settings={migration_key: json.dumps(list(migrated))})

    return leader_settings
|
||||
|
||||
|
||||
def relation_set(relation_id=None, relation_settings=None, **kwargs):
    """Attempt to use leader-set if supported in the current version of Juju,
    otherwise falls back on relation-set.

    Note that we only attempt to use leader-set if the provided relation_id is
    a peer relation id or no relation id is provided (in which case we assume
    we are within the peer relation context).
    """
    try:
        if relation_id not in relation_ids('cluster'):
            raise NotImplementedError
        return leader_set(settings=relation_settings, **kwargs)
    except NotImplementedError:
        return _relation_set(relation_id=relation_id,
                             relation_settings=relation_settings, **kwargs)
|
||||
|
||||
|
||||
def relation_get(attribute=None, unit=None, rid=None):
    """Attempt to use leader-get if supported in the current version of Juju,
    otherwise falls back on relation-get.

    Note that we only attempt to use leader-get if the provided rid is a peer
    relation id or no relation id is provided (in which case we assume we are
    within the peer relation context).
    """
    try:
        if rid not in relation_ids('cluster'):
            raise NotImplementedError
        return leader_get(attribute, rid)
    except NotImplementedError:
        return _relation_get(attribute=attribute, rid=rid, unit=unit)
|
||||
|
||||
|
||||
def peer_retrieve(key, relation_name='cluster'):
    """Retrieve a named key from peer relation `relation_name`.

    :raises ValueError: when no peer relation of that name exists
    """
    cluster_rels = relation_ids(relation_name)
    if not cluster_rels:
        # NOTE: the message previously rendered as
        # "Unable to detectpeer relation ..." due to a missing space in the
        # implicit string concatenation; peer_store already used the
        # correctly spaced message.
        raise ValueError('Unable to detect '
                         'peer relation {}'.format(relation_name))
    return relation_get(attribute=key, rid=cluster_rels[0],
                        unit=local_unit())
|
||||
|
||||
|
||||
def peer_retrieve_by_prefix(prefix, relation_name='cluster', delimiter='_',
                            inc_list=None, exc_list=None):
    """Retrieve k/v pairs given a prefix and filter using {inc,exc}_list."""
    inc_list = inc_list or []
    exc_list = exc_list or []
    peerdb_settings = peer_retrieve('-', relation_name=relation_name)
    if peerdb_settings is None:
        return {}

    full_prefix = prefix + delimiter
    matched = {}
    for key, value in peerdb_settings.items():
        if not key.startswith(full_prefix):
            continue
        short_key = key.replace(full_prefix, '')
        if short_key in exc_list:
            continue
        if short_key in inc_list or not inc_list:
            matched[short_key] = value
    return matched
|
||||
|
||||
|
||||
def peer_store(key, value, relation_name='cluster'):
    """Store the key/value pair on the named peer relation `relation_name`."""
    cluster_rels = relation_ids(relation_name)
    if not cluster_rels:
        raise ValueError('Unable to detect '
                         'peer relation {}'.format(relation_name))
    relation_set(relation_id=cluster_rels[0],
                 relation_settings={key: value})
|
||||
|
||||
|
||||
def peer_echo(includes=None, force=False):
    """Echo filtered attributes back onto the same relation for storage.

    This is a requirement to use the peerstorage module - it needs to be called
    from the peer relation's changed hook.

    If Juju leader support exists this will be a noop unless force is True.
    """
    try:
        is_leader()
    except NotImplementedError:
        # Pre-leadership Juju: fall through and echo via the relation.
        pass
    else:
        if not force:
            return  # NOOP if leader-election is supported

    # Use original non-leader calls
    relation_get = _relation_get
    relation_set = _relation_set

    rdata = relation_get()
    echo_data = {}
    if includes is None:
        echo_data = rdata.copy()
        # Addresses are unit-specific and must never be echoed back.
        for excluded in ('private-address', 'public-address'):
            echo_data.pop(excluded, None)
    else:
        for attribute, value in six.iteritems(rdata):
            if any(include in attribute for include in includes):
                echo_data[attribute] = value
    if echo_data:
        relation_set(relation_settings=echo_data)
|
||||
|
||||
|
||||
def peer_store_and_set(relation_id=None, peer_relation_name='cluster',
                       peer_store_fatal=False, relation_settings=None,
                       delimiter='_', **kwargs):
    """Store passed-in arguments both in argument relation and in peer storage.

    It functions like doing relation_set() and peer_store() at the same time,
    with the same data.

    @param relation_id: the id of the relation to store the data on. Defaults
                        to the current relation.
    @param peer_store_fatal: Set to True, the function will raise an exception
                             should the peer storage not be available.
    """
    relation_settings = relation_settings or {}
    relation_set(relation_id=relation_id,
                 relation_settings=relation_settings,
                 **kwargs)
    if not is_relation_made(peer_relation_name):
        if peer_store_fatal:
            raise ValueError('Unable to detect '
                             'peer relation {}'.format(peer_relation_name))
        return

    key_prefix = relation_id or current_relation_id()
    combined = dict(list(kwargs.items()) + list(relation_settings.items()))
    for key, value in six.iteritems(combined):
        peer_store(key_prefix + delimiter + key, value,
                   relation_name=peer_relation_name)
|
@ -1,116 +0,0 @@
|
||||
# Copyright 2014-2015 Canonical Limited.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Charm Helpers saltstack - declare the state of your machines.
|
||||
|
||||
This helper enables you to declare your machine state, rather than
|
||||
program it procedurally (and have to test each change to your procedures).
|
||||
Your install hook can be as simple as::
|
||||
|
||||
{{{
|
||||
from charmhelpers.contrib.saltstack import (
|
||||
install_salt_support,
|
||||
update_machine_state,
|
||||
)
|
||||
|
||||
|
||||
def install():
|
||||
install_salt_support()
|
||||
update_machine_state('machine_states/dependencies.yaml')
|
||||
update_machine_state('machine_states/installed.yaml')
|
||||
}}}
|
||||
|
||||
and won't need to change (nor will its tests) when you change the machine
|
||||
state.
|
||||
|
||||
It's using a python package called salt-minion which allows various formats for
|
||||
specifying resources, such as::
|
||||
|
||||
{{{
|
||||
/srv/{{ basedir }}:
|
||||
file.directory:
|
||||
- group: ubunet
|
||||
- user: ubunet
|
||||
- require:
|
||||
- user: ubunet
|
||||
- recurse:
|
||||
- user
|
||||
- group
|
||||
|
||||
ubunet:
|
||||
group.present:
|
||||
- gid: 1500
|
||||
user.present:
|
||||
- uid: 1500
|
||||
- gid: 1500
|
||||
- createhome: False
|
||||
- require:
|
||||
- group: ubunet
|
||||
}}}
|
||||
|
||||
The docs for all the different state definitions are at:
|
||||
http://docs.saltstack.com/ref/states/all/
|
||||
|
||||
|
||||
TODO:
|
||||
* Add test helpers which will ensure that machine state definitions
|
||||
are functionally (but not necessarily logically) correct (ie. getting
|
||||
salt to parse all state defs.
|
||||
* Add a link to a public bootstrap charm example / blogpost.
|
||||
* Find a way to obviate the need to use the grains['charm_dir'] syntax
|
||||
in templates.
|
||||
"""
|
||||
# Copyright 2013 Canonical Ltd.
|
||||
#
|
||||
# Authors:
|
||||
# Charm Helpers Developers <juju@lists.ubuntu.com>
|
||||
import subprocess
|
||||
|
||||
import charmhelpers.contrib.templating.contexts
|
||||
import charmhelpers.core.host
|
||||
import charmhelpers.core.hookenv
|
||||
|
||||
|
||||
# Location where juju config/relation state is serialised so salt can
# consume it as grains (see update_machine_state / juju_state_to_yaml).
salt_grains_path = '/etc/salt/grains'
|
||||
|
||||
|
||||
def install_salt_support(from_ppa=True):
    """Install the salt-minion helper for machine state.

    By default the salt-minion package is installed from
    the saltstack PPA. If from_ppa is False you must ensure
    that the salt-minion package is available in the apt cache.

    :param from_ppa: when True (default), add ppa:saltstack/salt and
        refresh the apt cache before installing.
    """
    if from_ppa:
        subprocess.check_call([
            '/usr/bin/add-apt-repository',
            '--yes',
            'ppa:saltstack/salt',
        ])
        subprocess.check_call(['/usr/bin/apt-get', 'update'])
    # We install salt-common as salt-minion would run the salt-minion
    # daemon.
    # Bug fix: this module never imported charmhelpers.fetch at the top of
    # the file, so the attribute access below raised AttributeError; import
    # it locally where it is needed.
    import charmhelpers.fetch
    charmhelpers.fetch.apt_install('salt-common')
||||
|
||||
|
||||
def update_machine_state(state_path):
    """Apply the given salt state definition to this machine.

    The current juju config/relation state is first serialised to the
    salt grains file so that state templates can reference it.

    :param state_path: path of the salt state definition to apply.
    """
    charmhelpers.contrib.templating.contexts.juju_state_to_yaml(
        salt_grains_path)
    salt_cmd = ['salt-call', '--local', 'state.template', state_path]
    subprocess.check_call(salt_cmd)
|
@ -1,92 +0,0 @@
|
||||
# Copyright 2014-2015 Canonical Limited.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import subprocess
|
||||
from charmhelpers.core import hookenv
|
||||
|
||||
|
||||
def generate_selfsigned(keyfile, certfile, keysize="1024", config=None, subject=None, cn=None):
    """Generate selfsigned SSL keypair

    You must provide one of the 3 optional arguments:
    config, subject or cn
    If more than one is provided the leftmost will be used

    Arguments:
    keyfile -- (required) full path to the keyfile to be created
    certfile -- (required) full path to the certfile to be created
    keysize -- (optional) SSL key length
    config -- (optional) openssl configuration file
    subject -- (optional) dictionary with SSL subject variables
    cn -- (optional) certificate common name

    Required keys in subject dict:
    cn -- Common name (eq. FQDN)

    Optional keys in subject dict
    country -- Country Name (2 letter code)
    state -- State or Province Name (full name)
    locality -- Locality Name (eg, city)
    organization -- Organization Name (eg, company)
    organizational_unit -- Organizational Unit Name (eg, section)
    email -- Email Address

    Returns True on success, False when no usable argument was supplied
    or the openssl invocation failed.
    """
    cmd = []
    if config:
        cmd = ["/usr/bin/openssl", "req", "-new", "-newkey",
               "rsa:{}".format(keysize), "-days", "365", "-nodes", "-x509",
               "-keyout", keyfile,
               "-out", certfile, "-config", config]
    elif subject:
        # Build an openssl -subj string from the subject dict; "cn" is
        # mandatory, everything else is optional.
        ssl_subject = ""
        if "country" in subject:
            ssl_subject = ssl_subject + "/C={}".format(subject["country"])
        if "state" in subject:
            ssl_subject = ssl_subject + "/ST={}".format(subject["state"])
        if "locality" in subject:
            ssl_subject = ssl_subject + "/L={}".format(subject["locality"])
        if "organization" in subject:
            ssl_subject = ssl_subject + "/O={}".format(subject["organization"])
        if "organizational_unit" in subject:
            ssl_subject = ssl_subject + "/OU={}".format(subject["organizational_unit"])
        if "cn" in subject:
            ssl_subject = ssl_subject + "/CN={}".format(subject["cn"])
        else:
            hookenv.log("When using \"subject\" argument you must "
                        "provide \"cn\" field at very least")
            return False
        if "email" in subject:
            ssl_subject = ssl_subject + "/emailAddress={}".format(subject["email"])

        cmd = ["/usr/bin/openssl", "req", "-new", "-newkey",
               "rsa:{}".format(keysize), "-days", "365", "-nodes", "-x509",
               "-keyout", keyfile,
               "-out", certfile, "-subj", ssl_subject]
    elif cn:
        cmd = ["/usr/bin/openssl", "req", "-new", "-newkey",
               "rsa:{}".format(keysize), "-days", "365", "-nodes", "-x509",
               "-keyout", keyfile,
               "-out", certfile, "-subj", "/CN={}".format(cn)]

    if not cmd:
        # Bug fix: the two message fragments previously concatenated
        # without a separating space ("provided,unable").
        hookenv.log("No config, subject or cn provided, "
                    "unable to generate self signed SSL certificates")
        return False
    try:
        subprocess.check_call(cmd)
        return True
    except Exception as e:
        # Bug fix: was print(); log through hookenv like the other failure
        # paths so the error reaches the juju log.
        hookenv.log("Execution of openssl command failed:\n{}".format(e))
        return False
|
@ -1,277 +0,0 @@
|
||||
# Copyright 2014-2015 Canonical Limited.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import os
|
||||
from os.path import join as path_join
|
||||
from os.path import exists
|
||||
import subprocess
|
||||
|
||||
from charmhelpers.core.hookenv import log, DEBUG
|
||||
|
||||
STD_CERT = "standard"

# Mysql server is fairly picky about cert creation
# and types, spec its creation separately for now.
MYSQL_CERT = "mysql"


class ServiceCA(object):
    """A minimal per-service certificate authority driven by openssl(1).

    Certificates and CA state live under ``ca_dir`` using the classic
    openssl CA directory layout (certs/, crl/, newcerts/, private/,
    index.txt, serial).
    """

    # Leaf certificates last 2 years, the root certificate 6 years.
    # openssl expects these values as strings of days.
    default_expiry = str(365 * 2)
    default_ca_expiry = str(365 * 6)

    def __init__(self, name, ca_dir, cert_type=STD_CERT):
        self.name = name
        self.ca_dir = ca_dir
        self.cert_type = cert_type

    ###############
    # Hook Helper API
    @staticmethod
    def get_ca(type=STD_CERT):
        """Return an initialized CA for the current juju service.

        The CA directory lives at $CHARM_DIR/ca; the CA name is the
        service part of $JUJU_UNIT_NAME.
        """
        service_name = os.environ['JUJU_UNIT_NAME'].split('/')[0]
        ca_path = os.path.join(os.environ['CHARM_DIR'], 'ca')
        ca = ServiceCA(service_name, ca_path, type)
        ca.init()
        return ca

    @classmethod
    def get_service_cert(cls, type=STD_CERT):
        """Return (cert, key, ca_bundle) for the current service.

        :param type: certificate type, STD_CERT or MYSQL_CERT.
        """
        service_name = os.environ['JUJU_UNIT_NAME'].split('/')[0]
        # Bug fix: the requested cert type was previously ignored
        # (cls.get_ca() was called without arguments).
        ca = cls.get_ca(type)
        crt, key = ca.get_or_create_cert(service_name)
        return crt, key, ca.get_ca_bundle()

    ###############

    def init(self):
        """Create the CA directory layout and root keypair if missing."""
        log("initializing service ca", level=DEBUG)
        if not exists(self.ca_dir):
            self._init_ca_dir(self.ca_dir)
            self._init_ca()

    @property
    def ca_key(self):
        # Root CA private key.
        return path_join(self.ca_dir, 'private', 'cacert.key')

    @property
    def ca_cert(self):
        # Root CA certificate (PEM).
        return path_join(self.ca_dir, 'cacert.pem')

    @property
    def ca_conf(self):
        # openssl config used to create the root certificate.
        return path_join(self.ca_dir, 'ca.cnf')

    @property
    def signing_conf(self):
        # openssl config used when signing leaf certificates.
        return path_join(self.ca_dir, 'signing.cnf')

    def _init_ca_dir(self, ca_dir):
        """Create the openssl CA directory layout under ca_dir."""
        os.mkdir(ca_dir)
        for i in ['certs', 'crl', 'newcerts', 'private']:
            sd = path_join(ca_dir, i)
            if not exists(sd):
                os.mkdir(sd)

        # Serial starts at 02: 01 is reserved for the root certificate.
        if not exists(path_join(ca_dir, 'serial')):
            with open(path_join(ca_dir, 'serial'), 'w') as fh:
                fh.write('02\n')

        if not exists(path_join(ca_dir, 'index.txt')):
            with open(path_join(ca_dir, 'index.txt'), 'w') as fh:
                fh.write('')

    def _init_ca(self):
        """Generate the root ca's cert and key.
        """
        if not exists(path_join(self.ca_dir, 'ca.cnf')):
            with open(path_join(self.ca_dir, 'ca.cnf'), 'w') as fh:
                fh.write(
                    CA_CONF_TEMPLATE % (self.get_conf_variables()))

        if not exists(path_join(self.ca_dir, 'signing.cnf')):
            with open(path_join(self.ca_dir, 'signing.cnf'), 'w') as fh:
                fh.write(
                    SIGNING_CONF_TEMPLATE % (self.get_conf_variables()))

        if exists(self.ca_cert) or exists(self.ca_key):
            raise RuntimeError("Initialized called when CA already exists")
        cmd = ['openssl', 'req', '-config', self.ca_conf,
               '-x509', '-nodes', '-newkey', 'rsa',
               '-days', self.default_ca_expiry,
               '-keyout', self.ca_key, '-out', self.ca_cert,
               '-outform', 'PEM']
        output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
        log("CA Init:\n %s" % output, level=DEBUG)

    def get_conf_variables(self):
        """Return the substitution dict for the openssl config templates."""
        return dict(
            org_name="juju",
            org_unit_name="%s service" % self.name,
            common_name=self.name,
            ca_dir=self.ca_dir)

    def get_or_create_cert(self, common_name):
        """Return (cert, key) for common_name, creating them if needed."""
        if common_name in self:
            return self.get_certificate(common_name)
        return self.create_certificate(common_name)

    def create_certificate(self, common_name):
        """Create and return (cert, key) for common_name (idempotent)."""
        if common_name in self:
            return self.get_certificate(common_name)
        key_p = path_join(self.ca_dir, "certs", "%s.key" % common_name)
        crt_p = path_join(self.ca_dir, "certs", "%s.crt" % common_name)
        csr_p = path_join(self.ca_dir, "certs", "%s.csr" % common_name)
        self._create_certificate(common_name, key_p, csr_p, crt_p)
        return self.get_certificate(common_name)

    def get_certificate(self, common_name):
        """Return (cert, key) text for an existing certificate.

        :raises ValueError: if no certificate exists for common_name.
        """
        if common_name not in self:
            raise ValueError("No certificate for %s" % common_name)
        key_p = path_join(self.ca_dir, "certs", "%s.key" % common_name)
        crt_p = path_join(self.ca_dir, "certs", "%s.crt" % common_name)
        with open(crt_p) as fh:
            crt = fh.read()
        with open(key_p) as fh:
            key = fh.read()
        return crt, key

    def __contains__(self, common_name):
        # A certificate "exists" when its .crt file is on disk.
        crt_p = path_join(self.ca_dir, "certs", "%s.crt" % common_name)
        return exists(crt_p)

    def _create_certificate(self, common_name, key_p, csr_p, crt_p):
        """Create a key + CSR and sign it, writing the three files given."""
        template_vars = self.get_conf_variables()
        template_vars['common_name'] = common_name
        subj = '/O=%(org_name)s/OU=%(org_unit_name)s/CN=%(common_name)s' % (
            template_vars)

        log("CA Create Cert %s" % common_name, level=DEBUG)
        cmd = ['openssl', 'req', '-sha1', '-newkey', 'rsa:2048',
               '-nodes', '-days', self.default_expiry,
               '-keyout', key_p, '-out', csr_p, '-subj', subj]
        subprocess.check_call(cmd, stderr=subprocess.PIPE)
        # Strip the passphrase wrapper from the generated key.
        cmd = ['openssl', 'rsa', '-in', key_p, '-out', key_p]
        subprocess.check_call(cmd, stderr=subprocess.PIPE)

        log("CA Sign Cert %s" % common_name, level=DEBUG)
        if self.cert_type == MYSQL_CERT:
            # MySQL is picky about certificate types; sign directly with
            # x509 -req instead of the openssl ca machinery.
            cmd = ['openssl', 'x509', '-req',
                   '-in', csr_p, '-days', self.default_expiry,
                   '-CA', self.ca_cert, '-CAkey', self.ca_key,
                   '-set_serial', '01', '-out', crt_p]
        else:
            cmd = ['openssl', 'ca', '-config', self.signing_conf,
                   '-extensions', 'req_extensions',
                   '-days', self.default_expiry, '-notext',
                   '-in', csr_p, '-out', crt_p, '-subj', subj, '-batch']
        log("running %s" % " ".join(cmd), level=DEBUG)
        subprocess.check_call(cmd, stderr=subprocess.PIPE)

    def get_ca_bundle(self):
        """Return the root CA certificate text."""
        with open(self.ca_cert) as fh:
            return fh.read()
|
||||
|
||||
|
||||
# openssl configuration used once, to create the root CA certificate.
# %(...)s placeholders are filled by ServiceCA.get_conf_variables().
CA_CONF_TEMPLATE = """
[ ca ]
default_ca = CA_default

[ CA_default ]
dir = %(ca_dir)s
policy = policy_match
database = $dir/index.txt
serial = $dir/serial
certs = $dir/certs
crl_dir = $dir/crl
new_certs_dir = $dir/newcerts
certificate = $dir/cacert.pem
private_key = $dir/private/cacert.key
RANDFILE = $dir/private/.rand
default_md = default

[ req ]
default_bits = 1024
default_md = sha1

prompt = no
distinguished_name = ca_distinguished_name

x509_extensions = ca_extensions

[ ca_distinguished_name ]
organizationName = %(org_name)s
organizationalUnitName = %(org_unit_name)s Certificate Authority


[ policy_match ]
countryName = optional
stateOrProvinceName = optional
organizationName = match
organizationalUnitName = optional
commonName = supplied

[ ca_extensions ]
basicConstraints = critical,CA:true
subjectKeyIdentifier = hash
authorityKeyIdentifier = keyid:always, issuer
keyUsage = cRLSign, keyCertSign
"""


# openssl configuration used by ServiceCA when signing leaf (service)
# certificates; differs from CA_CONF_TEMPLATE in the DN/extension
# sections (CA:false, server/client auth key usage).
SIGNING_CONF_TEMPLATE = """
[ ca ]
default_ca = CA_default

[ CA_default ]
dir = %(ca_dir)s
policy = policy_match
database = $dir/index.txt
serial = $dir/serial
certs = $dir/certs
crl_dir = $dir/crl
new_certs_dir = $dir/newcerts
certificate = $dir/cacert.pem
private_key = $dir/private/cacert.key
RANDFILE = $dir/private/.rand
default_md = default

[ req ]
default_bits = 1024
default_md = sha1

prompt = no
distinguished_name = req_distinguished_name

x509_extensions = req_extensions

[ req_distinguished_name ]
organizationName = %(org_name)s
organizationalUnitName = %(org_unit_name)s machine resources
commonName = %(common_name)s

[ policy_match ]
countryName = optional
stateOrProvinceName = optional
organizationName = match
organizationalUnitName = optional
commonName = supplied

[ req_extensions ]
basicConstraints = CA:false
subjectKeyIdentifier = hash
authorityKeyIdentifier = keyid:always, issuer
keyUsage = digitalSignature, keyEncipherment, keyAgreement
extendedKeyUsage = serverAuth, clientAuth
"""
|
@ -1482,6 +1482,21 @@ def send_request_if_needed(request, relation='ceph'):
|
||||
relation_set(relation_id=rid, broker_req=request.request)
|
||||
|
||||
|
||||
def has_broker_rsp(rid=None, unit=None):
    """Return True if the broker_rsp key is 'truthy' (i.e. set to something) in the relation data.

    :param rid: The relation to check (default of None means current relation)
    :type rid: Union[str, None]
    :param unit: The remote unit to check (default of None means current unit)
    :type unit: Union[str, None]
    :returns: True if broker key exists and is set to something 'truthy'
    :rtype: bool
    """
    rdata = relation_get(rid=rid, unit=unit) or {}
    # bool() replaces the redundant "True if x else False" construct.
    return bool(rdata.get(get_broker_rsp_key()))
|
||||
|
||||
|
||||
def is_broker_action_done(action, rid=None, unit=None):
|
||||
"""Check whether broker action has completed yet.
|
||||
|
||||
|
@ -1,13 +0,0 @@
|
||||
# Copyright 2014-2015 Canonical Limited.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
@ -1,137 +0,0 @@
|
||||
# Copyright 2014-2015 Canonical Limited.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# Copyright 2013 Canonical Ltd.
|
||||
#
|
||||
# Authors:
|
||||
# Charm Helpers Developers <juju@lists.ubuntu.com>
|
||||
"""A helper to create a yaml cache of config with namespaced relation data."""
|
||||
import os
|
||||
import yaml
|
||||
|
||||
import six
|
||||
|
||||
import charmhelpers.core.hookenv
|
||||
|
||||
|
||||
# Charm directory as set by juju in the hook environment; empty string
# when running outside a hook (e.g. tests).
charm_dir = os.environ.get('CHARM_DIR', '')
|
||||
|
||||
|
||||
def dict_keys_without_hyphens(a_dict):
    """Return a new dict whose keys use underscores instead of hyphens."""
    return {key.replace('-', '_'): value for key, value in a_dict.items()}
|
||||
|
||||
|
||||
def update_relations(context, namespace_separator=':'):
    """Update the context with the relation data.

    Adds three things to *context* (mutated in place):
    - 'current_relation': raw relation-get data for the hook's relation;
    - deprecated namespaced keys ("<reltype><sep><key>") merged directly
      into the context;
    - 'relations_full' (raw hookenv.relations()) and 'relations', a
      per-relation-name list of unit dicts tagged with __relid__/__unit__.
    """
    # Add any relation data prefixed with the relation type.
    relation_type = charmhelpers.core.hookenv.relation_type()
    relations = []
    context['current_relation'] = {}
    if relation_type is not None:
        relation_data = charmhelpers.core.hookenv.relation_get()
        context['current_relation'] = relation_data
        # Deprecated: the following use of relation data as keys
        # directly in the context will be removed.
        relation_data = dict(
            ("{relation_type}{namespace_separator}{key}".format(
                relation_type=relation_type,
                key=key,
                namespace_separator=namespace_separator), val)
            for key, val in relation_data.items())
        relation_data = dict_keys_without_hyphens(relation_data)
        context.update(relation_data)
        # NOTE(review): the two assignments below appear to be dead code —
        # 'relations' is unconditionally rebound to {} further down before
        # this list is ever read. Verify relations_of_type() has no needed
        # side effect before removing.
        relations = charmhelpers.core.hookenv.relations_of_type(relation_type)
        relations = [dict_keys_without_hyphens(rel) for rel in relations]

    context['relations_full'] = charmhelpers.core.hookenv.relations()

    # the hookenv.relations() data structure is effectively unusable in
    # templates and other contexts when trying to access relation data other
    # than the current relation. So provide a more useful structure that works
    # with any hook.
    local_unit = charmhelpers.core.hookenv.local_unit()
    relations = {}
    for rname, rids in context['relations_full'].items():
        relations[rname] = []
        for rid, rdata in rids.items():
            data = rdata.copy()
            # Exclude this unit's own settings from the peer view.
            if local_unit in rdata:
                data.pop(local_unit)
            for unit_name, rel_data in data.items():
                new_data = {'__relid__': rid, '__unit__': unit_name}
                new_data.update(rel_data)
                relations[rname].append(new_data)
    context['relations'] = relations
|
||||
|
||||
|
||||
def juju_state_to_yaml(yaml_path, namespace_separator=':',
                       allow_hyphens_in_keys=True, mode=None):
    """Update the juju config and state in a yaml file.

    This includes any current relation-get data, and the charm
    directory.

    This function was created for the ansible and saltstack
    support, as those libraries can use a yaml file to supply
    context to templates, but it may be useful generally to
    create and update an on-disk cache of all the config, including
    previous relation data.

    By default, hyphens are allowed in keys as this is supported
    by yaml, but for tools like ansible, hyphens are not valid [1].

    [1] http://www.ansibleworks.com/docs/playbooks_variables.html#what-makes-a-valid-variable-name

    :param yaml_path: file to create/update with the serialised state.
    :param namespace_separator: separator for relation-namespaced keys.
    :param allow_hyphens_in_keys: when False, hyphens in config keys are
        rewritten to underscores.
    :param mode: optional file mode (e.g. 0o600) applied to yaml_path.
    """
    config = charmhelpers.core.hookenv.config()

    # Add the charm_dir which we will need to refer to charm
    # file resources etc.
    config['charm_dir'] = charm_dir
    config['local_unit'] = charmhelpers.core.hookenv.local_unit()
    config['unit_private_address'] = charmhelpers.core.hookenv.unit_private_ip()
    config['unit_public_address'] = charmhelpers.core.hookenv.unit_get(
        'public-address'
    )

    # Don't use non-standard tags for unicode which will not
    # work when salt uses yaml.load_safe.
    yaml.add_representer(six.text_type,
                         lambda dumper, value: dumper.represent_scalar(
                             six.u('tag:yaml.org,2002:str'), value))

    yaml_dir = os.path.dirname(yaml_path)
    # Bug fix: guard against a bare filename — os.makedirs('') raises.
    if yaml_dir and not os.path.exists(yaml_dir):
        os.makedirs(yaml_dir)

    if os.path.exists(yaml_path):
        with open(yaml_path, "r") as existing_vars_file:
            # Security/bug fix: use safe_load (yaml.load can construct
            # arbitrary python objects), and treat an empty cache file
            # (which parses to None) as an empty dict.
            existing_vars = yaml.safe_load(existing_vars_file.read()) or {}
    else:
        with open(yaml_path, "w+"):
            pass
        existing_vars = {}

    if mode is not None:
        os.chmod(yaml_path, mode)

    if not allow_hyphens_in_keys:
        config = dict_keys_without_hyphens(config)
    existing_vars.update(config)

    update_relations(existing_vars, namespace_separator)

    with open(yaml_path, "w+") as fp:
        fp.write(yaml.dump(existing_vars, default_flow_style=False))
|
@ -1,38 +0,0 @@
|
||||
# Copyright 2014-2015 Canonical Limited.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""
|
||||
Templating using the python-jinja2 package.
|
||||
"""
|
||||
import six
|
||||
from charmhelpers.fetch import apt_install, apt_update
|
||||
try:
|
||||
import jinja2
|
||||
except ImportError:
|
||||
apt_update(fatal=True)
|
||||
if six.PY3:
|
||||
apt_install(["python3-jinja2"], fatal=True)
|
||||
else:
|
||||
apt_install(["python-jinja2"], fatal=True)
|
||||
import jinja2
|
||||
|
||||
|
||||
# Directory searched for jinja2 templates, relative to the charm.
DEFAULT_TEMPLATES_DIR = 'templates'


def render(template_name, context, template_dir=DEFAULT_TEMPLATES_DIR):
    """Render the named jinja2 template from template_dir with context."""
    env = jinja2.Environment(
        loader=jinja2.FileSystemLoader(template_dir))
    return env.get_template(template_name).render(context)
|
@ -1,27 +0,0 @@
|
||||
# Copyright 2014-2015 Canonical Limited.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
'''
|
||||
Templating using standard Python str.format() method.
|
||||
'''
|
||||
|
||||
from charmhelpers.core import hookenv
|
||||
|
||||
|
||||
def render(template, extra=None, **kwargs):
    """Return the template rendered using Python's str.format().

    The format context is the hook execution environment, optionally
    overlaid with *extra* and then any keyword arguments.

    :param template: format string to render.
    :param extra: optional dict of additional context values. (Bug fix:
        previously defaulted to a shared mutable ``{}``.)
    """
    context = hookenv.execution_environment()
    context.update(extra or {})
    context.update(kwargs)
    return template.format(**context)
|
@ -1,314 +0,0 @@
|
||||
# Copyright 2014-2015 Canonical Limited.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# Easy file synchronization among peer units using ssh + unison.
|
||||
#
|
||||
# For the -joined, -changed, and -departed peer relations, add a call to
|
||||
# ssh_authorized_peers() describing the peer relation and the desired
|
||||
# user + group. After all peer relations have settled, all hosts should
|
||||
# be able to connect to one another via key auth'd ssh as the specified user.
|
||||
#
|
||||
# Other hooks are then free to synchronize files and directories using
|
||||
# sync_to_peers().
|
||||
#
|
||||
# For a peer relation named 'cluster', for example:
|
||||
#
|
||||
# cluster-relation-joined:
|
||||
# ...
|
||||
# ssh_authorized_peers(peer_interface='cluster',
|
||||
# user='juju_ssh', group='juju_ssh',
|
||||
# ensure_local_user=True)
|
||||
# ...
|
||||
#
|
||||
# cluster-relation-changed:
|
||||
# ...
|
||||
# ssh_authorized_peers(peer_interface='cluster',
|
||||
# user='juju_ssh', group='juju_ssh',
|
||||
# ensure_local_user=True)
|
||||
# ...
|
||||
#
|
||||
# cluster-relation-departed:
|
||||
# ...
|
||||
# ssh_authorized_peers(peer_interface='cluster',
|
||||
# user='juju_ssh', group='juju_ssh',
|
||||
# ensure_local_user=True)
|
||||
# ...
|
||||
#
|
||||
# Hooks are now free to sync files as easily as:
|
||||
#
|
||||
# files = ['/etc/fstab', '/etc/apt.conf.d/']
|
||||
# sync_to_peers(peer_interface='cluster',
|
||||
#                   user='juju_ssh', paths=[files])
|
||||
#
|
||||
# It is assumed the charm itself has setup permissions on each unit
|
||||
# such that 'juju_ssh' has read + write permissions. Also assumed
|
||||
# that the calling charm takes care of leader delegation.
|
||||
#
|
||||
# Additionally files can be synchronized only to an specific unit:
|
||||
# sync_to_peer(slave_address, user='juju_ssh',
|
||||
# paths=[files], verbose=False)
|
||||
|
||||
import os
|
||||
import pwd
|
||||
|
||||
from copy import copy
|
||||
from subprocess import check_call, check_output
|
||||
|
||||
from charmhelpers.core.host import (
|
||||
adduser,
|
||||
add_user_to_group,
|
||||
pwgen,
|
||||
remove_password_expiry,
|
||||
)
|
||||
|
||||
from charmhelpers.core.hookenv import (
|
||||
log,
|
||||
hook_name,
|
||||
relation_ids,
|
||||
related_units,
|
||||
relation_set,
|
||||
relation_get,
|
||||
unit_private_ip,
|
||||
INFO,
|
||||
ERROR,
|
||||
)
|
||||
|
||||
# Options shared by every unison invocation: non-interactive batch mode,
# fast change detection, prefer the newer copy on conflict, preserve
# mtimes, and skip owner/group replication (the sync user owns the files).
BASE_CMD = ['unison', '-auto', '-batch=true', '-confirmbigdel=false',
            '-fastcheck=true', '-group=false', '-owner=false',
            '-prefer=newer', '-times=true']
|
||||
|
||||
|
||||
def get_homedir(user):
    """Return the home directory of a local user.

    :param user: local account name.
    :raises Exception: if the user does not exist on this machine.
    """
    try:
        user = pwd.getpwnam(user)
        return user.pw_dir
    except KeyError:
        log('Could not get homedir for user %s: user exists?' % (user), ERROR)
        # Bug fix: was a bare ``raise Exception`` with no message.
        raise Exception('Could not get homedir for user %s' % user)
|
||||
|
||||
|
||||
def create_private_key(user, priv_key_path, key_type='rsa'):
    """Create an ssh private key for user, if one does not already exist.

    Ownership and 0600 permissions are (re)applied either way. Unknown
    key types fall back to rsa.
    """
    types_bits = {
        'rsa': '2048',
        'ecdsa': '521',
    }
    if key_type not in types_bits:
        log('Unknown ssh key type {}, using rsa'.format(key_type), ERROR)
        key_type = 'rsa'
    if os.path.isfile(priv_key_path):
        log('SSH key already exists at %s.' % priv_key_path)
    else:
        log('Generating new SSH key for user %s.' % user)
        check_call(['ssh-keygen', '-q', '-N', '', '-t', key_type,
                    '-b', types_bits[key_type], '-f', priv_key_path])
    check_call(['chown', user, priv_key_path])
    check_call(['chmod', '0600', priv_key_path])
|
||||
|
||||
|
||||
def create_public_key(user, priv_key_path, pub_key_path):
    """Derive the ssh public key from the private key if it is missing.

    Ownership is (re)asserted even when the key already existed.
    """
    if not os.path.isfile(pub_key_path):
        log('Generating missing ssh public key @ %s.' % pub_key_path)
        derived = check_output(['ssh-keygen', '-y', '-f', priv_key_path]).strip()
        with open(pub_key_path, 'wb') as out:
            out.write(derived)
    check_call(['chown', user, pub_key_path])
|
||||
|
||||
|
||||
def get_keypair(user):
    """Return (private, public) ssh key text for user, creating the pair
    (and ~/.ssh) if necessary."""
    ssh_dir = os.path.join(get_homedir(user), '.ssh')
    if not os.path.isdir(ssh_dir):
        os.mkdir(ssh_dir)
        check_call(['chown', '-R', user, ssh_dir])

    priv_key = os.path.join(ssh_dir, 'id_rsa')
    pub_key = '%s.pub' % priv_key
    create_private_key(user, priv_key)
    create_public_key(user, priv_key, pub_key)

    with open(priv_key, 'r') as fh:
        private = fh.read().strip()
    with open(pub_key, 'r') as fh:
        public = fh.read().strip()

    return (private, public)
|
||||
|
||||
|
||||
def write_authorized_keys(user, keys):
    """Overwrite user's ~/.ssh/authorized_keys with the given keys."""
    auth_keys = os.path.join(get_homedir(user), '.ssh', 'authorized_keys')
    log('Syncing authorized_keys @ %s.' % auth_keys)
    with open(auth_keys, 'w') as out:
        out.writelines('%s\n' % key for key in keys)
|
||||
|
||||
|
||||
def write_known_hosts(user, hosts):
    """Rewrite user's ~/.ssh/known_hosts from ssh-keyscan of hosts."""
    known_hosts = os.path.join(get_homedir(user), '.ssh', 'known_hosts')
    scanned = [
        check_output(['ssh-keyscan', host], universal_newlines=True).strip()
        for host in hosts
    ]
    log('Syncing known_hosts @ %s.' % known_hosts)
    with open(known_hosts, 'w') as out:
        out.writelines('%s\n' % entry for entry in scanned)
|
||||
|
||||
|
||||
def ensure_user(user, group=None):
    """Ensure *user* exists locally, optionally as a member of *group*.

    The account gets a generated password whose expiry is removed
    (Bug #1686085) so unattended syncing keeps working.
    """
    adduser(user, pwgen())
    if group:
        add_user_to_group(user, group)
    remove_password_expiry(user)
|
||||
|
||||
|
||||
def ssh_authorized_peers(peer_interface, user, group=None,
                         ensure_local_user=False):
    """
    Main setup function, should be called from both peer -changed and -joined
    hooks with the same parameters.

    On -joined: publishes this unit's ssh public key on the peer relation.
    On -changed/-departed: collects peers' published keys, rewrites the
    user's authorized_keys and known_hosts, and publishes the list of
    hosts this unit now authorizes (colon-separated) back on the relation.
    """
    if ensure_local_user:
        ensure_user(user, group)
    priv_key, pub_key = get_keypair(user)
    hook = hook_name()
    if hook == '%s-relation-joined' % peer_interface:
        relation_set(ssh_pub_key=pub_key)
    elif hook == '%s-relation-changed' % peer_interface or \
            hook == '%s-relation-departed' % peer_interface:
        hosts = []
        keys = []

        for r_id in relation_ids(peer_interface):
            for unit in related_units(r_id):
                ssh_pub_key = relation_get('ssh_pub_key',
                                           rid=r_id,
                                           unit=unit)
                priv_addr = relation_get('private-address',
                                         rid=r_id,
                                         unit=unit)
                # Peers that have not published a key yet are skipped;
                # they will be picked up on a later -changed hook.
                if ssh_pub_key:
                    keys.append(ssh_pub_key)
                    hosts.append(priv_addr)
                else:
                    log('ssh_authorized_peers(): ssh_pub_key '
                        'missing for unit %s, skipping.' % unit)
        write_authorized_keys(user, keys)
        write_known_hosts(user, hosts)
        authed_hosts = ':'.join(hosts)
        relation_set(ssh_authorized_hosts=authed_hosts)
|
||||
|
||||
|
||||
def _run_as_user(user, gid=None):
|
||||
try:
|
||||
user = pwd.getpwnam(user)
|
||||
except KeyError:
|
||||
log('Invalid user: %s' % user)
|
||||
raise Exception
|
||||
uid = user.pw_uid
|
||||
gid = gid or user.pw_gid
|
||||
os.environ['HOME'] = user.pw_dir
|
||||
|
||||
def _inner():
|
||||
os.setgid(gid)
|
||||
os.setuid(uid)
|
||||
return _inner
|
||||
|
||||
|
||||
def run_as_user(user, cmd, gid=None):
    """Run *cmd* as *user* (and optionally *gid*), returning its output."""
    preexec = _run_as_user(user, gid)
    return check_output(cmd, preexec_fn=preexec, cwd='/')
||||
def collect_authed_hosts(peer_interface):
    '''Iterate through the units on peer interface to find all that
    have the calling host in its authorized hosts list'''
    hosts = []
    for r_id in (relation_ids(peer_interface) or []):
        for unit in related_units(r_id):
            private_addr = relation_get('private-address',
                                        rid=r_id, unit=unit)
            authed_hosts = relation_get('ssh_authorized_hosts',
                                        rid=r_id, unit=unit)

            if not authed_hosts:
                log('Peer %s has not authorized *any* hosts yet, skipping.' %
                    (unit), level=INFO)
            elif unit_private_ip() in authed_hosts.split(':'):
                # This host appears in the peer's authorized list.
                hosts.append(private_addr)
            else:
                log('Peer %s has not authorized *this* host yet, skipping.' %
                    (unit), level=INFO)
    return hosts
||||
def sync_path_to_host(path, host, user, verbose=False, cmd=None, gid=None,
                      fatal=False):
    """Sync path to an specific peer host

    Propagates exception if operation fails and fatal=True.
    """
    unison_cmd = cmd or copy(BASE_CMD)
    if not verbose:
        unison_cmd.append('-silent')

    # removing trailing slash from directory paths, unison
    # doesn't like these.
    if path.endswith('/'):
        path = path[:-1]

    unison_cmd = unison_cmd + [path, 'ssh://%s@%s/%s' % (user, host, path)]

    try:
        log('Syncing local path %s to %s@%s:%s' % (path, user, host, path))
        run_as_user(user, unison_cmd, gid)
    except Exception:
        log('Error syncing remote files')
        if fatal:
            raise
||||
def sync_to_peer(host, user, paths=None, verbose=False, cmd=None, gid=None,
                 fatal=False):
    """Sync paths to an specific peer host

    Propagates exception if any operation fails and fatal=True.
    """
    for path in (paths or []):
        sync_path_to_host(path, host, user, verbose, cmd, gid, fatal)
||||
def sync_to_peers(peer_interface, user, paths=None, verbose=False, cmd=None,
                  gid=None, fatal=False):
    """Sync *paths* to every peer that has authorized this host.

    *gid* is an integer group id, allowing the synced files to carry a
    group other than the user's primary group.

    Propagates exception if any operation fails and fatal=True.
    """
    if not paths:
        return
    for host in collect_authed_hosts(peer_interface):
        sync_to_peer(host, user, paths, verbose, cmd, gid, fatal)
@ -1,606 +0,0 @@
|
||||
# Copyright 2014-2015 Canonical Limited.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
'''
|
||||
The coordinator module allows you to use Juju's leadership feature to
|
||||
coordinate operations between units of a service.
|
||||
|
||||
Behavior is defined in subclasses of coordinator.BaseCoordinator.
|
||||
One implementation is provided (coordinator.Serial), which allows an
|
||||
operation to be run on a single unit at a time, on a first come, first
|
||||
served basis. You can trivially define more complex behavior by
|
||||
subclassing BaseCoordinator or Serial.
|
||||
|
||||
:author: Stuart Bishop <stuart.bishop@canonical.com>
|
||||
|
||||
|
||||
Services Framework Usage
|
||||
========================
|
||||
|
||||
Ensure a peers relation is defined in metadata.yaml. Instantiate a
|
||||
BaseCoordinator subclass before invoking ServiceManager.manage().
|
||||
Ensure that ServiceManager.manage() is wired up to the leader-elected,
|
||||
leader-settings-changed, peers relation-changed and peers
|
||||
relation-departed hooks in addition to any other hooks you need, or your
|
||||
service will deadlock.
|
||||
|
||||
Ensure calls to acquire() are guarded, so that locks are only requested
|
||||
when they are really needed (and thus hooks only triggered when necessary).
|
||||
Failing to do this and calling acquire() unconditionally will put your unit
|
||||
into a hook loop. Calls to granted() do not need to be guarded.
|
||||
|
||||
For example::
|
||||
|
||||
from charmhelpers.core import hookenv, services
|
||||
from charmhelpers import coordinator
|
||||
|
||||
def maybe_restart(servicename):
|
||||
serial = coordinator.Serial()
|
||||
if needs_restart():
|
||||
serial.acquire('restart')
|
||||
if serial.granted('restart'):
|
||||
hookenv.service_restart(servicename)
|
||||
|
||||
services = [dict(service='servicename',
|
||||
data_ready=[maybe_restart])]
|
||||
|
||||
if __name__ == '__main__':
|
||||
_ = coordinator.Serial() # Must instantiate before manager.manage()
|
||||
manager = services.ServiceManager(services)
|
||||
manager.manage()
|
||||
|
||||
|
||||
You can implement a similar pattern using a decorator. If the lock has
|
||||
not been granted, an attempt to acquire() it will be made if the guard
|
||||
function returns True. If the lock has been granted, the decorated function
|
||||
is run as normal::
|
||||
|
||||
from charmhelpers.core import hookenv, services
|
||||
from charmhelpers import coordinator
|
||||
|
||||
serial = coordinator.Serial()  # Global, instantiated on module import.
|
||||
|
||||
def needs_restart():
|
||||
[ ... Introspect state. Return True if restart is needed ... ]
|
||||
|
||||
@serial.require('restart', needs_restart)
|
||||
def maybe_restart(servicename):
|
||||
hookenv.service_restart(servicename)
|
||||
|
||||
services = [dict(service='servicename',
|
||||
data_ready=[maybe_restart])]
|
||||
|
||||
if __name__ == '__main__':
|
||||
manager = services.ServiceManager(services)
|
||||
manager.manage()
|
||||
|
||||
|
||||
Traditional Usage
|
||||
=================
|
||||
|
||||
Ensure a peers relation is defined in metadata.yaml.
|
||||
|
||||
If you are using charmhelpers.core.hookenv.Hooks, ensure that a
|
||||
BaseCoordinator subclass is instantiated before calling Hooks.execute.
|
||||
|
||||
If you are not using charmhelpers.core.hookenv.Hooks, ensure
|
||||
that a BaseCoordinator subclass is instantiated and its handle()
|
||||
method called at the start of all your hooks.
|
||||
|
||||
For example::
|
||||
|
||||
import sys
|
||||
from charmhelpers.core import hookenv
|
||||
from charmhelpers import coordinator
|
||||
|
||||
hooks = hookenv.Hooks()
|
||||
|
||||
def maybe_restart():
|
||||
serial = coordinator.Serial()
|
||||
if serial.granted('restart'):
|
||||
hookenv.service_restart('myservice')
|
||||
|
||||
@hooks.hook
|
||||
def config_changed():
|
||||
update_config()
|
||||
serial = coordinator.Serial()
|
||||
if needs_restart():
|
||||
serial.acquire('restart'):
|
||||
maybe_restart()
|
||||
|
||||
# Cluster hooks must be wired up.
|
||||
@hooks.hook('cluster-relation-changed', 'cluster-relation-departed')
|
||||
def cluster_relation_changed():
|
||||
maybe_restart()
|
||||
|
||||
# Leader hooks must be wired up.
|
||||
@hooks.hook('leader-elected', 'leader-settings-changed')
|
||||
def leader_settings_changed():
|
||||
maybe_restart()
|
||||
|
||||
[ ... repeat for *all* other hooks you are using ... ]
|
||||
|
||||
if __name__ == '__main__':
|
||||
_ = coordinator.Serial() # Must instantiate before execute()
|
||||
hooks.execute(sys.argv)
|
||||
|
||||
|
||||
You can also use the require decorator. If the lock has not been granted,
|
||||
an attempt to acquire() it will be made if the guard function returns True.
|
||||
If the lock has been granted, the decorated function is run as normal::
|
||||
|
||||
from charmhelpers.core import hookenv
|
||||
|
||||
hooks = hookenv.Hooks()
|
||||
serial = coordinator.Serial() # Must instantiate before execute()
|
||||
|
||||
@require('restart', needs_restart)
|
||||
def maybe_restart():
|
||||
hookenv.service_restart('myservice')
|
||||
|
||||
@hooks.hook('install', 'config-changed', 'upgrade-charm',
|
||||
# Peers and leader hooks must be wired up.
|
||||
'cluster-relation-changed', 'cluster-relation-departed',
|
||||
'leader-elected', 'leader-settings-changed')
|
||||
def default_hook():
|
||||
[...]
|
||||
maybe_restart()
|
||||
|
||||
if __name__ == '__main__':
|
||||
hooks.execute()
|
||||
|
||||
|
||||
Details
|
||||
=======
|
||||
|
||||
A simple API is provided similar to traditional locking APIs. A lock
|
||||
may be requested using the acquire() method, and the granted() method
|
||||
may be used to check if a lock previously requested by acquire() has
|
||||
been granted. It doesn't matter how many times acquire() is called in a
|
||||
hook.
|
||||
|
||||
Locks are released at the end of the hook they are acquired in. This may
|
||||
be the current hook if the unit is leader and the lock is free. It is
|
||||
more likely a future hook (probably leader-settings-changed, possibly
|
||||
the peers relation-changed or departed hook, potentially any hook).
|
||||
|
||||
Whenever a charm needs to perform a coordinated action it will acquire()
|
||||
the lock and perform the action immediately if acquisition is
|
||||
successful. It will also need to perform the same action in every other
|
||||
hook if the lock has been granted.
|
||||
|
||||
|
||||
Grubby Details
|
||||
--------------
|
||||
|
||||
Why do you need to be able to perform the same action in every hook?
|
||||
If the unit is the leader, then it may be able to grant its own lock
|
||||
and perform the action immediately in the source hook. If the unit is
|
||||
the leader and cannot immediately grant the lock, then its only
|
||||
guaranteed chance of acquiring the lock is in the peers relation-joined,
|
||||
relation-changed or peers relation-departed hooks when another unit has
|
||||
released it (the only channel to communicate to the leader is the peers
|
||||
relation). If the unit is not the leader, then it is unlikely the lock
|
||||
is granted in the source hook (a previous hook must have also made the
|
||||
request for this to happen). A non-leader is notified about the lock via
|
||||
leader settings. These changes may be visible in any hook, even before
|
||||
the leader-settings-changed hook has been invoked. Or the requesting
|
||||
unit may be promoted to leader after making a request, in which case the
|
||||
lock may be granted in leader-elected or in a future peers
|
||||
relation-changed or relation-departed hook.
|
||||
|
||||
This could be simpler if leader-settings-changed was invoked on the
|
||||
leader. We could then never grant locks except in
|
||||
leader-settings-changed hooks giving one place for the operation to be
|
||||
performed. Unfortunately this is not the case with Juju 1.23 leadership.
|
||||
|
||||
But of course, this doesn't really matter to most people as most people
|
||||
seem to prefer the Services Framework or similar reset-the-world
|
||||
approaches, rather than the twisty maze of attempting to deduce what
|
||||
should be done based on what hook happens to be running (which always
|
||||
seems to evolve into reset-the-world anyway when the charm grows beyond
|
||||
the trivial).
|
||||
|
||||
I chose not to implement a callback model, where a callback was passed
|
||||
to acquire to be executed when the lock is granted, because the callback
|
||||
may become invalid between making the request and the lock being granted
|
||||
due to an upgrade-charm being run in the interim. And it would create
|
||||
restrictions, such as no lambdas, callbacks defined at the top level of a
|
||||
module, etc. Still, we could implement it on top of what is here, eg.
|
||||
by adding a defer decorator that stores a pickle of itself to disk and
|
||||
have BaseCoordinator unpickle and execute them when the locks are granted.
|
||||
'''
|
||||
from datetime import datetime
|
||||
from functools import wraps
|
||||
import json
|
||||
import os.path
|
||||
|
||||
from six import with_metaclass
|
||||
|
||||
from charmhelpers.core import hookenv
|
||||
|
||||
|
||||
# We make BaseCoordinator and subclasses singletons, so that if we
|
||||
# need to spill to local storage then only a single instance does so,
|
||||
# rather than having multiple instances stomp over each other.
|
||||
class Singleton(type):
    """Metaclass giving every class that uses it a single shared instance.

    The first call constructs and caches the instance; later calls
    return the cached one, ignoring any (new) constructor arguments.
    """
    _instances = {}

    def __call__(cls, *args, **kwargs):
        if cls not in cls._instances:
            instance = super(Singleton, cls).__call__(*args, **kwargs)
            cls._instances[cls] = instance
        return cls._instances[cls]
|
||||
class BaseCoordinator(with_metaclass(Singleton, object)):
    relid = None  # Peer relation-id, set by __init__
    relname = None

    grants = None  # self.grants[unit][lock] == timestamp
    requests = None  # self.requests[unit][lock] == timestamp

    def __init__(self, relation_key='coordinator', peer_relation_name=None):
        '''Instantiate a Coordinator.

        Data is stored on the peers relation and in leadership storage
        under the provided relation_key.

        The peers relation is identified by peer_relation_name, and defaults
        to the first one found in metadata.yaml.
        '''
        # Most initialization is deferred, since invoking hook tools from
        # the constructor makes testing hard.
        self.key = relation_key
        self.relname = peer_relation_name
        hookenv.atstart(self.initialize)

        # Ensure that handle() is called, without placing that burden on
        # the charm author. They still need to do this manually if they
        # are not using a hook framework.
        hookenv.atstart(self.handle)

    def initialize(self):
        if self.requests is not None:
            return  # Already initialized.

        assert hookenv.has_juju_version('1.23'), 'Needs Juju 1.23+'

        if self.relname is None:
            self.relname = _implicit_peer_relation_name()

        relids = hookenv.relation_ids(self.relname)
        if relids:
            self.relid = sorted(relids)[0]

        # Load our state, from leadership, the peer relationship, and maybe
        # local state as a fallback. Populates self.requests and self.grants.
        self._load_state()
        self._emit_state()

        # Save our state if the hook completes successfully.
        hookenv.atexit(self._save_state)

        # Schedule release of granted locks for the end of the hook.
        # This needs to be the last of our atexit callbacks to ensure
        # it will be run first when the hook is complete, because there
        # is no point mutating our state after it has been saved.
        hookenv.atexit(self._release_granted)

    def acquire(self, lock):
        '''Acquire the named lock, non-blocking.

        The lock may be granted immediately, or in a future hook.

        Returns True if the lock has been granted. The lock will be
        automatically released at the end of the hook in which it is
        granted.

        Do not mindlessly call this method, as it triggers a cascade of
        hooks. For example, if you call acquire() every time in your
        peers relation-changed hook you will end up with an infinite loop
        of hooks. It should almost always be guarded by some condition.
        '''
        unit = hookenv.local_unit()
        ts = self.requests[unit].get(lock)
        if not ts:
            # If there is no outstanding request on the peers relation,
            # create one.
            # Fix: setdefault previously keyed on the lock name, which
            # polluted self.requests with lock-name entries that handle()
            # then iterated as if they were units.
            self.requests.setdefault(unit, {})
            self.requests[unit][lock] = _timestamp()
            self.msg('Requested {}'.format(lock))

        # If the leader has granted the lock, yay.
        if self.granted(lock):
            self.msg('Acquired {}'.format(lock))
            return True

        # If the unit making the request also happens to be the
        # leader, it must handle the request now. Even though the
        # request has been stored on the peers relation, the peers
        # relation-changed hook will not be triggered.
        if hookenv.is_leader():
            return self.grant(lock, unit)

        return False  # Can't acquire lock, yet. Maybe next hook.

    def granted(self, lock):
        '''Return True if a previously requested lock has been granted'''
        unit = hookenv.local_unit()
        ts = self.requests[unit].get(lock)
        if ts and self.grants.get(unit, {}).get(lock) == ts:
            return True
        return False

    def requested(self, lock):
        '''Return True if we are in the queue for the lock'''
        return lock in self.requests[hookenv.local_unit()]

    def request_timestamp(self, lock):
        '''Return the timestamp of our outstanding request for lock, or None.

        Returns a datetime.datetime() UTC timestamp, with no tzinfo attribute.
        '''
        ts = self.requests[hookenv.local_unit()].get(lock, None)
        if ts is not None:
            return datetime.strptime(ts, _timestamp_format)

    def handle(self):
        if not hookenv.is_leader():
            return  # Only the leader can grant requests.

        self.msg('Leader handling coordinator requests')

        # Clear our grants that have been released.
        for unit in self.grants.keys():
            for lock, grant_ts in list(self.grants[unit].items()):
                req_ts = self.requests.get(unit, {}).get(lock)
                if req_ts != grant_ts:
                    # The request timestamp does not match the granted
                    # timestamp. Several hooks on 'unit' may have run
                    # before the leader got a chance to make a decision,
                    # and 'unit' may have released its lock and attempted
                    # to reacquire it. This will change the timestamp,
                    # and we correctly revoke the old grant putting it
                    # to the end of the queue.
                    ts = datetime.strptime(self.grants[unit][lock],
                                           _timestamp_format)
                    del self.grants[unit][lock]
                    self.released(unit, lock, ts)

        # Grant locks
        for unit in self.requests.keys():
            for lock in self.requests[unit]:
                self.grant(lock, unit)

    def grant(self, lock, unit):
        '''Maybe grant the lock to a unit.

        The decision to grant the lock or not is made for $lock
        by a corresponding method grant_$lock, which you may define
        in a subclass. If no such method is defined, the default_grant
        method is used. See Serial.default_grant() for details.
        '''
        if not hookenv.is_leader():
            return False  # Not the leader, so we cannot grant.

        # Set of units already granted the lock.
        granted = set()
        for u in self.grants:
            if lock in self.grants[u]:
                granted.add(u)
        if unit in granted:
            return True  # Already granted.

        # Ordered list of units waiting for the lock.
        reqs = set()
        for u in self.requests:
            if u in granted:
                continue  # In the granted set. Not wanted in the req list.
            for _lock, ts in self.requests[u].items():
                if _lock == lock:
                    reqs.add((ts, u))
        queue = [t[1] for t in sorted(reqs)]
        if unit not in queue:
            return False  # Unit has not requested the lock.

        # Locate custom logic, or fallback to the default.
        # NOTE: self.default_grant is expected to be provided by a
        # subclass (e.g. Serial) when no grant_$lock method exists.
        grant_func = getattr(self, 'grant_{}'.format(lock), self.default_grant)

        if grant_func(lock, unit, granted, queue):
            # Grant the lock.
            self.msg('Leader grants {} to {}'.format(lock, unit))
            self.grants.setdefault(unit, {})[lock] = self.requests[unit][lock]
            return True

        return False

    def released(self, unit, lock, timestamp):
        '''Called on the leader when it has released a lock.

        By default, does nothing but log messages. Override if you
        need to perform additional housekeeping when a lock is released,
        for example recording timestamps.
        '''
        interval = _utcnow() - timestamp
        self.msg('Leader released {} from {}, held {}'.format(lock, unit,
                                                              interval))

    def require(self, lock, guard_func, *guard_args, **guard_kw):
        """Decorate a function to be run only when a lock is acquired.

        The lock is requested if the guard function returns True.

        The decorated function is called if the lock has been granted.
        """
        def decorator(f):
            @wraps(f)
            def wrapper(*args, **kw):
                if self.granted(lock):
                    self.msg('Granted {}'.format(lock))
                    return f(*args, **kw)
                if guard_func(*guard_args, **guard_kw) and self.acquire(lock):
                    return f(*args, **kw)
                return None
            return wrapper
        return decorator

    def msg(self, msg):
        '''Emit a message. Override to customize log spam.'''
        hookenv.log('coordinator.{} {}'.format(self._name(), msg),
                    level=hookenv.INFO)

    def _name(self):
        return self.__class__.__name__

    def _load_state(self):
        # Fix: this literal was previously passed through str.format()
        # with an unused argument; the rendered message is unchanged.
        self.msg('Loading state')

        # All responses must be stored in the leadership settings.
        # The leader cannot use local state, as a different unit may
        # be leader next time. Which is fine, as the leadership
        # settings are always available.
        self.grants = json.loads(hookenv.leader_get(self.key) or '{}')

        local_unit = hookenv.local_unit()

        # All requests must be stored on the peers relation. This is
        # the only channel units have to communicate with the leader.
        # Even the leader needs to store its requests here, as a
        # different unit may be leader by the time the request can be
        # granted.
        if self.relid is None:
            # The peers relation is not available. Maybe we are early in
            # the units's lifecycle. Maybe this unit is standalone.
            # Fallback to using local state.
            self.msg('No peer relation. Loading local state')
            self.requests = {local_unit: self._load_local_state()}
        else:
            self.requests = self._load_peer_state()
            if local_unit not in self.requests:
                # The peers relation has just been joined. Update any state
                # loaded from our peers with our local state.
                self.msg('New peer relation. Merging local state')
                self.requests[local_unit] = self._load_local_state()

    def _emit_state(self):
        # Emit this units lock status.
        for lock in sorted(self.requests[hookenv.local_unit()].keys()):
            if self.granted(lock):
                self.msg('Granted {}'.format(lock))
            else:
                self.msg('Waiting on {}'.format(lock))

    def _save_state(self):
        # Fix: literal previously passed through a no-op str.format().
        self.msg('Publishing state')
        if hookenv.is_leader():
            # sort_keys to ensure stability.
            raw = json.dumps(self.grants, sort_keys=True)
            hookenv.leader_set({self.key: raw})

        local_unit = hookenv.local_unit()

        if self.relid is None:
            # No peers relation yet. Fallback to local state.
            self.msg('No peer relation. Saving local state')
            self._save_local_state(self.requests[local_unit])
        else:
            # sort_keys to ensure stability.
            raw = json.dumps(self.requests[local_unit], sort_keys=True)
            hookenv.relation_set(self.relid, relation_settings={self.key: raw})

    def _load_peer_state(self):
        requests = {}
        units = set(hookenv.related_units(self.relid))
        units.add(hookenv.local_unit())
        for unit in units:
            raw = hookenv.relation_get(self.key, unit, self.relid)
            if raw:
                requests[unit] = json.loads(raw)
        return requests

    def _local_state_filename(self):
        # Include the class name. We allow multiple BaseCoordinator
        # subclasses to be instantiated, and they are singletons, so
        # this avoids conflicts (unless someone creates and uses two
        # BaseCoordinator subclasses with the same class name, so don't
        # do that).
        return '.charmhelpers.coordinator.{}'.format(self._name())

    def _load_local_state(self):
        fn = self._local_state_filename()
        if os.path.exists(fn):
            with open(fn, 'r') as f:
                return json.load(f)
        return {}

    def _save_local_state(self, state):
        fn = self._local_state_filename()
        with open(fn, 'w') as f:
            json.dump(state, f)

    def _release_granted(self):
        # At the end of every hook, release all locks granted to
        # this unit. If a hook neglects to make use of what it
        # requested, it will just have to make the request again.
        # Implicit release is the only way this will work, as
        # if the unit is standalone there may be no future triggers
        # called to do a manual release.
        unit = hookenv.local_unit()
        for lock in list(self.requests[unit].keys()):
            if self.granted(lock):
                self.msg('Released local {} lock'.format(lock))
                del self.requests[unit][lock]
||||
class Serial(BaseCoordinator):
    def default_grant(self, lock, unit, granted, queue):
        '''Default logic to grant a lock to a unit. Unless overridden,
        only one unit may hold the lock and it will be granted to the
        earliest queued request.

        To define custom logic for $lock, create a subclass and
        define a grant_$lock method.

        `unit` is the unit name making the request.

        `granted` is the set of units already granted the lock. It will
        never include `unit`. It may be empty.

        `queue` is the list of units waiting for the lock, ordered by time
        of request. It will always include `unit`, but `unit` is not
        necessarily first.

        Returns True if the lock should be granted to `unit`.
        '''
        # Refuse while any other unit holds the lock; otherwise grant
        # only to the head of the queue.
        if granted:
            return False
        return queue[0] == unit
|
||||
def _implicit_peer_relation_name():
    """Return the alphabetically first peer relation in metadata.yaml."""
    md = hookenv.metadata()
    assert 'peers' in md, 'No peer relations in metadata.yaml'
    return min(md['peers'].keys())
|
||||
# A human readable, sortable UTC timestamp format. Fields are
# fixed-width, so lexicographic ordering matches chronological ordering.
_timestamp_format = '%Y-%m-%d %H:%M:%S.%fZ'
|
||||
|
||||
|
||||
def _utcnow(): # pragma: no cover
|
||||
# This wrapper exists as mocking datetime methods is problematic.
|
||||
return datetime.utcnow()
|
||||
|
||||
|
||||
def _timestamp():
    """Return the current UTC time rendered with _timestamp_format."""
    now = _utcnow()
    return now.strftime(_timestamp_format)
|
@ -1,71 +0,0 @@
|
||||
# Copyright 2014-2015 Canonical Limited.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import os
|
||||
import tarfile
|
||||
import zipfile
|
||||
from charmhelpers.core import (
|
||||
host,
|
||||
hookenv,
|
||||
)
|
||||
|
||||
|
||||
class ArchiveError(Exception):
    """Raised when an archive cannot be identified or unpacked."""
    pass
||||
|
||||
|
||||
def get_archive_handler(archive_name):
    """Return the extraction function suited to *archive_name*.

    An existing file is sniffed by content; otherwise the file
    extension decides. Returns None when nothing matches.
    """
    if os.path.isfile(archive_name):
        if tarfile.is_tarfile(archive_name):
            return extract_tarfile
        elif zipfile.is_zipfile(archive_name):
            return extract_zipfile
    else:
        # look at the file name
        # Fix: '.tar.bz2' previously lacked its leading dot, which also
        # matched unrelated names such as 'foo-tar.bz2'.
        for ext in ('.tar', '.tar.gz', '.tgz', '.tar.bz2', '.tbz2', '.tbz'):
            if archive_name.endswith(ext):
                return extract_tarfile
        for ext in ('.zip', '.jar'):
            if archive_name.endswith(ext):
                return extract_zipfile
||||
|
||||
def archive_dest_default(archive_name):
    """Return the default extraction path under the charm directory."""
    return os.path.join(hookenv.charm_dir(), "archives",
                        os.path.basename(archive_name))
|
||||
|
||||
def extract(archive_name, destpath=None):
    """Unpack *archive_name* into *destpath*, creating it if needed.

    When destpath is not given, archive_dest_default() is used.
    Returns the destination path; raises ArchiveError when no handler
    recognises the archive.
    """
    handler = get_archive_handler(archive_name)
    if not handler:
        raise ArchiveError("No handler for archive")
    if not destpath:
        destpath = archive_dest_default(archive_name)
    if not os.path.isdir(destpath):
        host.mkdir(destpath)
    handler(archive_name, destpath)
    return destpath
||||
|
||||
def extract_tarfile(archive_name, destpath):
    """Unpack a tar archive, optionally compressed, into destpath."""
    # Fix: the archive handle was never closed (resource leak).
    # NOTE(review): extractall() on an untrusted archive is vulnerable to
    # path traversal; only use with trusted sources.
    archive = tarfile.open(archive_name)
    try:
        archive.extractall(destpath)
    finally:
        archive.close()
||||
|
||||
def extract_zipfile(archive_name, destpath):
    """Unpack a zip file into destpath."""
    # Fix: the archive handle was never closed (resource leak).
    archive = zipfile.ZipFile(archive_name)
    try:
        archive.extractall(destpath)
    finally:
        archive.close()
|
@ -61,7 +61,7 @@ class SwiftStorageBasicDeployment(OpenStackAmuletDeployment):
|
||||
"""
|
||||
this_service = {'name': 'swift-storage'}
|
||||
other_services = [
|
||||
{'name': 'percona-cluster', 'constraints': {'mem': '3072M'}},
|
||||
self.get_percona_service_entry(),
|
||||
{'name': 'keystone'},
|
||||
{'name': 'glance'},
|
||||
{'name': 'swift-proxy'}
|
||||
|
2
tox.ini
2
tox.ini
@ -94,7 +94,7 @@ basepython = python2.7
|
||||
deps = -r{toxinidir}/requirements.txt
|
||||
-r{toxinidir}/test-requirements.txt
|
||||
commands =
|
||||
bundletester -vl DEBUG -r json -o func-results.json gate-basic-bionic-queens --no-destroy
|
||||
bundletester -vl DEBUG -r json -o func-results.json gate-basic-bionic-stein --no-destroy
|
||||
|
||||
[testenv:func27-dfs]
|
||||
# Charm Functional Test
|
||||
|
Loading…
Reference in New Issue
Block a user