[corey.bryant,r=trivial] Sync charm-helpers.
parent aaeb8bca28
commit dbcf9b3d4f
@@ -20,7 +20,7 @@ import sys
 
 from six.moves import zip
 
-from charmhelpers.core import unitdata
+import charmhelpers.core.unitdata
 
 
 class OutputFormatter(object):
@@ -163,8 +163,8 @@ class CommandLine(object):
         if getattr(arguments.func, '_cli_no_output', False):
             output = ''
         self.formatter.format_output(output, arguments.format)
-        if unitdata._KV:
-            unitdata._KV.flush()
+        if charmhelpers.core.unitdata._KV:
+            charmhelpers.core.unitdata._KV.flush()
 
 
 cmdline = CommandLine()
@@ -53,7 +53,7 @@ def _validate_cidr(network):
 
 
 def no_ip_found_error_out(network):
-    errmsg = ("No IP address found in network: %s" % network)
+    errmsg = ("No IP address found in network(s): %s" % network)
     raise ValueError(errmsg)
 
 
@@ -61,7 +61,7 @@ def get_address_in_network(network, fallback=None, fatal=False):
     """Get an IPv4 or IPv6 address within the network from the host.
 
     :param network (str): CIDR presentation format. For example,
-        '192.168.1.0/24'.
+        '192.168.1.0/24'. Supports multiple networks as a space-delimited list.
     :param fallback (str): If no address is found, return fallback.
     :param fatal (boolean): If no address is found, fallback is not
         set and fatal is True then exit(1).
@@ -75,24 +75,26 @@ def get_address_in_network(network, fallback=None, fatal=False):
         else:
             return None
 
-    _validate_cidr(network)
-    network = netaddr.IPNetwork(network)
-    for iface in netifaces.interfaces():
-        addresses = netifaces.ifaddresses(iface)
-        if network.version == 4 and netifaces.AF_INET in addresses:
-            addr = addresses[netifaces.AF_INET][0]['addr']
-            netmask = addresses[netifaces.AF_INET][0]['netmask']
-            cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask))
-            if cidr in network:
-                return str(cidr.ip)
-
-        if network.version == 6 and netifaces.AF_INET6 in addresses:
-            for addr in addresses[netifaces.AF_INET6]:
-                if not addr['addr'].startswith('fe80'):
-                    cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'],
-                                                        addr['netmask']))
-                    if cidr in network:
-                        return str(cidr.ip)
+    networks = network.split() or [network]
+    for network in networks:
+        _validate_cidr(network)
+        network = netaddr.IPNetwork(network)
+        for iface in netifaces.interfaces():
+            addresses = netifaces.ifaddresses(iface)
+            if network.version == 4 and netifaces.AF_INET in addresses:
+                addr = addresses[netifaces.AF_INET][0]['addr']
+                netmask = addresses[netifaces.AF_INET][0]['netmask']
+                cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask))
+                if cidr in network:
+                    return str(cidr.ip)
+
+            if network.version == 6 and netifaces.AF_INET6 in addresses:
+                for addr in addresses[netifaces.AF_INET6]:
+                    if not addr['addr'].startswith('fe80'):
+                        cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'],
+                                                            addr['netmask']))
+                        if cidr in network:
+                            return str(cidr.ip)
 
     if fallback is not None:
         return fallback

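[Note: illustration, not part of the diff. With this change `network` may be a space-delimited list of CIDRs; each is validated and scanned in turn, and the first local interface address falling inside any of them is returned. A minimal usage sketch, assuming a host that actually holds such an address:

    from charmhelpers.contrib.network.ip import get_address_in_network

    # Single network, as before:
    addr = get_address_in_network('192.168.1.0/24')

    # New: several candidate networks, tried in order; falls back to the
    # given address if no interface matches any of them.
    addr = get_address_in_network('192.168.1.0/24 2001:db8::/64',
                                  fallback='127.0.0.1')
]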
@@ -14,12 +14,18 @@
 # You should have received a copy of the GNU Lesser General Public License
 # along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.
 
+import logging
+import re
+import sys
 import six
 from collections import OrderedDict
 from charmhelpers.contrib.amulet.deployment import (
     AmuletDeployment
 )
 
+DEBUG = logging.DEBUG
+ERROR = logging.ERROR
+
 
 class OpenStackAmuletDeployment(AmuletDeployment):
     """OpenStack amulet deployment.
@@ -28,9 +34,12 @@ class OpenStackAmuletDeployment(AmuletDeployment):
     that is specifically for use by OpenStack charms.
     """
 
-    def __init__(self, series=None, openstack=None, source=None, stable=True):
+    def __init__(self, series=None, openstack=None, source=None,
+                 stable=True, log_level=DEBUG):
         """Initialize the deployment environment."""
         super(OpenStackAmuletDeployment, self).__init__(series)
+        self.log = self.get_logger(level=log_level)
+        self.log.info('OpenStackAmuletDeployment:  init')
         self.openstack = openstack
         self.source = source
         self.stable = stable
@@ -38,6 +47,22 @@ class OpenStackAmuletDeployment(AmuletDeployment):
         # out.
         self.current_next = "trusty"
 
+    def get_logger(self, name="deployment-logger", level=logging.DEBUG):
+        """Get a logger object that will log to stdout."""
+        log = logging
+        logger = log.getLogger(name)
+        fmt = log.Formatter("%(asctime)s %(funcName)s "
+                            "%(levelname)s: %(message)s")
+
+        handler = log.StreamHandler(stream=sys.stdout)
+        handler.setLevel(level)
+        handler.setFormatter(fmt)
+
+        logger.addHandler(handler)
+        logger.setLevel(level)
+
+        return logger
+
     def _determine_branch_locations(self, other_services):
         """Determine the branch locations for the other services.
 
@@ -45,6 +70,8 @@ class OpenStackAmuletDeployment(AmuletDeployment):
         stable or next (dev) branch, and based on this, use the corresonding
         stable or next branches for the other_services."""
 
+        self.log.info('OpenStackAmuletDeployment:  determine branch locations')
+
         # Charms outside the lp:~openstack-charmers namespace
         base_charms = ['mysql', 'mongodb', 'nrpe']
 
@@ -82,6 +109,8 @@ class OpenStackAmuletDeployment(AmuletDeployment):
 
     def _add_services(self, this_service, other_services):
         """Add services to the deployment and set openstack-origin/source."""
+        self.log.info('OpenStackAmuletDeployment:  adding services')
+
         other_services = self._determine_branch_locations(other_services)
 
         super(OpenStackAmuletDeployment, self)._add_services(this_service,
@@ -95,7 +124,8 @@ class OpenStackAmuletDeployment(AmuletDeployment):
                     'ceph-osd', 'ceph-radosgw']
 
         # Charms which can not use openstack-origin, ie. many subordinates
-        no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe']
+        no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe',
+                     'openvswitch-odl', 'neutron-api-odl', 'odl-controller']
 
         if self.openstack:
             for svc in services:
@@ -111,9 +141,79 @@ class OpenStackAmuletDeployment(AmuletDeployment):
 
     def _configure_services(self, configs):
         """Configure all of the services."""
+        self.log.info('OpenStackAmuletDeployment:  configure services')
         for service, config in six.iteritems(configs):
             self.d.configure(service, config)
 
+    def _auto_wait_for_status(self, message=None, exclude_services=None,
+                              include_only=None, timeout=1800):
+        """Wait for all units to have a specific extended status, except
+        for any defined as excluded.  Unless specified via message, any
+        status containing any case of 'ready' will be considered a match.
+
+        Examples of message usage:
+
+          Wait for all unit status to CONTAIN any case of 'ready' or 'ok':
+              message = re.compile('.*ready.*|.*ok.*', re.IGNORECASE)
+
+          Wait for all units to reach this status (exact match):
+              message = re.compile('^Unit is ready and clustered$')
+
+          Wait for all units to reach any one of these (exact match):
+              message = re.compile('Unit is ready|OK|Ready')
+
+          Wait for at least one unit to reach this status (exact match):
+              message = {'ready'}
+
+        See Amulet's sentry.wait_for_messages() for message usage detail.
+        https://github.com/juju/amulet/blob/master/amulet/sentry.py
+
+        :param message: Expected status match
+        :param exclude_services: List of juju service names to ignore,
+            not to be used in conjuction with include_only.
+        :param include_only: List of juju service names to exclusively check,
+            not to be used in conjuction with exclude_services.
+        :param timeout: Maximum time in seconds to wait for status match
+        :returns: None.  Raises if timeout is hit.
+        """
+        self.log.info('Waiting for extended status on units...')
+
+        all_services = self.d.services.keys()
+
+        if exclude_services and include_only:
+            raise ValueError('exclude_services can not be used '
+                             'with include_only')
+
+        if message:
+            if isinstance(message, re._pattern_type):
+                match = message.pattern
+            else:
+                match = message
+
+            self.log.debug('Custom extended status wait match: '
+                           '{}'.format(match))
+        else:
+            self.log.debug('Default extended status wait match:  contains '
+                           'READY (case-insensitive)')
+            message = re.compile('.*ready.*', re.IGNORECASE)
+
+        if exclude_services:
+            self.log.debug('Excluding services from extended status match: '
+                           '{}'.format(exclude_services))
+        else:
+            exclude_services = []
+
+        if include_only:
+            services = include_only
+        else:
+            services = list(set(all_services) - set(exclude_services))
+
+        self.log.debug('Waiting up to {}s for extended status on services: '
+                       '{}'.format(timeout, services))
+        service_messages = {service: message for service in services}
+        self.d.sentry.wait_for_messages(service_messages, timeout=timeout)
+        self.log.info('OK')
+
     def _get_openstack_release(self):
         """Get openstack release.
 
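[Note: usage sketch, not part of the diff. The new `_auto_wait_for_status()` is meant to be driven from a charm's amulet test; `MyCharmDeployment` below is a hypothetical subclass:

    import re

    from charmhelpers.contrib.openstack.amulet.deployment import (
        OpenStackAmuletDeployment
    )


    class MyCharmDeployment(OpenStackAmuletDeployment):  # hypothetical
        def _wait_for_ready(self):
            # Default: wait for every unit to report a status containing
            # 'ready' (case-insensitive), excluding the nrpe subordinate.
            self._auto_wait_for_status(exclude_services=['nrpe'])

            # Exact-match wait on one service only, with a shorter timeout:
            self._auto_wait_for_status(
                message=re.compile('^Unit is ready$'),
                include_only=['rabbitmq-server'],
                timeout=900)
]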
@@ -125,7 +225,8 @@ class OpenStackAmuletDeployment(AmuletDeployment):
          self.precise_havana, self.precise_icehouse,
          self.trusty_icehouse, self.trusty_juno, self.utopic_juno,
          self.trusty_kilo, self.vivid_kilo, self.trusty_liberty,
-         self.wily_liberty) = range(12)
+         self.wily_liberty, self.trusty_mitaka,
+         self.xenial_mitaka) = range(14)
 
         releases = {
             ('precise', None): self.precise_essex,
@@ -137,9 +238,11 @@ class OpenStackAmuletDeployment(AmuletDeployment):
             ('trusty', 'cloud:trusty-juno'): self.trusty_juno,
             ('trusty', 'cloud:trusty-kilo'): self.trusty_kilo,
             ('trusty', 'cloud:trusty-liberty'): self.trusty_liberty,
+            ('trusty', 'cloud:trusty-mitaka'): self.trusty_mitaka,
             ('utopic', None): self.utopic_juno,
             ('vivid', None): self.vivid_kilo,
-            ('wily', None): self.wily_liberty}
+            ('wily', None): self.wily_liberty,
+            ('xenial', None): self.xenial_mitaka}
         return releases[(self.series, self.openstack)]
 
     def _get_openstack_release_string(self):
@@ -156,6 +259,7 @@ class OpenStackAmuletDeployment(AmuletDeployment):
             ('utopic', 'juno'),
             ('vivid', 'kilo'),
             ('wily', 'liberty'),
+            ('xenial', 'mitaka'),
         ])
         if self.openstack:
             os_origin = self.openstack.split(':')[1]

@@ -18,6 +18,7 @@ import amulet
 import json
 import logging
 import os
+import re
 import six
 import time
 import urllib
@@ -604,7 +605,22 @@ class OpenStackAmuletUtils(AmuletUtils):
                            '{}'.format(sample_type, samples))
         return None
 
     # rabbitmq/amqp specific helpers:
+
+    def rmq_wait_for_cluster(self, deployment, init_sleep=15, timeout=1200):
+        """Wait for rmq units extended status to show cluster readiness,
+        after an optional initial sleep period.  Initial sleep is likely
+        necessary to be effective following a config change, as status
+        message may not instantly update to non-ready."""
+
+        if init_sleep:
+            time.sleep(init_sleep)
+
+        message = re.compile('^Unit is ready and clustered$')
+        deployment._auto_wait_for_status(message=message,
+                                         timeout=timeout,
+                                         include_only=['rabbitmq-server'])
+
     def add_rmq_test_user(self, sentry_units,
                           username="testuser1", password="changeme"):
         """Add a test user via the first rmq juju unit, check connection as
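[Note: illustration, not part of the diff. `rmq_wait_for_cluster()` wraps the deployment-level wait for the rabbitmq charm's "Unit is ready and clustered" status; a sketch, assuming `deployment` is an OpenStackAmuletDeployment whose topology includes a rabbitmq-server service:

    from charmhelpers.contrib.openstack.amulet.utils import (
        OpenStackAmuletUtils,
    )


    def wait_for_rmq(deployment):
        u = OpenStackAmuletUtils()
        # Sleep briefly so a fresh config change can flip the status to
        # non-ready before polling starts, then block up to 20 minutes.
        u.rmq_wait_for_cluster(deployment, init_sleep=15, timeout=1200)
]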
@@ -752,7 +768,7 @@ class OpenStackAmuletUtils(AmuletUtils):
             self.log.debug('SSL is enabled  @{}:{} '
                            '({})'.format(host, port, unit_name))
             return True
-        elif not port and not conf_ssl:
+        elif not conf_ssl:
             self.log.debug('SSL not enabled @{}:{} '
                            '({})'.format(host, port, unit_name))
             return False
@@ -805,7 +821,10 @@ class OpenStackAmuletUtils(AmuletUtils):
         if port:
             config['ssl_port'] = port
 
-        deployment.configure('rabbitmq-server', config)
+        deployment.d.configure('rabbitmq-server', config)
+
+        # Wait for unit status
+        self.rmq_wait_for_cluster(deployment)
 
         # Confirm
         tries = 0
@@ -832,7 +851,10 @@ class OpenStackAmuletUtils(AmuletUtils):
 
         # Disable RMQ SSL
         config = {'ssl': 'off'}
-        deployment.configure('rabbitmq-server', config)
+        deployment.d.configure('rabbitmq-server', config)
+
+        # Wait for unit status
+        self.rmq_wait_for_cluster(deployment)
 
         # Confirm
         tries = 0

@@ -626,6 +626,12 @@ class HAProxyContext(OSContextGenerator):
         if config('haproxy-client-timeout'):
             ctxt['haproxy_client_timeout'] = config('haproxy-client-timeout')
 
+        if config('haproxy-queue-timeout'):
+            ctxt['haproxy_queue_timeout'] = config('haproxy-queue-timeout')
+
+        if config('haproxy-connect-timeout'):
+            ctxt['haproxy_connect_timeout'] = config('haproxy-connect-timeout')
+
         if config('prefer-ipv6'):
             ctxt['ipv6'] = True
             ctxt['local_host'] = 'ip6-localhost'
@@ -952,6 +958,19 @@ class NeutronContext(OSContextGenerator):
                     'config': config}
         return ovs_ctxt
 
+    def midonet_ctxt(self):
+        driver = neutron_plugin_attribute(self.plugin, 'driver',
+                                          self.network_manager)
+        midonet_config = neutron_plugin_attribute(self.plugin, 'config',
+                                                  self.network_manager)
+        mido_ctxt = {'core_plugin': driver,
+                     'neutron_plugin': 'midonet',
+                     'neutron_security_groups': self.neutron_security_groups,
+                     'local_ip': unit_private_ip(),
+                     'config': midonet_config}
+
+        return mido_ctxt
+
     def __call__(self):
         if self.network_manager not in ['quantum', 'neutron']:
             return {}
@@ -973,6 +992,8 @@ class NeutronContext(OSContextGenerator):
             ctxt.update(self.nuage_ctxt())
         elif self.plugin == 'plumgrid':
             ctxt.update(self.pg_ctxt())
+        elif self.plugin == 'midonet':
+            ctxt.update(self.midonet_ctxt())
 
         alchemy_flags = config('neutron-alchemy-flags')
         if alchemy_flags:
@@ -1073,6 +1094,20 @@ class OSConfigFlagContext(OSContextGenerator):
                 config_flags_parser(config_flags)}
 
 
+class LibvirtConfigFlagsContext(OSContextGenerator):
+    """
+    This context provides support for extending
+    the libvirt section through user-defined flags.
+    """
+    def __call__(self):
+        ctxt = {}
+        libvirt_flags = config('libvirt-flags')
+        if libvirt_flags:
+            ctxt['libvirt_flags'] = config_flags_parser(
+                libvirt_flags)
+        return ctxt
+
+
 class SubordinateConfigContext(OSContextGenerator):
 
     """
@@ -1105,7 +1140,7 @@ class SubordinateConfigContext(OSContextGenerator):
 
         ctxt = {
             ... other context ...
-            'subordinate_config': {
+            'subordinate_configuration': {
                 'DEFAULT': {
                     'key1': 'value1',
                 },
@@ -1146,22 +1181,23 @@ class SubordinateConfigContext(OSContextGenerator):
                 try:
                     sub_config = json.loads(sub_config)
                 except:
-                    log('Could not parse JSON from subordinate_config '
-                        'setting from %s' % rid, level=ERROR)
+                    log('Could not parse JSON from '
+                        'subordinate_configuration setting from %s'
+                        % rid, level=ERROR)
                     continue
 
                 for service in self.services:
                     if service not in sub_config:
-                        log('Found subordinate_config on %s but it contained'
-                            'nothing for %s service' % (rid, service),
-                            level=INFO)
+                        log('Found subordinate_configuration on %s but it '
+                            'contained nothing for %s service'
+                            % (rid, service), level=INFO)
                         continue
 
                     sub_config = sub_config[service]
                     if self.config_file not in sub_config:
-                        log('Found subordinate_config on %s but it contained'
-                            'nothing for %s' % (rid, self.config_file),
-                            level=INFO)
+                        log('Found subordinate_configuration on %s but it '
+                            'contained nothing for %s'
+                            % (rid, self.config_file), level=INFO)
                         continue
 
                     sub_config = sub_config[self.config_file]

@@ -9,15 +9,17 @@
 CRITICAL=0
 NOTACTIVE=''
 LOGFILE=/var/log/nagios/check_haproxy.log
-AUTH=$(grep -r "stats auth" /etc/haproxy | head -1 | awk '{print $4}')
+AUTH=$(grep -r "stats auth" /etc/haproxy | awk 'NR=1{print $4}')
 
-for appserver in $(grep ' server' /etc/haproxy/haproxy.cfg | awk '{print $2'});
+typeset -i N_INSTANCES=0
+for appserver in $(awk '/^\s+server/{print $2}' /etc/haproxy/haproxy.cfg)
 do
-    output=$(/usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 --regex="class=\"(active|backup)(2|3).*${appserver}" -e ' 200 OK')
+    N_INSTANCES=N_INSTANCES+1
+    output=$(/usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 -u '/;csv' --regex=",${appserver},.*,UP.*" -e ' 200 OK')
     if [ $? != 0 ]; then
        date >> $LOGFILE
        echo $output >> $LOGFILE
-       /usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 -v | grep $appserver >> $LOGFILE 2>&1
+       /usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 -u '/;csv' -v | grep ",${appserver}," >> $LOGFILE 2>&1
        CRITICAL=1
        NOTACTIVE="${NOTACTIVE} $appserver"
     fi
@@ -28,5 +30,5 @@ if [ $CRITICAL = 1 ]; then
     exit 2
 fi
 
-echo "OK: All haproxy instances looking good"
+echo "OK: All haproxy instances ($N_INSTANCES) looking good"
 exit 0

@@ -204,11 +204,25 @@ def neutron_plugins():
                                         database=config('database'),
                                         ssl_dir=NEUTRON_CONF_DIR)],
            'services': [],
-            'packages': [['plumgrid-lxc'],
-                         ['iovisor-dkms']],
+            'packages': ['plumgrid-lxc',
+                         'iovisor-dkms'],
            'server_packages': ['neutron-server',
                                'neutron-plugin-plumgrid'],
            'server_services': ['neutron-server']
+        },
+        'midonet': {
+            'config': '/etc/neutron/plugins/midonet/midonet.ini',
+            'driver': 'midonet.neutron.plugin.MidonetPluginV2',
+            'contexts': [
+                context.SharedDBContext(user=config('neutron-database-user'),
+                                        database=config('neutron-database'),
+                                        relation_prefix='neutron',
+                                        ssl_dir=NEUTRON_CONF_DIR)],
+            'services': [],
+            'packages': [[headers_package()] + determine_dkms_package()],
+            'server_packages': ['neutron-server',
+                                'python-neutron-plugin-midonet'],
+            'server_services': ['neutron-server']
         }
     }
     if release >= 'icehouse':

@@ -12,19 +12,26 @@ defaults
     option tcplog
     option dontlognull
     retries 3
-    timeout queue 1000
-    timeout connect 1000
-{% if haproxy_client_timeout -%}
+{%- if haproxy_queue_timeout %}
+    timeout queue {{ haproxy_queue_timeout }}
+{%- else %}
+    timeout queue 5000
+{%- endif %}
+{%- if haproxy_connect_timeout %}
+    timeout connect {{ haproxy_connect_timeout }}
+{%- else %}
+    timeout connect 5000
+{%- endif %}
+{%- if haproxy_client_timeout %}
     timeout client {{ haproxy_client_timeout }}
-{% else -%}
+{%- else %}
     timeout client 30000
-{% endif -%}
-
-{% if haproxy_server_timeout -%}
+{%- endif %}
+{%- if haproxy_server_timeout %}
     timeout server {{ haproxy_server_timeout }}
-{% else -%}
+{%- else %}
     timeout server 30000
-{% endif -%}
+{%- endif %}
 
 listen stats {{ stat_port }}
     mode http

@@ -26,6 +26,7 @@ import re
 
 import six
 import traceback
+import uuid
 import yaml
 
 from charmhelpers.contrib.network import ip
@@ -41,6 +42,7 @@ from charmhelpers.core.hookenv import (
     log as juju_log,
     charm_dir,
     INFO,
+    related_units,
     relation_ids,
     relation_set,
     status_set,
@@ -84,6 +86,7 @@ UBUNTU_OPENSTACK_RELEASE = OrderedDict([
     ('utopic', 'juno'),
     ('vivid', 'kilo'),
     ('wily', 'liberty'),
+    ('xenial', 'mitaka'),
 ])
 
 
@@ -97,6 +100,7 @@ OPENSTACK_CODENAMES = OrderedDict([
     ('2014.2', 'juno'),
     ('2015.1', 'kilo'),
     ('2015.2', 'liberty'),
+    ('2016.1', 'mitaka'),
 ])
 
 # The ugly duckling
@@ -121,36 +125,46 @@ SWIFT_CODENAMES = OrderedDict([
     ('2.2.2', 'kilo'),
     ('2.3.0', 'liberty'),
     ('2.4.0', 'liberty'),
+    ('2.5.0', 'liberty'),
 ])
 
 # >= Liberty version->codename mapping
 PACKAGE_CODENAMES = {
     'nova-common': OrderedDict([
         ('12.0.0', 'liberty'),
+        ('13.0.0', 'mitaka'),
     ]),
     'neutron-common': OrderedDict([
         ('7.0.0', 'liberty'),
+        ('8.0.0', 'mitaka'),
     ]),
     'cinder-common': OrderedDict([
         ('7.0.0', 'liberty'),
+        ('8.0.0', 'mitaka'),
     ]),
     'keystone': OrderedDict([
         ('8.0.0', 'liberty'),
+        ('9.0.0', 'mitaka'),
     ]),
     'horizon-common': OrderedDict([
        ('8.0.0', 'liberty'),
+        ('9.0.0', 'mitaka'),
     ]),
     'ceilometer-common': OrderedDict([
         ('5.0.0', 'liberty'),
+        ('6.0.0', 'mitaka'),
     ]),
     'heat-common': OrderedDict([
         ('5.0.0', 'liberty'),
+        ('6.0.0', 'mitaka'),
     ]),
     'glance-common': OrderedDict([
         ('11.0.0', 'liberty'),
+        ('12.0.0', 'mitaka'),
     ]),
     'openstack-dashboard': OrderedDict([
         ('8.0.0', 'liberty'),
+        ('9.0.0', 'mitaka'),
     ]),
 }
 
@@ -374,6 +388,9 @@ def configure_installation_source(rel):
         'liberty': 'trusty-updates/liberty',
         'liberty/updates': 'trusty-updates/liberty',
         'liberty/proposed': 'trusty-proposed/liberty',
+        'mitaka': 'trusty-updates/mitaka',
+        'mitaka/updates': 'trusty-updates/mitaka',
+        'mitaka/proposed': 'trusty-proposed/mitaka',
     }
 
     try:
@@ -858,7 +875,9 @@ def set_os_workload_status(configs, required_interfaces, charm_func=None):
             if charm_state != 'active' and charm_state != 'unknown':
                 state = workload_state_compare(state, charm_state)
                 if message:
-                    message = "{} {}".format(message, charm_message)
+                    charm_message = charm_message.replace("Incomplete relations: ",
+                                                          "")
+                    message = "{}, {}".format(message, charm_message)
                 else:
                     message = charm_message
 
@@ -975,3 +994,19 @@ def do_action_openstack_upgrade(package, upgrade_callback, configs):
             action_set({'outcome': 'no upgrade available.'})
 
     return ret
+
+
+def remote_restart(rel_name, remote_service=None):
+    trigger = {
+        'restart-trigger': str(uuid.uuid4()),
+    }
+    if remote_service:
+        trigger['remote-service'] = remote_service
+    for rid in relation_ids(rel_name):
+        # This subordinate can be related to two seperate services using
+        # different subordinate relations so only issue the restart if
+        # the principle is conencted down the relation we think it is
+        if related_units(relid=rid):
+            relation_set(relation_id=rid,
+                         relation_settings=trigger,
+                         )

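[Note: usage sketch, not part of the diff. `remote_restart()` nudges the principal charm on the far side of a subordinate relation by setting a fresh restart-trigger uuid on the relation; the relation and service names here are illustrative:

    from charmhelpers.contrib.openstack.utils import remote_restart

    # e.g. from a subordinate's config-changed hook, after rewriting a
    # config file that the principal service consumes:
    remote_restart('neutron-plugin', remote_service='neutron-server')
]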
@@ -42,8 +42,12 @@ def parse_options(given, available):
         yield "--{0}={1}".format(key, value)
 
 
-def pip_install_requirements(requirements, **options):
-    """Install a requirements file """
+def pip_install_requirements(requirements, constraints=None, **options):
+    """Install a requirements file.
+
+    :param constraints: Path to pip constraints file.
+    http://pip.readthedocs.org/en/stable/user_guide/#constraints-files
+    """
     command = ["install"]
 
     available_options = ('proxy', 'src', 'log', )
@@ -51,8 +55,13 @@ def pip_install_requirements(requirements, **options):
         command.append(option)
 
     command.append("-r {0}".format(requirements))
-    log("Installing from file: {} with options: {}".format(requirements,
-                                                           command))
+    if constraints:
+        command.append("-c {0}".format(constraints))
+        log("Installing from file: {} with constraints {} "
+            "and options: {}".format(requirements, constraints, command))
+    else:
+        log("Installing from file: {} with options: {}".format(requirements,
+                                                               command))
     pip_execute(command)
 
 
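[Note: usage sketch, not part of the diff. The new `constraints` argument maps to pip's `-c` flag, pinning transitive dependencies without adding them to the requirements proper; paths and proxy are illustrative:

    from charmhelpers.contrib.python.packages import pip_install_requirements

    pip_install_requirements('/tmp/requirements.txt',
                             constraints='/tmp/constraints.txt',
                             proxy='http://proxy.example.com:3128')
]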
@@ -23,6 +23,8 @@
 #  James Page <james.page@ubuntu.com>
 #  Adam Gandelman <adamg@ubuntu.com>
 #
+import bisect
+import six
 
 import os
 import shutil
@@ -72,6 +74,394 @@ log to syslog = {use_syslog}
 err to syslog = {use_syslog}
 clog to syslog = {use_syslog}
 """
+# For 50 < osds < 240,000 OSDs (Roughly 1 Exabyte at 6T OSDs)
+powers_of_two = [8192, 16384, 32768, 65536, 131072, 262144, 524288, 1048576, 2097152, 4194304, 8388608]
+
+
+def validator(value, valid_type, valid_range=None):
+    """
+    Used to validate these: http://docs.ceph.com/docs/master/rados/operations/pools/#set-pool-values
+    Example input:
+        validator(value=1,
+                  valid_type=int,
+                  valid_range=[0, 2])
+    This says I'm testing value=1.  It must be an int inclusive in [0,2]
+
+    :param value: The value to validate
+    :param valid_type: The type that value should be.
+    :param valid_range: A range of values that value can assume.
+    :return:
+    """
+    assert isinstance(value, valid_type), "{} is not a {}".format(
+        value,
+        valid_type)
+    if valid_range is not None:
+        assert isinstance(valid_range, list), \
+            "valid_range must be a list, was given {}".format(valid_range)
+        # If we're dealing with strings
+        if valid_type is six.string_types:
+            assert value in valid_range, \
+                "{} is not in the list {}".format(value, valid_range)
+        # Integer, float should have a min and max
+        else:
+            if len(valid_range) != 2:
+                raise ValueError(
+                    "Invalid valid_range list of {} for {}. "
+                    "List must be [min,max]".format(valid_range, value))
+            assert value >= valid_range[0], \
+                "{} is less than minimum allowed value of {}".format(
+                    value, valid_range[0])
+            assert value <= valid_range[1], \
+                "{} is greater than maximum allowed value of {}".format(
+                    value, valid_range[1])
+
+
+class PoolCreationError(Exception):
+    """
+    A custom error to inform the caller that a pool creation failed.  Provides an error message
+    """
+    def __init__(self, message):
+        super(PoolCreationError, self).__init__(message)
+
+
+class Pool(object):
+    """
+    An object oriented approach to Ceph pool creation. This base class is inherited by ReplicatedPool and ErasurePool.
+    Do not call create() on this base class as it will not do anything.  Instantiate a child class and call create().
+    """
+    def __init__(self, service, name):
+        self.service = service
+        self.name = name
+
+    # Create the pool if it doesn't exist already
+    # To be implemented by subclasses
+    def create(self):
+        pass
+
+    def add_cache_tier(self, cache_pool, mode):
+        """
+        Adds a new cache tier to an existing pool.
+        :param cache_pool: six.string_types.  The cache tier pool name to add.
+        :param mode: six.string_types.  The caching mode to use for this pool.  valid range = ["readonly", "writeback"]
+        :return: None
+        """
+        # Check the input types and values
+        validator(value=cache_pool, valid_type=six.string_types)
+        validator(value=mode, valid_type=six.string_types, valid_range=["readonly", "writeback"])
+
+        check_call(['ceph', '--id', self.service, 'osd', 'tier', 'add', self.name, cache_pool])
+        check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, mode])
+        check_call(['ceph', '--id', self.service, 'osd', 'tier', 'set-overlay', self.name, cache_pool])
+        check_call(['ceph', '--id', self.service, 'osd', 'pool', 'set', cache_pool, 'hit_set_type', 'bloom'])
+
+    def remove_cache_tier(self, cache_pool):
+        """
+        Removes a cache tier from Ceph.  Flushes all dirty objects from writeback pools and waits for that to complete.
+        :param cache_pool: six.string_types.  The cache tier pool name to remove.
+        :return: None
+        """
+        # read-only is easy, writeback is much harder
+        mode = get_cache_mode(cache_pool)
+        if mode == 'readonly':
+            check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, 'none'])
+            check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool])
+
+        elif mode == 'writeback':
+            check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, 'forward'])
+            # Flush the cache and wait for it to return
+            check_call(['ceph', '--id', self.service, '-p', cache_pool, 'cache-flush-evict-all'])
+            check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove-overlay', self.name])
+            check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool])
+
+    def get_pgs(self, pool_size):
+        """
+        :param pool_size: int. pool_size is either the number of replicas for replicated pools or the K+M sum for
+            erasure coded pools
+        :return: int.  The number of pgs to use.
+        """
+        validator(value=pool_size, valid_type=int)
+        osds = get_osds(self.service)
+        if not osds:
+            # NOTE(james-page): Default to 200 for older ceph versions
+            # which don't support OSD query from cli
+            return 200
+
+        # Calculate based on Ceph best practices
+        if osds < 5:
+            return 128
+        elif 5 < osds < 10:
+            return 512
+        elif 10 < osds < 50:
+            return 4096
+        else:
+            estimate = (osds * 100) / pool_size
+            # Return the next nearest power of 2
+            index = bisect.bisect_right(powers_of_two, estimate)
+            return powers_of_two[index]
+
+
+class ReplicatedPool(Pool):
+    def __init__(self, service, name, replicas=2):
+        super(ReplicatedPool, self).__init__(service=service, name=name)
+        self.replicas = replicas
+
+    def create(self):
+        if not pool_exists(self.service, self.name):
+            # Create it
+            pgs = self.get_pgs(self.replicas)
+            cmd = ['ceph', '--id', self.service, 'osd', 'pool', 'create', self.name, str(pgs)]
+            try:
+                check_call(cmd)
+            except CalledProcessError:
+                raise
+
+
+# Default jerasure erasure coded pool
+class ErasurePool(Pool):
+    def __init__(self, service, name, erasure_code_profile="default"):
+        super(ErasurePool, self).__init__(service=service, name=name)
+        self.erasure_code_profile = erasure_code_profile
+
+    def create(self):
+        if not pool_exists(self.service, self.name):
+            # Try to find the erasure profile information so we can properly size the pgs
+            erasure_profile = get_erasure_profile(service=self.service, name=self.erasure_code_profile)
+
+            # Check for errors
+            if erasure_profile is None:
+                log(message='Failed to discover erasure_profile named={}'.format(self.erasure_code_profile),
+                    level=ERROR)
+                raise PoolCreationError(message='unable to find erasure profile {}'.format(self.erasure_code_profile))
+            if 'k' not in erasure_profile or 'm' not in erasure_profile:
+                # Error
+                log(message='Unable to find k (data chunks) or m (coding chunks) in {}'.format(erasure_profile),
+                    level=ERROR)
+                raise PoolCreationError(
+                    message='unable to find k (data chunks) or m (coding chunks) in {}'.format(erasure_profile))
+
+            pgs = self.get_pgs(int(erasure_profile['k']) + int(erasure_profile['m']))
+            # Create it
+            cmd = ['ceph', '--id', self.service, 'osd', 'pool', 'create', self.name, str(pgs),
+                   'erasure', self.erasure_code_profile]
+            try:
+                check_call(cmd)
+            except CalledProcessError:
+                raise
+
+    """Get an existing erasure code profile if it already exists.
+       Returns json formatted output"""
+
+
+def get_erasure_profile(service, name):
+    """
+    :param service: six.string_types. The Ceph user name to run the command under
+    :param name:
+    :return:
+    """
+    try:
+        out = check_output(['ceph', '--id', service,
+                            'osd', 'erasure-code-profile', 'get',
+                            name, '--format=json'])
+        return json.loads(out)
+    except (CalledProcessError, OSError, ValueError):
+        return None
+
+
+def pool_set(service, pool_name, key, value):
+    """
+    Sets a value for a RADOS pool in ceph.
+    :param service: six.string_types. The Ceph user name to run the command under
+    :param pool_name: six.string_types
+    :param key: six.string_types
+    :param value:
+    :return: None.  Can raise CalledProcessError
+    """
+    cmd = ['ceph', '--id', service, 'osd', 'pool', 'set', pool_name, key, value]
+    try:
+        check_call(cmd)
+    except CalledProcessError:
+        raise
+
+
+def snapshot_pool(service, pool_name, snapshot_name):
+    """
+    Snapshots a RADOS pool in ceph.
+    :param service: six.string_types. The Ceph user name to run the command under
+    :param pool_name: six.string_types
+    :param snapshot_name: six.string_types
+    :return: None.  Can raise CalledProcessError
+    """
+    cmd = ['ceph', '--id', service, 'osd', 'pool', 'mksnap', pool_name, snapshot_name]
+    try:
+        check_call(cmd)
+    except CalledProcessError:
+        raise
+
+
+def remove_pool_snapshot(service, pool_name, snapshot_name):
+    """
+    Remove a snapshot from a RADOS pool in ceph.
+    :param service: six.string_types. The Ceph user name to run the command under
+    :param pool_name: six.string_types
+    :param snapshot_name: six.string_types
+    :return: None.  Can raise CalledProcessError
+    """
+    cmd = ['ceph', '--id', service, 'osd', 'pool', 'rmsnap', pool_name, snapshot_name]
+    try:
+        check_call(cmd)
+    except CalledProcessError:
+        raise
+
+
+# max_bytes should be an int or long
+def set_pool_quota(service, pool_name, max_bytes):
+    """
+    :param service: six.string_types. The Ceph user name to run the command under
+    :param pool_name: six.string_types
+    :param max_bytes: int or long
+    :return: None.  Can raise CalledProcessError
+    """
+    # Set a byte quota on a RADOS pool in ceph.
+    cmd = ['ceph', '--id', service, 'osd', 'pool', 'set-quota', pool_name, 'max_bytes', max_bytes]
+    try:
+        check_call(cmd)
+    except CalledProcessError:
+        raise
+
+
+def remove_pool_quota(service, pool_name):
+    """
+    Set a byte quota on a RADOS pool in ceph.
+    :param service: six.string_types. The Ceph user name to run the command under
+    :param pool_name: six.string_types
+    :return: None.  Can raise CalledProcessError
+    """
+    cmd = ['ceph', '--id', service, 'osd', 'pool', 'set-quota', pool_name, 'max_bytes', '0']
+    try:
+        check_call(cmd)
+    except CalledProcessError:
+        raise
+
+
+def create_erasure_profile(service, profile_name, erasure_plugin_name='jerasure', failure_domain='host',
+                           data_chunks=2, coding_chunks=1,
+                           locality=None, durability_estimator=None):
+    """
+    Create a new erasure code profile if one does not already exist for it.  Updates
+    the profile if it exists. Please see http://docs.ceph.com/docs/master/rados/operations/erasure-code-profile/
+    for more details
+    :param service: six.string_types. The Ceph user name to run the command under
+    :param profile_name: six.string_types
+    :param erasure_plugin_name: six.string_types
+    :param failure_domain: six.string_types.  One of ['chassis', 'datacenter', 'host', 'osd', 'pdu', 'pod', 'rack', 'region',
+        'room', 'root', 'row'])
+    :param data_chunks: int
+    :param coding_chunks: int
+    :param locality: int
+    :param durability_estimator: int
+    :return: None.  Can raise CalledProcessError
+    """
+    # Ensure this failure_domain is allowed by Ceph
+    validator(failure_domain, six.string_types,
+              ['chassis', 'datacenter', 'host', 'osd', 'pdu', 'pod', 'rack', 'region', 'room', 'root', 'row'])
+
+    cmd = ['ceph', '--id', service, 'osd', 'erasure-code-profile', 'set', profile_name,
+           'plugin=' + erasure_plugin_name, 'k=' + str(data_chunks), 'm=' + str(coding_chunks),
+           'ruleset_failure_domain=' + failure_domain]
+    if locality is not None and durability_estimator is not None:
+        raise ValueError("create_erasure_profile should be called with k, m and one of l or c but not both.")
+
+    # Add plugin specific information
+    if locality is not None:
+        # For local erasure codes
+        cmd.append('l=' + str(locality))
+    if durability_estimator is not None:
+        # For Shec erasure codes
+        cmd.append('c=' + str(durability_estimator))
+
+    if erasure_profile_exists(service, profile_name):
+        cmd.append('--force')
+
+    try:
+        check_call(cmd)
+    except CalledProcessError:
+        raise
+
+
+def rename_pool(service, old_name, new_name):
+    """
+    Rename a Ceph pool from old_name to new_name
+    :param service: six.string_types. The Ceph user name to run the command under
+    :param old_name: six.string_types
+    :param new_name: six.string_types
+    :return: None
+    """
+    validator(value=old_name, valid_type=six.string_types)
+    validator(value=new_name, valid_type=six.string_types)
+
+    cmd = ['ceph', '--id', service, 'osd', 'pool', 'rename', old_name, new_name]
+    check_call(cmd)
+
+
+def erasure_profile_exists(service, name):
+    """
+    Check to see if an Erasure code profile already exists.
+    :param service: six.string_types. The Ceph user name to run the command under
+    :param name: six.string_types
+    :return: int or None
+    """
+    validator(value=name, valid_type=six.string_types)
+    try:
+        check_call(['ceph', '--id', service,
+                    'osd', 'erasure-code-profile', 'get',
+                    name])
+        return True
+    except CalledProcessError:
+        return False
+
+
+def get_cache_mode(service, pool_name):
+    """
+    Find the current caching mode of the pool_name given.
+    :param service: six.string_types. The Ceph user name to run the command under
+    :param pool_name: six.string_types
+    :return: int or None
+    """
+    validator(value=service, valid_type=six.string_types)
+    validator(value=pool_name, valid_type=six.string_types)
+    out = check_output(['ceph', '--id', service, 'osd', 'dump', '--format=json'])
+    try:
+        osd_json = json.loads(out)
+        for pool in osd_json['pools']:
+            if pool['pool_name'] == pool_name:
+                return pool['cache_mode']
+        return None
+    except ValueError:
+        raise
+
+
+def pool_exists(service, name):
+    """Check to see if a RADOS pool already exists."""
+    try:
+        out = check_output(['rados', '--id', service,
+                            'lspools']).decode('UTF-8')
+    except CalledProcessError:
+        return False
+
+    return name in out
+
+
+def get_osds(service):
+    """Return a list of all Ceph Object Storage Daemons currently in the
+    cluster.
+    """
+    version = ceph_version()
+    if version and version >= '0.56':
+        return json.loads(check_output(['ceph', '--id', service,
+                                        'osd', 'ls',
+                                        '--format=json']).decode('UTF-8'))
+
+    return None
+
+
 def install():
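[Note: usage sketch, not part of the diff. The new Pool subclasses size pg_num automatically via get_pgs(): replicated pools from the replica count, erasure-coded pools from the profile's k+m; pool and user names here are illustrative:

    from charmhelpers.contrib.storage.linux.ceph import (
        ReplicatedPool,
        ErasurePool,
    )

    rep = ReplicatedPool(service='admin', name='cinder', replicas=3)
    rep.create()  # no-op if the pool already exists

    ec = ErasurePool(service='admin', name='objects',
                     erasure_code_profile='default')
    ec.create()   # raises PoolCreationError if the profile is unusable
]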
@@ -101,53 +491,37 @@ def create_rbd_image(service, pool, image, sizemb):
     check_call(cmd)
 
 
-def pool_exists(service, name):
-    """Check to see if a RADOS pool already exists."""
-    try:
-        out = check_output(['rados', '--id', service,
-                            'lspools']).decode('UTF-8')
-    except CalledProcessError:
-        return False
-
-    return name in out
-
-
-def get_osds(service):
-    """Return a list of all Ceph Object Storage Daemons currently in the
-    cluster.
-    """
-    version = ceph_version()
-    if version and version >= '0.56':
-        return json.loads(check_output(['ceph', '--id', service,
-                                        'osd', 'ls',
-                                        '--format=json']).decode('UTF-8'))
-
-    return None
-
-
-def create_pool(service, name, replicas=3):
+def update_pool(client, pool, settings):
+    cmd = ['ceph', '--id', client, 'osd', 'pool', 'set', pool]
+    for k, v in six.iteritems(settings):
+        cmd.append(k)
+        cmd.append(v)
+
+    check_call(cmd)
+
+
+def create_pool(service, name, replicas=3, pg_num=None):
     """Create a new RADOS pool."""
     if pool_exists(service, name):
         log("Ceph pool {} already exists, skipping creation".format(name),
             level=WARNING)
         return
 
-    # Calculate the number of placement groups based
-    # on upstream recommended best practices.
-    osds = get_osds(service)
-    if osds:
-        pgnum = (len(osds) * 100 // replicas)
-    else:
-        # NOTE(james-page): Default to 200 for older ceph versions
-        # which don't support OSD query from cli
-        pgnum = 200
+    if not pg_num:
+        # Calculate the number of placement groups based
+        # on upstream recommended best practices.
+        osds = get_osds(service)
+        if osds:
+            pg_num = (len(osds) * 100 // replicas)
+        else:
+            # NOTE(james-page): Default to 200 for older ceph versions
+            # which don't support OSD query from cli
+            pg_num = 200
 
-    cmd = ['ceph', '--id', service, 'osd', 'pool', 'create', name, str(pgnum)]
+    cmd = ['ceph', '--id', service, 'osd', 'pool', 'create', name, str(pg_num)]
     check_call(cmd)
 
-    cmd = ['ceph', '--id', service, 'osd', 'pool', 'set', name, 'size',
-           str(replicas)]
-    check_call(cmd)
+    update_pool(service, name, settings={'size': str(replicas)})
 
 
 def delete_pool(service, name):
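[Note: usage sketch, not part of the diff. `create_pool()` still derives placement groups from the OSD count by default, but pg_num can now be pinned, and `update_pool()` applies arbitrary `osd pool set` settings; names are illustrative:

    from charmhelpers.contrib.storage.linux.ceph import (
        create_pool,
        update_pool,
    )

    create_pool(service='admin', name='glance', replicas=3)  # auto pg_num
    create_pool(service='admin', name='nova', replicas=3, pg_num=512)
    update_pool(client='admin', pool='nova', settings={'size': '2'})
]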
@@ -202,10 +576,10 @@ def create_key_file(service, key):
     log('Created new keyfile at %s.' % keyfile, level=INFO)
 
 
-def get_ceph_nodes():
-    """Query named relation 'ceph' to determine current nodes."""
+def get_ceph_nodes(relation='ceph'):
+    """Query named relation to determine current nodes."""
     hosts = []
-    for r_id in relation_ids('ceph'):
+    for r_id in relation_ids(relation):
         for unit in related_units(r_id):
             hosts.append(relation_get('private-address', unit=unit, rid=r_id))
 
@@ -357,14 +731,14 @@ def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point,
         service_start(svc)
 
 
-def ensure_ceph_keyring(service, user=None, group=None):
+def ensure_ceph_keyring(service, user=None, group=None, relation='ceph'):
     """Ensures a ceph keyring is created for a named service and optionally
     ensures user and group ownership.
 
     Returns False if no ceph key is available in relation state.
     """
     key = None
-    for rid in relation_ids('ceph'):
+    for rid in relation_ids(relation):
         for unit in related_units(rid):
             key = relation_get('key', rid=rid, unit=unit)
             if key:
@@ -405,6 +779,7 @@ class CephBrokerRq(object):
 
     The API is versioned and defaults to version 1.
     """
+
     def __init__(self, api_version=1, request_id=None):
         self.api_version = api_version
         if request_id:
@@ -413,9 +788,16 @@ class CephBrokerRq(object):
             self.request_id = str(uuid.uuid1())
         self.ops = []
 
-    def add_op_create_pool(self, name, replica_count=3):
+    def add_op_create_pool(self, name, replica_count=3, pg_num=None):
+        """Adds an operation to create a pool.
+
+        @param pg_num setting: optional setting. If not provided, this value
+        will be calculated by the broker based on how many OSDs are in the
+        cluster at the time of creation. Note that, if provided, this value
+        will be capped at the current available maximum.
+        """
         self.ops.append({'op': 'create-pool', 'name': name,
-                         'replicas': replica_count})
+                         'replicas': replica_count, 'pg_num': pg_num})
 
     def set_ops(self, ops):
         """Set request ops to provided value.
@ -433,8 +815,8 @@ class CephBrokerRq(object):
|
|||||||
def _ops_equal(self, other):
|
def _ops_equal(self, other):
|
||||||
if len(self.ops) == len(other.ops):
|
if len(self.ops) == len(other.ops):
|
||||||
for req_no in range(0, len(self.ops)):
|
for req_no in range(0, len(self.ops)):
|
||||||
for key in ['replicas', 'name', 'op']:
|
for key in ['replicas', 'name', 'op', 'pg_num']:
|
||||||
if self.ops[req_no][key] != other.ops[req_no][key]:
|
if self.ops[req_no].get(key) != other.ops[req_no].get(key):
|
||||||
return False
|
return False
|
||||||
else:
|
else:
|
||||||
return False
|
return False
|
||||||
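As a sketch of how a consuming charm might use the extended op (the pool name is illustrative):

    from charmhelpers.contrib.storage.linux.ceph import CephBrokerRq

    rq = CephBrokerRq()
    # pg_num is optional; None lets the broker calculate it from the
    # current OSD count, and any explicit value is capped by the broker.
    rq.add_op_create_pool(name='glance', replica_count=3, pg_num=64)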
@@ -540,7 +922,7 @@ def get_previous_request(rid):
     return request


-def get_request_states(request):
+def get_request_states(request, relation='ceph'):
     """Return a dict of requests per relation id with their corresponding
     completion state.

@@ -552,7 +934,7 @@ def get_request_states(request):
     """
     complete = []
     requests = {}
-    for rid in relation_ids('ceph'):
+    for rid in relation_ids(relation):
         complete = False
         previous_request = get_previous_request(rid)
         if request == previous_request:
@@ -570,14 +952,14 @@ def get_request_states(request):
     return requests


-def is_request_sent(request):
+def is_request_sent(request, relation='ceph'):
     """Check to see if a functionally equivalent request has already been sent

     Returns True if a similair request has been sent

     @param request: A CephBrokerRq object
     """
-    states = get_request_states(request)
+    states = get_request_states(request, relation=relation)
     for rid in states.keys():
         if not states[rid]['sent']:
             return False
@@ -585,7 +967,7 @@ def is_request_sent(request):
     return True


-def is_request_complete(request):
+def is_request_complete(request, relation='ceph'):
     """Check to see if a functionally equivalent request has already been
     completed

@@ -593,7 +975,7 @@ def is_request_complete(request):

     @param request: A CephBrokerRq object
     """
-    states = get_request_states(request)
+    states = get_request_states(request, relation=relation)
     for rid in states.keys():
         if not states[rid]['complete']:
             return False
@@ -643,15 +1025,15 @@ def get_broker_rsp_key():
     return 'broker-rsp-' + local_unit().replace('/', '-')


-def send_request_if_needed(request):
+def send_request_if_needed(request, relation='ceph'):
     """Send broker request if an equivalent request has not already been sent

     @param request: A CephBrokerRq object
     """
-    if is_request_sent(request):
+    if is_request_sent(request, relation=relation):
         log('Request already sent but not complete, not sending new request',
             level=DEBUG)
     else:
-        for rid in relation_ids('ceph'):
+        for rid in relation_ids(relation):
             log('Sending request {}'.format(request.request_id), level=DEBUG)
             relation_set(relation_id=rid, broker_req=request.request)
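A hedged end-to-end sketch of the parameterised flow, assuming a charm whose Ceph interface is joined on a relation named 'ceph' (the pool name is invented):

    from charmhelpers.contrib.storage.linux.ceph import (
        CephBrokerRq,
        is_request_complete,
        send_request_if_needed,
    )

    rq = CephBrokerRq()
    rq.add_op_create_pool(name='gnocchi', replica_count=3)

    # The relation keyword defaults to 'ceph'; passing it explicitly shows
    # how charms with a differently named relation can reuse these helpers.
    if not is_request_complete(rq, relation='ceph'):
        send_request_if_needed(rq, relation='ceph')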
@@ -76,3 +76,13 @@ def ensure_loopback_device(path, size):
     check_call(cmd)

     return create_loopback(path)
+
+
+def is_mapped_loopback_device(device):
+    """
+    Checks if a given device name is an existing/mapped loopback device.
+    :param device: str: Full path to the device (eg, /dev/loop1).
+    :returns: str: Path to the backing file if is a loopback device
+    empty string otherwise
+    """
+    return loopback_devices().get(device, "")
@@ -490,6 +490,19 @@ def relation_types():
     return rel_types


+@cached
+def peer_relation_id():
+    '''Get the peers relation id if a peers relation has been joined, else None.'''
+    md = metadata()
+    section = md.get('peers')
+    if section:
+        for key in section:
+            relids = relation_ids(key)
+            if relids:
+                return relids[0]
+    return None
+
+
 @cached
 def relation_to_interface(relation_name):
     """
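A short usage sketch for the new helper; the setting name is made up for the example:

    from charmhelpers.core.hookenv import peer_relation_id, relation_set

    # Publish a value on the peers relation only if one has been joined.
    rid = peer_relation_id()
    if rid:
        relation_set(relation_id=rid, ready='True')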
@@ -504,12 +517,12 @@ def relation_to_interface(relation_name):
 def relation_to_role_and_interface(relation_name):
     """
     Given the name of a relation, return the role and the name of the interface
-    that relation uses (where role is one of ``provides``, ``requires``, or ``peer``).
+    that relation uses (where role is one of ``provides``, ``requires``, or ``peers``).

     :returns: A tuple containing ``(role, interface)``, or ``(None, None)``.
     """
     _metadata = metadata()
-    for role in ('provides', 'requires', 'peer'):
+    for role in ('provides', 'requires', 'peers'):
         interface = _metadata.get(role, {}).get(relation_name, {}).get('interface')
         if interface:
             return role, interface
@@ -521,7 +534,7 @@ def role_and_interface_to_relations(role, interface_name):
     """
     Given a role and interface name, return a list of relation names for the
     current charm that use that interface under that role (where role is one
-    of ``provides``, ``requires``, or ``peer``).
+    of ``provides``, ``requires``, or ``peers``).

     :returns: A list of relation names.
     """
@@ -542,7 +555,7 @@ def interface_to_relations(interface_name):
     :returns: A list of relation names.
     """
     results = []
-    for role in ('provides', 'requires', 'peer'):
+    for role in ('provides', 'requires', 'peers'):
         results.extend(role_and_interface_to_relations(role, interface_name))
     return results

@@ -624,7 +637,7 @@ def unit_private_ip():


 @cached
-def storage_get(attribute="", storage_id=""):
+def storage_get(attribute=None, storage_id=None):
     """Get storage attributes"""
     _args = ['storage-get', '--format=json']
     if storage_id:
@@ -638,7 +651,7 @@ def storage_get(attribute="", storage_id=""):


 @cached
-def storage_list(storage_name=""):
+def storage_list(storage_name=None):
     """List the storage IDs for the unit"""
     _args = ['storage-list', '--format=json']
     if storage_name:
@@ -820,6 +833,7 @@ def status_get():

 def translate_exc(from_exc, to_exc):
     def inner_translate_exc1(f):
+        @wraps(f)
         def inner_translate_exc2(*args, **kwargs):
             try:
                 return f(*args, **kwargs)
@@ -864,6 +878,40 @@ def leader_set(settings=None, **kwargs):
     subprocess.check_call(cmd)


+@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
+def payload_register(ptype, klass, pid):
+    """ is used while a hook is running to let Juju know that a
+    payload has been started."""
+    cmd = ['payload-register']
+    for x in [ptype, klass, pid]:
+        cmd.append(x)
+    subprocess.check_call(cmd)
+
+
+@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
+def payload_unregister(klass, pid):
+    """ is used while a hook is running to let Juju know
+    that a payload has been manually stopped. The <class> and <id> provided
+    must match a payload that has been previously registered with juju using
+    payload-register."""
+    cmd = ['payload-unregister']
+    for x in [klass, pid]:
+        cmd.append(x)
+    subprocess.check_call(cmd)
+
+
+@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
+def payload_status_set(klass, pid, status):
+    """is used to update the current status of a registered payload.
+    The <class> and <id> provided must match a payload that has been previously
+    registered with juju using payload-register. The <status> must be one of the
+    follow: starting, started, stopping, stopped"""
+    cmd = ['payload-status-set']
+    for x in [klass, pid, status]:
+        cmd.append(x)
+    subprocess.check_call(cmd)
+
+
 @cached
 def juju_version():
     """Full version string (eg. '1.23.3.1-trusty-amd64')"""
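A minimal sketch of the new payload hooks in use; the type, class, and id values are illustrative:

    from charmhelpers.core.hookenv import (
        payload_register,
        payload_status_set,
        payload_unregister,
    )

    # Tell Juju about a payload this unit manages, then track its state.
    payload_register('docker', 'web-app', 'web-app-0')
    payload_status_set('web-app', 'web-app-0', 'started')
    payload_unregister('web-app', 'web-app-0')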
@@ -67,7 +67,9 @@ def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d"):
     """Pause a system service.

     Stop it, and prevent it from starting again at boot."""
-    stopped = service_stop(service_name)
+    stopped = True
+    if service_running(service_name):
+        stopped = service_stop(service_name)
     upstart_file = os.path.join(init_dir, "{}.conf".format(service_name))
     sysv_file = os.path.join(initd_dir, service_name)
     if os.path.exists(upstart_file):
@@ -105,7 +107,9 @@ def service_resume(service_name, init_dir="/etc/init",
             "Unable to detect {0} as either Upstart {1} or SysV {2}".format(
                 service_name, upstart_file, sysv_file))

-    started = service_start(service_name)
+    started = service_running(service_name)
+    if not started:
+        started = service_start(service_name)
     return started

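With these guards, both helpers are safe to call regardless of the service's current state; a sketch (the service name is assumed):

    from charmhelpers.core.host import service_pause, service_resume

    # pause only stops the service if it is running; resume only starts
    # it if it is not already running, so repeated calls are harmless.
    service_pause('apache2')
    service_resume('apache2')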
@@ -142,8 +146,22 @@ def service_available(service_name):
         return True


-def adduser(username, password=None, shell='/bin/bash', system_user=False):
-    """Add a user to the system"""
+def adduser(username, password=None, shell='/bin/bash', system_user=False,
+            primary_group=None, secondary_groups=None):
+    """
+    Add a user to the system.
+
+    Will log but otherwise succeed if the user already exists.
+
+    :param str username: Username to create
+    :param str password: Password for user; if ``None``, create a system user
+    :param str shell: The default shell for the user
+    :param bool system_user: Whether to create a login or system user
+    :param str primary_group: Primary group for user; defaults to their username
+    :param list secondary_groups: Optional list of additional groups
+
+    :returns: The password database entry struct, as returned by `pwd.getpwnam`
+    """
     try:
         user_info = pwd.getpwnam(username)
         log('user {0} already exists!'.format(username))
@@ -158,6 +176,16 @@ def adduser(username, password=None, shell='/bin/bash', system_user=False):
             '--shell', shell,
             '--password', password,
         ])
+        if not primary_group:
+            try:
+                grp.getgrnam(username)
+                primary_group = username  # avoid "group exists" error
+            except KeyError:
+                pass
+        if primary_group:
+            cmd.extend(['-g', primary_group])
+        if secondary_groups:
+            cmd.extend(['-G', ','.join(secondary_groups)])
     cmd.append(username)
     subprocess.check_call(cmd)
     user_info = pwd.getpwnam(username)
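An illustrative call using the new group parameters (the user and group names are hypothetical):

    from charmhelpers.core.host import adduser

    # System user with an explicit primary group plus secondary groups.
    user_info = adduser('svc-worker', system_user=True,
                        primary_group='svc', secondary_groups=['adm'])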
@@ -566,7 +594,14 @@ def chdir(d):
         os.chdir(cur)


-def chownr(path, owner, group, follow_links=True):
+def chownr(path, owner, group, follow_links=True, chowntopdir=False):
+    """
+    Recursively change user and group ownership of files and directories
+    in given path. Doesn't chown path itself by default, only its children.
+
+    :param bool follow_links: Also Chown links if True
+    :param bool chowntopdir: Also chown path itself if True
+    """
     uid = pwd.getpwnam(owner).pw_uid
     gid = grp.getgrnam(group).gr_gid
     if follow_links:
@@ -574,6 +609,10 @@ def chownr(path, owner, group, follow_links=True):
     else:
         chown = os.lchown

+    if chowntopdir:
+        broken_symlink = os.path.lexists(path) and not os.path.exists(path)
+        if not broken_symlink:
+            chown(path, uid, gid)
     for root, dirs, files in os.walk(path):
         for name in dirs + files:
             full = os.path.join(root, name)
@@ -584,3 +623,19 @@ def chownr(path, owner, group, follow_links=True):

 def lchownr(path, owner, group):
     chownr(path, owner, group, follow_links=False)
+
+
+def get_total_ram():
+    '''The total amount of system RAM in bytes.
+
+    This is what is reported by the OS, and may be overcommitted when
+    there are multiple containers hosted on the same machine.
+    '''
+    with open('/proc/meminfo', 'r') as f:
+        for line in f.readlines():
+            if line:
+                key, value, unit = line.split()
+                if key == 'MemTotal:':
+                    assert unit == 'kB', 'Unknown unit'
+                    return int(value) * 1024  # Classic, not KiB.
+        raise NotImplementedError()
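A brief sketch of the two additions (the path and names are assumed):

    from charmhelpers.core.host import chownr, get_total_ram

    # chowntopdir=True also chowns the top directory, not just children.
    chownr('/var/lib/myapp', owner='myapp', group='myapp',
           follow_links=False, chowntopdir=True)

    total_bytes = get_total_ram()  # parsed from /proc/meminfo MemTotal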
@@ -46,6 +46,8 @@ def hugepage_support(user, group='hugetlb', nr_hugepages=256,
     group_info = add_group(group)
     gid = group_info.gr_gid
     add_user_to_group(user, group)
+    if max_map_count < 2 * nr_hugepages:
+        max_map_count = 2 * nr_hugepages
     sysctl_settings = {
         'vm.nr_hugepages': nr_hugepages,
         'vm.max_map_count': max_map_count,
@@ -243,33 +243,40 @@ class TemplateCallback(ManagerCallback):
     :param str source: The template source file, relative to
         `$CHARM_DIR/templates`

-    :param str target: The target to write the rendered template to
+    :param str target: The target to write the rendered template to (or None)
     :param str owner: The owner of the rendered file
     :param str group: The group of the rendered file
     :param int perms: The permissions of the rendered file
     :param partial on_change_action: functools partial to be executed when
                                      rendered file changes
+    :param jinja2 loader template_loader: A jinja2 template loader
+
+    :return str: The rendered template
     """
     def __init__(self, source, target,
                  owner='root', group='root', perms=0o444,
-                 on_change_action=None):
+                 on_change_action=None, template_loader=None):
         self.source = source
         self.target = target
         self.owner = owner
         self.group = group
         self.perms = perms
         self.on_change_action = on_change_action
+        self.template_loader = template_loader

     def __call__(self, manager, service_name, event_name):
         pre_checksum = ''
         if self.on_change_action and os.path.isfile(self.target):
             pre_checksum = host.file_hash(self.target)
         service = manager.get_service(service_name)
-        context = {}
+        context = {'ctx': {}}
         for ctx in service.get('required_data', []):
             context.update(ctx)
-        templating.render(self.source, self.target, context,
-                          self.owner, self.group, self.perms)
+            context['ctx'].update(ctx)
+
+        result = templating.render(self.source, self.target, context,
+                                   self.owner, self.group, self.perms,
+                                   template_loader=self.template_loader)
         if self.on_change_action:
             if pre_checksum == host.file_hash(self.target):
                 hookenv.log(
@@ -278,6 +285,8 @@ class TemplateCallback(ManagerCallback):
             else:
                 self.on_change_action()

+        return result
+

 # Convenience aliases for templates
 render_template = template = TemplateCallback
@@ -21,13 +21,14 @@ from charmhelpers.core import hookenv


 def render(source, target, context, owner='root', group='root',
-           perms=0o444, templates_dir=None, encoding='UTF-8'):
+           perms=0o444, templates_dir=None, encoding='UTF-8', template_loader=None):
     """
     Render a template.

     The `source` path, if not absolute, is relative to the `templates_dir`.

-    The `target` path should be absolute.
+    The `target` path should be absolute. It can also be `None`, in which
+    case no file will be written.

     The context should be a dict containing the values to be replaced in the
     template.
@@ -36,6 +37,9 @@ def render(source, target, context, owner='root', group='root',

     If omitted, `templates_dir` defaults to the `templates` folder in the charm.

+    The rendered template will be written to the file as well as being returned
+    as a string.
+
     Note: Using this requires python-jinja2; if it is not installed, calling
     this will attempt to use charmhelpers.fetch.apt_install to install it.
     """
@@ -52,17 +56,26 @@ def render(source, target, context, owner='root', group='root',
         apt_install('python-jinja2', fatal=True)
         from jinja2 import FileSystemLoader, Environment, exceptions

-    if templates_dir is None:
-        templates_dir = os.path.join(hookenv.charm_dir(), 'templates')
-    loader = Environment(loader=FileSystemLoader(templates_dir))
+    if template_loader:
+        template_env = Environment(loader=template_loader)
+    else:
+        if templates_dir is None:
+            templates_dir = os.path.join(hookenv.charm_dir(), 'templates')
+        template_env = Environment(loader=FileSystemLoader(templates_dir))
     try:
         source = source
-        template = loader.get_template(source)
+        template = template_env.get_template(source)
     except exceptions.TemplateNotFound as e:
         hookenv.log('Could not load template %s from %s.' %
                     (source, templates_dir),
                     level=hookenv.ERROR)
         raise e
     content = template.render(context)
-    host.mkdir(os.path.dirname(target), owner, group, perms=0o755)
-    host.write_file(target, content.encode(encoding), owner, group, perms)
+    if target is not None:
+        target_dir = os.path.dirname(target)
+        if not os.path.exists(target_dir):
+            # This is a terrible default directory permission, as the file
+            # or its siblings will often contain secrets.
+            host.mkdir(os.path.dirname(target), owner, group, perms=0o755)
+        host.write_file(target, content.encode(encoding), owner, group, perms)
+    return content
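A sketch of render() with a custom loader and no target file; the template name, paths, and context are invented for the example:

    from jinja2 import ChoiceLoader, FileSystemLoader
    from charmhelpers.core.templating import render

    # Search charm-local templates first, then a shared directory.
    loader = ChoiceLoader([FileSystemLoader('templates'),
                           FileSystemLoader('/usr/share/myapp/templates')])

    # target=None: nothing is written; the rendered text is returned.
    content = render('app.conf.j2', None, {'port': 8080},
                     template_loader=loader)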
@@ -98,6 +98,14 @@ CLOUD_ARCHIVE_POCKETS = {
     'liberty/proposed': 'trusty-proposed/liberty',
     'trusty-liberty/proposed': 'trusty-proposed/liberty',
     'trusty-proposed/liberty': 'trusty-proposed/liberty',
+    # Mitaka
+    'mitaka': 'trusty-updates/mitaka',
+    'trusty-mitaka': 'trusty-updates/mitaka',
+    'trusty-mitaka/updates': 'trusty-updates/mitaka',
+    'trusty-updates/mitaka': 'trusty-updates/mitaka',
+    'mitaka/proposed': 'trusty-proposed/mitaka',
+    'trusty-mitaka/proposed': 'trusty-proposed/mitaka',
+    'trusty-proposed/mitaka': 'trusty-proposed/mitaka',
 }

 # The order of this list is very important. Handlers should be listed in from
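For example, any of the new aliases can now be passed to add_source (a minimal sketch):

    from charmhelpers.fetch import add_source, apt_update

    # 'mitaka', 'trusty-mitaka' and 'cloud:trusty-mitaka' all resolve
    # to the trusty-updates/mitaka cloud-archive pocket.
    add_source('cloud:trusty-mitaka')
    apt_update(fatal=True)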
@@ -225,12 +233,12 @@ def apt_purge(packages, fatal=False):

 def apt_mark(packages, mark, fatal=False):
     """Flag one or more packages using apt-mark"""
+    log("Marking {} as {}".format(packages, mark))
     cmd = ['apt-mark', mark]
     if isinstance(packages, six.string_types):
         cmd.append(packages)
     else:
         cmd.extend(packages)
-    log("Holding {}".format(packages))

     if fatal:
         subprocess.check_call(cmd, universal_newlines=True)
@@ -411,7 +419,7 @@ def plugins(fetch_handlers=None):
                 importlib.import_module(package),
                 classname)
             plugin_list.append(handler_class())
-        except (ImportError, AttributeError):
+        except NotImplementedError:
             # Skip missing plugins so that they can be ommitted from
             # installation if desired
             log("FetchHandler {} not found, skipping plugin".format(
@@ -108,7 +108,7 @@ class ArchiveUrlFetchHandler(BaseFetchHandler):
         install_opener(opener)
         response = urlopen(source)
         try:
-            with open(dest, 'w') as dest_file:
+            with open(dest, 'wb') as dest_file:
                 dest_file.write(response.read())
         except Exception as e:
             if os.path.isfile(dest):
@@ -15,60 +15,50 @@
 # along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.

 import os
+from subprocess import check_call
 from charmhelpers.fetch import (
     BaseFetchHandler,
-    UnhandledSource
+    UnhandledSource,
+    filter_installed_packages,
+    apt_install,
 )
 from charmhelpers.core.host import mkdir

-import six
-if six.PY3:
-    raise ImportError('bzrlib does not support Python3')

-try:
-    from bzrlib.branch import Branch
-    from bzrlib import bzrdir, workingtree, errors
-except ImportError:
-    from charmhelpers.fetch import apt_install
-    apt_install("python-bzrlib")
-    from bzrlib.branch import Branch
-    from bzrlib import bzrdir, workingtree, errors
+if filter_installed_packages(['bzr']) != []:
+    apt_install(['bzr'])
+    if filter_installed_packages(['bzr']) != []:
+        raise NotImplementedError('Unable to install bzr')


 class BzrUrlFetchHandler(BaseFetchHandler):
     """Handler for bazaar branches via generic and lp URLs"""
     def can_handle(self, source):
         url_parts = self.parse_url(source)
-        if url_parts.scheme not in ('bzr+ssh', 'lp'):
+        if url_parts.scheme not in ('bzr+ssh', 'lp', ''):
             return False
+        elif not url_parts.scheme:
+            return os.path.exists(os.path.join(source, '.bzr'))
         else:
             return True

     def branch(self, source, dest):
-        url_parts = self.parse_url(source)
-        # If we use lp:branchname scheme we need to load plugins
         if not self.can_handle(source):
             raise UnhandledSource("Cannot handle {}".format(source))
-        if url_parts.scheme == "lp":
-            from bzrlib.plugin import load_plugins
-            load_plugins()
-        try:
-            local_branch = bzrdir.BzrDir.create_branch_convenience(dest)
-        except errors.AlreadyControlDirError:
-            local_branch = Branch.open(dest)
-        try:
-            remote_branch = Branch.open(source)
-            remote_branch.push(local_branch)
-            tree = workingtree.WorkingTree.open(dest)
-            tree.update()
-        except Exception as e:
-            raise e
+        if os.path.exists(dest):
+            check_call(['bzr', 'pull', '--overwrite', '-d', dest, source])
+        else:
+            check_call(['bzr', 'branch', source, dest])

-    def install(self, source):
+    def install(self, source, dest=None):
         url_parts = self.parse_url(source)
         branch_name = url_parts.path.strip("/").split("/")[-1]
-        dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
-                                branch_name)
+        if dest:
+            dest_dir = os.path.join(dest, branch_name)
+        else:
+            dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
+                                    branch_name)

         if not os.path.exists(dest_dir):
             mkdir(dest_dir, perms=0o755)
         try:
@@ -15,24 +15,19 @@
 # along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.

 import os
+from subprocess import check_call
 from charmhelpers.fetch import (
     BaseFetchHandler,
-    UnhandledSource
+    UnhandledSource,
+    filter_installed_packages,
+    apt_install,
 )
 from charmhelpers.core.host import mkdir

-import six
-if six.PY3:
-    raise ImportError('GitPython does not support Python 3')
-
-try:
-    from git import Repo
-except ImportError:
-    from charmhelpers.fetch import apt_install
-    apt_install("python-git")
-    from git import Repo
-
-from git.exc import GitCommandError  # noqa E402
+if filter_installed_packages(['git']) != []:
+    apt_install(['git'])
+    if filter_installed_packages(['git']) != []:
+        raise NotImplementedError('Unable to install git')


 class GitUrlFetchHandler(BaseFetchHandler):
@@ -40,19 +35,24 @@ class GitUrlFetchHandler(BaseFetchHandler):
     def can_handle(self, source):
         url_parts = self.parse_url(source)
         # TODO (mattyw) no support for ssh git@ yet
-        if url_parts.scheme not in ('http', 'https', 'git'):
+        if url_parts.scheme not in ('http', 'https', 'git', ''):
             return False
+        elif not url_parts.scheme:
+            return os.path.exists(os.path.join(source, '.git'))
         else:
             return True

-    def clone(self, source, dest, branch, depth=None):
+    def clone(self, source, dest, branch="master", depth=None):
         if not self.can_handle(source):
             raise UnhandledSource("Cannot handle {}".format(source))

-        if depth:
-            Repo.clone_from(source, dest, branch=branch, depth=depth)
+        if os.path.exists(dest):
+            cmd = ['git', '-C', dest, 'pull', source, branch]
         else:
-            Repo.clone_from(source, dest, branch=branch)
+            cmd = ['git', 'clone', source, dest, '--branch', branch]
+            if depth:
+                cmd.extend(['--depth', depth])
+        check_call(cmd)

     def install(self, source, branch="master", dest=None, depth=None):
         url_parts = self.parse_url(source)
@@ -66,8 +66,6 @@ class GitUrlFetchHandler(BaseFetchHandler):
             mkdir(dest_dir, perms=0o755)
         try:
             self.clone(source, dest_dir, branch, depth)
-        except GitCommandError as e:
-            raise UnhandledSource(e)
         except OSError as e:
             raise UnhandledSource(e.strerror)
         return dest_dir
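A sketch of the CLI-backed handler; the URL and destination are illustrative:

    from charmhelpers.fetch.giturl import GitUrlFetchHandler

    handler = GitUrlFetchHandler()
    source = 'https://github.com/juju/charm-helpers'
    if handler.can_handle(source):
        # Clones on first use, pulls on subsequent calls to the same dest.
        handler.clone(source, '/tmp/charm-helpers', branch='master')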
@@ -14,12 +14,18 @@
 # You should have received a copy of the GNU Lesser General Public License
 # along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.

+import logging
+import re
+import sys
 import six
 from collections import OrderedDict
 from charmhelpers.contrib.amulet.deployment import (
     AmuletDeployment
 )

+DEBUG = logging.DEBUG
+ERROR = logging.ERROR
+

 class OpenStackAmuletDeployment(AmuletDeployment):
     """OpenStack amulet deployment.
@@ -28,9 +34,12 @@ class OpenStackAmuletDeployment(AmuletDeployment):
        that is specifically for use by OpenStack charms.
        """

-    def __init__(self, series=None, openstack=None, source=None, stable=True):
+    def __init__(self, series=None, openstack=None, source=None,
+                 stable=True, log_level=DEBUG):
         """Initialize the deployment environment."""
         super(OpenStackAmuletDeployment, self).__init__(series)
+        self.log = self.get_logger(level=log_level)
+        self.log.info('OpenStackAmuletDeployment: init')
         self.openstack = openstack
         self.source = source
         self.stable = stable
@@ -38,6 +47,22 @@ class OpenStackAmuletDeployment(AmuletDeployment):
         # out.
         self.current_next = "trusty"

+    def get_logger(self, name="deployment-logger", level=logging.DEBUG):
+        """Get a logger object that will log to stdout."""
+        log = logging
+        logger = log.getLogger(name)
+        fmt = log.Formatter("%(asctime)s %(funcName)s "
+                            "%(levelname)s: %(message)s")
+
+        handler = log.StreamHandler(stream=sys.stdout)
+        handler.setLevel(level)
+        handler.setFormatter(fmt)
+
+        logger.addHandler(handler)
+        logger.setLevel(level)
+
+        return logger
+
     def _determine_branch_locations(self, other_services):
         """Determine the branch locations for the other services.

@@ -45,6 +70,8 @@ class OpenStackAmuletDeployment(AmuletDeployment):
            stable or next (dev) branch, and based on this, use the corresonding
            stable or next branches for the other_services."""

+        self.log.info('OpenStackAmuletDeployment: determine branch locations')
+
         # Charms outside the lp:~openstack-charmers namespace
         base_charms = ['mysql', 'mongodb', 'nrpe']

@@ -82,6 +109,8 @@ class OpenStackAmuletDeployment(AmuletDeployment):

     def _add_services(self, this_service, other_services):
         """Add services to the deployment and set openstack-origin/source."""
+        self.log.info('OpenStackAmuletDeployment: adding services')
+
         other_services = self._determine_branch_locations(other_services)

         super(OpenStackAmuletDeployment, self)._add_services(this_service,
@@ -95,7 +124,8 @@ class OpenStackAmuletDeployment(AmuletDeployment):
                     'ceph-osd', 'ceph-radosgw']

         # Charms which can not use openstack-origin, ie. many subordinates
-        no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe']
+        no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe',
+                     'openvswitch-odl', 'neutron-api-odl', 'odl-controller']

         if self.openstack:
             for svc in services:
@@ -111,9 +141,79 @@ class OpenStackAmuletDeployment(AmuletDeployment):

     def _configure_services(self, configs):
         """Configure all of the services."""
+        self.log.info('OpenStackAmuletDeployment: configure services')
         for service, config in six.iteritems(configs):
             self.d.configure(service, config)

+    def _auto_wait_for_status(self, message=None, exclude_services=None,
+                              include_only=None, timeout=1800):
+        """Wait for all units to have a specific extended status, except
+        for any defined as excluded.  Unless specified via message, any
+        status containing any case of 'ready' will be considered a match.
+
+        Examples of message usage:
+
+          Wait for all unit status to CONTAIN any case of 'ready' or 'ok':
+              message = re.compile('.*ready.*|.*ok.*', re.IGNORECASE)
+
+          Wait for all units to reach this status (exact match):
+              message = re.compile('^Unit is ready and clustered$')
+
+          Wait for all units to reach any one of these (exact match):
+              message = re.compile('Unit is ready|OK|Ready')
+
+          Wait for at least one unit to reach this status (exact match):
+              message = {'ready'}
+
+        See Amulet's sentry.wait_for_messages() for message usage detail.
+        https://github.com/juju/amulet/blob/master/amulet/sentry.py
+
+        :param message: Expected status match
+        :param exclude_services: List of juju service names to ignore,
+            not to be used in conjuction with include_only.
+        :param include_only: List of juju service names to exclusively check,
+            not to be used in conjuction with exclude_services.
+        :param timeout: Maximum time in seconds to wait for status match
+        :returns: None.  Raises if timeout is hit.
+        """
+        self.log.info('Waiting for extended status on units...')
+
+        all_services = self.d.services.keys()
+
+        if exclude_services and include_only:
+            raise ValueError('exclude_services can not be used '
+                             'with include_only')
+
+        if message:
+            if isinstance(message, re._pattern_type):
+                match = message.pattern
+            else:
+                match = message
+
+            self.log.debug('Custom extended status wait match: '
+                           '{}'.format(match))
+        else:
+            self.log.debug('Default extended status wait match: contains '
+                           'READY (case-insensitive)')
+            message = re.compile('.*ready.*', re.IGNORECASE)
+
+        if exclude_services:
+            self.log.debug('Excluding services from extended status match: '
+                           '{}'.format(exclude_services))
+        else:
+            exclude_services = []
+
+        if include_only:
+            services = include_only
+        else:
+            services = list(set(all_services) - set(exclude_services))
+
+        self.log.debug('Waiting up to {}s for extended status on services: '
+                       '{}'.format(timeout, services))
+        service_messages = {service: message for service in services}
+        self.d.sentry.wait_for_messages(service_messages, timeout=timeout)
+        self.log.info('OK')
+
     def _get_openstack_release(self):
         """Get openstack release.

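A usage sketch from a hypothetical amulet test, where deployment is assumed to be an OpenStackAmuletDeployment instance:

    import re

    # Wait up to 15 minutes for every unit except nrpe to report ready.
    message = re.compile('.*ready.*', re.IGNORECASE)
    deployment._auto_wait_for_status(message=message,
                                     exclude_services=['nrpe'],
                                     timeout=900)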
@@ -125,7 +225,8 @@ class OpenStackAmuletDeployment(AmuletDeployment):
          self.precise_havana, self.precise_icehouse,
          self.trusty_icehouse, self.trusty_juno, self.utopic_juno,
          self.trusty_kilo, self.vivid_kilo, self.trusty_liberty,
-         self.wily_liberty) = range(12)
+         self.wily_liberty, self.trusty_mitaka,
+         self.xenial_mitaka) = range(14)

         releases = {
             ('precise', None): self.precise_essex,
@@ -137,9 +238,11 @@ class OpenStackAmuletDeployment(AmuletDeployment):
             ('trusty', 'cloud:trusty-juno'): self.trusty_juno,
             ('trusty', 'cloud:trusty-kilo'): self.trusty_kilo,
             ('trusty', 'cloud:trusty-liberty'): self.trusty_liberty,
+            ('trusty', 'cloud:trusty-mitaka'): self.trusty_mitaka,
             ('utopic', None): self.utopic_juno,
             ('vivid', None): self.vivid_kilo,
-            ('wily', None): self.wily_liberty}
+            ('wily', None): self.wily_liberty,
+            ('xenial', None): self.xenial_mitaka}
         return releases[(self.series, self.openstack)]

     def _get_openstack_release_string(self):
@@ -156,6 +259,7 @@ class OpenStackAmuletDeployment(AmuletDeployment):
             ('utopic', 'juno'),
             ('vivid', 'kilo'),
             ('wily', 'liberty'),
+            ('xenial', 'mitaka'),
         ])
         if self.openstack:
             os_origin = self.openstack.split(':')[1]
|
@ -18,6 +18,7 @@ import amulet
|
|||||||
import json
|
import json
|
||||||
import logging
|
import logging
|
||||||
import os
|
import os
|
||||||
|
import re
|
||||||
import six
|
import six
|
||||||
import time
|
import time
|
||||||
import urllib
|
import urllib
|
||||||
@ -604,7 +605,22 @@ class OpenStackAmuletUtils(AmuletUtils):
|
|||||||
'{}'.format(sample_type, samples))
|
'{}'.format(sample_type, samples))
|
||||||
return None
|
return None
|
||||||
|
|
||||||
# rabbitmq/amqp specific helpers:
|
# rabbitmq/amqp specific helpers:
|
||||||
|
|
||||||
|
def rmq_wait_for_cluster(self, deployment, init_sleep=15, timeout=1200):
|
||||||
|
"""Wait for rmq units extended status to show cluster readiness,
|
||||||
|
after an optional initial sleep period. Initial sleep is likely
|
||||||
|
necessary to be effective following a config change, as status
|
||||||
|
message may not instantly update to non-ready."""
|
||||||
|
|
||||||
|
if init_sleep:
|
||||||
|
time.sleep(init_sleep)
|
||||||
|
|
||||||
|
message = re.compile('^Unit is ready and clustered$')
|
||||||
|
deployment._auto_wait_for_status(message=message,
|
||||||
|
timeout=timeout,
|
||||||
|
include_only=['rabbitmq-server'])
|
||||||
|
|
||||||
def add_rmq_test_user(self, sentry_units,
|
def add_rmq_test_user(self, sentry_units,
|
||||||
username="testuser1", password="changeme"):
|
username="testuser1", password="changeme"):
|
||||||
"""Add a test user via the first rmq juju unit, check connection as
|
"""Add a test user via the first rmq juju unit, check connection as
|
||||||
@ -805,7 +821,10 @@ class OpenStackAmuletUtils(AmuletUtils):
|
|||||||
if port:
|
if port:
|
||||||
config['ssl_port'] = port
|
config['ssl_port'] = port
|
||||||
|
|
||||||
deployment.configure('rabbitmq-server', config)
|
deployment.d.configure('rabbitmq-server', config)
|
||||||
|
|
||||||
|
# Wait for unit status
|
||||||
|
self.rmq_wait_for_cluster(deployment)
|
||||||
|
|
||||||
# Confirm
|
# Confirm
|
||||||
tries = 0
|
tries = 0
|
||||||
@ -832,7 +851,10 @@ class OpenStackAmuletUtils(AmuletUtils):
|
|||||||
|
|
||||||
# Disable RMQ SSL
|
# Disable RMQ SSL
|
||||||
config = {'ssl': 'off'}
|
config = {'ssl': 'off'}
|
||||||
deployment.configure('rabbitmq-server', config)
|
deployment.d.configure('rabbitmq-server', config)
|
||||||
|
|
||||||
|
# Wait for unit status
|
||||||
|
self.rmq_wait_for_cluster(deployment)
|
||||||
|
|
||||||
# Confirm
|
# Confirm
|
||||||
tries = 0
|
tries = 0
|
||||||
|
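A closing sketch of the rmq helper, assuming utils is an OpenStackAmuletUtils instance and deployment an OpenStackAmuletDeployment:

    # Block until rabbitmq-server units report 'Unit is ready and
    # clustered' again, e.g. right after an SSL config change.
    utils.rmq_wait_for_cluster(deployment, init_sleep=15, timeout=900)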