Notify clients of series upgrade
When the percona cluster is undergoing a series upgrade, clients should suspend db activity in their hooks (like db migrations). This change sends a notification of upgrade down the shared-db relation which clients can then react to. Change-Id: I5d8ed7d3935db5568c50f8d585e37a4d0cc6914f
This commit is contained in:
parent
f5ee53234b
commit
8443c165f9
|
@ -24,6 +24,8 @@ from charmhelpers.core.hookenv import (
|
|||
action_get,
|
||||
action_set,
|
||||
action_fail,
|
||||
relation_ids,
|
||||
relation_set,
|
||||
leader_set,
|
||||
is_leader,
|
||||
)
|
||||
|
@ -33,6 +35,10 @@ from charmhelpers.core.host import (
|
|||
lsb_release,
|
||||
)
|
||||
|
||||
from charmhelpers.contrib.openstack.utils import (
|
||||
DB_SERIES_UPGRADING_KEY,
|
||||
)
|
||||
|
||||
import percona_utils
|
||||
import percona_hooks
|
||||
|
||||
|
@ -67,6 +73,10 @@ def complete_cluster_series_upgrade(args):
|
|||
# Unset cluster_series_upgrading
|
||||
leader_set(cluster_series_upgrading="")
|
||||
leader_set(cluster_series_upgrade_leader="")
|
||||
for r_id in relation_ids('shared-db'):
|
||||
relation_set(
|
||||
relation_id=r_id,
|
||||
relation_settings={DB_SERIES_UPGRADING_KEY: None})
|
||||
percona_hooks.config_changed()
|
||||
|
||||
|
||||
|
|
|
@ -723,7 +723,7 @@ class MySQL8Helper(MySQLHelper):
|
|||
try:
|
||||
cursor.execute("GRANT CREATE USER ON *.* TO '{}'@'{}' WITH GRANT "
|
||||
"OPTION".format(db_user, remote_ip))
|
||||
cursor.execute("GRANT SELECT, INSERT, UPDATE, DELETE ON "
|
||||
cursor.execute("GRANT SELECT, INSERT, UPDATE, DELETE, EXECUTE ON "
|
||||
"mysql_innodb_cluster_metadata.* TO '{}'@'{}'"
|
||||
.format(db_user, remote_ip))
|
||||
cursor.execute("GRANT SELECT ON mysql.user TO '{}'@'{}'"
|
||||
|
|
|
@ -25,6 +25,7 @@ Helpers for clustering and determining "cluster leadership" and other
|
|||
clustering-related helpers.
|
||||
"""
|
||||
|
||||
import functools
|
||||
import subprocess
|
||||
import os
|
||||
import time
|
||||
|
@ -281,6 +282,10 @@ def determine_apache_port(public_port, singlenode_mode=False):
|
|||
return public_port - (i * 10)
|
||||
|
||||
|
||||
determine_apache_port_single = functools.partial(
|
||||
determine_apache_port, singlenode_mode=True)
|
||||
|
||||
|
||||
def get_hacluster_config(exclude_keys=None):
|
||||
'''
|
||||
Obtains all relevant configuration from charm configuration required
|
||||
|
@ -404,3 +409,43 @@ def distributed_wait(modulo=None, wait=None, operation_name='operation'):
|
|||
log(msg, DEBUG)
|
||||
status_set('maintenance', msg)
|
||||
time.sleep(calculated_wait)
|
||||
|
||||
|
||||
def get_managed_services_and_ports(services, external_ports,
                                   external_services=None,
                                   port_conv_f=determine_apache_port_single):
    """Get the services and ports managed by this charm.

    Return only the services and corresponding ports that are managed by this
    charm. This excludes haproxy when there is a relation with hacluster. This
    is because this charm passes responsibility for stopping and starting
    haproxy to hacluster.

    Similarly, if a relation with hacluster exists then the ports returned by
    this method correspond to those managed by the apache server rather than
    haproxy.

    .. note:: When an 'ha' relation exists, the passed ``services`` list is
              mutated in place (external services are removed from it).

    :param services: List of services.
    :type services: List[str]
    :param external_ports: List of ports managed by external services.
    :type external_ports: List[int]
    :param external_services: List of services to be removed if ha relation is
                              present.
    :type external_services: List[str]
    :param port_conv_f: Function to apply to ports to calculate the ports
                        managed by services controlled by this charm.
    :type port_conv_f: Callable[[int], int]
    :returns: A tuple containing a list of services first followed by a list of
              ports.
    :rtype: Tuple[List[str], List[int]]
    """
    if external_services is None:
        external_services = ['haproxy']
    if relation_ids('ha'):
        for svc in external_services:
            try:
                services.remove(svc)
            except ValueError:
                # Service not present in the list; nothing to exclude.
                pass
        external_ports = [port_conv_f(p) for p in external_ports]
    return services, external_ports
|
||||
|
|
|
@ -52,7 +52,7 @@ class RestrictedPackages(BaseAudit):
|
|||
def __init__(self, pkgs, **kwargs):
|
||||
super(RestrictedPackages, self).__init__(**kwargs)
|
||||
if isinstance(pkgs, string_types) or not hasattr(pkgs, '__iter__'):
|
||||
self.pkgs = [pkgs]
|
||||
self.pkgs = pkgs.split()
|
||||
else:
|
||||
self.pkgs = pkgs
|
||||
|
||||
|
@ -100,4 +100,5 @@ class RestrictedPackages(BaseAudit):
|
|||
apt_purge(pkg.name)
|
||||
|
||||
def is_virtual_package(self, pkg):
|
||||
return pkg.has_provides and not pkg.has_versions
|
||||
return (pkg.get('has_provides', False) and
|
||||
not pkg.get('has_versions', False))
|
||||
|
|
|
@ -44,6 +44,7 @@ from charmhelpers.core.hookenv import (
|
|||
INFO,
|
||||
ERROR,
|
||||
related_units,
|
||||
relation_get,
|
||||
relation_ids,
|
||||
relation_set,
|
||||
status_set,
|
||||
|
@ -331,6 +332,10 @@ PACKAGE_CODENAMES = {
|
|||
|
||||
DEFAULT_LOOPBACK_SIZE = '5G'
|
||||
|
||||
DB_SERIES_UPGRADING_KEY = 'cluster-series-upgrading'
|
||||
|
||||
DB_MAINTENANCE_KEYS = [DB_SERIES_UPGRADING_KEY]
|
||||
|
||||
|
||||
class CompareOpenStackReleases(BasicStringComparator):
|
||||
"""Provide comparisons of OpenStack releases.
|
||||
|
@ -1912,3 +1917,33 @@ def set_db_initialised():
|
|||
"""
|
||||
juju_log('Setting db-initialised to True', 'DEBUG')
|
||||
leader_set({'db-initialised': True})
|
||||
|
||||
|
||||
def is_db_maintenance_mode(relid=None):
    """Check relation data from notifications of db in maintenance mode.

    Scans 'shared-db' relation data for any of the DB_MAINTENANCE_KEYS and
    interprets truthy values as maintenance notifications.

    :param relid: Restrict the check to this relation id; when None, every
                  'shared-db' relation is checked.
    :type relid: Optional[str]
    :returns: Whether db has notified it is in maintenance mode.
    :rtype: bool
    """
    juju_log('Checking for maintenance notifications', 'DEBUG')
    if relid:
        r_ids = [relid]
    else:
        r_ids = relation_ids('shared-db')
    rids_units = [(r, u) for r in r_ids for u in related_units(r)]
    notifications = []
    for r_id, unit in rids_units:
        settings = relation_get(unit=unit, rid=r_id)
        for key, value in settings.items():
            if value and key in DB_MAINTENANCE_KEYS:
                juju_log(
                    'Unit: {}, Key: {}, Value: {}'.format(unit, key, value),
                    'DEBUG')
                try:
                    notifications.append(bool_from_string(value))
                except ValueError:
                    # Non-boolean values cannot signal maintenance mode;
                    # log and move on. (A redundant trailing `pass` was
                    # removed here — the log call already ends the handler.)
                    juju_log(
                        'Could not discern bool from {}'.format(value),
                        'WARN')
    return True in notifications
|
||||
|
|
|
@ -38,6 +38,7 @@ so with this we get rid of the dependency.
|
|||
import locale
|
||||
import os
|
||||
import subprocess
|
||||
import sys
|
||||
|
||||
|
||||
class _container(dict):
|
||||
|
@ -59,6 +60,13 @@ class Cache(object):
|
|||
def __init__(self, progress=None):
|
||||
pass
|
||||
|
||||
def __contains__(self, package):
    """Report whether *package* is present in the apt/dpkg databases."""
    try:
        # __getitem__ raises KeyError for unknown packages; a None
        # result is likewise treated as "not contained".
        return self[package] is not None
    except KeyError:
        return False
|
||||
|
||||
def __getitem__(self, package):
|
||||
"""Get information about a package from apt and dpkg databases.
|
||||
|
||||
|
@ -178,6 +186,28 @@ class Cache(object):
|
|||
return pkgs
|
||||
|
||||
|
||||
class Config(_container):
    """Dict-like view of apt configuration, parsed from ``apt-config dump``."""

    def __init__(self):
        super(Config, self).__init__(self._populate())

    def _populate(self):
        # Shell out to apt-config and convert its dump output into a
        # plain mapping, skipping the CommandLine entries the tool
        # echoes back about its own invocation.
        output = subprocess.check_output(
            ['apt-config', 'dump'],
            stderr=subprocess.STDOUT,
            universal_newlines=True)
        settings = {}
        for entry in output.splitlines():
            if entry.startswith("CommandLine"):
                continue
            # Each line looks like: Key "value";
            key, value = entry.split(" ", 1)
            settings[key] = value.strip(";").strip("\"")
        return settings
|
||||
|
||||
|
||||
# Backwards compatibility with old apt_pkg module
|
||||
sys.modules[__name__].config = Config()
|
||||
|
||||
|
||||
def init():
    """Compatibility shim that does nothing.

    Present so code written against the real apt_pkg module keeps working.
    """
|
||||
|
|
|
@ -79,6 +79,7 @@ from charmhelpers.contrib.charmsupport import nrpe
|
|||
from charmhelpers.contrib.hardening.harden import harden
|
||||
from charmhelpers.contrib.hardening.mysql.checks import run_mysql_checks
|
||||
from charmhelpers.contrib.openstack.utils import (
|
||||
DB_SERIES_UPGRADING_KEY,
|
||||
is_unit_paused_set,
|
||||
is_unit_upgrading_set,
|
||||
set_unit_upgrading,
|
||||
|
@ -371,6 +372,10 @@ def prepare():
|
|||
leader_set(cluster_series_upgrading=True)
|
||||
leader_set(
|
||||
cluster_series_upgrade_leader=get_relation_ip('cluster'))
|
||||
for r_id in relation_ids('shared-db'):
|
||||
relation_set(
|
||||
relation_id=r_id,
|
||||
relation_settings={DB_SERIES_UPGRADING_KEY: True})
|
||||
else:
|
||||
hosts = [leader_get('cluster_series_upgrade_leader')]
|
||||
|
||||
|
@ -977,6 +982,11 @@ def leader_settings_changed():
|
|||
# NOTE(tkurek): deconfigure old leader
|
||||
if relation_ids('slave'):
|
||||
deconfigure_slave()
|
||||
if not leader_get('cluster_series_upgrading'):
|
||||
for r_id in relation_ids('shared-db'):
|
||||
relation_set(
|
||||
relation_id=r_id,
|
||||
relation_settings={DB_SERIES_UPGRADING_KEY: None})
|
||||
|
||||
|
||||
@hooks.hook('leader-elected')
|
||||
|
|
|
@ -1,9 +1,5 @@
|
|||
charm_name: "percona-cluster"
|
||||
tests:
|
||||
- disco_model:
|
||||
- zaza.openstack.charm_tests.mysql.tests.PerconaClusterColdStartTest
|
||||
- zaza.openstack.charm_tests.mysql.tests.PerconaClusterCharmTests
|
||||
- zaza.openstack.charm_tests.mysql.tests.PerconaClusterScaleTests
|
||||
- bionic_model:
|
||||
- zaza.openstack.charm_tests.mysql.tests.PerconaClusterColdStartTest
|
||||
- zaza.openstack.charm_tests.mysql.tests.PerconaClusterCharmTests
|
||||
|
@ -17,7 +13,6 @@ target_deploy_status:
|
|||
gate_bundles:
|
||||
- bionic_model: bionic-ha
|
||||
- xenial_model: xenial-ha
|
||||
- disco_model: disco-ha
|
||||
smoke_bundles:
|
||||
- bionic_model: bionic-ha
|
||||
dev_bundles:
|
||||
|
|
|
@ -42,11 +42,17 @@ class ResumeTestCase(CharmTestCase):
|
|||
class CompleteClusterSeriesUpgrade(CharmTestCase):
|
||||
|
||||
def setUp(self):
|
||||
to_patch = [
|
||||
"is_leader",
|
||||
"leader_set",
|
||||
"relation_ids",
|
||||
"relation_set"]
|
||||
super(CompleteClusterSeriesUpgrade, self).setUp(
|
||||
actions, ["is_leader", "leader_set"])
|
||||
actions, to_patch)
|
||||
|
||||
def test_leader_complete_series_upgrade(self):
|
||||
self.is_leader.return_value = True
|
||||
self.relation_ids.return_value = ['relid:1']
|
||||
calls = [mock.call(cluster_series_upgrading=""),
|
||||
mock.call(cluster_series_upgrade_leader="")]
|
||||
with patch('actions.actions.percona_hooks.config_changed'
|
||||
|
@ -54,6 +60,9 @@ class CompleteClusterSeriesUpgrade(CharmTestCase):
|
|||
actions.complete_cluster_series_upgrade([])
|
||||
self.leader_set.assert_has_calls(calls)
|
||||
config_changed.assert_called_once_with()
|
||||
self.relation_set.assert_called_once_with(
|
||||
relation_id='relid:1',
|
||||
relation_settings={'cluster-series-upgrading': None})
|
||||
|
||||
def test_non_leader_complete_series_upgrade(self):
|
||||
self.is_leader.return_value = False
|
||||
|
|
|
@ -640,6 +640,78 @@ class TestInstallPerconaXtraDB(CharmTestCase):
|
|||
self.run_mysql_checks.assert_not_called()
|
||||
|
||||
|
||||
class TestLeaderHooks(CharmTestCase):
    """Tests for the leader-settings-changed hook behaviour."""

    # Names in the hooks module replaced with mocks for each test.
    TO_PATCH = [
        'maybe_notify_bootstrapped',
        'config_changed',
        'relation_ids',
        'leader_get',
        'relation_set',
        'master_joined',
        'deconfigure_slave',
    ]

    def setUp(self):
        CharmTestCase.setUp(self, hooks, self.TO_PATCH)

    def relation_ids_full(self, rel_id):
        # Stand-in for relation_ids: every relation name resolves to
        # exactly one relation id, e.g. 'shared-db' -> ['shared-db:1'].
        return ['{}:1'.format(rel_id)]

    def test_leader_settings_changed(self):
        # With no cluster_series_upgrading leader setting, the hook should
        # clear the 'cluster-series-upgrading' key on the shared-db relation
        # in addition to its usual notification/config/master/slave work.
        self.relation_ids.side_effect = self.relation_ids_full
        self.leader_get.return_value = None
        hooks.leader_settings_changed()
        self.maybe_notify_bootstrapped.assert_called_once_with()
        self.config_changed.assert_called_once_with()
        self.master_joined.assert_called_once_with()
        self.deconfigure_slave.assert_called_once_with()
        self.relation_set.assert_called_once_with(
            relation_id='shared-db:1',
            relation_settings={'cluster-series-upgrading': None})
|
||||
|
||||
|
||||
class TestSeriesUpgrade(CharmTestCase):
    """Tests for the series-upgrade prepare hook."""

    # Names in the hooks module replaced with mocks for each test.
    TO_PATCH = [
        'register_configs',
        'pause_unit_helper',
        'set_unit_upgrading',
        'leader_get',
        'leader_set',
        'relation_ids',
        'relation_set',
        'get_relation_ip',
        'render_config',
    ]

    def setUp(self):
        CharmTestCase.setUp(self, hooks, self.TO_PATCH)

    def test_prepare_leader(self):
        # leader_get returning None means no upgrade leader is recorded yet,
        # so this unit becomes the upgrade leader: it must set the leader
        # settings and notify shared-db clients that an upgrade is underway.
        self.register_configs.return_value = 'registered_configs'
        self.leader_get.return_value = None
        self.get_relation_ip.return_value = '10.0.0.10'
        self.relation_ids.return_value = ['relid:1']
        hooks.prepare()
        self.pause_unit_helper.assert_called_once_with('registered_configs')
        self.set_unit_upgrading.assert_called_once_with()
        leader_set_calls = [
            mock.call(cluster_series_upgrading=True),
            mock.call(cluster_series_upgrade_leader='10.0.0.10')]
        self.leader_set.assert_has_calls(leader_set_calls)
        self.relation_set.assert_called_once_with(
            relation_id='relid:1',
            relation_settings={'cluster-series-upgrading': True})
        # Leader renders config with no peer hosts.
        self.render_config.assert_called_once_with([])

    def test_prepare_non_leader(self):
        # An upgrade leader already exists; this unit only pauses itself and
        # renders config pointing at the recorded upgrade leader's address.
        self.register_configs.return_value = 'registered_configs'
        self.leader_get.return_value = '10.0.0.10'
        hooks.prepare()
        self.pause_unit_helper.assert_called_once_with('registered_configs')
        self.set_unit_upgrading.assert_called_once_with()
        self.render_config.assert_called_once_with(['10.0.0.10'])
|
||||
|
||||
|
||||
class TestUpgradeCharm(CharmTestCase):
|
||||
TO_PATCH = [
|
||||
'config',
|
||||
|
|
Loading…
Reference in New Issue