Gracefully depart the cluster
Improve handling of a unit departing the cluster. Stop MySQL cleanly to notify peer instances early. Change-Id: Ic40e190a299acd998f028d6803320444ce766602
This commit is contained in:
parent
53bf5af4f1
commit
3d1d93865c
|
@ -1383,6 +1383,14 @@ class MySQLInnoDBClusterCharm(charms_openstack.charm.OpenStackCharm):
|
|||
:returns: This function is called for its side effect
|
||||
:rtype: None
|
||||
"""
|
||||
# This unit is departing the cluster
|
||||
# This overrides everything else.
|
||||
# Stop processing any other information.
|
||||
if reactive.is_flag_set("local.cluster.unit.departing"):
|
||||
ch_core.hookenv.status_set(
|
||||
"waiting", "This unit is departing. Shutting down.")
|
||||
return
|
||||
|
||||
# Set version
|
||||
ch_core.hookenv.application_version_set(self.application_version)
|
||||
# Start with default checks
|
||||
|
@ -1708,3 +1716,29 @@ class MySQLInnoDBClusterCharm(charms_openstack.charm.OpenStackCharm):
|
|||
|
||||
else:
|
||||
reactive.clear_flag('tls.enabled')
|
||||
|
||||
def depart_instance(self):
    """Depart from the cluster.

    Shut MySQL down cleanly so the remaining cluster members are
    notified that this node is going away, disable the service so it
    cannot come back up by accident, and clear this unit's published
    data on the cluster peer relation.

    :side effect: Stops MySQL and unsets relation data
    :returns: This function is called for its side effect
    :rtype: None
    """
    ch_core.hookenv.log("Stopping mysql ...", "WARNING")
    ch_core.host.service_stop(self.default_service)
    ch_core.hookenv.log("Disabling mysql ...", "WARNING")
    subprocess.check_call(["update-rc.d", self.default_service, "disable"])

    ch_core.hookenv.log("Unsetting cluster values ...", "WARNING")
    if self.cluster_relation_endpoint:
        # Publish None for each cluster key so peers see this node as
        # withdrawn from the cluster.
        publish = self.cluster_relation_endpoint.peer_relation.to_publish_raw
        for key in ('cluster-address', 'cluster-user', 'cluster-password'):
            publish[key] = None
|
||||
|
|
|
@ -254,7 +254,8 @@ def config_changed_restart():
|
|||
@reactive.when('leadership.set.cluster-instances-clustered')
|
||||
@reactive.when('endpoint.shared-db.changed')
|
||||
@reactive.when('shared-db.available')
|
||||
@reactive.when_not('charm.paused')
|
||||
@reactive.when_none(
|
||||
'charm.paused', 'local.cluster.unit.departing')
|
||||
def shared_db_respond():
|
||||
"""Respond to Shared DB Requests.
|
||||
"""
|
||||
|
@ -275,7 +276,8 @@ def shared_db_respond():
|
|||
@reactive.when('leadership.set.cluster-instances-clustered')
|
||||
@reactive.when('endpoint.db-router.changed')
|
||||
@reactive.when('db-router.available')
|
||||
@reactive.when_not('charm.paused')
|
||||
@reactive.when_none(
|
||||
'charm.paused', 'local.cluster.unit.departing')
|
||||
def db_router_respond():
|
||||
"""Respond to DB Router Requests.
|
||||
"""
|
||||
|
@ -292,6 +294,7 @@ def db_router_respond():
|
|||
@reactive.when('leadership.set.cluster-instances-clustered')
|
||||
@reactive.when('leadership.is_leader')
|
||||
@reactive.when('cluster.available')
|
||||
@reactive.when_not('local.cluster.unit.departing')
|
||||
def scale_out():
|
||||
"""Handle scale-out adding new nodes to an existing cluster."""
|
||||
|
||||
|
@ -316,6 +319,7 @@ def scale_out():
|
|||
|
||||
@reactive.when('certificates.available')
|
||||
@reactive.when('cluster.available')
|
||||
@reactive.when_not('local.cluster.unit.departing')
|
||||
def request_certificates():
|
||||
"""When the certificates interface is available, request TLS certificates.
|
||||
"""
|
||||
|
@ -341,6 +345,7 @@ def request_certificates():
|
|||
'certificates.ca.changed',
|
||||
'certificates.certs.changed',
|
||||
'endpoint.certificates.departed')
|
||||
@reactive.when_not('local.cluster.unit.departing')
|
||||
def configure_certificates():
|
||||
"""When the certificates interface is available, this default handler
|
||||
updates on-disk certificates and switches on the TLS support.
|
||||
|
@ -356,3 +361,34 @@ def configure_certificates():
|
|||
if reactive.is_flag_set('leadership.is_leader'):
|
||||
db_router_respond()
|
||||
instance.assess_status()
|
||||
|
||||
|
||||
@reactive.when_any(
    'endpoint.coordinator.departed',
    'endpoint.cluster.departed',
)
def scale_in():
    """Handle scale in.

    If this is the node departing, stop services and notify peers.

    :side effect: May stop MySQL and set local.cluster.unit.departing
    :returns: This function is called for its side effect
    :rtype: None
    """
    departing = ch_core.hookenv.departing_unit()
    if not departing:
        # BUG FIX: the original omitted the comma after the message, so
        # "WARNING" was string-concatenated into the log message and no
        # log level was passed. Restore "WARNING" as the level argument.
        ch_core.hookenv.log(
            "In a cluster/coordinator departing hook but departing unit is "
            "unset. Doing nothing.",
            "WARNING")
        return

    # BUG FIX: compare unit names with equality, not substring
    # containment. With "in", unit "mysql/1" would falsely match a
    # departing unit "mysql/10" and shut down the wrong node.
    if ch_core.hookenv.local_unit() == departing:
        ch_core.hookenv.log(
            "{} is this unit departing. Shutting down."
            .format(departing),
            "WARNING")
        reactive.set_flag("local.cluster.unit.departing")
        with charm.provide_charm_instance() as instance:
            instance.depart_instance()
    else:
        ch_core.hookenv.log(
            "{} is not this unit departing. Do nothing."
            .format(departing),
            "WARNING")
|
||||
|
|
|
@ -1041,6 +1041,7 @@ class TestMySQLInnoDBClusterCharm(test_utils.PatchHelper):
|
|||
"status_set")
|
||||
|
||||
# All is well
|
||||
self.is_flag_set.return_value = False
|
||||
midbc = mysql_innodb_cluster.MySQLInnoDBClusterCharm()
|
||||
midbc.check_if_paused = _check
|
||||
midbc.check_interfaces = _check
|
||||
|
@ -1082,6 +1083,15 @@ class TestMySQLInnoDBClusterCharm(test_utils.PatchHelper):
|
|||
self.status_set.assert_called_once_with(
|
||||
"blocked", "MySQL InnoDB Cluster not healthy: Cluster not healthy")
|
||||
|
||||
# Departing
|
||||
self.is_flag_set.return_value = True
|
||||
self.status_set.reset_mock()
|
||||
_check.reset_mock()
|
||||
midbc._assess_status()
|
||||
_check.assert_not_called()
|
||||
self.status_set.assert_called_once_with(
|
||||
"waiting", "This unit is departing. Shutting down.")
|
||||
|
||||
def test_get_cluster_status(self):
|
||||
_local_addr = "10.10.50.50"
|
||||
_name = "theCluster"
|
||||
|
|
|
@ -74,6 +74,10 @@ class TestRegisteredHooks(test_utils.TestRegisteredHooks):
|
|||
"configure_certificates": (
|
||||
"certificates.ca.changed",
|
||||
"certificates.certs.changed",),
|
||||
"scale_in": (
|
||||
"endpoint.coordinator.departed",
|
||||
"endpoint.cluster.departed",),
|
||||
|
||||
},
|
||||
"when_not": {
|
||||
"leader_install": ("charm.installed",),
|
||||
|
@ -91,6 +95,24 @@ class TestRegisteredHooks(test_utils.TestRegisteredHooks):
|
|||
"signal_clustered": ("leadership.is_leader",),
|
||||
"shared_db_respond": ("charm.paused",),
|
||||
"db_router_respond": ("charm.paused",),
|
||||
"scale_out": ("local.cluster.unit.departing",),
|
||||
"request_certificates": ("local.cluster.unit.departing",),
|
||||
"configure_certificates": ("local.cluster.unit.departing",),
|
||||
},
|
||||
"when_any": {
|
||||
"scale_in": (
|
||||
"endpoint.coordinator.departed",
|
||||
"endpoint.cluster.departed",),
|
||||
"configure_certificates": (
|
||||
"certificates.ca.changed",
|
||||
"certificates.certs.changed",
|
||||
"endpoint.certificates.departed",),
|
||||
},
|
||||
"when_none": {
|
||||
"shared_db_respond": (
|
||||
"charm.paused", "local.cluster.unit.departing",),
|
||||
"db_router_respond": (
|
||||
"charm.paused", "local.cluster.unit.departing",),
|
||||
},
|
||||
}
|
||||
# test that the hooks were registered via the
|
||||
|
|
Loading…
Reference in New Issue