Prometheus mysqld exporter

Install mysqld-exporter with snap. It's a Prometheus exporter for MySQL,
which provides the HTTP interface for Prometheus to pull the metrics.
It should only be installed and configured when a relation to Prometheus
is connected, and disabled when the relation is removed.
The charm also needs to create a read-only user for the exporter to get
the metrics.

https://github.com/prometheus/mysqld_exporter

Change-Id: Id6a6337004586699c7f943530fa7a32deae26db0
func-test-pr: https://github.com/openstack-charmers/zaza-openstack-tests/pull/859
This commit is contained in:
Alex Kavanagh
2022-08-16 21:04:29 +01:00
committed by jneo8
parent 570fff375f
commit 85a3deee22
14 changed files with 652 additions and 62 deletions

View File

@@ -183,3 +183,9 @@ options:
default:
description: |
The hostname or address of the db-router endpoint for mysql-innodb-cluster
prometheus-exporter-snap-channel:
type: string
default:
description: |
Choose which channel to install for mysql prometheus exporter
Supported values are: "stable", "candidate", "beta" and "edge"

View File

@@ -7,6 +7,7 @@ includes:
- interface:mysql-router
- interface:mysql-innodb-cluster
- interface:mysql-monitor
- interface:http
options:
basic:
use_venv: True

View File

@@ -21,6 +21,7 @@ import subprocess
import tenacity
import tempfile
import uuid
from typing import Literal
import charms.coordinator as coordinator
import charms_openstack.charm
@@ -197,7 +198,46 @@ class CannotConnectToMySQL(Exception):
pass
class MySQLInnoDBClusterCharm(charms_openstack.charm.OpenStackCharm):
class MySQLPrometheusExporterMixin:
    """Mixin providing Prometheus-exporter accessors.

    Intended to be mixed into MySQLInnoDBClusterCharm only.
    """

    # DB account used by the mysqld exporter.
    _EXPORTER_USER = "prom_exporter"
    # Default mysqld_exporter listen port.
    _EXPORTER_PORT = "9104"

    @property
    def prometheus_exporter_user(self):
        """Username of the DB account used by the exporter.

        :returns: Exporter username
        :rtype: str
        """
        return self._EXPORTER_USER

    @property
    def prometheus_exporter_password(self):
        """Password for the exporter DB account (generated/stored once).

        :returns: Exporter password
        :rtype: str
        """
        return self._get_password("prom_exporter_password")

    @property
    def prometheus_exporter_port(self):
        """Port this unit's prometheus exporter listens on.

        :returns: Port
        :rtype: str
        """
        return self._EXPORTER_PORT
class MySQLInnoDBClusterCharm(
MySQLPrometheusExporterMixin,
charms_openstack.charm.OpenStackCharm,
):
"""Charm class for the MySQLInnoDBCluster charm."""
name = "mysql-innodb-cluster"
release = "train"
@@ -468,7 +508,12 @@ class MySQLInnoDBClusterCharm(charms_openstack.charm.OpenStackCharm):
return _helper
@staticmethod
def _grant_cluster_user_privileges(m_helper, address, user, read_only):
def _grant_user_privileges(
m_helper,
address,
user,
privilege: Literal["all", "read_only", "prom_exporter"],
):
"""Grant privileges for cluster user.
:param m_helper: connected RW instance of the MySQLDB8Helper class
@@ -477,14 +522,17 @@ class MySQLInnoDBClusterCharm(charms_openstack.charm.OpenStackCharm):
:type address: str
:param user: Cluster user's username
:type user: str
:param read_only: Grand read-only permissions [False]
:type read_only: bool
:param privilege: User permission
:type privilege:
Literal["all", "read_only", "prom_exporter"]
:side effect: Executes SQL to revoke and grand privileges for user
"""
sql_grant = "GRANT {permissions} ON *.* TO '{user}'@'{host}'"
sql_revoke = "REVOKE ALL PRIVILEGES ON *.* FROM '{user}'@'{host}'"
if read_only:
if privilege == "read_only":
permissions = "SELECT, SHOW VIEW"
elif privilege == "prom_exporter":
permissions = "PROCESS, REPLICATION CLIENT"
else:
permissions = "ALL PRIVILEGES"
# NOTE (rgildein): The WITH GRANT OPTION clause gives the user the
@@ -512,14 +560,14 @@ class MySQLInnoDBClusterCharm(charms_openstack.charm.OpenStackCharm):
m_helper.execute("FLUSH PRIVILEGES")
def create_cluster_user(
self,
cluster_address,
cluster_user,
cluster_password,
read_only=False
def create_user(
self,
address,
user,
password,
user_privilege,
):
"""Create cluster user and grant permissions in the MySQL DB.
"""Create user and grant permissions in the MySQL DB.
This user will be used by the leader for instance configuration and
initial cluster creation.
@@ -527,14 +575,14 @@ class MySQLInnoDBClusterCharm(charms_openstack.charm.OpenStackCharm):
The grants are specific to cluster creation and management as
documented upstream.
:param cluster_address: Cluster user's address
:type cluster_address: str
:param cluster_user: Cluster user's username
:type cluster_user: str
:param cluster_password: Cluster user's password
:type cluster_password: str
:param read_only: Grand read-only permissions [False]
:type read_only: bool
:param address: User's address
:type address: str
:param exporter_user: User's username
:type user: str
:param password: User's password
:type password: str
:param privilege: User permission
:type privilege: Literal[all, read_only, prom_exporter]
:side effect: Executes SQL to create DB user
:returns: True if successful, False if there are failures
:rtype: Boolean
@@ -543,8 +591,8 @@ class MySQLInnoDBClusterCharm(charms_openstack.charm.OpenStackCharm):
"CREATE USER '{user}'@'{host}' "
"IDENTIFIED BY '{password}'")
addresses = [cluster_address]
if cluster_address in self.cluster_address:
addresses = [address]
if address in self.cluster_address:
addresses.append("localhost")
# If this is scale out and the cluster already exists, use the cluster
@@ -557,12 +605,12 @@ class MySQLInnoDBClusterCharm(charms_openstack.charm.OpenStackCharm):
for address in addresses:
try:
m_helper.execute(SQL_CLUSTER_USER_CREATE.format(
user=cluster_user,
user=user,
host=address,
password=cluster_password)
password=password)
)
self._grant_cluster_user_privileges(
m_helper, address, cluster_user, read_only
self._grant_user_privileges(
m_helper, address, user, user_privilege,
)
except mysql.MySQLdb._exceptions.OperationalError as e:
if e.args[0] == self._read_only_error:
@@ -575,11 +623,11 @@ class MySQLInnoDBClusterCharm(charms_openstack.charm.OpenStackCharm):
if e.args[0] == self._user_create_failed:
ch_core.hookenv.log(
"User {} exists."
.format(cluster_user), "WARNING")
.format(user), "WARNING")
# NOTE (rgildein): This is necessary to ensure that the
# existing user has the correct privileges.
self._grant_cluster_user_privileges(
m_helper, address, cluster_user, read_only
self._grant_user_privileges(
m_helper, address, user, user_privilege,
)
continue
else:
@@ -1098,10 +1146,11 @@ class MySQLInnoDBClusterCharm(charms_openstack.charm.OpenStackCharm):
"create cluster user for {}.".format(address))
# Make sure we have the user in the DB
for unit in cluster.all_joined_units:
if not self.create_cluster_user(
if not self.create_user(
unit.received['cluster-address'],
unit.received['cluster-user'],
unit.received['cluster-password']):
unit.received['cluster-password'],
"all"):
raise Exception(
"Not all cluster users created.")
self.configure_instance(address)

View File

@@ -18,6 +18,8 @@ provides:
db-monitor:
interface: mysql-monitor
scope: container
prometheus:
interface: http
peers:
cluster:
interface: mysql-innodb-cluster
@@ -27,3 +29,8 @@ resources:
filename: mysql-shell.snap
description: |
Snap for mysql-shell
mysqld-exporter:
type: file
filename: mysqld-exporter.snap
description: |
snap for mysqld prometheus exporter

View File

@@ -12,6 +12,10 @@ import charmhelpers.contrib.openstack.cert_utils as cert_utils
import charms.coordinator as coordinator
import charm.openstack.mysql_innodb_cluster as mysql_innodb_cluster # noqa
from .prometheus_mysql_exporter_handlers import (
create_remote_prometheus_exporter_user
)
charms_openstack.bus.discover()
charm.use_defaults('update-status')
@@ -68,10 +72,12 @@ def create_local_cluster_user():
"""
ch_core.hookenv.log("Creating local cluster user.", "DEBUG")
with charm.provide_charm_instance() as instance:
if not instance.create_cluster_user(
if not instance.create_user(
instance.cluster_address,
instance.cluster_user,
instance.cluster_password):
instance.cluster_password,
"all",
):
ch_core.hookenv.log("Local cluster user was not created.",
"WARNING")
return
@@ -116,10 +122,11 @@ def create_remote_cluster_user():
ch_core.hookenv.log("Creating remote users.", "DEBUG")
with charm.provide_charm_instance() as instance:
for unit in cluster.all_joined_units:
if not instance.create_cluster_user(
if not instance.create_user(
unit.received['cluster-address'],
unit.received['cluster-user'],
unit.received['cluster-password']):
unit.received['cluster-password'],
"all"):
ch_core.hookenv.log("Not all remote users created.", "WARNING")
return
@@ -368,6 +375,10 @@ def scale_out():
"WARNING")
return
create_remote_cluster_user()
if reactive.endpoint_from_flag("prometheus.available"):
create_remote_prometheus_exporter_user()
configure_instances_for_clustering()
add_instances_to_cluster()
reactive.clear_flag('endpoint.cluster.changed.unit-configure-ready')
@@ -503,8 +514,8 @@ def db_monitor_respond():
with charm.provide_charm_instance() as instance:
# NOTE (rgildein): Create a custom user with administrator privileges,
# but read-only access.
if not instance.create_cluster_user(
db_monitor.relation_ip, username, password, True
if not instance.create_user(
db_monitor.relation_ip, username, password, "read_only"
):
ch_core.hookenv.log("db-monitor user was not created.",
ch_core.hookenv.WARNING)

View File

@@ -0,0 +1,211 @@
import subprocess
import charms.reactive as reactive
from charms.layer import snap
import charms_openstack.charm as charm
import charmhelpers.core as ch_core
SVC_NAME = "snap.mysqld-exporter.mysqld-exporter.service"
SNAP_NAME = "mysqld-exporter"
@reactive.when("prometheus.available")
@reactive.when("local.cluster.user-created")
@reactive.when_not("local.prom-exporter.user-created")
def create_local_prometheus_exporter_user():
    """Create the local prometheus exporter user in the DB.

    Runs once a prometheus relation is present and the local cluster
    user exists.  Grants the "prom_exporter" privilege set for this
    unit's cluster address; on success sets the
    "local.prom-exporter.user-created" flag so this handler never
    re-runs.
    """
    with charm.provide_charm_instance() as instance:
        if not instance.create_user(
            instance.cluster_address,
            instance.prometheus_exporter_user,
            instance.prometheus_exporter_password,
            "prom_exporter",
        ):
            # Leave the flag unset so creation is retried next hook.
            ch_core.hookenv.log(
                "Local prometheus exporter user was not created.",
                "WARNING")
            return
        reactive.set_flag("local.prom-exporter.user-created")
        instance.assess_status()
        ch_core.hookenv.log(
            "Create prometheus mysql exporter user in the mysql db",
            "INFO")
@reactive.when_not("local.prom-exporter.all-user-created")
@reactive.when("prometheus.available")
@reactive.when("cluster.available")
@reactive.when("local.prom-exporter.user-created")
def create_remote_prometheus_exporter_user():
    """Create the exporter user for every remote cluster peer.

    The cluster peer endpoint is obtained from the "cluster.available"
    flag (it is not a parameter).  Grants the "prom_exporter" privilege
    set for each joined peer's cluster address; only when all peers
    succeed is "local.prom-exporter.all-user-created" set.
    """
    cluster = reactive.endpoint_from_flag("cluster.available")
    ch_core.hookenv.log("Creating remote users.", "DEBUG")
    with charm.provide_charm_instance() as instance:
        for unit in cluster.all_joined_units:
            if not instance.create_user(
                    unit.received['cluster-address'],
                    instance.prometheus_exporter_user,
                    instance.prometheus_exporter_password,
                    "prom_exporter"):
                # Any failure aborts without setting the flag so the
                # whole set is retried next hook invocation.
                ch_core.hookenv.log(
                    "Not all remote exporter users created.", "WARNING")
                return
        # Optimize clustering by causing a cluster relation changed
        cluster.set_unit_configure_ready()
        reactive.set_flag("local.prom-exporter.all-user-created")
        instance.assess_status()
@reactive.when("prometheus.available")
@reactive.when_not("snap.installed.prometheus-exporter")
def snap_install_prometheus_exporter():
    """Install the mysqld-exporter snap.

    The channel comes from the "prometheus-exporter-snap-channel" config
    option; since that option's default is empty, fall back to "stable"
    when it is unset or blank.  On success sets the
    "snap.installed.prometheus-exporter" flag; on failure the unit is
    placed in the "blocked" workload state.
    """
    config = ch_core.hookenv.config()
    # The option exists with an empty default, so config.get(key,
    # "stable") would return None; use `or` to still land on "stable".
    channel = config.get("prometheus-exporter-snap-channel") or "stable"
    ch_core.hookenv.status_set(
        "maintenance",
        "Snap install {}. channel={}".format(SNAP_NAME, channel),
    )
    try:
        snap.install(SNAP_NAME, channel=channel, force_dangerous=False)
        reactive.set_flag("snap.installed.prometheus-exporter")
        ch_core.hookenv.log(
            "Snap install prometheus mysql exporter .",
            "INFO")
        ch_core.hookenv.status_set(
            "active",
            "Snap install {} success. channel={}".format(SNAP_NAME, channel),
        )
    except subprocess.CalledProcessError as e:
        # "blocked" is the valid Juju workload status ("block" is not).
        ch_core.hookenv.status_set(
            "blocked",
            "Snap install {} fail. channel={}".format(SNAP_NAME, channel),
        )
        ch_core.hookenv.log(
            str(e),
            level=ch_core.hookenv.ERROR,
        )
def snap_config_prometheus_exporter(instance):
    """Write MySQL connection settings into the exporter snap config.

    :param instance: charm instance providing the exporter credentials
        and this unit's cluster address/port.
    :side effect: sets the "snap.prometheus-exporter.configed" flag
        (that spelling is checked elsewhere, so it must not change).
    """
    # Connection information
    snap.set(SNAP_NAME, "mysql.user", instance.prometheus_exporter_user)
    snap.set(
        SNAP_NAME, "mysql.password", instance.prometheus_exporter_password)
    snap.set(SNAP_NAME, "mysql.host", instance.cluster_address)
    snap.set(SNAP_NAME, "mysql.port", instance.cluster_port)
    reactive.set_flag("snap.prometheus-exporter.configed")
    ch_core.hookenv.log(
        f"Config snap {SNAP_NAME}",
        "INFO",
    )
def start_prometheus_exporter():
    """Restart the exporter systemd service and mark it started."""
    ch_core.host.service_restart(SVC_NAME)
    reactive.set_flag("snap.prometheus-exporter.started")
    ch_core.hookenv.log(
        f"Start service {SVC_NAME}", "INFO")
@reactive.when("prometheus.available")
@reactive.when("snap.installed.prometheus-exporter")
@reactive.when("local.prom-exporter.user-created")
@reactive.when("local.prom-exporter.all-user-created")
@reactive.when_not("snap.prometheus-exporter.started")
def start_prometheus_exporter_service():
    """Configure (if needed) and start the exporter service.

    Only runs once the snap is installed and both the local and remote
    exporter DB users exist.
    """
    with charm.provide_charm_instance() as instance:
        # Skip snap configuration if it was already applied.
        if not reactive.is_flag_set("snap.prometheus-exporter.configed"):
            snap_config_prometheus_exporter(instance)
        start_prometheus_exporter()
@reactive.when("snap.prometheus-exporter.started")
@reactive.when("prometheus.available")
@reactive.when_not("local.prometheus.send-connection-info")
def send_prometheus_connection_info(target):
    """Publish the exporter's scrape port on the prometheus relation.

    :param target: the http interface endpoint — presumably injected by
        the reactive framework for the "prometheus.available" flag;
        TODO confirm against the interface layer.
    """
    with charm.provide_charm_instance() as instance:
        target.configure(
            port=instance.prometheus_exporter_port,
        )
    ch_core.hookenv.status_set(
        "active", "Start prometheus exporter service")
    ch_core.hookenv.log(
        "Prometheus connected", "INFO")
    reactive.set_flag("local.prometheus.send-connection-info")
@reactive.when("prometheus.available")
@reactive.when('config.changed')
def set_config_changed_snap_check():
    """Flag that snap config must be re-checked after a config change."""
    reactive.set_flag("snap.prometheus_exporter.check-config-changed")
@reactive.when("snap.prometheus_exporter.check-config-changed")
def maybe_update_snap_channel():
    """Stop the exporter so it restarts with fresh config next hook."""
    # Stop service before snap update channel.
    # After exec stop_prometheus_exporter_service method,
    # the start_prometheus_exporter_service method should be triggered
    # next hook invocation.
    if ch_core.host.service_running(SVC_NAME):
        stop_prometheus_exporter_service()
    reactive.clear_flag("snap.prometheus_exporter.check-config-changed")
@reactive.when_not("prometheus.available")
@reactive.when("snap.prometheus-exporter.started")
def stop_prometheus_exporter_service():
    """Stop the exporter service and clear its configured/started flags.

    Triggered when the prometheus relation goes away; also called
    directly by maybe_update_snap_channel to force a restart.
    """
    ch_core.host.service_stop(SVC_NAME)
    reactive.remove_state("snap.prometheus-exporter.configed")
    reactive.remove_state("snap.prometheus-exporter.started")
    ch_core.hookenv.status_set(
        "active", "Stop prometheus exporter service")
    ch_core.hookenv.log(
        "Stop service prometheus mysql exporter", "INFO")
@reactive.when_not("prometheus.available")
@reactive.when("local.prometheus.send-connection-info")
def prometheus_disconnected():
    """React to the prometheus relation being removed.

    Clears "local.prometheus.send-connection-info" so the connection
    info is re-sent if the relation is re-established later.
    """
    ch_core.hookenv.status_set(
        "maintenance",
        "Stop prometheus exporter service",
    )
    reactive.remove_state("local.prometheus.send-connection-info")
    ch_core.hookenv.log(
        "Prometheus disconnect",
        "WARNING",
    )
@reactive.when("prometheus.available")
@reactive.when_not("local.prometheus.send-connection-info")
def prometheus_connected():
    """Report maintenance status while the exporter is being set up."""
    ch_core.hookenv.status_set(
        "maintenance",
        "Start prometheus exporter service",
    )
    ch_core.hookenv.log(
        "Prometheus connect",
        "WARNING",
    )

View File

@@ -78,7 +78,7 @@ applications:
channel: yoga/edge
ovn-chassis:
charm: ch:ovn-chassis
channel: latest/edge
channel: latest/stable
nova-compute:
charm: ch:nova-compute
num_units: 1

View File

@@ -12,6 +12,8 @@ applications:
series: focal
charm: ../../../mysql-innodb-cluster.charm
num_units: 3
options:
prometheus-exporter-snap-channel: edge
keystone:
charm: ch:keystone
num_units: 1
@@ -20,6 +22,9 @@ applications:
charm: ch:vault
num_units: 1
channel: 1.7/edge
prometheus2:
charm: ch:prometheus2
num_units: 1
relations:
- - vault:certificates
- mysql-innodb-cluster:certificates
@@ -33,3 +38,5 @@ relations:
- mysql-innodb-cluster:db-router
- - vault-mysql-router:db-router
- mysql-innodb-cluster:db-router
- - prometheus2:target
- mysql-innodb-cluster:prometheus

View File

@@ -11,6 +11,8 @@ applications:
mysql-innodb-cluster:
charm: ../../../mysql-innodb-cluster.charm
num_units: 3
options:
prometheus-exporter-snap-channel: edge
keystone:
charm: ch:keystone
num_units: 1
@@ -19,6 +21,10 @@ applications:
charm: ch:vault
num_units: 1
channel: 1.7/edge
prometheus2:
charm: ch:prometheus2
num_units: 1
series: focal
relations:
- - vault:certificates
- mysql-innodb-cluster:certificates
@@ -32,3 +38,5 @@ relations:
- mysql-innodb-cluster:db-router
- - vault-mysql-router:db-router
- mysql-innodb-cluster:db-router
- - prometheus2:target
- mysql-innodb-cluster:prometheus

View File

@@ -24,6 +24,7 @@ tests:
- zaza.openstack.charm_tests.mysql.tests.MySQLInnoDBClusterColdStartTest
- zaza.openstack.charm_tests.mysql.tests.MySQLInnoDBClusterScaleTest
- zaza.openstack.charm_tests.keystone.tests.AuthenticationAuthorizationTest
- zaza.openstack.charm_tests.mysql.test_prometheus_mysql_exporter.PrometheusMySQLExporterTest
gate_bundles:
- scale_in_out: focal

View File

@@ -41,6 +41,10 @@ charms.leadership = mock.MagicMock()
sys.modules['charms.leadership'] = charms.leadership
charms.coordinator = mock.MagicMock()
sys.modules['charms.coordinator'] = charms.coordinator
charms.layer = mock.MagicMock()
sys.modules['charms.layer'] = charms.layer
charms.layer.snap = mock.MagicMock()
sys.modules['charms.layer.snap'] = charms.layer.snap
charmhelpers = mock.MagicMock()
charmhelpers.contrib.database = mock.MagicMock()

View File

@@ -467,7 +467,7 @@ class TestMySQLInnoDBClusterCharm(test_utils.PatchHelper):
midbc = mysql_innodb_cluster.MySQLInnoDBClusterCharm()
# All privileges
midbc._grant_cluster_user_privileges(mock_helper, _addr, _user, False)
midbc._grant_user_privileges(mock_helper, _addr, _user, "all")
mock_helper.execute.assert_has_calls([
mock.call("REVOKE ALL PRIVILEGES ON *.* FROM '{}'@'{}'"
.format(_user, _addr)),
@@ -478,7 +478,7 @@ class TestMySQLInnoDBClusterCharm(test_utils.PatchHelper):
mock_helper.reset_mock()
# read-only privileges
midbc._grant_cluster_user_privileges(mock_helper, _addr, _user, True)
midbc._grant_user_privileges(mock_helper, _addr, _user, "read_only")
mock_helper.execute.assert_has_calls([
mock.call("REVOKE ALL PRIVILEGES ON *.* FROM '{}'@'{}'"
.format(_user, _addr)),
@@ -488,7 +488,7 @@ class TestMySQLInnoDBClusterCharm(test_utils.PatchHelper):
])
mock_helper.reset_mock()
def test_create_cluster_user(self):
def test_create_user(self):
_user = "user"
_pass = "pass"
_addr = "10.10.20.20"
@@ -498,7 +498,7 @@ class TestMySQLInnoDBClusterCharm(test_utils.PatchHelper):
midbc.get_db_helper.return_value = _helper
midbc.get_cluster_rw_db_helper = mock.MagicMock(return_value=None)
# Non-local
midbc.create_cluster_user(_addr, _user, _pass)
midbc.create_user(_addr, _user, _pass, "all")
_calls = [
mock.call("CREATE USER '{}'@'{}' IDENTIFIED BY '{}'"
.format(_user, _addr, _pass)),
@@ -514,7 +514,7 @@ class TestMySQLInnoDBClusterCharm(test_utils.PatchHelper):
_localhost = "localhost"
_helper.reset_mock()
self.get_relation_ip.return_value = _addr
midbc.create_cluster_user(_localhost, _user, _pass)
midbc.create_user(_localhost, _user, _pass, "all")
_calls = [
mock.call("CREATE USER '{}'@'{}' IDENTIFIED BY '{}'"
.format(_user, _localhost, _pass)),
@@ -527,7 +527,7 @@ class TestMySQLInnoDBClusterCharm(test_utils.PatchHelper):
_calls, any_order=True)
# Read only privileges
midbc.create_cluster_user(_localhost, _user, _pass, read_only=True)
midbc.create_user(_localhost, _user, _pass, "read_only")
_calls = [
mock.call("CREATE USER '{}'@'{}' IDENTIFIED BY '{}'"
.format(_user, _localhost, _pass)),
@@ -552,20 +552,20 @@ class TestMySQLInnoDBClusterCharm(test_utils.PatchHelper):
mock.MagicMock(), # gran privileges
mock.MagicMock(), # flush privileges
]
self.assertTrue(midbc.create_cluster_user(_localhost, _user, _pass))
self.assertTrue(midbc.create_user(_localhost, _user, _pass, "all"))
# Read only node
_helper.reset_mock()
_helper.execute.side_effect = (
self._exceptions.OperationalError(1290, "Super read only"))
self.assertFalse(midbc.create_cluster_user(_localhost, _user, _pass))
self.assertFalse(midbc.create_user(_localhost, _user, _pass, "all"))
# Unhandled Exception
_helper.reset_mock()
_helper.execute.side_effect = (
self._exceptions.OperationalError(99999, "BROKEN"))
with self.assertRaises(FakeException):
midbc.create_cluster_user(_localhost, _user, _pass)
midbc.create_user(_localhost, _user, _pass, "all")
def test_configure_instance(self):
_pass = "clusterpass"
@@ -1140,7 +1140,7 @@ class TestMySQLInnoDBClusterCharm(test_utils.PatchHelper):
mysql_innodb_cluster.charms_openstack.charm.OpenStackCharm,
"states_to_check", "super_states")
self.super_states.return_value = {}
_required_rels = ["cluster"]
_required_rels = ["all"]
_name = "jujuCluster"
_addr = "10.20.20.20"
self.get_relation_ip.return_value = _addr
@@ -1759,22 +1759,22 @@ class TestMySQLInnoDBClusterCharm(test_utils.PatchHelper):
"cluster-user": _user,
"cluster-password": _pass,
}
_create_cluster_user = mock.MagicMock()
_create_cluster_user.return_value = True
midbc.create_cluster_user = _create_cluster_user
_create_user = mock.MagicMock()
_create_user.return_value = True
midbc.create_user = _create_user
_configure_instance = mock.MagicMock()
midbc.configure_instance = _configure_instance
_add_instance_to_cluster = mock.MagicMock()
midbc.add_instance_to_cluster = _add_instance_to_cluster
midbc.configure_and_add_instance(address=_remote_addr)
_create_cluster_user.assert_called_once_with(
_remote_addr, _user, _pass)
_create_user.assert_called_once_with(
_remote_addr, _user, _pass, "all")
_configure_instance.assert_called_once_with(_remote_addr)
_add_instance_to_cluster.assert_called_once_with(_remote_addr)
# Not all users created
_create_cluster_user.return_value = False
_create_user.return_value = False
with self.assertRaises(Exception):
midbc.configure_and_add_instance(address=_remote_addr)
@@ -1868,3 +1868,25 @@ class TestMySQLInnoDBClusterCharm(test_utils.PatchHelper):
("SET GLOBAL group_replication_ip_allowlist = "
"'10.0.0.0/24,10.10.0.0/24'")),
mock.call('START GROUP_REPLICATION')])
    def test_prometheus_exporter_user(self):
        """Exporter username is the fixed value 'prom_exporter'."""
        midbc = mysql_innodb_cluster.MySQLInnoDBClusterCharm()
        self.assertEqual(
            midbc.prometheus_exporter_user,
            "prom_exporter")
def test_prometheus_exporter_password(self):
midbc = mysql_innodb_cluster.MySQLInnoDBClusterCharm()
midbc._get_password = mock.MagicMock()
midbc._get_password.side_effect = self._fake_data
_pass = "pass123"
self.data = {"mysql.passwd": _pass}
self.assertEqual(
midbc.mysql_password,
_pass)
    def test_prometheus_exporter_port(self):
        """Exporter port defaults to the fixed value '9104'."""
        midbc = mysql_innodb_cluster.MySQLInnoDBClusterCharm()
        self.assertEqual(
            midbc.prometheus_exporter_port,
            "9104")

View File

@@ -189,16 +189,18 @@ class TestMySQLInnoDBClusterHandlers(test_utils.PatchHelper):
self.set_flag.assert_called_once_with("charm.installed")
def test_create_local_cluster_user(self):
self.midbc.create_cluster_user.return_value = True
self.midbc.create_user.return_value = True
handlers.create_local_cluster_user()
self.midbc.create_cluster_user.assert_called_once_with(
self.midbc.create_user.assert_called_once_with(
self.midbc.cluster_address,
self.midbc.cluster_user,
self.midbc.cluster_password)
self.midbc.cluster_password,
"all",
)
self.set_flag.assert_called_once_with("local.cluster.user-created")
# Not successful
self.midbc.create_cluster_user.return_value = False
self.midbc.create_user.return_value = False
self.set_flag.reset_mock()
handlers.create_local_cluster_user()
self.set_flag.assert_not_called()
@@ -218,17 +220,17 @@ class TestMySQLInnoDBClusterHandlers(test_utils.PatchHelper):
self.data = {"cluster-address": _addr,
"cluster-user": _user,
"cluster-password": _pass}
self.midbc.create_cluster_user.return_value = True
self.midbc.create_user.return_value = True
self.endpoint_from_flag.return_value = self.cluster
handlers.create_remote_cluster_user()
self.midbc.create_cluster_user.assert_called_once_with(
_addr, _user, _pass)
self.midbc.create_user.assert_called_once_with(
_addr, _user, _pass, "all")
self.cluster.set_unit_configure_ready.assert_called_once()
self.set_flag.assert_called_once_with(
"local.cluster.all-users-created")
# Not successful
self.midbc.create_cluster_user.return_value = False
self.midbc.create_user.return_value = False
self.cluster.set_unit_configure_ready.reset_mock()
handlers.create_remote_cluster_user()
self.cluster.set_unit_configure_ready.assert_not_called()

View File

@@ -0,0 +1,261 @@
# Copyright 2022 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import mock
import charm.openstack.mysql_innodb_cluster as mysql_innodb_cluster
import reactive.prometheus_mysql_exporter_handlers as handlers
import charms_openstack.test_utils as test_utils
class TestRegisteredHooks(test_utils.TestRegisteredHooks):
def test_hooks(self):
defaults = [
]
hook_set = {
"when": {
"create_local_prometheus_exporter_user": (
"prometheus.available",
"local.cluster.user-created",
),
"snap_install_prometheus_exporter": (
"prometheus.available",
),
"start_prometheus_exporter_service": (
"prometheus.available",
"snap.installed.prometheus-exporter",
"local.prom-exporter.user-created",
"local.prom-exporter.all-user-created",
),
"send_prometheus_connection_info": (
"snap.prometheus-exporter.started",
"prometheus.available",
),
"stop_prometheus_exporter_service": (
"snap.prometheus-exporter.started",
),
"prometheus_connected": (
"prometheus.available",
),
"maybe_update_snap_channel": (
"snap.prometheus_exporter.check-config-changed",
),
"prometheus_disconnected": (
"local.prometheus.send-connection-info",
),
"set_config_changed_snap_check": (
"prometheus.available",
"config.changed",
),
"create_remote_prometheus_exporter_user": (
"prometheus.available",
"cluster.available",
"local.prom-exporter.user-created",
),
},
"when_not": {
"create_local_prometheus_exporter_user": (
"local.prom-exporter.user-created",
),
"snap_install_prometheus_exporter": (
"snap.installed.prometheus-exporter",
),
"start_prometheus_exporter_service": (
"snap.prometheus-exporter.started",
),
"stop_prometheus_exporter_service": (
"prometheus.available",
),
"prometheus_disconnected": (
"prometheus.available",
),
"send_prometheus_connection_info": (
"local.prometheus.send-connection-info",
),
"prometheus_connected": (
"local.prometheus.send-connection-info",
),
"create_remote_prometheus_exporter_user": (
"local.prom-exporter.all-user-created",
),
},
}
self.registered_hooks_test_helper(handlers, hook_set, defaults)
class TestPrometheusMySQLExporterHandlers(test_utils.PatchHelper):
def setUp(self):
super().setUp()
self._snap_name = "mysqld-exporter"
self._svc_name = (
"snap.mysqld-exporter.mysqld-exporter.service"
)
# Patch
self.patch_release(
mysql_innodb_cluster.MySQLInnoDBClusterCharm.release)
self.midbc = mock.MagicMock()
self.midbc.prometheus_exporter_user = "prom_exporter"
self.midbc.prometheus_exporter_password = "clusterpass"
self.midbc.cluster_address = "10.10.10.10"
self.midbc.cluster_port = 1234
self.patch_object(handlers.charm, "provide_charm_instance",
new=mock.MagicMock())
self.provide_charm_instance().__enter__.return_value = (
self.midbc)
self.provide_charm_instance().__exit__.return_value = None
self.patch_object(
handlers.reactive, "is_flag_set")
self.patch_object(handlers.reactive, "set_flag")
self.patch_object(handlers.reactive, "remove_state")
self.patch_object(
obj=handlers.snap, attr="install", name="snap_install")
self.patch_object(obj=handlers.snap, attr="set", name="snap_set")
self.patch_object(
handlers.ch_core.hookenv, "status_set")
self.patch_object(
handlers.ch_core.host, "service_restart")
self.patch_object(
handlers.ch_core.host, "service_stop")
self.patch_object(
handlers.ch_core.hookenv, "config"
)
self.config.return_value = {}
def test_create_local_prometheus_expoter_user(self):
self.midbc.create_user.return_value = True
handlers.create_local_prometheus_exporter_user()
self.midbc.create_user.assert_called_once_with(
self.midbc.cluster_address,
self.midbc.prometheus_exporter_user,
self.midbc.prometheus_exporter_password,
"prom_exporter",
)
self.set_flag.assert_called_once_with(
"local.prom-exporter.user-created")
def test_snap_install_prometheus_exporter(self):
handlers.snap_install_prometheus_exporter()
self.snap_install.assert_called_once_with(
self._snap_name,
channel="stable",
force_dangerous=False,
)
self.set_flag.assert_called_once_with(
"snap.installed.prometheus-exporter")
self.status_set.assert_has_calls(
[
mock.call(
"maintenance",
"Snap install {}. channel=stable".format(self._snap_name),
),
mock.call(
"active",
"Snap install {} success. channel=stable".format(
self._snap_name),
)
]
)
def test_snap_config_prometheus_exporter(self):
handlers.snap_config_prometheus_exporter(self.midbc)
self.snap_set.assert_any_call(
self._snap_name, "mysql.port", self.midbc.cluster_port
)
self.snap_set.assert_any_call(
self._snap_name, "mysql.host", self.midbc.cluster_address
)
self.snap_set.assert_any_call(
self._snap_name, "mysql.user", self.midbc.prometheus_exporter_user,
)
self.snap_set.assert_any_call(
self._snap_name,
"mysql.password",
self.midbc.prometheus_exporter_password,
)
self.set_flag.assert_called_once_with(
"snap.prometheus-exporter.configed")
def test_prometheus_connected(self):
handlers.prometheus_connected()
self.status_set.assert_called_once_with(
"maintenance", "Start prometheus exporter service")
def test_prometheus_disconnected(self):
handlers.prometheus_disconnected()
self.status_set.assert_called_once_with(
"maintenance", "Stop prometheus exporter service")
def test_start_prometheus_exporter(self):
handlers.start_prometheus_exporter()
self.service_restart.assert_called_once_with(self._svc_name)
self.set_flag("snap.prometheus-exporter.started")
@mock.patch.object(handlers, "start_prometheus_exporter")
@mock.patch.object(handlers, "snap_config_prometheus_exporter")
def test_start_prometheus_exporter_service(
self,
snap_config_prometheus_exporter,
start_prometheus_exporter,
):
# Already configed
self.is_flag_set.return_value = True
handlers.start_prometheus_exporter_service()
snap_config_prometheus_exporter.assert_not_called()
start_prometheus_exporter.assert_called_once_with()
snap_config_prometheus_exporter.reset_mock()
start_prometheus_exporter.reset_mock()
# Not configed
self.is_flag_set.return_value = False
handlers.start_prometheus_exporter_service()
snap_config_prometheus_exporter.assert_called_once_with(
self.midbc,
)
start_prometheus_exporter.assert_called_once_with()
def test_send_prometheus_connection_info(self):
target = mock.MagicMock()
handlers.send_prometheus_connection_info(target)
target.configure.assert_called_once_with(
port=self.midbc.prometheus_exporter_port,
)
self.status_set.assert_called_once_with(
"active", "Start prometheus exporter service")
self.set_flag.assert_called_once_with(
"local.prometheus.send-connection-info"
)
def test_stop_prometheus_exporter_service(self):
handlers.stop_prometheus_exporter_service()
self.service_stop.assert_called_once_with(self._svc_name)
self.remove_state.has_calls(
mock.call("snap.prometheus-exporter.configed"),
mock.call("snap.prometheus-exporter.started"),
)
self.status_set.assert_called_once_with(
"active", "Stop prometheus exporter service")