diff --git a/.stestr.conf b/.stestr.conf new file mode 100644 index 0000000..5fcccac --- /dev/null +++ b/.stestr.conf @@ -0,0 +1,3 @@ +[DEFAULT] +test_path=./unit_tests +top_dir=./ diff --git a/src/lib/__init__.py b/src/lib/__init__.py new file mode 100644 index 0000000..5705e5d --- /dev/null +++ b/src/lib/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2019 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/src/lib/charm/__init__.py b/src/lib/charm/__init__.py new file mode 100644 index 0000000..5705e5d --- /dev/null +++ b/src/lib/charm/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2019 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
diff --git a/src/lib/charm/mysql_innodb_cluster.py b/src/lib/charm/mysql_innodb_cluster.py index f9ef820..b9dc067 100644 --- a/src/lib/charm/mysql_innodb_cluster.py +++ b/src/lib/charm/mysql_innodb_cluster.py @@ -35,26 +35,64 @@ MYSQLD_CNF = "/etc/mysql/mysql.conf.d/mysqld.cnf" @charms_openstack.adapters.config_property def server_id(cls): + """Determine this unit's server ID. + + :param cls: Class + :type cls: MySQLInnoDBClusterCharm class + :returns: String server ID + :rtype: str + """ unit_num = int(ch_core.hookenv.local_unit().split("/")[1]) return str(unit_num + 1000) @charms_openstack.adapters.config_property def cluster_address(cls): + """Determine this unit's cluster address. + + Using the relation binding determine this unit's cluster address. + + :param cls: Class + :type cls: MySQLInnoDBClusterCharm class + :returns: Address + :rtype: str + """ return ch_net_ip.get_relation_ip("cluster") @charms_openstack.adapters.config_property def shared_db_address(cls): + """Determine this unit's Shared-DB address. + + Using the relation binding determine this unit's address for the Shared-DB + relation. + + :param cls: Class + :type cls: MySQLInnoDBClusterCharm class + :returns: Address + :rtype: str + """ return ch_net_ip.get_relation_ip("shared-db") @charms_openstack.adapters.config_property def db_router_address(cls): + """Determine this unit's DB-Router address. + + Using the relation binding determine this unit's address for the DB-Router + relation. + + :param cls: Class + :type cls: MySQLInnoDBClusterCharm class + :returns: Address + :rtype: str + """ return ch_net_ip.get_relation_ip("db-router") class CannotConnectToMySQL(Exception): + """Exception when attempting to connect to a MySQL server. + """ pass @@ -83,15 +121,165 @@ class MySQLInnoDBClusterCharm(charms_openstack.charm.OpenStackCharm): @property def mysqlsh_bin(self): + """Determine binary path for MySQL Shell. 
+ + :param self: Self + :type self: MySQLInnoDBClusterCharm instance + :returns: Path to binary mysqlsh + :rtype: str + """ + # The current upstream snap uses mysql-shell + # When we get the alias use /snap/bin/mysqlsh + # return "/snap/bin/mysqlsh" + return "/snap/mysql-shell/current/usr/bin/mysqlsh" + + @property + def mysql_password(self): + """Determine or set primary MySQL password. + + :param self: Self + :type self: MySQLInnoDBClusterCharm instance + :returns: MySQL password + :rtype: str + """ + return self._get_password("mysql.passwd") + + @property + def cluster_name(self): + """Determine the MySQL InnoDB Cluster name. + + :param self: Self + :type self: MySQLInnoDBClusterCharm instance + :returns: Cluster name + :rtype: str + """ + return self.options.cluster_name + + @property + def cluster_password(self): + """Determine or set password for the cluster user. + + :param self: Self + :type self: MySQLInnoDBClusterCharm instance + :returns: Cluster password + :rtype: str + """ + return self._get_password("cluster-password") + + @property + def cluster_address(self): + """Determine this unit's cluster address. + + Using the class method determine this unit's cluster address. + + :param self: Self + :type self: MySQLInnoDBClusterCharm instance + :returns: Address + :rtype: str + """ + return self.options.cluster_address + + @property + def cluster_user(self): + """Determine the cluster username. + + :param self: Self + :type self: MySQLInnoDBClusterCharm instance + :returns: Cluster username + :rtype: str + """ + return "clusteruser" + + @property + def shared_db_address(self): + """Determine this unit's Shared-DB address. + + Using the class method determine this unit's address for the Shared-DB + relation. + + :param self: Self + :type self: MySQLInnoDBClusterCharm instance + :returns: Address + :rtype: str + """ + return self.options.shared_db_address + + @property + def db_router_address(self): + """Determine this unit's DB-Router address. 
+ + Using the class method determine this unit's address for the DB-Router + relation. + + :param self: Self + :type self: MySQLInnoDBClusterCharm instance + :returns: Address + :rtype: str + """ + return self.options.db_router_address + + # TODO: Generalize and move to mysql charmhelpers + def _get_password(self, key): + """Retrieve named password. + + This function will ensure that a consistent named password + is used across all units in the InnoDB cluster. + + The lead unit will generate or use the mysql.passwd configuration + option to seed this value into the deployment. + + Once set, it cannot be changed. + + :param self: Self + :type self: MySQLInnoDBClusterCharm instance + :param key: Name of the password to retrieve + :type key: str + :returns: Named password or None if unable to retrieve at this point + in time + :rtype: str + """ + _password = ch_core.hookenv.leader_get(key) + if not _password and ch_core.hookenv.is_leader(): + _password = ch_core.hookenv.config(key) or ch_core.host.pwgen() + ch_core.hookenv.leader_set({key: _password}) + return _password + + # TODO: Generalize and move to mysql charmhelpers + def configure_mysql_password(self): + """Configure debconf with mysql password. + + Prior to installation set the root-password for the MySQL server + package(s). 
+ + :param self: Self + :type self: MySQLInnoDBClusterCharm instance + :side effect: Executes debconf + :returns: This function is called for its side effect + :rtype: None + """ + dconf = subprocess.Popen( + ['debconf-set-selections'], stdin=subprocess.PIPE) + # Set password options to cover packages + packages = ["mysql-server", "mysql-server-8.0"] + for package in packages: + dconf.stdin.write("{} {}/root_password password {}\n" + .format(package, package, self.mysql_password) + .encode("utf-8")) + dconf.stdin.write("{} {}/root_password_again password {}\n" + .format(package, package, self.mysql_password) + .encode("utf-8")) + dconf.communicate() + dconf.wait() + def install(self): """Custom install function. - """ + :param self: Self + :type self: MySQLInnoDBClusterCharm instance + :side effect: Executes other functions + :returns: This function is called for its side effect + :rtype: None + """ # Set mysql password in packaging before installation self.configure_mysql_password() @@ -103,7 +291,18 @@ class MySQLInnoDBClusterCharm(charms_openstack.charm.OpenStackCharm): # Render mysqld.cnf and cause a restart self.render_all_configs() + # TODO: Generalize and move to mysql charmhelpers def get_db_helper(self): + """Get an instance of the MySQLDB8Helper class. + + :param self: Self + :type self: MySQLInnoDBClusterCharm instance + :returns: Instance of MySQLDB8Helper class + :rtype: MySQLDB8Helper instance + """ + # NOTE: The template paths are an artifact of the original Helper code. + # Passwords are injected into leader settings. No passwords are written + # to disk by this class. return mysql.MySQL8Helper( rpasswdf_template="/var/lib/charm/{}/mysql.passwd" .format(ch_core.hookenv.service_name()), @@ -112,14 +311,33 @@ class MySQLInnoDBClusterCharm(charms_openstack.charm.OpenStackCharm): def create_cluster_user( self, cluster_address, cluster_user, cluster_password): + """Create cluster user and grant permissions in the MySQL DB. 
+ This user will be used by the leader for instance configuration and + initial cluster creation. + + The grants are specific to cluster creation and management as documented + upstream. + + :param self: Self + :type self: MySQLInnoDBClusterCharm instance + :param cluster_address: Cluster user's address + :type cluster_address: str + :param cluster_user: Cluster user's username + :type cluster_user: str + :param cluster_password: Cluster user's password + :type cluster_password: str + :side effect: Executes SQL to create DB user + :returns: This function is called for its side effect + :rtype: None + """ SQL_CLUSTER_USER_CREATE = ( "CREATE USER '{user}'@'{host}' " "IDENTIFIED BY '{password}'") SQL_CLUSTER_USER_GRANT = ( "GRANT {permissions} ON *.* " - "TO 'clusteruser'@'{host}'") + "TO '{user}'@'{host}'") addresses = [cluster_address] if cluster_address in self.cluster_address: 
by relation: {}" - .format(', '.join(hosts)), "DEBUG") - except ValueError: - ch_core.hookenv.log( - "Single hostname provided by relation: {}".format(hosts), - level="DEBUG") - hosts = [hosts] - - db_helper = self.get_db_helper() - - for host in hosts: - password = db_helper.configure_router(host, username) - - return password - - def _get_password(self, key): - """Retrieve named password - - This function will ensure that a consistent named password - is used across all units in the InnoDB cluster; the lead unit - will generate or use the mysql.passwd configuration option - to seed this value into the deployment. - - Once set, it cannot be changed. - - @requires: str: named password or None if unable to retrieve - at this point in time - """ - _password = ch_core.hookenv.leader_get(key) - if not _password and ch_core.hookenv.is_leader(): - _password = ch_core.hookenv.config(key) or ch_core.host.pwgen() - ch_core.hookenv.leader_set({key: _password}) - return _password - - @property - def mysql_password(self): - return self._get_password("mysql.passwd") - - @property - def cluster_password(self): - return self._get_password("cluster-password") - - @property - def cluster_address(self): - return self.options.cluster_address - - @property - def cluster_user(self): - return "clusteruser" - - @property - def shared_db_address(self): - return self.options.shared_db_address - - @property - def db_router_address(self): - return self.options.db_router_address - def configure_instance(self, address): + """Configure MySQL instance for clustering. 
+ :param self: Self + :type self: MySQLInnoDBClusterCharm instance + :param address: Address of the MySQL instance to be configured + :type address: str + :side effect: Executes MySQL Shell script to configure the instance for + clustering + :returns: This function is called for its side effect + :rtype: None + """ if reactive.is_flag_set( "leadership.set.cluster-instance-configured-{}" .format(address)): @@ -282,12 +423,18 @@ class MySQLInnoDBClusterCharm(charms_openstack.charm.OpenStackCharm): leadership.leader_set({"cluster-instance-configured-{}" .format(address): True}) - @property - def cluster_name(self): - return self.options.cluster_name - def create_cluster(self): + """Create the MySQL InnoDB cluster. + Creates the MySQL InnoDB cluster using self.cluster_name. + + :param self: Self + :type self: MySQLInnoDBClusterCharm instance + :side effect: Executes MySQL Shell script to create the MySQL InnoDB + Cluster + :returns: This function is called for its side effect + :rtype: None + """ if reactive.is_flag_set("leadership.set.cluster-created"): ch_core.hookenv.log("Cluster: {}, already created" .format(self.options.cluster_name), "WARNING") @@ -305,7 +452,6 @@ class MySQLInnoDBClusterCharm(charms_openstack.charm.OpenStackCharm): shell.connect("{}:{}@{}") var cluster = dba.createCluster("{}"); """ - ch_core.hookenv.log("Creating cluster: {}." .format(self.options.cluster_name), "INFO") with tempfile.NamedTemporaryFile(mode="w", suffix=".js") as _script: @@ -333,7 +479,17 @@ class MySQLInnoDBClusterCharm(charms_openstack.charm.OpenStackCharm): leadership.leader_set({"cluster-created": str(uuid.uuid4())}) def add_instance_to_cluster(self, address): + """Add MySQL instance to the cluster. 
+ :param self: Self + :type self: MySQLInnoDBClusterCharm instance + :param address: Address of the MySQL instance to be configured + :type address: str + :side effect: Executes MySQL Shell script to add the MySQL instance to + the cluster + :returns: This function is called for its side effect + :rtype: None + """ if reactive.is_flag_set( "leadership.set.cluster-instance-clustered-{}" .format(address)): @@ -374,107 +530,24 @@ class MySQLInnoDBClusterCharm(charms_openstack.charm.OpenStackCharm): leadership.leader_set({"cluster-instance-clustered-{}" .format(address): True}) - def states_to_check(self, required_relations=None): - """Custom state check function for charm specific state check needs. - - """ - states_to_check = super().states_to_check(required_relations) - states_to_check["charm"] = [ - ("charm.installed", - "waiting", - "MySQL not installed"), - ("leadership.set.cluster-instance-configured-{}" - .format(self.cluster_address), - "waiting", - "Instance not yet configured for clustering"), - ("leadership.set.cluster-created", - "waiting", - "Cluster {} not yet created by leader" - .format(self.cluster_name)), - ("leadership.set.cluster-instances-configured", - "waiting", - "Not all instances configured for clustering"), - ("leadership.set.cluster-instance-clustered-{}" - .format(self.cluster_address), - "waiting", - "Instance not yet in the cluster"), - ("leadership.set.cluster-instances-clustered", - "waiting", - "Not all instances clustered")] - - return states_to_check - - def check_mysql_connection( - self, username=None, password=None, address=None): - """Check if local instance of mysql is accessible. - - Attempt a connection to the local instance of mysql to determine if it - is running and accessible. - - :param password: Password to use for connection test. - :type password: str - :side effect: Uses get_db_helper to execute a connection to the DB. 
- :returns: boolean - """ - address = address or "localhost" - password = password or self.mysql_password - username = username or "root" - - m_helper = self.get_db_helper() - try: - m_helper.connect(user=username, password=password, host=address) - return True - except mysql.MySQLdb._exceptions.OperationalError: - ch_core.hookenv.log("Could not connect to db", "DEBUG") - return False - - @tenacity.retry(wait=tenacity.wait_fixed(10), - reraise=True, - stop=tenacity.stop_after_delay(5)) - def _wait_until_connectable( - self, username=None, password=None, address=None): - - if not self.check_mysql_connection( - username=username, password=password, address=address): - raise CannotConnectToMySQL("Unable to connect to MySQL") - - def custom_assess_status_check(self): - - # Start with default checks - for f in [self.check_if_paused, - self.check_interfaces, - self.check_mandatory_config]: - state, message = f() - if state is not None: - ch_core.hookenv.status_set(state, message) - return state, message - - # We should not get here until there is a connection to the - # cluster - if not self.check_mysql_connection(): - return "blocked", "MySQL is down" - - return None, None - - # TODO: move to mysql charmhelper - def configure_mysql_password(self): - """ Configure debconf with mysql password """ - dconf = subprocess.Popen( - ['debconf-set-selections'], stdin=subprocess.PIPE) - # Set password options to cover packages - packages = ["mysql-server", "mysql-server-8.0"] - for package in packages: - dconf.stdin.write("{} {}/root_password password {}\n" - .format(package, package, self.mysql_password) - .encode("utf-8")) - dconf.stdin.write("{} {}/root_password_again password {}\n" - .format(package, package, self.mysql_password) - .encode("utf-8")) - dconf.communicate() - dconf.wait() - - # TODO: move to mysql charmhelper + # TODO: Generalize and move to mysql charmhelpers def get_allowed_units(self, database, username, relation_id): + """Get Allowed Units. 
+ + Call MySQL8Helper.get_allowed_units and return space delimited list of + allowed unit names. + + :param self: Self + :type self: MySQLInnoDBClusterCharm instance + :param database: Database name + :type database: str + :param username: Username + :type username: str + :param relation_id: Relation ID + :type relation_id: str + :returns: Space delimited list of unit names + :rtype: str + """ db_helper = self.get_db_helper() allowed_units = db_helper.get_allowed_units( database, username, relation_id=relation_id) @@ -483,38 +556,18 @@ class MySQLInnoDBClusterCharm(charms_openstack.charm.OpenStackCharm): allowed_units = ' '.join(allowed_units) return allowed_units - # TODO: move to mysql charmhelper - def resolve_hostname_to_ip(self, hostname): - """Resolve hostname to IP - - @param hostname: hostname to be resolved - @returns IP address or None if resolution was not possible via DNS - """ - import dns.resolver - - if self.options.prefer_ipv6: - if ch_net_ip.is_ipv6(hostname): - return hostname - - query_type = 'AAAA' - elif ch_net_ip.is_ip(hostname): - return hostname - else: - query_type = 'A' - - # This may throw an NXDOMAIN exception; in which case - # things are badly broken so just let it kill the hook - answers = dns.resolver.query(hostname, query_type) - if answers: - return answers[0].address - def create_databases_and_users(self, interface): - """Create databases and users + """Create databases and users. - :param interface: Relation data - :type interface: reative.relations.Endpoint object - :side effect: interface.set_db_connection_info is exectuted - :returns: None + Take an Endpoint interface and create databases and users based on the + requests on the relation. 
+ + :param self: Self + :type self: MySQLInnoDBClusterCharm instance + :param interface: Interface Object (shared-db or db-router) + :type interface: reactive.relations.Endpoint object + :side effect: interface.set_db_connection_info is executed + :returns: This function is called for its side effect :rtype: None """ for unit in interface.all_joined_units: @@ -556,3 +609,212 @@ class MySQLInnoDBClusterCharm(charms_openstack.charm.OpenStackCharm): db_host, password, allowed_units=allowed_units, prefix=prefix) + + # TODO: Generalize and move to mysql charmhelpers + def configure_db_for_hosts(self, hosts, database, username): + """Configure database for user at host(s). + + Create and configure database and user with full access permissions + from host(s). + + :param self: Self + :type self: MySQLInnoDBClusterCharm instance + :param hosts: Hosts may be a json-encoded list of hosts or a single + hostname. + :type hosts: Union[str, Json list] + :param database: Database name + :type database: str + :param username: Username + :type username: str + :side effect: Calls MySQL8Helper.configure_db + :returns: Password for the DB user + :rtype: str + """ + if not all([hosts, database, username]): + ch_core.hookenv.log("Remote data incomplete.", "WARNING") + return + try: + hosts = json.loads(hosts) + ch_core.hookenv.log("Multiple hostnames provided by relation: {}" + .format(', '.join(hosts)), "DEBUG") + except ValueError: + ch_core.hookenv.log( + "Single hostname provided by relation: {}".format(hosts), + level="DEBUG") + hosts = [hosts] + + db_helper = self.get_db_helper() + + for host in hosts: + password = db_helper.configure_db(host, database, username) + + return password + + def configure_db_router(self, hosts, username): + """Configure database for MySQL Router user at host(s). + + Create and configure MySQL Router user with mysql router specific + permissions from host(s). 
+ + :param self: Self + :type self: MySQLInnoDBClusterCharm instance + :param hosts: Hosts may be a json-encoded list of hosts or a single + hostname. + :type hosts: Union[str, Json list] + :param username: Username + :type username: str + :side effect: Calls MySQL8Helper.configure_router + :returns: Password for the DB user + :rtype: str + """ + if not all([hosts, username]): + ch_core.hookenv.log("Remote data incomplete.", "WARNING") + return + try: + hosts = json.loads(hosts) + ch_core.hookenv.log("Multiple hostnames provided by relation: {}" + .format(', '.join(hosts)), "DEBUG") + except ValueError: + ch_core.hookenv.log( + "Single hostname provided by relation: {}".format(hosts), + level="DEBUG") + hosts = [hosts] + + db_helper = self.get_db_helper() + + for host in hosts: + password = db_helper.configure_router(host, username) + + return password + + def states_to_check(self, required_relations=None): + """Custom states to check function. + + Construct a custom set of connected and available states for each + of the relations passed, along with error messages and new status + conditions. 
+ + :param self: Self + :type self: MySQLInnoDBClusterCharm instance + :param required_relations: List of relations which overrides + self.relations + :type required_relations: list of strings + :returns: {relation: [(state, err_status, err_msg), (...),]} + :rtype: dict + """ + states_to_check = super().states_to_check(required_relations) + states_to_check["charm"] = [ + ("charm.installed", + "waiting", + "MySQL not installed"), + ("leadership.set.cluster-instance-configured-{}" + .format(self.cluster_address), + "waiting", + "Instance not yet configured for clustering"), + ("leadership.set.cluster-created", + "waiting", + "Cluster {} not yet created by leader" + .format(self.cluster_name)), + ("leadership.set.cluster-instances-configured", + "waiting", + "Not all instances configured for clustering"), + ("leadership.set.cluster-instance-clustered-{}" + .format(self.cluster_address), + "waiting", + "Instance not yet in the cluster"), + ("leadership.set.cluster-instances-clustered", + "waiting", + "Not all instances clustered")] + + return states_to_check + + def custom_assess_status_check(self): + """Custom assess status check. + + Custom assess status check that validates connectivity to this unit's + MySQL instance. + + Returns tuple of (state, message), if there is a problem to report to + status output, or (None, None) if all is well. + + :param self: Self + :type self: MySQLInnoDBClusterCharm instance + :returns: Either (state, message) or (None, None) + :rtype: Union[tuple(str, str), tuple(None, None)] + """ + # Start with default checks + for f in [self.check_if_paused, + self.check_interfaces, + self.check_mandatory_config]: + state, message = f() + if state is not None: + ch_core.hookenv.status_set(state, message) + return state, message + + # We should not get here until there is a connection to the + # cluster available. 
+ if not self.check_mysql_connection(): + return "blocked", "MySQL is down" + + return None, None + + def check_mysql_connection( + self, username=None, password=None, address=None): + """Check if an instance of MySQL is accessible. + + Attempt a connection to the given instance of mysql to determine if it + is running and accessible. + + :param username: Username + :type username: str + :param password: Password to use for connection test. + :type password: str + :param address: Address of the MySQL instance to connect to + :type address: str + :side effect: Uses get_db_helper to execute a connection to the DB. + :returns: True if connection succeeds or False if not + :rtype: boolean + """ + address = address or "localhost" + password = password or self.mysql_password + username = username or "root" + + m_helper = self.get_db_helper() + try: + m_helper.connect(user=username, password=password, host=address) + return True + except mysql.MySQLdb._exceptions.OperationalError: + ch_core.hookenv.log("Could not connect to {}@{}" + .format(username, address), "DEBUG") + return False + + @tenacity.retry(wait=tenacity.wait_fixed(10), + reraise=True, + stop=tenacity.stop_after_delay(5)) + def _wait_until_connectable( + self, username=None, password=None, address=None): + """Wait until MySQL instance is accessible. + + Attempt a connection to the given instance of mysql, retry on failure + using tenacity until successful or number of retries reached. + + This is useful for waiting when the MySQL instance may be restarting. + + Warning: Use sparingly. This function asserts connectivity and raises + CannotConnectToMySQL if it is unsuccessful on all retries. + + :param username: Username + :type username: str + :param password: Password to use for connection test. 
+ :type password: str + :param address: Address of the MySQL instance to connect to + :type address: str + :side effect: Calls self.check_mysql_connection + :raises CannotConnectToMySQL: Raises CannotConnectToMySQL if number of + retries exceeded. + :returns: This function is called for its side effect + :rtype: None + """ + if not self.check_mysql_connection( + username=username, password=password, address=address): + raise CannotConnectToMySQL("Unable to connect to MySQL") diff --git a/src/reactive/__init__.py b/src/reactive/__init__.py new file mode 100644 index 0000000..5705e5d --- /dev/null +++ b/src/reactive/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2019 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/src/reactive/mysql_innodb_cluster_handlers.py b/src/reactive/mysql_innodb_cluster_handlers.py index e3153fd..096f247 100644 --- a/src/reactive/mysql_innodb_cluster_handlers.py +++ b/src/reactive/mysql_innodb_cluster_handlers.py @@ -18,17 +18,14 @@ charm.use_defaults( 'certificates.available') -@reactive.when_not('cluster-instances-clustered') -def debug(): - print("DEBUG") - for flag in reactive.flags.get_flags(): - print(flag) - - @reactive.when('leadership.is_leader') @reactive.when('snap.installed.mysql-shell') @reactive.when_not('charm.installed') def leader_install(): + """Leader install. + + Set passwords and install MySQL packages. 
+ """ with charm.provide_charm_instance() as instance: instance.install() reactive.set_flag("charm.installed") @@ -39,16 +36,21 @@ def non_leader_install(): + """Non-leader install. + + Wait until the leader node has set passwords before installing the MySQL + packages. + """ # Wait for leader to set mysql.passwd - with charm.provide_charm_instance() as instance: - instance.install() - reactive.set_flag("charm.installed") - instance.assess_status() + with charm.provide_charm_instance() as instance: + instance.install() + reactive.set_flag("charm.installed") + instance.assess_status() @reactive.when('charm.installed') @reactive.when_not('local.cluster.user-created') def create_local_cluster_user(): + """Create local cluster user in the DB. + """ ch_core.hookenv.log("Creating local cluster user.", "DEBUG") with charm.provide_charm_instance() as instance: instance.create_cluster_user( @@ -63,6 +65,14 @@ @reactive.when('cluster.connected') @reactive.when_not('cluster.available') def send_cluster_connection_info(cluster): + """Send cluster connection information. + + Send cluster user, password and address information over the cluster + relation on how to connect to this unit. + + :param cluster: Cluster interface + :type cluster: MySQLInnoDBClusterPeers object + """ ch_core.hookenv.log("Send cluster connection information.", "DEBUG") with charm.provide_charm_instance() as instance: cluster.set_cluster_connection_info( @@ -75,6 +85,14 @@ @reactive.when_not('local.cluster.all-users-created') @reactive.when('cluster.available') def create_remote_cluster_user(cluster): + """Create remote cluster user. + + Create the remote cluster peer user and grant cluster permissions in the + MySQL DB. 
+ + :param cluster: Cluster interface + :type cluster: MySQLInnoDBClusterPeers object + """ ch_core.hookenv.log("Creating remote users.", "DEBUG") with charm.provide_charm_instance() as instance: for unit in cluster.all_joined_units: @@ -93,6 +111,10 @@ def create_remote_cluster_user(cluster): @reactive.when('local.cluster.user-created') @reactive.when_not('leadership.set.cluster-created') def initialize_cluster(): + """Initialize the cluster. + + Create the InnoDB cluster. + """ ch_core.hookenv.log("Initializing InnoDB cluster.", "DEBUG") with charm.provide_charm_instance() as instance: instance.configure_instance(instance.cluster_address) @@ -106,6 +128,13 @@ def initialize_cluster(): @reactive.when('cluster.available') @reactive.when_not('leadership.set.cluster-instances-configured') def configure_instances_for_clustering(cluster): + """Configure cluster peers for clustering. + + Prepare peers to be added to the cluster. + + :param cluster: Cluster interface + :type cluster: MySQLInnoDBClusterPeers object + """ ch_core.hookenv.log("Configuring instances for clustering.", "DEBUG") with charm.provide_charm_instance() as instance: for unit in cluster.all_joined_units: @@ -132,6 +161,11 @@ def configure_instances_for_clustering(cluster): @reactive.when('cluster.available') @reactive.when_not('leadership.set.cluster-instances-clustered') def add_instances_to_cluster(cluster): + """Add cluster peers to the cluster. + + :param cluster: Cluster interface + :type cluster: MySQLInnoDBClusterPeers object + """ ch_core.hookenv.log("Adding instances to cluster.", "DEBUG") with charm.provide_charm_instance() as instance: for unit in cluster.all_joined_units: @@ -154,6 +188,13 @@ def add_instances_to_cluster(cluster): @reactive.when('leadership.set.cluster-created') @reactive.when('cluster.available') def signal_clustered(cluster): + """Signal unit clustered to peers. + + Set this unit clustered on the cluster peer relation. 
+    :type db_router: MySQLRouterRequires object
License for the specific language governing permissions and # limitations under the License. +import mock import os import sys -# Mock out charmhelpers so that we can test without it. -import charms_openstack.test_mocks # noqa -charms_openstack.test_mocks.mock_charmhelpers() - _path = os.path.dirname(os.path.realpath(__file__)) -_src = os.path.abspath(os.path.join(_path, 'src')) -_lib = os.path.abspath(os.path.join(_path, 'src/lib')) +_src = os.path.abspath(os.path.join(_path, "../src")) +_lib = os.path.abspath(os.path.join(_path, "../src/lib")) +_reactive = os.path.abspath(os.path.join(_path, "../src/reactive")) def _add_path(path): @@ -30,3 +28,25 @@ def _add_path(path): _add_path(_src) _add_path(_lib) +_add_path(_reactive) + +# Mock out charmhelpers so that we can test without it. +import charms_openstack.test_mocks # noqa +charms_openstack.test_mocks.mock_charmhelpers() + +# Mock out charm dependencies +import charms +charms.leadership = mock.MagicMock() +sys.modules['charms.leadership'] = charms.leadership + +charmhelpers = mock.MagicMock() +charmhelpers.contrib.database = mock.MagicMock() +charmhelpers.contrib.database.mysql = mock.MagicMock() +sys.modules['charmhelpers.contrib.database'] = charmhelpers.contrib.database +sys.modules['charmhelpers.contrib.database.mysql'] = ( + charmhelpers.contrib.database.mysql) + +# Tenacity decorators need to be mocked before import +tenacity = mock.MagicMock() +tenacity.retry.side_effect = lambda *args, **kwargs: lambda x: x +sys.modules['tenacity'] = tenacity diff --git a/unit_tests/test_lib_charm_openstack_mysql_innodb_cluster.py b/unit_tests/test_lib_charm_openstack_mysql_innodb_cluster.py new file mode 100644 index 0000000..f29d8ad --- /dev/null +++ b/unit_tests/test_lib_charm_openstack_mysql_innodb_cluster.py @@ -0,0 +1,864 @@ +# Copyright 2019 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import copy +import collections +import mock + +import charms_openstack.test_utils as test_utils + +import charm.mysql_innodb_cluster as mysql_innodb_cluster + + +class TestMySQLInnoDBClusterProperties(test_utils.PatchHelper): + + def setUp(self): + super().setUp() + self.cls = mock.MagicMock() + self.patch_object(mysql_innodb_cluster.ch_core.hookenv, "local_unit") + self.patch_object(mysql_innodb_cluster.ch_net_ip, "get_relation_ip") + + def test_server_id(self): + self.local_unit.return_value = "unit/5" + self.assertEqual(mysql_innodb_cluster.server_id(self.cls), "1005") + + def test_cluster_address(self): + _addr = "10.10.10.10" + self.get_relation_ip.return_value = _addr + self.assertEqual(mysql_innodb_cluster.cluster_address(self.cls), _addr) + self.get_relation_ip.assert_called_once_with("cluster") + + def test_shared_db_address(self): + _addr = "10.10.10.20" + self.get_relation_ip.return_value = _addr + self.assertEqual( + mysql_innodb_cluster.shared_db_address(self.cls), _addr) + self.get_relation_ip.assert_called_once_with("shared-db") + + def test_db_router_address(self): + _addr = "10.10.10.30" + self.get_relation_ip.return_value = _addr + self.assertEqual( + mysql_innodb_cluster.db_router_address(self.cls), _addr) + self.get_relation_ip.assert_called_once_with("db-router") + + +class TestMySQLInnoDBClusterCharm(test_utils.PatchHelper): + + def setUp(self): + super().setUp() + self.patch_object(mysql_innodb_cluster, "subprocess") + self.patch_object(mysql_innodb_cluster.uuid, "uuid4") + self.uuid_of_cluster = "uuid-of-cluster" + 
self.uuid4.return_value = self.uuid_of_cluster + self.patch_object(mysql_innodb_cluster.reactive, "is_flag_set") + self.patch_object(mysql_innodb_cluster.reactive, "set_flag") + self.patch_object(mysql_innodb_cluster.ch_net_ip, "get_relation_ip") + self.patch_object(mysql_innodb_cluster.ch_core.host, "pwgen") + self.patch_object(mysql_innodb_cluster.ch_core.hookenv, "is_leader") + self.patch_object(mysql_innodb_cluster.leadership, "leader_set") + self.patch_object(mysql_innodb_cluster.ch_core.hookenv, "leader_get") + self.patch_object(mysql_innodb_cluster.ch_core.hookenv, "config") + self.leader_get.side_effect = self._fake_leader_data + self.config.side_effect = self._fake_config_data + self.leader_data = {} + self.config_data = {} + self.data = {} + self.stdin = mock.MagicMock() + self.filename = "script.js" + self.file = mock.MagicMock() + self.file.name = self.filename + self.ntf = mock.MagicMock() + self.ntf.__enter__.return_value = self.file + self.ntf.__enter__.name.return_value = self.filename + self.wait_until = mock.MagicMock() + self.patch_object(mysql_innodb_cluster.tempfile, "NamedTemporaryFile") + self.NamedTemporaryFile.return_value = self.ntf + self.subprocess.STDOUT = self.stdin + + # Complex setup for create_databases_and_users tests + # mimics a reactive env + self.mock_unprefixed = "UNPREFIXED" + self.keystone_shared_db = mock.MagicMock() + self.keystone_shared_db.relation_id = "shared-db:5" + self.nova_shared_db = mock.MagicMock() + self.nova_shared_db.relation_id = "shared-db:20" + self.kmr_db_router = mock.MagicMock() + self.kmr_db_router.relation_id = "db-router:7" + self.nmr_db_router = mock.MagicMock() + self.nmr_db_router.relation_id = "db-router:10" + # Keystone shared-db + self.keystone_unit5_name = "keystone/5" + self.keystone_unit5_ip = "10.10.10.50" + self.keystone_unit5 = mock.MagicMock() + self.keystone_unit5.received = { + "database": "keystone", "username": "keystone", + "hostname": self.keystone_unit5_ip} + 
self.keystone_unit5.unit_name = self.keystone_unit5_name + self.keystone_unit5.relation = self.keystone_shared_db + self.keystone_unit7_name = "keystone/7" + self.keystone_unit7_ip = "10.10.10.70" + self.keystone_unit7 = mock.MagicMock() + self.keystone_unit7.received = { + "database": "keystone", "username": "keystone", + "hostname": self.keystone_unit7_ip} + self.keystone_unit7.unit_name = self.keystone_unit7_name + self.keystone_unit7.relation = self.keystone_shared_db + self.keystone_shared_db.joined_units = [ + self.keystone_unit5, self.keystone_unit7] + # Nova shared-db + self.nova_unit5_name = "nova/5" + self.nova_unit5_ip = "10.20.20.50" + self.nova_unit5 = mock.MagicMock() + self.nova_unit5.unit_name = self.nova_unit5_name + self.nova_unit5.relation = self.nova_shared_db + self.nova_unit5.received = { + "nova_database": "nova", "nova_username": "nova", + "nova_hostname": self.nova_unit5_ip, + "novaapi_database": "nova_api", "novaapi_username": "nova", + "novaapi_hostname": self.nova_unit5_ip, + "novacell0_database": "nova_cell0", "novacell0_username": "nova", + "novacell0_hostname": self.nova_unit5_ip} + self.nova_unit7_name = "nova/7" + self.nova_unit7_ip = "10.20.20.70" + self.nova_unit7 = mock.MagicMock() + self.nova_unit7.unit_name = self.nova_unit7_name + self.nova_unit7.received = { + "nova_database": "nova", "nova_username": "nova", + "nova_hostname": self.nova_unit7_ip, + "novaapi_database": "nova_api", "novaapi_username": "nova", + "novaapi_hostname": self.nova_unit7_ip, + "novacell0_database": "nova_cell0", "novacell0_username": "nova", + "novacell0_hostname": self.nova_unit7_ip} + self.nova_unit7.relation = self.nova_shared_db + self.nova_shared_db.joined_units = [self.nova_unit5, self.nova_unit7] + # Keystone db-router + self.kmr_unit5_name = "kmr/5" + self.kmr_unit5_ip = "10.30.30.50" + self.kmr_unit5 = mock.MagicMock() + self.kmr_unit5.unit_name = self.kmr_unit5_name + self.kmr_unit5.relation = self.kmr_db_router + self.kmr_unit5.received = { 
+ "{}_database".format(self.mock_unprefixed): "keystone", + "{}_username".format(self.mock_unprefixed): "keystone", + "{}_hostname".format(self.mock_unprefixed): self.kmr_unit5_ip, + "mysqlrouter_username": "mysqlrouteruser", + "mysqlrouter_hostname": self.kmr_unit5_ip} + self.kmr_unit7_name = "kmr/7" + self.kmr_unit7_ip = "10.30.30.70" + self.kmr_unit7 = mock.MagicMock() + self.kmr_unit7.unit_name = self.kmr_unit7_name + self.kmr_unit7.relation = self.kmr_db_router + self.kmr_db_router.joined_units = [self.kmr_unit5, self.kmr_unit7] + self.kmr_unit7.received = { + "{}_database".format(self.mock_unprefixed): "keystone", + "{}_username".format(self.mock_unprefixed): "keystone", + "{}_hostname".format(self.mock_unprefixed): self.kmr_unit7_ip, + "mysqlrouter_username": "mysqlrouteruser", + "mysqlrouter_hostname": self.kmr_unit7_ip} + # Nova Router db-router + self.nmr_unit5_name = "nmr/5" + self.nmr_unit5_ip = "10.40.40.50" + self.nmr_unit5 = mock.MagicMock() + self.nmr_unit5.unit_name = self.nmr_unit5_name + self.nmr_unit5.relation = self.nmr_db_router + self.nmr_unit5.received = { + "nova_database": "nova", "nova_username": "nova", + "nova_hostname": self.nmr_unit5_ip, + "novaapi_database": "nova_api", "novaapi_username": "nova", + "novaapi_hostname": self.nmr_unit5_ip, + "novacell0_database": "nova_cell0", + "novacell0_username": "nova", + "novacell0_hostname": self.nmr_unit5_ip, + "mysqlrouter_username": "mysqlrouteruser", + "mysqlrouter_hostname": self.nmr_unit5_ip} + self.nmr_unit7_name = "nmr/7" + self.nmr_unit7_ip = "10.40.40.70" + self.nmr_unit7 = mock.MagicMock() + self.nmr_unit7.unit_name = self.nmr_unit7_name + self.nmr_unit7.relation = self.nmr_db_router + self.nmr_db_router.joined_units = [self.nmr_unit5, self.nmr_unit7] + self.nmr_unit7.received = { + "nova_database": "nova", "nova_username": "nova", + "nova_hostname": self.nmr_unit7_ip, + "novaapi_database": "nova_api", "novaapi_username": "nova", + "novaapi_hostname": self.nmr_unit7_ip, + 
"novacell0_database": "nova_cell0", + "novacell0_username": "nova", + "novacell0_hostname": self.nmr_unit7_ip, + "mysqlrouter_username": "mysqlrouteruser", + "mysqlrouter_hostname": self.nmr_unit7_ip} + + # Generic interface + self.interface = mock.MagicMock() + + def _fake_leader_data(self, key): + return self.leader_data.get(key) + + def _fake_config_data(self, key=None): + if key is None: + return {} + return self.config_data.get(key) + + def _fake_data(self, key): + return self.data.get(key) + + def _fake_configure(self, *args, **kwargs): + # For use mocking configure_db_router and configure_db_for_hosts + # Return the same password for the same username + if len(args) == 3: + # configure_db_for_hosts + return "{}-pwd".format(args[2]) + elif len(args) == 2: + # configure_db_router + return "{}-pwd".format(args[1]) + + def _fake_get_allowed_units(self, *args, **kwargs): + return " ".join( + [x.unit_name for x in + self.interface.relations[args[2]].joined_units]) + + def _fake_get_db_data(self, relation_data, unprefixed=None): + # This "fake" get_db_data looks a lot like the real thing. + # Charmhelpers is mocked out entirely and attempting to + # mock the output made the test setup more difficult. 
+ settings = copy.deepcopy(relation_data) + databases = collections.OrderedDict() + + singleset = {"database", "username", "hostname"} + if singleset.issubset(settings): + settings["{}_{}".format(unprefixed, "hostname")] = ( + settings["hostname"]) + settings.pop("hostname") + settings["{}_{}".format(unprefixed, "database")] = ( + settings["database"]) + settings.pop("database") + settings["{}_{}".format(unprefixed, "username")] = ( + settings["username"]) + settings.pop("username") + + for k, v in settings.items(): + db = k.split("_")[0] + x = "_".join(k.split("_")[1:]) + if db not in databases: + databases[db] = collections.OrderedDict() + databases[db][x] = v + + return databases + + def test_mysqlsh_bin(self): + midbc = mysql_innodb_cluster.MySQLInnoDBClusterCharm() + self.assertEqual( + midbc.mysqlsh_bin, + "/snap/mysql-shell/current/usr/bin/mysqlsh") + + def test_mysql_password(self): + midbc = mysql_innodb_cluster.MySQLInnoDBClusterCharm() + midbc._get_password = mock.MagicMock() + midbc._get_password.side_effect = self._fake_data + _pass = "pass123" + self.data = {"mysql.passwd": _pass} + self.assertEqual( + midbc.mysql_password, + _pass) + + def test_cluster_name(self): + midbc = mysql_innodb_cluster.MySQLInnoDBClusterCharm() + _name = "jujuCluster" + midbc.options.cluster_name = _name + self.assertEqual( + midbc.cluster_name, + _name) + + def test_cluster_password(self): + midbc = mysql_innodb_cluster.MySQLInnoDBClusterCharm() + midbc._get_password = mock.MagicMock() + midbc._get_password.side_effect = self._fake_data + _pass = "pass321" + self.data = {"cluster-password": _pass} + self.assertEqual( + midbc.cluster_password, + _pass) + + def test_cluster_address(self): + midbc = mysql_innodb_cluster.MySQLInnoDBClusterCharm() + _addr = "10.10.10.50" + self.get_relation_ip.return_value = _addr + self.assertEqual( + midbc.cluster_address, + _addr) + + def test_cluster_user(self): + midbc = mysql_innodb_cluster.MySQLInnoDBClusterCharm() + self.assertEqual( + 
midbc.cluster_user, + "clusteruser") + + def test_shared_db_address(self): + midbc = mysql_innodb_cluster.MySQLInnoDBClusterCharm() + _addr = "10.10.10.60" + self.get_relation_ip.return_value = _addr + self.assertEqual( + midbc.shared_db_address, + _addr) + + def test_db_router_address(self): + midbc = mysql_innodb_cluster.MySQLInnoDBClusterCharm() + _addr = "10.10.10.70" + self.get_relation_ip.return_value = _addr + self.assertEqual( + midbc.db_router_address, + _addr) + + def test__get_password(self): + midbc = mysql_innodb_cluster.MySQLInnoDBClusterCharm() + # Pwgen + _pwgenpass = "pwgenpass" + self.pwgen.return_value = _pwgenpass + self.assertEqual( + midbc._get_password("pwgenpw"), + _pwgenpass) + # Config + _configpass = "configpass" + self.config_data = {"configpw": _configpass} + self.assertEqual( + midbc._get_password("configpw"), + _configpass) + # Leader settings + _leaderpass = "leaderpass" + self.leader_data = {"leaderpw": _leaderpass} + self.assertEqual( + midbc._get_password("leaderpw"), + _leaderpass) + + def test_configure_mysql_password(self): + _pass = "mysql-pass" + self.data = {"mysql.passwd": _pass} + _debconf = mock.MagicMock() + self.subprocess.Popen.return_value = _debconf + midbc = mysql_innodb_cluster.MySQLInnoDBClusterCharm() + midbc._get_password = mock.MagicMock() + midbc._get_password.side_effect = self._fake_data + midbc.configure_mysql_password() + _calls = [] + for package in ["mysql-server", "mysql-server-8.0"]: + _calls.append( + mock.call("{} {}/root_password password {}\n" + .format(package, package, _pass).encode("UTF-8"))) + _calls.append( + mock.call("{} {}/root_password_again password {}\n" + .format(package, package, _pass).encode("UTF-8"))) + _debconf.stdin.write.assert_has_calls(_calls, any_order=True) + + def test_install(self): + self.patch_object( + mysql_innodb_cluster.charms_openstack.charm.OpenStackCharm, + "install", "super_install") + midbc = mysql_innodb_cluster.MySQLInnoDBClusterCharm() + 
midbc.configure_mysql_password = mock.MagicMock() + midbc.configure_source = mock.MagicMock() + midbc.render_all_configs = mock.MagicMock() + midbc.install() + self.super_install.assert_called_once() + midbc.configure_mysql_password.assert_called_once() + midbc.configure_source.assert_called_once() + midbc.render_all_configs.assert_called_once() + + def test_get_db_helper(self): + _helper = mock.MagicMock() + self.patch_object( + mysql_innodb_cluster.mysql, "MySQL8Helper") + midbc = mysql_innodb_cluster.MySQLInnoDBClusterCharm() + self.MySQL8Helper.return_value = _helper + self.assertEqual(_helper, midbc.get_db_helper()) + self.MySQL8Helper.assert_called_once() + + def test_create_cluster_user(self): + _user = "user" + _pass = "pass" + _addr = "10.10.20.20" + _helper = mock.MagicMock() + midbc = mysql_innodb_cluster.MySQLInnoDBClusterCharm() + midbc.get_db_helper = mock.MagicMock() + midbc.get_db_helper.return_value = _helper + # Non-local + midbc.create_cluster_user(_addr, _user, _pass) + _calls = [ + mock.call("CREATE USER '{}'@'{}' IDENTIFIED BY '{}'" + .format(_user, _addr, _pass)), + mock.call("GRANT ALL PRIVILEGES ON *.* TO '{}'@'{}'" + .format(_user, _addr)), + mock.call("GRANT GRANT OPTION ON *.* TO '{}'@'{}'" + .format(_user, _addr)), + mock.call("flush privileges")] + _helper.execute.assert_has_calls(_calls) + + # Local + _localhost = "localhost" + _helper.reset_mock() + self.get_relation_ip.return_value = _addr + midbc.create_cluster_user(_addr, _user, _pass) + _calls = [ + mock.call("CREATE USER '{}'@'{}' IDENTIFIED BY '{}'" + .format(_user, _addr, _pass)), + mock.call("GRANT ALL PRIVILEGES ON *.* TO '{}'@'{}'" + .format(_user, _addr)), + mock.call("GRANT GRANT OPTION ON *.* TO '{}'@'{}'" + .format(_user, _addr)), + mock.call('flush privileges'), + mock.call("CREATE USER '{}'@'{}' IDENTIFIED BY '{}'" + .format(_user, _localhost, _pass)), + mock.call("GRANT ALL PRIVILEGES ON *.* TO '{}'@'{}'" + .format(_user, _localhost)), + mock.call("GRANT GRANT OPTION 
ON *.* TO '{}'@'{}'" + .format(_user, _localhost)), + mock.call("flush privileges")] + _helper.execute.assert_has_calls(_calls) + + def test_configure_instance(self): + _pass = "clusterpass" + _addr = "10.10.30.30" + self.data = {"cluster-password": _pass} + self.is_flag_set.return_value = False + + midbc = mysql_innodb_cluster.MySQLInnoDBClusterCharm() + midbc._get_password = mock.MagicMock() + midbc._get_password.side_effect = self._fake_data + midbc._wait_until_connectable = mock.MagicMock() + _script_template = """ + dba.configureInstance('{}:{}@{}'); + var myshell = shell.connect('{}:{}@{}'); + myshell.runSql("RESTART;"); + """.format( + midbc.cluster_user, midbc.cluster_password, _addr, + midbc.cluster_user, midbc.cluster_password, _addr) + + midbc.configure_instance(_addr) + self.is_flag_set.assert_called_once_with( + "leadership.set.cluster-instance-configured-{}".format(_addr)) + self.subprocess.check_output.assert_called_once_with( + [midbc.mysqlsh_bin, "--no-wizard", "-f", self.filename], + stderr=self.stdin) + self.file.write.assert_called_once_with(_script_template) + midbc._wait_until_connectable.assert_called_once_with( + address=_addr, username=midbc.cluster_user, + password=midbc.cluster_password) + self.leader_set.assert_called_once_with( + {"cluster-instance-configured-{}".format(_addr): True}) + + def test_create_cluster(self): + _pass = "clusterpass" + _addr = "10.10.40.40" + _name = "jujuCluster" + self.get_relation_ip.return_value = _addr + self.data = {"cluster-password": _pass} + self.is_flag_set.side_effect = [False, True] + + midbc = mysql_innodb_cluster.MySQLInnoDBClusterCharm() + midbc._get_password = mock.MagicMock() + midbc._get_password.side_effect = self._fake_data + midbc._wait_until_connectable = mock.MagicMock() + midbc.options.cluster_name = _name + _script_template = """ + shell.connect("{}:{}@{}") + var cluster = dba.createCluster("{}"); + """.format( + midbc.cluster_user, midbc.cluster_password, + midbc.cluster_address, 
midbc.cluster_name) + + midbc.create_cluster() + _is_flag_set_calls = [ + mock.call("leadership.set.cluster-created"), + mock.call("leadership.set.cluster-instance-configured-{}" + .format(_addr))] + self.is_flag_set.assert_has_calls(_is_flag_set_calls) + self.subprocess.check_output.assert_called_once_with( + [midbc.mysqlsh_bin, "--no-wizard", "-f", self.filename], + stderr=self.stdin) + self.file.write.assert_called_once_with(_script_template) + _leader_set_calls = [ + mock.call({"cluster-instance-clustered-{}".format(_addr): True}), + mock.call({"cluster-created": self.uuid_of_cluster})] + self.leader_set.assert_has_calls(_leader_set_calls) + + def test_add_instance_to_cluster(self): + _pass = "clusterpass" + _local_addr = "10.10.50.50" + _remote_addr = "10.10.60.60" + _name = "theCluster" + self.get_relation_ip.return_value = _local_addr + self.data = {"cluster-password": _pass} + self.is_flag_set.return_value = False + + midbc = mysql_innodb_cluster.MySQLInnoDBClusterCharm() + midbc._get_password = mock.MagicMock() + midbc._get_password.side_effect = self._fake_data + midbc._wait_until_connectable = mock.MagicMock() + midbc.options.cluster_name = _name + _script_template = """ + shell.connect("{}:{}@{}") + var cluster = dba.getCluster("{}"); + + print("Adding instances to the cluster."); + cluster.addInstance( + {{user: "{}", host: "{}", password: "{}", port: "3306"}}, + {{recoveryMethod: "clone"}}); + """.format( + midbc.cluster_user, midbc.cluster_password, + midbc.cluster_address, midbc.cluster_name, + midbc.cluster_user, _remote_addr, midbc.cluster_password) + + midbc.add_instance_to_cluster(_remote_addr) + self.is_flag_set.assert_called_once_with( + "leadership.set.cluster-instance-clustered-{}" + .format(_remote_addr)) + self.subprocess.check_output.assert_called_once_with( + [midbc.mysqlsh_bin, "--no-wizard", "-f", self.filename], + stderr=self.stdin) + self.file.write.assert_called_once_with(_script_template) + self.leader_set.assert_called_once_with( 
+ {"cluster-instance-clustered-{}".format(_remote_addr): True}) + + def test_get_allowed_units(self): + _allowed = ["unit/2", "unit/1", "unit/0"] + _expected = "unit/0 unit/1 unit/2" + _helper = mock.MagicMock() + midbc = mysql_innodb_cluster.MySQLInnoDBClusterCharm() + midbc.get_db_helper = mock.MagicMock() + midbc.get_db_helper.return_value = _helper + _helper.get_allowed_units.return_value = _allowed + self.assertEqual( + _expected, + midbc.get_allowed_units("db", "user", "rel:2")) + + def test_create_databases_and_users_shared_db(self): + # The test setup is a bit convoluted and requires mimicking reactive, + # however, this is the heart of the charm and therefore deserves to + # be thoroughly tested. It is important to have multiple relations and + # multiple units per relation. + self.patch_object( + mysql_innodb_cluster.mysql, "get_db_data") + self.get_db_data.side_effect = self._fake_get_db_data + + _addr = "10.99.99.99" + self.get_relation_ip.return_value = _addr + + self.interface.relations = { + self.keystone_shared_db.relation_id: self.keystone_shared_db, + self.nova_shared_db.relation_id: self.nova_shared_db} + + self.interface.all_joined_units = [] + for rel in self.interface.relations.values(): + self.interface.all_joined_units.extend(rel.joined_units) + + midbc = mysql_innodb_cluster.MySQLInnoDBClusterCharm() + midbc.get_allowed_units = mock.MagicMock() + midbc.get_allowed_units.side_effect = self._fake_get_allowed_units + + midbc.configure_db_for_hosts = mock.MagicMock() + midbc.configure_db_for_hosts.side_effect = self._fake_configure + midbc.configure_db_router = mock.MagicMock() + + # Execute the function under test + midbc.create_databases_and_users(self.interface) + + # Validate + midbc.configure_db_router.assert_not_called() + + _configure_db_calls = [ + mock.call(self.keystone_unit5_ip, "keystone", "keystone"), + mock.call(self.keystone_unit7_ip, "keystone", "keystone"), + mock.call(self.nova_unit5_ip, "nova", "nova"), + 
mock.call(self.nova_unit5_ip, "nova_api", "nova"), + mock.call(self.nova_unit5_ip, "nova_cell0", "nova"), + mock.call(self.nova_unit7_ip, "nova", "nova"), + mock.call(self.nova_unit7_ip, "nova_api", "nova"), + mock.call(self.nova_unit7_ip, "nova_cell0", "nova")] + midbc.configure_db_for_hosts.assert_has_calls(_configure_db_calls) + + _set_calls = [ + mock.call( + self.keystone_shared_db.relation_id, _addr, "keystone-pwd", + allowed_units=self._fake_get_allowed_units( + None, None, self.keystone_shared_db.relation_id), + prefix=None), + mock.call( + self.nova_shared_db.relation_id, _addr, "nova-pwd", + allowed_units=self._fake_get_allowed_units( + None, None, self.nova_shared_db.relation_id), + prefix="nova"), + mock.call( + self.nova_shared_db.relation_id, _addr, "nova-pwd", + allowed_units=self._fake_get_allowed_units( + None, None, self.nova_shared_db.relation_id), + prefix="novaapi"), + mock.call( + self.nova_shared_db.relation_id, _addr, "nova-pwd", + allowed_units=self._fake_get_allowed_units( + None, None, self.nova_shared_db.relation_id), + prefix="novacell0")] + self.interface.set_db_connection_info.assert_has_calls(_set_calls) + + def test_create_databases_and_users_db_router(self): + # The test setup is a bit convoluted and requires mimicking reactive, + # however, this is the heart of the charm and therefore deserves to + # be thoroughly tested. It is important to have multiple relations and + # multiple units per relation. 
+ self.patch_object( + mysql_innodb_cluster.mysql, "get_db_data") + self.get_db_data.side_effect = self._fake_get_db_data + + _addr = "10.99.99.99" + self.get_relation_ip.return_value = _addr + + self.interface.relations = { + self.kmr_db_router.relation_id: self.kmr_db_router, + self.nmr_db_router.relation_id: self.nmr_db_router} + + self.interface.all_joined_units = [] + for rel in self.interface.relations.values(): + self.interface.all_joined_units.extend(rel.joined_units) + + midbc = mysql_innodb_cluster.MySQLInnoDBClusterCharm() + midbc.get_allowed_units = mock.MagicMock() + midbc.get_allowed_units.side_effect = self._fake_get_allowed_units + midbc.configure_db_for_hosts = mock.MagicMock() + midbc.configure_db_for_hosts.side_effect = self._fake_configure + midbc.configure_db_router = mock.MagicMock() + midbc.configure_db_router.side_effect = self._fake_configure + + # Execute the function under test + midbc.create_databases_and_users(self.interface) + + # Validate + _conigure_db_router_calls = [ + mock.call(self.kmr_unit5_ip, "mysqlrouteruser"), + mock.call(self.kmr_unit7_ip, "mysqlrouteruser"), + mock.call(self.nmr_unit5_ip, "mysqlrouteruser"), + mock.call(self.nmr_unit7_ip, "mysqlrouteruser")] + midbc.configure_db_router.assert_has_calls(_conigure_db_router_calls) + + _configure_db_calls = [ + mock.call(self.kmr_unit5_ip, "keystone", "keystone"), + mock.call(self.kmr_unit7_ip, "keystone", "keystone"), + mock.call(self.nmr_unit5_ip, "nova", "nova"), + mock.call(self.nmr_unit5_ip, "nova_api", "nova"), + mock.call(self.nmr_unit5_ip, "nova_cell0", "nova"), + mock.call(self.nmr_unit7_ip, "nova", "nova"), + mock.call(self.nmr_unit7_ip, "nova_api", "nova"), + mock.call(self.nmr_unit7_ip, "nova_cell0", "nova")] + midbc.configure_db_for_hosts.assert_has_calls(_configure_db_calls) + + _set_calls = [ + mock.call( + self.kmr_db_router.relation_id, _addr, "keystone-pwd", + allowed_units=self._fake_get_allowed_units( + None, None, self.kmr_db_router.relation_id), + 
prefix=self.mock_unprefixed), + mock.call( + self.kmr_db_router.relation_id, _addr, "mysqlrouteruser-pwd", + allowed_units=" ".join( + [x.unit_name for x in self.kmr_db_router.joined_units]), + prefix="mysqlrouter"), + + mock.call( + self.nmr_db_router.relation_id, _addr, "nova-pwd", + allowed_units=self._fake_get_allowed_units( + None, None, self.nmr_db_router.relation_id), + prefix="nova"), + mock.call( + self.nmr_db_router.relation_id, _addr, "nova-pwd", + allowed_units=self._fake_get_allowed_units( + None, None, self.nmr_db_router.relation_id), + prefix="novaapi"), + mock.call( + self.nmr_db_router.relation_id, _addr, "nova-pwd", + allowed_units=self._fake_get_allowed_units( + None, None, self.nmr_db_router.relation_id), + prefix="novacell0"), + mock.call( + self.nmr_db_router.relation_id, _addr, "mysqlrouteruser-pwd", + allowed_units=" ".join( + [x.unit_name for x in self.nmr_db_router.joined_units]), + prefix="mysqlrouter")] + self.interface.set_db_connection_info.assert_has_calls(_set_calls) + + def test_configure_db_for_hosts(self): + _db = "db" + _user = "user" + _addr = "10.10.80.80" + _pass = "newpass" + _json_addrs = '["10.20.10.10", "10.20.10.20", "10.20.10.30"]' + _helper = mock.MagicMock() + _helper.configure_db.return_value = _pass + midbc = mysql_innodb_cluster.MySQLInnoDBClusterCharm() + midbc.get_db_helper = mock.MagicMock() + midbc.get_db_helper.return_value = _helper + + # One host + self.assertEqual( + _pass, + midbc.configure_db_for_hosts(_addr, _db, _user)) + + _helper.configure_db.assert_called_once_with(_addr, _db, _user) + + # Json multiple hosts + _helper.reset_mock() + _calls = [ + mock.call("10.20.10.10", _db, _user), + mock.call("10.20.10.20", _db, _user), + mock.call("10.20.10.30", _db, _user)] + self.assertEqual( + _pass, + midbc.configure_db_for_hosts(_json_addrs, _db, _user)) + _helper.configure_db.assert_has_calls(_calls) + + def test_configure_db_router(self): + _user = "user" + _addr = "10.10.90.90" + _pass = "newpass" + 
_json_addrs = '["10.30.10.10", "10.30.10.20", "10.30.10.30"]' + _helper = mock.MagicMock() + _helper.configure_router.return_value = _pass + midbc = mysql_innodb_cluster.MySQLInnoDBClusterCharm() + midbc.get_db_helper = mock.MagicMock() + midbc.get_db_helper.return_value = _helper + + # One host + self.assertEqual( + _pass, + midbc.configure_db_router(_addr, _user)) + + _helper.configure_router.assert_called_once_with(_addr, _user) + + # Json multiple hosts + _helper.reset_mock() + _calls = [ + mock.call("10.30.10.10", _user), + mock.call("10.30.10.20", _user), + mock.call("10.30.10.30", _user)] + self.assertEqual( + _pass, + midbc.configure_db_router(_json_addrs, _user)) + _helper.configure_router.assert_has_calls(_calls) + + def test_states_to_check(self): + self.patch_object( + mysql_innodb_cluster.charms_openstack.charm.OpenStackCharm, + "states_to_check", "super_states") + self.super_states.return_value = {} + _required_rels = ["cluster"] + _name = "jujuCluster" + _addr = "10.20.20.20" + self.get_relation_ip.return_value = _addr + midbc = mysql_innodb_cluster.MySQLInnoDBClusterCharm() + midbc.options.cluster_name = _name + _results = midbc.states_to_check(_required_rels) + _states_to_check = [x[0] for x in _results["charm"]] + self.super_states.assert_called_once_with(_required_rels) + self.assertTrue("charm.installed" in _states_to_check) + self.assertTrue( + "leadership.set.cluster-instance-configured-{}".format(_addr) in + _states_to_check) + self.assertTrue("leadership.set.cluster-created" in _states_to_check) + self.assertTrue( + "leadership.set.cluster-instances-configured" in _states_to_check) + self.assertTrue( + "leadership.set.cluster-instance-clustered-{}".format(_addr) in + _states_to_check) + self.assertTrue( + "leadership.set.cluster-instances-clustered" in _states_to_check) + + def test_custom_assess_status_check(self): + _check = mock.MagicMock() + _check.return_value = None, None + _conn_check = mock.MagicMock() + _conn_check.return_value = 
True
+
+        # All is well
+        midbc = mysql_innodb_cluster.MySQLInnoDBClusterCharm()
+        midbc.check_if_paused = _check
+        midbc.check_interfaces = _check
+        midbc.check_mandatory_config = _check
+        midbc.check_mysql_connection = _conn_check
+
+        self.assertEqual((None, None), midbc.custom_assess_status_check())
+        self.assertEqual(3, len(_check.mock_calls))
+        _conn_check.assert_called_once_with()
+
+        # First checks fail
+        _check.return_value = "blocked", "for some reason"
+        self.assertEqual(
+            ("blocked", "for some reason"),
+            midbc.custom_assess_status_check())
+
+        # MySQL connect fails
+        _check.return_value = None, None
+        _conn_check.return_value = False
+        self.assertEqual(
+            ("blocked", "MySQL is down"),
+            midbc.custom_assess_status_check())
+
+    def test_check_mysql_connection(self):
+        self.patch_object(
+            mysql_innodb_cluster.mysql.MySQLdb, "_exceptions")
+        self._exceptions.OperationalError = Exception
+        _helper = mock.MagicMock()
+        _pass = "pass"
+        _root_pass = "differentpass"
+        _user = "user"
+        _addr = "10.20.30.30"
+        self.data = {"mysql.passwd": _root_pass}
+
+        midbc = mysql_innodb_cluster.MySQLInnoDBClusterCharm()
+        midbc.get_db_helper = mock.MagicMock()
+        midbc.get_db_helper.return_value = _helper
+        midbc._get_password = mock.MagicMock()
+        midbc._get_password.side_effect = self._fake_data
+
+        self.assertTrue(
+            midbc.check_mysql_connection(
+                username=_user, password=_pass, address=_addr))
+        _helper.connect.assert_called_once_with(
+            user=_user, password=_pass, host=_addr)
+
+        _helper.reset_mock()
+        _helper.connect.side_effect = self._exceptions.OperationalError
+        self.assertFalse(midbc.check_mysql_connection())
+        _helper.connect.assert_called_once_with(
+            user="root", password=_root_pass, host="localhost")
+
+    def test__wait_until_connectable(self):
+        _pass = "pass"
+        _user = "user"
+        _addr = "10.20.40.40"
+        _conn_check = mock.MagicMock()
+
+        midbc = mysql_innodb_cluster.MySQLInnoDBClusterCharm()
+        midbc.check_mysql_connection = _conn_check
+
+        # Successful connect
+        
_conn_check.return_value = True + midbc._wait_until_connectable( + username=_user, password=_pass, address=_addr) + _conn_check.assert_called_once_with( + username=_user, password=_pass, address=_addr) + + # Failed to connect + _conn_check.reset_mock() + _conn_check.return_value = False + with self.assertRaises(mysql_innodb_cluster.CannotConnectToMySQL): + midbc._wait_until_connectable() + _conn_check.assert_called_once_with( + username=None, password=None, address=None) diff --git a/unit_tests/test_mysql_innodb_cluster_handlers.py b/unit_tests/test_mysql_innodb_cluster_handlers.py new file mode 100644 index 0000000..cdbd7ae --- /dev/null +++ b/unit_tests/test_mysql_innodb_cluster_handlers.py @@ -0,0 +1,225 @@ +# Copyright 2018 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import mock + +import charm.mysql_innodb_cluster as mysql_innodb_cluster +import reactive.mysql_innodb_cluster_handlers as handlers + +import charms_openstack.test_utils as test_utils + + +class TestRegisteredHooks(test_utils.TestRegisteredHooks): + + def test_hooks(self): + defaults = [ + "config.changed", + "update-status", + "upgrade-charm", + "charm.installed"] + hook_set = { + "when": { + "leader_install": ( + "leadership.is_leader", "snap.installed.mysql-shell",), + "non_leader_install": ("leadership.set.mysql.passwd",), + "create_local_cluster_user": ("charm.installed",), + "send_cluster_connection_info": ( + "local.cluster.user-created", "cluster.connected",), + "create_remote_cluster_user": ("cluster.available",), + "initialize_cluster": ( + "leadership.is_leader", "local.cluster.user-created",), + "configure_instances_for_clustering": ( + "leadership.is_leader", "local.cluster.all-users-created", + "leadership.set.cluster-created", "cluster.available"), + "add_instances_to_cluster": ( + "leadership.is_leader", "leadership.set.cluster-created", + "leadership.set.cluster-instances-configured", + "cluster.available",), + "signal_clustered": ( + "leadership.set.cluster-created", "cluster.available",), + "shared_db_respond": ( + "leadership.is_leader", + "leadership.set.cluster-instances-clustered", + "shared-db.available",), + "db_router_respond": ( + "leadership.is_leader", + "leadership.set.cluster-instances-clustered", + "db-router.available",), + }, + "when_not": { + "leader_install": ("charm.installed",), + "non_leader_install": ( + "leadership.is_leader", "charm.installed",), + "create_local_cluster_user": ("local.cluster.user-created",), + "send_cluster_connection_info": ("cluster.available",), + "create_remote_cluster_user": ( + "local.cluster.all-users-created",), + "initialize_cluster": ("leadership.set.cluster-created",), + "configure_instances_for_clustering": ( + "leadership.set.cluster-instances-configured",), + "add_instances_to_cluster": ( 
+ "leadership.set.cluster-instances-clustered",), + "signal_clustered": ("leadership.is_leader",), + }, + } + # test that the hooks were registered via the + # reactive.mysql_innodb_cluster_handlers + self.registered_hooks_test_helper(handlers, hook_set, defaults) + + +class TestMySQLInnoDBClusterHandlers(test_utils.PatchHelper): + + def setUp(self): + super().setUp() + self.patch_release( + mysql_innodb_cluster.MySQLInnoDBClusterCharm.release) + self.midbc = mock.MagicMock() + self.midbc.cluster_name = "jujuCluster" + self.midbc.cluster_address = "10.10.10.10" + self.midbc.cluster_user = "clusteruser" + self.midbc.cluster_password = "clusterpass" + self.patch_object(handlers.charm, "provide_charm_instance", + new=mock.MagicMock()) + self.provide_charm_instance().__enter__.return_value = ( + self.midbc) + self.provide_charm_instance().__exit__.return_value = None + self.patch_object(handlers.leadership, "leader_set") + self.patch_object(handlers.reactive, "is_flag_set") + self.patch_object(handlers.reactive, "set_flag") + + self.unit1 = mock.MagicMock(name="FakeUnit") + self.unit1.received.__getitem__.side_effect = self._fake_data + self.cluster = mock.MagicMock() + self.cluster.all_joined_units = [self.unit1] + self.shared_db = mock.MagicMock() + self.shared_db.all_joined_units = [self.unit1] + self.db_router = mock.MagicMock() + self.db_router.all_joined_units = [self.unit1] + self.data = {} + + def _fake_data(self, key): + return self.data.get(key) + + def test_leader_install(self): + handlers.leader_install() + self.midbc.install.assert_called_once() + self.set_flag.assert_called_once_with("charm.installed") + + def test_non_leader_install(self): + handlers.non_leader_install() + self.midbc.install.assert_called_once() + self.set_flag.assert_called_once_with("charm.installed") + + def test_create_local_cluster_user(self): + handlers.create_local_cluster_user() + self.midbc.create_cluster_user.assert_called_once_with( + self.midbc.cluster_address, + 
self.midbc.cluster_user, + self.midbc.cluster_password) + self.set_flag.assert_called_once_with("local.cluster.user-created") + + def test_send_cluster_connection_info(self): + handlers.send_cluster_connection_info(self.cluster) + self.cluster.set_cluster_connection_info.assert_called_once_with( + self.midbc.cluster_address, + self.midbc.cluster_user, + self.midbc.cluster_password) + + def test_create_remote_cluster_user(self): + _addr = "10.10.10.20" + _pass = "pass" + _user = "user" + self.data = {"cluster-address": _addr, + "cluster-user": _user, + "cluster-password": _pass} + handlers.create_remote_cluster_user(self.cluster) + self.midbc.create_cluster_user.assert_called_once_with( + _addr, _user, _pass) + self.cluster.set_unit_configure_ready.assert_called_once() + self.set_flag.assert_called_once_with( + "local.cluster.all-users-created") + + def test_initialize_cluster(self): + handlers.initialize_cluster() + self.midbc.configure_instance.assert_called_once_with( + self.midbc.cluster_address) + self.midbc.create_cluster.assert_called_once() + + def test_configure_instances_for_clustering(self): + _addr = "10.10.10.30" + # Not ready + self.is_flag_set.return_value = False + self.data = {"cluster-address": _addr} + handlers.configure_instances_for_clustering(self.cluster) + self.midbc.configure_instance.assert_not_called() + self.midbc.add_instance_to_cluster.assert_not_called() + self.leader_set.assert_not_called() + + # Some but not all + self.midbc.reset_mock() + self.is_flag_set.return_value = False + self.data = {"cluster-address": _addr, "unit-configure-ready": True} + handlers.configure_instances_for_clustering(self.cluster) + self.midbc.configure_instance.assert_called_once_with(_addr) + self.midbc.add_instance_to_cluster.assert_called_once_with(_addr) + self.leader_set.assert_not_called() + + # All ready + self.midbc.reset_mock() + self.is_flag_set.return_value = True + handlers.configure_instances_for_clustering(self.cluster) + 
self.midbc.configure_instance.assert_called_once_with(_addr) + self.midbc.add_instance_to_cluster.assert_called_once_with(_addr) + self.leader_set.assert_called_once_with( + {"cluster-instances-configured": True}) + + def test_add_instances_to_cluster(self): + _addr = "10.10.10.30" + + # Some but not all + self.is_flag_set.return_value = False + self.data = {"cluster-address": _addr} + handlers.add_instances_to_cluster(self.cluster) + self.midbc.add_instance_to_cluster.assert_called_once_with(_addr) + self.leader_set.assert_not_called() + + # All ready + self.midbc.reset_mock() + self.is_flag_set.return_value = True + handlers.add_instances_to_cluster(self.cluster) + self.midbc.add_instance_to_cluster.assert_called_once_with(_addr) + self.leader_set.assert_called_once_with( + {"cluster-instances-clustered": True}) + + def test_signal_clustered(self): + # Unit not clustered + self.is_flag_set.return_value = False + handlers.signal_clustered(self.cluster) + self.cluster.set_unit_clustered.assert_not_called() + + # Unit Clustered + self.midbc.reset_mock() + self.is_flag_set.return_value = True + handlers.signal_clustered(self.cluster) + self.cluster.set_unit_clustered.assert_called_once() + + def test_shared_db_respond(self): + handlers.shared_db_respond(self.shared_db) + self.midbc.create_databases_and_users.assert_called_once_with( + self.shared_db) + + def test_db_router_respond(self): + handlers.db_router_respond(self.db_router) + self.midbc.create_databases_and_users.assert_called_once_with( + self.db_router)