From 1ad5a1a78e02ba396053e43e234285571f90a1df Mon Sep 17 00:00:00 2001
From: David Ames
Date: Wed, 16 Oct 2019 14:06:58 -0700
Subject: [PATCH] Upstream charm

Bring charm into alignment with other OpenStack charms.
Updates to requirements, tox, README and zuul.yaml.

Change-Id: I8a30200a2047146f94af0b45198c07d82f04a1fa
---
 .gitreview                                    |  4 ++
 .zuul.yaml                                    |  5 ++
 requirements.txt                              |  7 +-
 src/HACKING.md                                |  2 +-
 src/README.md                                 | 68 +++++++++++++++++++
 src/files/.gitkeep                            |  0
 src/layer.yaml                                |  2 +-
 src/reactive/mysql_innodb_cluster_handlers.py |  8 ++-
 src/test-requirements.txt                     |  7 +-
 src/tox.ini                                   | 18 +++--
 test-requirements.txt                         | 17 +++--
 tox.ini                                       | 27 +++++---
 ...ib_charm_openstack_mysql_innodb_cluster.py | 31 ++++++---
 13 files changed, 158 insertions(+), 38 deletions(-)
 create mode 100644 .gitreview
 create mode 100644 .zuul.yaml
 create mode 100644 src/README.md
 create mode 100644 src/files/.gitkeep

diff --git a/.gitreview b/.gitreview
new file mode 100644
index 0000000..afac541
--- /dev/null
+++ b/.gitreview
@@ -0,0 +1,4 @@
+[gerrit]
+host=review.opendev.org
+port=29418
+project=openstack/charm-mysql-innodb-cluster.git
diff --git a/.zuul.yaml b/.zuul.yaml
new file mode 100644
index 0000000..7332a87
--- /dev/null
+++ b/.zuul.yaml
@@ -0,0 +1,5 @@
+- project:
+    templates:
+      - python35-charm-jobs
+      - openstack-python3-train-jobs
+      - openstack-cover-jobs
diff --git a/requirements.txt b/requirements.txt
index 20f335d..b1d4872 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,6 +1,7 @@
-# This file is managed centrally. If you find the need to modify this as a
-# one-off, please don't. Intead, consult #openstack-charms and ask about
-# requirements management in charms via bot-control. Thank you.
+# This file is managed centrally by release-tools and should not be modified
+# within individual charm repos. See the 'global' dir contents for available
+# choices of *requirements.txt files for OpenStack Charms:
+# https://github.com/openstack-charmers/release-tools
 #
 # Build requirements
 charm-tools>=2.4.4
diff --git a/src/HACKING.md b/src/HACKING.md
index 2f78fd7..48c1e85 100644
--- a/src/HACKING.md
+++ b/src/HACKING.md
@@ -4,7 +4,7 @@
 This charm is developed as part of the OpenStack Charms project, and as such
 you should refer to the [OpenStack Charm Development
 Guide](https://github.com/openstack/charm-guide) for details on how to
 contribute to this charm.
-You can find its source code here: <https://github.com/openstack-charmers/charm-mysql-innodb-cluster>.
+You can find its source code here: <https://opendev.org/openstack/charm-mysql-innodb-cluster>.

 # To Do
diff --git a/src/README.md b/src/README.md
new file mode 100644
index 0000000..6f3f3ca
--- /dev/null
+++ b/src/README.md
@@ -0,0 +1,68 @@
+# Overview
+
+This charm provides a MySQL 8 InnoDB clustered database.
+
+Ubuntu 19.10 or above is required.
+
+# Usage
+
+The charm is intended for deploying a cluster and therefore does not deploy on a single unit.
+
+## Cluster deployment
+
+```
+juju deploy -n 3 mysql-innodb-cluster
+```
+
+The charm is designed to be used with the
+[db-router relation](https://github.com/openstack-charmers/charm-interface-mysql-router)
+in conjunction with the [MySQL Router charm](https://github.com/openstack-charmers/charm-mysql-router):
+
+```
+juju add-relation mysql-innodb-cluster:db-router mysql-router:db-router
+```
+
+The charm can be related to existing charms that use the [shared-db relation](https://github.com/openstack/charm-interface-mysql-shared).
+However, this relation should be considered deprecated:
+
+```
+juju add-relation mysql-innodb-cluster:shared-db keystone:shared-db
+```
+
+## Scale out Usage
+
+Nodes can be added to the cluster as Read Only nodes:
+
+```
+juju add-unit mysql-innodb-cluster
+```
+
+## Known Limitations and Issues
+
+> **Warning**: This charm is in preview state.
+
+The charm is under active development and is not yet production ready. Its
+current intended use is for validation of MySQL 8 InnoDB cluster for use with
+OpenStack.
+
+# Configuration
+
+The name of the cluster can be customized at deploy time:
+
+```
+juju deploy -n 3 mysql-innodb-cluster --config cluster-name=myCluster
+```
+
+# Contact Information
+
+OpenStack Charmers
+
+## Upstream MySQL
+
+ - [Upstream documentation](https://dev.mysql.com/doc/refman/8.0/en/mysql-innodb-cluster-userguide.html)
+
+# Bugs
+
+Please report bugs on [Launchpad](https://bugs.launchpad.net/charm-mysql-innodb-cluster/+filebug).
+
+For general questions please refer to the OpenStack [Charm Guide](https://docs.openstack.org/charm-guide/latest/).
diff --git a/src/files/.gitkeep b/src/files/.gitkeep
new file mode 100644
index 0000000..e69de29
diff --git a/src/layer.yaml b/src/layer.yaml
index ea267cb..4e7be36 100644
--- a/src/layer.yaml
+++ b/src/layer.yaml
@@ -13,7 +13,7 @@ options:
     mysql-shell:
       channel: edge
       devmode: True
-repo: https://github.com/openstack-charmers/charm-mysql-innodb-cluster
+repo: https://opendev.org/openstack/charm-mysql-innodb-cluster
 config:
   deletes:
     - verbose
diff --git a/src/reactive/mysql_innodb_cluster_handlers.py b/src/reactive/mysql_innodb_cluster_handlers.py
index 096f247..7bea5aa 100644
--- a/src/reactive/mysql_innodb_cluster_handlers.py
+++ b/src/reactive/mysql_innodb_cluster_handlers.py
@@ -42,8 +42,10 @@ def non_leader_install():
     packages.
     """
     # Wait for leader to set mysql.passwd
-    with charm.provide_charm_instance() as instance: instance.install()
-    reactive.set_flag("charm.installed") instance.assess_status()
+    with charm.provide_charm_instance() as instance:
+        instance.install()
+    reactive.set_flag("charm.installed")
+    instance.assess_status()


 @reactive.when('charm.installed')
@@ -86,7 +88,7 @@ def send_cluster_connection_info(cluster):
 @reactive.when('cluster.available')
 def create_remote_cluster_user(cluster):
     """Create remote cluster user.
-
+
     Create the remote cluster peer user and grant cluster permissions in the
     MySQL DB.

diff --git a/src/test-requirements.txt b/src/test-requirements.txt
index f2912ba..d3c9be8 100644
--- a/src/test-requirements.txt
+++ b/src/test-requirements.txt
@@ -1,3 +1,8 @@
-# zaza
+# This file is managed centrally by release-tools and should not be modified
+# within individual charm repos. See the 'global' dir contents for available
+# choices of *requirements.txt files for OpenStack Charms:
+# https://github.com/openstack-charmers/release-tools
+#
+# Functional Test Requirements (let Zaza's dependencies solve all dependencies here!)
 git+https://github.com/openstack-charmers/zaza.git#egg=zaza
 git+https://github.com/openstack-charmers/zaza-openstack-tests.git#egg=zaza.openstack
diff --git a/src/tox.ini b/src/tox.ini
index ea2987f..07a7adc 100644
--- a/src/tox.ini
+++ b/src/tox.ini
@@ -1,12 +1,22 @@
+# Source charm (with zaza): ./src/tox.ini
+# This file is managed centrally by release-tools and should not be modified
+# within individual charm repos. See the 'global' dir contents for available
+# choices of tox.ini for OpenStack Charms:
+# https://github.com/openstack-charmers/release-tools
+
 [tox]
 envlist = pep8
 skipsdist = True
+# NOTE: Avoid build/test env pollution by not enabling sitepackages.
+sitepackages = False
+# NOTE: Avoid false positives by not skipping missing interpreters.
+skip_missing_interpreters = False

 [testenv]
 setenv = VIRTUAL_ENV={envdir}
          PYTHONHASHSEED=0
 whitelist_externals = juju
-passenv = HOME TERM CS_API_* OS_* AMULET_*
+passenv = HOME TERM CS_* OS_* TEST_*
 deps = -r{toxinidir}/test-requirements.txt
 install_command =
   pip install {opts} {packages}
@@ -19,7 +29,7 @@ commands = charm-proof
 [testenv:func-noop]
 basepython = python3
 commands =
-    true
+    functest-run-suite --help

 [testenv:func]
 basepython = python3
@@ -31,10 +41,10 @@ basepython = python3
 commands =
     functest-run-suite --keep-model --smoke

-[testenv:func-dev]
+[testenv:func-target]
 basepython = python3
 commands =
-    functest-run-suite --keep-model --dev
+    functest-run-suite --keep-model --bundle {posargs}

 [testenv:venv]
 commands = {posargs}
diff --git a/test-requirements.txt b/test-requirements.txt
index 125af12..0ab97f6 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -1,6 +1,7 @@
-# This file is managed centrally. If you find the need to modify this as a
-# one-off, please don't. Intead, consult #openstack-charms and ask about
-# requirements management in charms via bot-control. Thank you.
+# This file is managed centrally by release-tools and should not be modified
+# within individual charm repos. See the 'global' dir contents for available
+# choices of *requirements.txt files for OpenStack Charms:
+# https://github.com/openstack-charmers/release-tools
 #
 # Lint and unit test requirements
 flake8>=2.2.4,<=2.4.1
@@ -10,5 +11,13 @@ charms.reactive
 mock>=1.2
 nose>=1.3.7
 coverage>=3.6
-tenacity
 git+https://github.com/openstack/charms.openstack.git#egg=charms.openstack
+#
+# Revisit for removal / mock improvement:
+netifaces        # vault
+psycopg2-binary  # vault
+tenacity         # vault
+pbr              # vault
+cryptography     # vault, keystone-saml-mellon
+lxml             # keystone-saml-mellon
+hvac             # vault, barbican-vault
diff --git a/tox.ini b/tox.ini
index b0a8454..5b41c1d 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,18 +1,25 @@
 # Source charm: ./tox.ini
 # This file is managed centrally by release-tools and should not be modified
-# within individual charm repos.
+# within individual charm repos. See the 'global' dir contents for available
+# choices of tox.ini for OpenStack Charms:
+# https://github.com/openstack-charmers/release-tools
+
 [tox]
 skipsdist = True
 envlist = pep8,py3
+# NOTE: Avoid build/test env pollution by not enabling sitepackages.
+sitepackages = False
+# NOTE: Avoid false positives by not skipping missing interpreters.
+skip_missing_interpreters = False

 [testenv]
 setenv = VIRTUAL_ENV={envdir}
          PYTHONHASHSEED=0
          TERM=linux
-         CHARM_LAYER_PATH={toxinidir}/layers
-         CHARM_INTERFACES_DIR={toxinidir}/interfaces
+         LAYER_PATH={toxinidir}/layers
+         INTERFACE_PATH={toxinidir}/interfaces
          JUJU_REPOSITORY={toxinidir}/build
-passenv = http_proxy https_proxy OS_*
+passenv = http_proxy https_proxy INTERFACE_PATH LAYER_PATH JUJU_REPOSITORY
 install_command =
   pip install {opts} {packages}
 deps =
@@ -26,22 +33,22 @@ commands =
 [testenv:py3]
 basepython = python3
 deps = -r{toxinidir}/test-requirements.txt
-commands = stestr run {posargs}
+commands = stestr run --slowest {posargs}

 [testenv:py35]
 basepython = python3.5
 deps = -r{toxinidir}/test-requirements.txt
-commands = stestr run {posargs}
+commands = stestr run --slowest {posargs}

 [testenv:py36]
 basepython = python3.6
 deps = -r{toxinidir}/test-requirements.txt
-commands = stestr run {posargs}
+commands = stestr run --slowest {posargs}

 [testenv:py37]
 basepython = python3.7
 deps = -r{toxinidir}/test-requirements.txt
-commands = stestr run {posargs}
+commands = stestr run --slowest {posargs}

 [testenv:pep8]
 basepython = python3
@@ -59,7 +66,7 @@ setenv =
     PYTHON=coverage run
 commands =
     coverage erase
-    stestr run {posargs}
+    stestr run --slowest {posargs}
     coverage combine
     coverage html -d cover
     coverage xml -o cover/coverage.xml
@@ -82,4 +89,4 @@ commands = {posargs}

 [flake8]
 # E402 ignore necessary for path append before sys module import in actions
-ignore = E402
+ignore = E402,W504
diff --git a/unit_tests/test_lib_charm_openstack_mysql_innodb_cluster.py b/unit_tests/test_lib_charm_openstack_mysql_innodb_cluster.py
index f29d8ad..ac4ca05 100644
--- a/unit_tests/test_lib_charm_openstack_mysql_innodb_cluster.py
+++ b/unit_tests/test_lib_charm_openstack_mysql_innodb_cluster.py
@@ -402,7 +402,8 @@ class TestMySQLInnoDBClusterCharm(test_utils.PatchHelper):
             mock.call("GRANT GRANT OPTION ON *.* TO '{}'@'{}'"
                       .format(_user, _addr)),
             mock.call("flush privileges")]
-        _helper.execute.assert_has_calls(_calls)
+        _helper.execute.assert_has_calls(
+            _calls, any_order=True)

         # Local
         _localhost = "localhost"
@@ -424,7 +425,8 @@ class TestMySQLInnoDBClusterCharm(test_utils.PatchHelper):
             mock.call("GRANT GRANT OPTION ON *.* TO '{}'@'{}'"
                       .format(_user, _localhost)),
             mock.call("flush privileges")]
-        _helper.execute.assert_has_calls(_calls)
+        _helper.execute.assert_has_calls(
+            _calls, any_order=True)

     def test_configure_instance(self):
         _pass = "clusterpass"
@@ -482,7 +484,7 @@ class TestMySQLInnoDBClusterCharm(test_utils.PatchHelper):
             mock.call("leadership.set.cluster-created"),
             mock.call("leadership.set.cluster-instance-configured-{}"
                       .format(_addr))]
-        self.is_flag_set.assert_has_calls(_is_flag_set_calls)
+        self.is_flag_set.assert_has_calls(_is_flag_set_calls, any_order=True)
         self.subprocess.check_output.assert_called_once_with(
             [midbc.mysqlsh_bin, "--no-wizard", "-f", self.filename],
             stderr=self.stdin)
@@ -490,7 +492,7 @@ class TestMySQLInnoDBClusterCharm(test_utils.PatchHelper):
         _leader_set_calls = [
             mock.call({"cluster-instance-clustered-{}".format(_addr): True}),
             mock.call({"cluster-created": self.uuid_of_cluster})]
-        self.leader_set.assert_has_calls(_leader_set_calls)
+        self.leader_set.assert_has_calls(_leader_set_calls, any_order=True)

     def test_add_instance_to_cluster(self):
         _pass = "clusterpass"
@@ -585,7 +587,8 @@ class TestMySQLInnoDBClusterCharm(test_utils.PatchHelper):
             mock.call(self.nova_unit7_ip, "nova", "nova"),
             mock.call(self.nova_unit7_ip, "nova_api", "nova"),
             mock.call(self.nova_unit7_ip, "nova_cell0", "nova")]
-        midbc.configure_db_for_hosts.assert_has_calls(_configure_db_calls)
+        midbc.configure_db_for_hosts.assert_has_calls(
+            _configure_db_calls, any_order=True)

         _set_calls = [
             mock.call(
@@ -608,7 +611,8 @@ class TestMySQLInnoDBClusterCharm(test_utils.PatchHelper):
                 allowed_units=self._fake_get_allowed_units(
                     None, None, self.nova_shared_db.relation_id),
                 prefix="novacell0")]
-        self.interface.set_db_connection_info.assert_has_calls(_set_calls)
+        self.interface.set_db_connection_info.assert_has_calls(
+            _set_calls, any_order=True)

     def test_create_databases_and_users_db_router(self):
         # The test setup is a bit convoluted and requires mimicking reactive,
@@ -647,7 +651,8 @@ class TestMySQLInnoDBClusterCharm(test_utils.PatchHelper):
             mock.call(self.kmr_unit7_ip, "mysqlrouteruser"),
             mock.call(self.nmr_unit5_ip, "mysqlrouteruser"),
             mock.call(self.nmr_unit7_ip, "mysqlrouteruser")]
-        midbc.configure_db_router.assert_has_calls(_conigure_db_router_calls)
+        midbc.configure_db_router.assert_has_calls(
+            _conigure_db_router_calls, any_order=True)

         _configure_db_calls = [
             mock.call(self.kmr_unit5_ip, "keystone", "keystone"),
@@ -658,7 +663,8 @@ class TestMySQLInnoDBClusterCharm(test_utils.PatchHelper):
             mock.call(self.nmr_unit7_ip, "nova", "nova"),
             mock.call(self.nmr_unit7_ip, "nova_api", "nova"),
             mock.call(self.nmr_unit7_ip, "nova_cell0", "nova")]
-        midbc.configure_db_for_hosts.assert_has_calls(_configure_db_calls)
+        midbc.configure_db_for_hosts.assert_has_calls(
+            _configure_db_calls, any_order=True)

         _set_calls = [
             mock.call(
@@ -692,7 +698,8 @@ class TestMySQLInnoDBClusterCharm(test_utils.PatchHelper):
                 allowed_units=" ".join(
                     [x.unit_name for x in self.nmr_db_router.joined_units]),
                 prefix="mysqlrouter")]
-        self.interface.set_db_connection_info.assert_has_calls(_set_calls)
+        self.interface.set_db_connection_info.assert_has_calls(
+            _set_calls, any_order=True)

     def test_configure_db_for_hosts(self):
         _db = "db"
@@ -722,7 +729,8 @@ class TestMySQLInnoDBClusterCharm(test_utils.PatchHelper):
         self.assertEqual(
             _pass,
             midbc.configure_db_for_hosts(_json_addrs, _db, _user))
-        _helper.configure_db.assert_has_calls(_calls)
+        _helper.configure_db.assert_has_calls(
+            _calls, any_order=True)

     def test_configure_db_router(self):
         _user = "user"
@@ -751,7 +759,8 @@ class TestMySQLInnoDBClusterCharm(test_utils.PatchHelper):
         self.assertEqual(
             _pass,
             midbc.configure_db_router(_json_addrs, _user))
-        _helper.configure_router.assert_has_calls(_calls)
+        _helper.configure_router.assert_has_calls(
+            _calls, any_order=True)

     def test_states_to_check(self):
         self.patch_object(