From 65f39119485e36aa28e4a4946e341e67add4a17d Mon Sep 17 00:00:00 2001 From: Frode Nordahl Date: Fri, 26 Jun 2020 11:29:25 +0200 Subject: [PATCH] Add OVS to OVN migration actions Switch smoke to focal-ussuri. Unpin flake8. Change-Id: Ifa99988612eaaeb9d60a0d99db172f97e27cfc93 --- src/actions.yaml | 56 ++++ src/actions/actions.py | 276 +++++++++++++++++ src/actions/migrate-mtu | 1 + src/actions/migrate-ovn-db | 1 + src/actions/offline-neutron-morph-db | 1 + .../neutron_offline_network_type_update.py | 239 +++++++++++++++ src/tests/tests.yaml | 2 +- src/wheelhouse.txt | 1 + test-requirements.txt | 2 +- unit_tests/__init__.py | 4 + unit_tests/test_actions.py | 289 ++++++++++++++++++ 11 files changed, 870 insertions(+), 2 deletions(-) create mode 100644 src/actions.yaml create mode 100755 src/actions/actions.py create mode 120000 src/actions/migrate-mtu create mode 120000 src/actions/migrate-ovn-db create mode 120000 src/actions/offline-neutron-morph-db create mode 100755 src/files/scripts/neutron_offline_network_type_update.py create mode 100644 unit_tests/test_actions.py diff --git a/src/actions.yaml b/src/actions.yaml new file mode 100644 index 0000000..ab27191 --- /dev/null +++ b/src/actions.yaml @@ -0,0 +1,56 @@ +migrate-ovn-db: + description: | + Run the Neutron OVN DB Sync utility. + params: + i-really-mean-it: + type: boolean + default: false + description: | + The default of false will cause the action to perform a dry-run and log + output. Set to true to perform the actual sync. + . + NOTE: The neutron-api units should be paused while running this action. + required: + - i-really-mean-it +migrate-mtu: + description: | + Reduce MTU on overlay networks prior to migration to Geneve. + params: + i-really-mean-it: + type: boolean + default: false + description: | + The default of false will cause the action to verify that all overlay + networks have been adjusted. Set to true to perform the actual + migration. + . + NOTE: To avoid connectivity issues, running instances should already + have been reconfigured with a lower MTU prior to running this action. + . + NOTE: The neutron-api units should NOT be paused while running this + action. + required: + - i-really-mean-it +offline-neutron-morph-db: + description: | + Perform optional offline morphing of tunnel networks in Neutron DB. + params: + i-really-mean-it: + type: boolean + default: false + description: | + The default of false will cause the action to not commit the database + transaction, effectively performing a dry run. Set to true to perform + the actual operation. + . + NOTE: Performing this action is optional and will allow migrated + networks to show as type 'geneve' to the end user of the cloud which + also allows other `openstack network set` operations to succeed + post-migration. + . + NOTE: Before running this action you should make a backup of the + Neutron database. + . + NOTE: The neutron-api units MUST be paused while running this action. + required: + - i-really-mean-it diff --git a/src/actions/actions.py b/src/actions/actions.py new file mode 100755 index 0000000..4d5498c --- /dev/null +++ b/src/actions/actions.py @@ -0,0 +1,276 @@ +#!/usr/local/sbin/charm-env python3 +# +# Copyright 2020 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import contextlib +import os +import subprocess +import sys +import traceback + +from oslo_config import cfg + +# Load modules from $CHARM_DIR/lib +sys.path.append('lib') +sys.path.append('reactive') + +from charms.layer import basic +basic.bootstrap_charm_deps() +basic.init_config_states() + +import charms_openstack.bus + +import charmhelpers.core as ch_core + +charms_openstack.bus.discover() + + +NEUTRON_CONF = '/etc/neutron/neutron.conf' +NEUTRON_OVN_DB_SYNC_CONF = '/etc/neutron/neutron-ovn-db-sync.conf' + + +def get_neutron_credentials(): + """Retrieve service credentials from Neutron's configuration file. + + Since we are a subordinate of the neutron-api charm and have no direct + relationship with Keystone ourselves we rely on gleaning Neutron's + credentials from its config file. + + :returns: Map of environment variable name and appropriate value for auth. + :rtype: Dict[str,str] + """ + sections = {} + parser = cfg.ConfigParser(NEUTRON_CONF, sections) + parser.parse() + auth_section = 'keystone_authtoken' + return { + 'OS_USER_DOMAIN_NAME': sections[auth_section]['user_domain_name'][0], + 'OS_PROJECT_DOMAIN_NAME': sections[auth_section][ + 'project_domain_name'][0], + 'OS_AUTH_URL': sections[auth_section]['auth_url'][0], + 'OS_PROJECT_NAME': sections[auth_section]['project_name'][0], + 'OS_USERNAME': sections[auth_section]['username'][0], + 'OS_PASSWORD': sections[auth_section]['password'][0], + } + + +def get_neutron_db_connection_string(): + """Retrieve db connection string from Neutron's configuration file. + + Since we are a subordinate of the neutron-api charm and have no direct + relationship with the database ourselves we rely on gleaning Neutron's + credentials from its config file. + + :returns: SQLAlchemy consumable DB connection string. + :rtype: str + """ + sections = {} + parser = cfg.ConfigParser(NEUTRON_CONF, sections) + parser.parse() + return sections['database']['connection'][0] + + +@contextlib.contextmanager +def write_filtered_neutron_config_for_sync_util(): + """This helper exists to work around LP: #1894048. + + Load neutron config and write out a copy with any sections or options + offending the `neutron-ovn-db-sync-util` removed. + + The helper should be used as a context manager to have the temporary config + file removed when done. Example: + + with write_filtered_neutron_config_for_sync_util(): + do_something() + """ + # Make sure the file we create has safe permissions + stored_mask = os.umask(0o0027) + try: + with open(NEUTRON_CONF, 'r') as fin: + with open(NEUTRON_OVN_DB_SYNC_CONF, 'w') as fout: + for line in fin.readlines(): + # The ovn-db-sync-util chokes on this. LP: #1894048 + if line.startswith('auth_section'): + continue + fout.write(line) + finally: + # Restore umask for further execution regardless of any exception + # occurring above. + os.umask(stored_mask) + + yield + + # remove the temporary config file + os.unlink(NEUTRON_OVN_DB_SYNC_CONF) + + +def migrate_mtu(args): + """Reduce MTU on overlay networks prior to migration to Geneve. 
+ + :param args: Argument list + :type args: List[str] + """ + action_name = os.path.basename(args[0]) + dry_run = not ch_core.hookenv.action_get('i-really-mean-it') + mode = 'verify' if dry_run else 'update' + cp = subprocess.run( + ( + 'neutron-ovn-migration-mtu', + mode, + 'mtu', + ), + capture_output=True, + universal_newlines=True, + env={ + 'PATH': '/usr/bin', + **get_neutron_credentials(), + }) + if dry_run: + banner_msg = '{}: OUTPUT FROM VERIFY'.format(action_name) + else: + banner_msg = '{}: OUTPUT FROM UPDATE'.format(action_name) + + # we pass the output through and it will be captured both in log and + # action output + output_indicates_failure = False + for output_name in ('stdout', 'stderr'): + fh = getattr(sys, output_name) + data = getattr(cp, output_name) + print('{} ON {}:\n'.format(banner_msg, output_name.upper()) + data, + file=fh) + for fail_word in ('Exception', 'Traceback'): + if fail_word in data: + # the `neutron-ovn-migration-mtu` tool does not set an error + # code on failure, look for errors in the output and set action + # status accordingly. + output_indicates_failure = True + + if cp.returncode != 0 or output_indicates_failure: + ch_core.hookenv.action_fail( + 'Execution failed, please investigate output.') + + +def migrate_ovn_db(args): + """Migrate the Neutron DB into OVN with the `neutron-ovn-db-sync-util`. + + :param args: Argument list + :type args: List[str] + """ + action_name = os.path.basename(args[0]) + dry_run = not ch_core.hookenv.action_get('i-really-mean-it') + sync_mode = 'log' if dry_run else 'repair' + with write_filtered_neutron_config_for_sync_util(): + cp = subprocess.run( + ( + 'neutron-ovn-db-sync-util', + '--config-file', NEUTRON_OVN_DB_SYNC_CONF, + '--config-file', '/etc/neutron/plugins/ml2/ml2_conf.ini', + '--ovn-neutron_sync_mode', sync_mode, + ), + capture_output=True, + universal_newlines=True, + ) + if dry_run: + banner_msg = '{}: OUTPUT FROM DRY-RUN'.format(action_name) + else: + banner_msg = '{}: OUTPUT FROM SYNC'.format(action_name) + + # we pass the output through and it will be captured both in log and + # action output + output_indicates_failure = False + for output_name in ('stdout', 'stderr'): + fh = getattr(sys, output_name) + data = getattr(cp, output_name) + print('{} ON {}:\n'.format(banner_msg, output_name.upper()) + data, + file=fh) + if 'ERROR' in data: + # the `neutron-ovn-db-sync-util` tool does not set an error code on + # failure, look for errors in the output and set action status + # accordingly. + output_indicates_failure = True + + if cp.returncode != 0 or output_indicates_failure: + ch_core.hookenv.action_fail( + 'Execution failed, please investigate output.') + + +def offline_neutron_morph_db(args): + """Perform offline moprhing of tunnel networks in the Neutron DB. + + :param args: Argument list + :type args: List[str] + """ + action_name = os.path.basename(args[0]) + dry_run = not ch_core.hookenv.action_get('i-really-mean-it') + mode = 'dry' if dry_run else 'morph' + cp = subprocess.run( + ( + '{}'.format( + os.path.join( + ch_core.hookenv.charm_dir(), + 'files/scripts/neutron_offline_network_type_update.py')), + get_neutron_db_connection_string(), + mode, + ), + capture_output=True, + universal_newlines=True, + # We want this tool to run outside of the charm venv to let it consume + # system Python packages. 
+ env={'PATH': '/usr/bin'}, + ) + if dry_run: + banner_msg = '{}: OUTPUT FROM DRY-RUN'.format(action_name) + else: + banner_msg = '{}: OUTPUT FROM MORPH'.format(action_name) + + # we pass the output through and it will be captured both in log and + # action output + for output_name in ('stdout', 'stderr'): + fh = getattr(sys, output_name) + data = getattr(cp, output_name) + print('{} ON {}:\n'.format(banner_msg, output_name.upper()) + data, + file=fh) + + if cp.returncode != 0: + ch_core.hookenv.action_fail( + 'Execution failed, please investigate output.') + + +ACTIONS = { + 'migrate-mtu': migrate_mtu, + 'migrate-ovn-db': migrate_ovn_db, + 'offline-neutron-morph-db': offline_neutron_morph_db, +} + + +def main(args): + action_name = os.path.basename(args[0]) + try: + action = ACTIONS[action_name] + except KeyError: + return 'Action {} undefined'.format(action_name) + else: + try: + action(args) + except Exception as e: + ch_core.hookenv.log('action "{}" failed: "{}" "{}"' + .format(action_name, str(e), + traceback.format_exc()), + level=ch_core.hookenv.ERROR) + ch_core.hookenv.action_fail(str(e)) + + +if __name__ == '__main__': + sys.exit(main(sys.argv)) diff --git a/src/actions/migrate-mtu b/src/actions/migrate-mtu new file mode 120000 index 0000000..405a394 --- /dev/null +++ b/src/actions/migrate-mtu @@ -0,0 +1 @@ +actions.py \ No newline at end of file diff --git a/src/actions/migrate-ovn-db b/src/actions/migrate-ovn-db new file mode 120000 index 0000000..405a394 --- /dev/null +++ b/src/actions/migrate-ovn-db @@ -0,0 +1 @@ +actions.py \ No newline at end of file diff --git a/src/actions/offline-neutron-morph-db b/src/actions/offline-neutron-morph-db new file mode 120000 index 0000000..405a394 --- /dev/null +++ b/src/actions/offline-neutron-morph-db @@ -0,0 +1 @@ +actions.py \ No newline at end of file diff --git a/src/files/scripts/neutron_offline_network_type_update.py b/src/files/scripts/neutron_offline_network_type_update.py new file mode 100755 index 0000000..a8647b5 --- /dev/null +++ b/src/files/scripts/neutron_offline_network_type_update.py @@ -0,0 +1,239 @@ +#!/usr/bin/env python3 + +# Copyright 2020 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""neutron_offline_network_type_update + +The purpose of this module is to provide a tool that allow the user to perform +Neutron database surgery to change the type of tunnel networks from 'gre' and +'vxlan' to 'geneve'. + +It is an optional part of a migration from a legacy Neutron ML2+OVS to ML2+OVN +deployment. + +At the time of this writing the Neutron OVN ML2 driver will assume that all +chassis participating in a network to use the 'geneve' tunnel protocol and it +will ignore the value of the `network_type` field in any non-physical network +in the Neutron database. It will also ignore the `segmentation_id` field and +let OVN assign the VNIs [0]. + +The Neutron API currently does not support changing the type of a network, so +when doing a migration the above described behaviour is actually a welcomed +one. 
+ +However, after the migration is done and all the primary functions are working, +the end user of the cloud will be left with the false impression of their +existing 'gre' or 'vxlan' typed networks still being operational on said tunnel +protocols. In reality 'geneve' is used under the hood. + +The end user will also run into issues with modifying any existing networks +with `openstack network set` throwing error messages about networks of type +'gre' or 'vxlan' not being supported. + +After running this script said networks will have their `network_type` field +changed to 'geneve' which will fix the above described problems. + +NOTE: Use this script with caution, it is of absolute importance that the + `neutron-server` process is stopped while the script is running. + +NOTE: While we regularly exercise the script as part of our functional testing + of the charmed migration path and the script is touching fundamental data + structures that are not likely to have their definition changed much in + the Neutron database, we would still advise you to take a fresh backup of + the Neutron database and keep it for a while just in case. + +0: https://github.com/ovn-org/ovn/blob/1e07781310d8155997672bdce01a2ff4f5a93e83/northd/ovn-northd.c#L1188-L1268 +""" # noqa + +import os +import sys + +from oslo_db.sqlalchemy import session + +import sqlalchemy + + +class NotFound(Exception): + pass + + +def main(argv): + """Main function. + + :param argv: Argument list + :type argv: List[str] + :returns: POSIX exit code + :rtype: int + """ + program = os.path.basename(argv[0]) + if len(argv) < 2: + usage(program) + return os.EX_USAGE + elif len(argv) < 3 or argv[2] != 'morph': + print('DRY-RUN, WILL NOT COMMIT TRANSACTION') + + db_engine = session.create_engine(argv[1]) + db_maker = session.get_maker(db_engine, autocommit=False) + db_session = db_maker(bind=db_engine) + + to_network_type = 'geneve' + for network_type in ('gre', 'vxlan'): + n_morphed = morph_networks(db_session, network_type, to_network_type) + print('Morphed {} networks of type {} to {}.' + .format(n_morphed, network_type, to_network_type)) + + if len(argv) < 3 or argv[2] != 'morph': + print('DRY-RUN, WILL NOT COMMIT TRANSACTION') + return os.EX_USAGE + + db_session.commit() + db_session.close() + db_engine.dispose() + return os.EX_OK + + +def usage(program): + """Print information about how to use program. + + :param program: Name of program + :type program: str + """ + print('usage {} db-connection-string [morph]\n' + '\n' + 'Morph non-physical networks of type "gre" and "vxlan" into ' + 'geneve networks.\n' + '\n' + 'The Neutron database must already have enough free "geneve" VNIs\n' + 'before running this tool. If the process stops because there are\n' + 'no more VNIs, increase the VNI range with the `vni_ranges`\n' + 'configuration option on the `ml2_type_geneve` section and then\n' + 'start and stop the neutron-server before trying again.\n' + '\n' + 'The second argument must be the literal string "morph" for the\n' + 'tool to perform an action, otherwise it will not commit the\n' + 'transaction to the database, effectively performing a dry run.\n' + ''.format(program), + file=sys.stderr) + + +def allocate_segment(db_session, network_type): + """Allocate VNI for network_type. + + :param db_session: SQLAlchemy DB Session object. + :type db_session: SQLAlchemy DB Session object. + :param network_type: Network type to allocate vni for. 
+ :type network_type: str + :returns: Allocated VNI + :rtype: int + """ + alloc_table = 'ml2_{}_allocations'.format(network_type) + vni_row = '{}_vni'.format(network_type) + + # Get next available VNI + vni = None + stmt = sqlalchemy.text( + 'SELECT MIN({}) FROM {} WHERE allocated=0' + .format(vni_row, alloc_table)) + rs = db_session.execute(stmt) + for row in rs: + vni = next(row.itervalues()) + # A aggregated query will always provide a result, check for NULL + if vni is None: + raise NotFound( + 'unable to allocate "{}" segment.'.format(network_type)) + break + + # Allocate VNI + stmt = sqlalchemy.text( + 'UPDATE {} SET allocated=1 WHERE {}=:vni'.format(alloc_table, vni_row)) + db_session.execute(stmt, {'vni': vni}) + return vni + + +def deallocate_segment(db_session, network_type, vni): + """Deallocate VNI for network_type. + + :param db_session: SQLAlchemy DB Session object. + :type db_session: SQLAlchemy DB Session object. + :param network_type: Network type to de-allocate vni for. + :type network_type: str + :param vni: VNI + :type vni: int + """ + alloc_table = 'ml2_{}_allocations'.format(network_type) + vni_row = '{}_vni'.format(network_type) + + # De-allocate VNI + stmt = sqlalchemy.text( + 'UPDATE {} SET allocated=0 WHERE {}=:vni'.format(alloc_table, vni_row)) + db_session.execute(stmt, {'vni': vni}) + + +def get_network_segments(db_session, network_type): + """Get tunnel networks of certain type. + + :param db_session: SQLAlchemy DB Session object. + :type db_session: SQLAlchemy DB Session object. + :param network_type: Network type to iterate over. + :type network_type: str + :returns: Iterator for data + :rtype: Iterator[str,str,str,int] + """ + # Get networks + stmt = sqlalchemy.text( + 'SELECT id,network_id,network_type,segmentation_id ' + 'FROM networksegments ' + 'WHERE physical_network IS NULL AND ' + ' network_type=:network_type') + rs = db_session.execute(stmt, {'network_type': network_type}) + for row in rs: + yield row.values() + + +def morph_networks(db_session, from_network_type, to_network_type): + """Morph all networks of one network type to another. + + :param db_session: SQLAlchemy DB Session object. + :type db_session: SQLAlchemy DB Session object. + :param from_network_type: Network type to morph from. + :type from_network_type: str + :param to_network_type: Network type to morph to. 
+ :type to_network_type: str + :returns: Number of networks morphed + :rtype: int + """ + stmt = sqlalchemy.text( + 'UPDATE networksegments ' + 'SET network_type=:new_network_type,segmentation_id=:new_vni ' + 'WHERE id=:id') + n_morphed = 0 + for segment_id, network_id, network_type, vni in get_network_segments( + db_session, from_network_type): + new_vni = allocate_segment(db_session, to_network_type) + db_session.execute(stmt, { + 'new_network_type': to_network_type, + 'new_vni': new_vni, + 'id': segment_id, + }) + print('segment {} for network {} changed from {}:{} to {}:{}' + .format(segment_id, network_id, network_type, vni, + to_network_type, new_vni)) + deallocate_segment(db_session, from_network_type, vni) + n_morphed += 1 + return n_morphed + + +if __name__ == '__main__': + sys.exit(main(sys.argv)) diff --git a/src/tests/tests.yaml b/src/tests/tests.yaml index f4ddae3..193295f 100644 --- a/src/tests/tests.yaml +++ b/src/tests/tests.yaml @@ -1,6 +1,6 @@ charm_name: neutron-api-plugin-ovn smoke_bundles: -- bionic-ussuri +- focal-ussuri gate_bundles: - bionic-train - bionic-ussuri diff --git a/src/wheelhouse.txt b/src/wheelhouse.txt index 3e812d7..bcda05c 100644 --- a/src/wheelhouse.txt +++ b/src/wheelhouse.txt @@ -1 +1,2 @@ zipp<2.0.0 +oslo_config diff --git a/test-requirements.txt b/test-requirements.txt index 0ab97f6..1c8aff7 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -4,7 +4,7 @@ # https://github.com/openstack-charmers/release-tools # # Lint and unit test requirements -flake8>=2.2.4,<=2.4.1 +flake8>=2.2.4 stestr>=2.2.0 requests>=2.18.4 charms.reactive diff --git a/unit_tests/__init__.py b/unit_tests/__init__.py index c573fcb..21d2ab1 100644 --- a/unit_tests/__init__.py +++ b/unit_tests/__init__.py @@ -35,6 +35,8 @@ class _fake_decorator(object): charms = mock.MagicMock() sys.modules['charms'] = charms +charms.layer = mock.MagicMock() +sys.modules['charms.layer'] = charms.layer charms.leadership = mock.MagicMock() sys.modules['charms.leadership'] = charms.leadership charms.reactive = mock.MagicMock() @@ -60,3 +62,5 @@ sys.modules['charms.reactive.flags'] = charms.reactive.flags sys.modules['charms.reactive.relations'] = charms.reactive.relations netaddr = mock.MagicMock() sys.modules['netaddr'] = netaddr +oslo_config = mock.MagicMock() +sys.modules['oslo_config'] = oslo_config diff --git a/unit_tests/test_actions.py b/unit_tests/test_actions.py new file mode 100644 index 0000000..0544950 --- /dev/null +++ b/unit_tests/test_actions.py @@ -0,0 +1,289 @@ +# Copyright 2020 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import os +import sys +import unittest.mock as mock + +sys.path.append('src') + +import charms_openstack.test_utils as test_utils + +import actions.actions as actions + + +class FakeCalledProcess(object): + + returncode = 0 + stdout = 'fake-output-on-stdout' + stderr = 'fake-output-on-stderr' + + +class TestActions(test_utils.PatchHelper): + + def test_neutron_credentials(self): + self.patch_object(actions.cfg, 'ConfigParser') + parser = mock.MagicMock() + self.maxDiff = None + + expect = { + 'OS_USER_DOMAIN_NAME': 'fake-user-domain-name', + 'OS_PROJECT_DOMAIN_NAME': 'fake-project-domain-name', + 'OS_AUTH_URL': 'fake-auth-url', + 'OS_PROJECT_NAME': 'fake-project-name', + 'OS_USERNAME': 'fake-username', + 'OS_PASSWORD': 'fake-password', + } + + def _fakeparser(x, y): + y.update( + { + 'keystone_authtoken': { + 'user_domain_name': ['fake-user-domain-name'], + 'project_domain_name': ['fake-project-domain-name'], + 'auth_url': ['fake-auth-url'], + 'project_name': ['fake-project-name'], + 'username': ['fake-username'], + 'password': ['fake-password'], + }, + }) + return parser + + self.ConfigParser.side_effect = _fakeparser + self.assertDictEqual(actions.get_neutron_credentials(), expect) + self.ConfigParser.assert_called_once_with( + '/etc/neutron/neutron.conf', mock.ANY) + + def test_migrate_mtu(self): + self.patch_object(actions.ch_core.hookenv, 'action_get') + self.action_get.return_value = False + self.patch_object(actions.subprocess, 'run') + fcp = FakeCalledProcess() + self.run.return_value = fcp + self.patch_object(actions, 'get_neutron_credentials') + self.get_neutron_credentials.return_value = { + 'fake-creds': 'from-neutron'} + self.patch('builtins.print', name='builtin_print') + self.patch_object(actions.ch_core.hookenv, 'action_fail') + + actions.migrate_mtu(['/some/path/migrate-mtu']) + self.run.assert_called_once_with( + ( + 'neutron-ovn-migration-mtu', + 'verify', + 'mtu', + ), + capture_output=True, + universal_newlines=True, + env={ + 'PATH': '/usr/bin', + 'fake-creds': 'from-neutron', + }) + self.builtin_print.assert_has_calls([ + mock.call('migrate-mtu: OUTPUT FROM VERIFY ON STDOUT:\n' + 'fake-output-on-stdout', + file=mock.ANY), + mock.call('migrate-mtu: OUTPUT FROM VERIFY ON STDERR:\n' + 'fake-output-on-stderr', + file=mock.ANY), + ]) + self.run.reset_mock() + self.builtin_print.reset_mock() + self.action_get.return_value = True + actions.migrate_mtu(['/some/path/migrate-mtu']) + self.run.assert_called_once_with( + ( + 'neutron-ovn-migration-mtu', + 'update', + 'mtu', + ), + capture_output=True, + universal_newlines=True, + env={ + 'PATH': '/usr/bin', + 'fake-creds': 'from-neutron', + }) + self.builtin_print.assert_has_calls([ + mock.call('migrate-mtu: OUTPUT FROM UPDATE ON STDOUT:\n' + 'fake-output-on-stdout', + file=mock.ANY), + mock.call('migrate-mtu: OUTPUT FROM UPDATE ON STDERR:\n' + 'fake-output-on-stderr', + file=mock.ANY), + ]) + # check that errors are detected + fcp.returncode = 1 + actions.migrate_mtu(['/some/path/migrate-mtu']) + self.action_fail.assert_called_once() + fcp.returncode = 0 + self.action_fail.reset_mock() + fcp.stderr = 'Traceback' + actions.migrate_mtu(['/some/path/migrate-mtu']) + self.action_fail.assert_called_once() + self.action_fail.reset_mock() + fcp.stderr = 'Exception' + actions.migrate_mtu(['/some/path/migrate-mtu']) + self.action_fail.assert_called_once() + + def test_migrate_ovn_db(self): + self.patch_object(actions.ch_core.hookenv, 'action_get') + self.action_get.return_value = False + self.patch_object(actions.subprocess, 'run') + 
+ fcp = FakeCalledProcess() + self.run.return_value = fcp + self.patch('builtins.print', name='builtin_print') + self.patch_object(actions.ch_core.hookenv, 'action_fail') + # NOTE: strictly speaking these really belong to a unit test for the + # write_filtered_neutron_config_for_sync_util helper but since it + # exists only to work around a bug let's just mock them here for + # simplicity and remove it again when the bug is fixed. + self.patch_object(actions.os, 'umask') + self.patch_object(actions.os, 'unlink') + + with mock.patch('builtins.open', create=True): + actions.migrate_ovn_db(['/some/path/migrate-ovn-db']) + self.run.assert_called_once_with( + ( + 'neutron-ovn-db-sync-util', + '--config-file', '/etc/neutron/neutron-ovn-db-sync.conf', + '--config-file', '/etc/neutron/plugins/ml2/ml2_conf.ini', + '--ovn-neutron_sync_mode', 'log', + ), + capture_output=True, + universal_newlines=True, + ) + self.builtin_print.assert_has_calls([ + mock.call('migrate-ovn-db: OUTPUT FROM DRY-RUN ON STDOUT:\n' + 'fake-output-on-stdout', + file=mock.ANY), + mock.call('migrate-ovn-db: OUTPUT FROM DRY-RUN ON STDERR:\n' + 'fake-output-on-stderr', + file=mock.ANY), + ]) + self.run.reset_mock() + self.builtin_print.reset_mock() + self.action_get.return_value = True + actions.migrate_ovn_db(['/some/path/migrate-ovn-db']) + self.run.assert_called_once_with( + ( + 'neutron-ovn-db-sync-util', + '--config-file', '/etc/neutron/neutron-ovn-db-sync.conf', + '--config-file', '/etc/neutron/plugins/ml2/ml2_conf.ini', + '--ovn-neutron_sync_mode', 'repair', + ), + capture_output=True, + universal_newlines=True, + ) + self.builtin_print.assert_has_calls([ + mock.call('migrate-ovn-db: OUTPUT FROM SYNC ON STDOUT:\n' + 'fake-output-on-stdout', + file=mock.ANY), + mock.call('migrate-ovn-db: OUTPUT FROM SYNC ON STDERR:\n' + 'fake-output-on-stderr', + file=mock.ANY), + ]) + # check that errors are detected + fcp.returncode = 1 + actions.migrate_ovn_db(['/some/path/migrate-ovn-db']) + self.action_fail.assert_called_once() + fcp.returncode = 0 + self.action_fail.reset_mock() + fcp.stderr = 'ERROR' + actions.migrate_ovn_db(['/some/path/migrate-ovn-db']) + self.action_fail.assert_called_once() + + def test_get_neutron_db_connection_string(self): + self.patch_object(actions.cfg, 'ConfigParser') + parser = mock.MagicMock() + self.maxDiff = None + + def _fakeparser(x, y): + y.update( + { + 'database': { + 'connection': ['fake-connection'], + }, + }) + return parser + + self.ConfigParser.side_effect = _fakeparser + self.assertEquals( + actions.get_neutron_db_connection_string(), 'fake-connection') + + def test_offline_neutron_morph_db(self): + self.patch_object(actions.ch_core.hookenv, 'action_get') + self.action_get.return_value = False + self.patch_object(actions.subprocess, 'run') + self.patch_object(actions.ch_core.hookenv, 'charm_dir') + self.charm_dir.return_value = '/path/to/charm' + self.patch_object(actions, 'get_neutron_db_connection_string') + self.get_neutron_db_connection_string.return_value = 'fake-connection' + + fcp = FakeCalledProcess() + self.run.return_value = fcp + self.patch('builtins.print', name='builtin_print') + self.patch_object(actions.ch_core.hookenv, 'action_fail') + + actions.offline_neutron_morph_db( + ['/some/path/offline-neutron-morph-db']) + self.run.assert_called_once_with( + ( + os.path.join( + '/path/to/charm/', + 'files/scripts/neutron_offline_network_type_update.py'), + 'fake-connection', + 'dry', + ), + capture_output=True, + universal_newlines=True, + env={'PATH': '/usr/bin'}, + ) + 
self.builtin_print.assert_has_calls([ + mock.call('offline-neutron-morph-db: OUTPUT FROM DRY-RUN ON ' + 'STDOUT:\nfake-output-on-stdout', + file=mock.ANY), + mock.call('offline-neutron-morph-db: OUTPUT FROM DRY-RUN ON ' + 'STDERR:\nfake-output-on-stderr', + file=mock.ANY), + ]) + self.run.reset_mock() + self.action_get.return_value = True + actions.offline_neutron_morph_db( + ['/some/path/offline-neutron-morph-db']) + self.run.assert_called_once_with( + ( + os.path.join( + '/path/to/charm/', + 'files/scripts/neutron_offline_network_type_update.py'), + 'fake-connection', + 'morph', + ), + capture_output=True, + universal_newlines=True, + env={'PATH': '/usr/bin'}, + ) + self.builtin_print.assert_has_calls([ + mock.call('offline-neutron-morph-db: OUTPUT FROM MORPH ON ' + 'STDOUT:\nfake-output-on-stdout', + file=mock.ANY), + mock.call('offline-neutron-morph-db: OUTPUT FROM MORPH ON ' + 'STDERR:\nfake-output-on-stderr', + file=mock.ANY), + ]) + # check that errors are detected + fcp.returncode = 1 + actions.offline_neutron_morph_db( + ['/some/path/offline-neutron-morph-db']) + self.action_fail.assert_called_once()
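
Reviewer note: for anyone who wants to see the database surgery in isolation, below is a minimal, self-contained sketch of the allocate / re-type / deallocate sequence that `neutron_offline_network_type_update.py` performs per segment. It uses the standard-library `sqlite3` module and deliberately simplified versions of the tables the script touches (`ml2_geneve_allocations`, `ml2_vxlan_allocations`, `networksegments`, reduced to only the columns the script's SQL references); the row values such as `seg-1`, `net-1` and the VNI numbers are made up for illustration. It is an illustration of the ordering only, not something to run against a production Neutron database, and unlike the real script it always commits (the script only commits when the literal argument "morph" is given).

#!/usr/bin/env python3
"""Sketch of the per-segment morph sequence: allocate a free geneve VNI,
re-type the segment, then release the old vxlan VNI.  Simplified schema,
made-up data -- NOT the real Neutron schema."""

import sqlite3

conn = sqlite3.connect(':memory:')
cur = conn.cursor()

# Simplified stand-ins for the ML2 tables referenced by the script.
cur.executescript("""
CREATE TABLE ml2_geneve_allocations (geneve_vni INTEGER, allocated INTEGER);
CREATE TABLE ml2_vxlan_allocations (vxlan_vni INTEGER, allocated INTEGER);
CREATE TABLE networksegments (
    id TEXT, network_id TEXT, network_type TEXT,
    segmentation_id INTEGER, physical_network TEXT);

INSERT INTO ml2_geneve_allocations VALUES (2001, 0), (2002, 0);
INSERT INTO ml2_vxlan_allocations VALUES (1001, 1);
INSERT INTO networksegments VALUES ('seg-1', 'net-1', 'vxlan', 1001, NULL);
""")

# 1. Allocate the lowest free geneve VNI (mirrors allocate_segment()).
cur.execute("SELECT MIN(geneve_vni) FROM ml2_geneve_allocations "
            "WHERE allocated=0")
new_vni = cur.fetchone()[0]
# An aggregate query always returns a row; NULL means no free VNIs, in
# which case the vni_ranges for ml2_type_geneve must be grown first.
assert new_vni is not None, 'no free geneve VNIs'
cur.execute("UPDATE ml2_geneve_allocations SET allocated=1 "
            "WHERE geneve_vni=?", (new_vni,))

# 2. Re-type the segment and point it at the new VNI (mirrors morph_networks()).
cur.execute("UPDATE networksegments "
            "SET network_type='geneve', segmentation_id=? "
            "WHERE id=?", (new_vni, 'seg-1'))

# 3. Release the old vxlan VNI (mirrors deallocate_segment()).
cur.execute("UPDATE ml2_vxlan_allocations SET allocated=0 "
            "WHERE vxlan_vni=?", (1001,))

conn.commit()
print(cur.execute("SELECT * FROM networksegments").fetchall())
# [('seg-1', 'net-1', 'geneve', 2001, None)]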