From 003c96b9488e7a025b96fa009b2b7ac2bc200694 Mon Sep 17 00:00:00 2001 From: John Fulton Date: Fri, 3 Dec 2021 21:28:12 +0000 Subject: [PATCH] Add the ability to enable/disable cephadm SSH user Add the `openstack overcloud ceph user enable | disable` command line options, as requested by those who wish to disable cephadm and the user which supports it after deployment. The SSH user and cephadm may be re-enabled when it is necessary to administer the Ceph cluster. This also allows the cephadm SSH user to be created in a separate step. Add --skip-user-create to 'openstack overcloud ceph deploy' in case 'openstack overcloud ceph user enable' was used to handle that step earlier. Conflicts: tripleoclient/utils.py Depends-On: I648cdf8c8920c120049f05f13f8b7b73513899f1 Change-Id: Ibd4513183f59ebb94d841a847ecfab0425ba8f5a (cherry picked from commit 8ab4cc19b376b1018c22629e30b0a51c9e71af59) --- ...isable_and_re_enable-18f3102031a802d0.yaml | 17 + setup.cfg | 2 + tripleoclient/tests/test_utils.py | 137 ++++++++ .../v2/overcloud_ceph/test_overcloud_ceph.py | 167 ++++++++++ tripleoclient/utils.py | 34 ++ tripleoclient/v2/overcloud_ceph.py | 305 +++++++++++++++++- 6 files changed, 651 insertions(+), 11 deletions(-) create mode 100644 releasenotes/notes/ceph_user_disable_and_re_enable-18f3102031a802d0.yaml diff --git a/releasenotes/notes/ceph_user_disable_and_re_enable-18f3102031a802d0.yaml b/releasenotes/notes/ceph_user_disable_and_re_enable-18f3102031a802d0.yaml new file mode 100644 index 000000000..cb888e033 --- /dev/null +++ b/releasenotes/notes/ceph_user_disable_and_re_enable-18f3102031a802d0.yaml @@ -0,0 +1,17 @@ +--- +features: + - | + Two new commands, "openstack overcloud ceph user enable" and + "openstack overcloud ceph user disable", are added. The "enable" + option will create the cephadm SSH user and distribute its + SSH keys to Ceph nodes in the overcloud. The "disable" option + may be run after "openstack overcloud ceph deploy" has been run + to disable cephadm so that it can no longer be used to administer the + Ceph cluster and no "ceph orch ..." CLI commands will function. + This will also prevent Ceph node overcloud scale operations, though + the Ceph cluster will still be able to read/write data. The "ceph + user disable" option will also remove the public and private SSH + keys of the cephadm SSH user on overclouds which host Ceph. The + "ceph user enable" option may also be used to re-distribute the + public and private SSH keys of the cephadm SSH user and re-enable + the cephadm mgr module. 
\ No newline at end of file diff --git a/setup.cfg b/setup.cfg index 85fc5d983..88a9e3e98 100644 --- a/setup.cfg +++ b/setup.cfg @@ -43,6 +43,8 @@ openstack.tripleoclient.v2 = overcloud_netenv_validate = tripleoclient.v1.overcloud_netenv_validate:ValidateOvercloudNetenv overcloud_cell_export = tripleoclient.v1.overcloud_cell:ExportCell overcloud_ceph_deploy = tripleoclient.v2.overcloud_ceph:OvercloudCephDeploy + overcloud_ceph_user_disable = tripleoclient.v2.overcloud_ceph:OvercloudCephUserDisable + overcloud_ceph_user_enable = tripleoclient.v2.overcloud_ceph:OvercloudCephUserEnable overcloud_config_download = tripleoclient.v1.overcloud_config:DownloadConfig overcloud_container_image_upload = tripleoclient.v1.container_image:UploadImage overcloud_container_image_build = tripleoclient.v1.container_image:BuildImage diff --git a/tripleoclient/tests/test_utils.py b/tripleoclient/tests/test_utils.py index 9e4344af8..6202e5d7b 100644 --- a/tripleoclient/tests/test_utils.py +++ b/tripleoclient/tests/test_utils.py @@ -2385,3 +2385,140 @@ class TestGetCephNetworks(TestCase): net_name = utils.get_ceph_networks(cfgfile.name, 'storage', 'storage_mgmt') self.assertEqual(expected, net_name) + + +class TestGetHostsFromCephSpec(TestCase): + + specs = [] + specs.append(yaml.safe_load(''' + addr: 192.168.24.13 + hostname: ceph-0 + labels: + - _admin + - mon + - mgr + service_type: host + ''')) + + specs.append(yaml.safe_load(''' + addr: 192.168.24.20 + hostname: ceph-1 + labels: + - _admin + - mon + - mgr + service_type: host + ''')) + + specs.append(yaml.safe_load(''' + addr: 192.168.24.16 + hostname: ceph-2 + labels: + - _admin + - mon + - mgr + service_type: host + ''')) + + specs.append(yaml.safe_load(''' + addr: 192.168.24.14 + hostname: ceph-3 + labels: + - osd + service_type: host + ''')) + + specs.append(yaml.safe_load(''' + addr: 192.168.24.21 + hostname: ceph-4 + labels: + - osd + service_type: host + ''')) + + specs.append(yaml.safe_load(''' + addr: 192.168.24.17 + hostname: ceph-5 + labels: + - osd + service_type: host + ''')) + + specs.append(yaml.safe_load(''' + placement: + hosts: + - ceph-0 + - ceph-1 + - ceph-2 + service_id: mon + service_name: mon + service_type: mon + ''')) + + specs.append(yaml.safe_load(''' + placement: + hosts: + - ceph-0 + - ceph-1 + - ceph-2 + service_id: mgr + service_name: mgr + service_type: mgr + ''')) + + specs.append(yaml.safe_load(''' + data_devices: + all: true + placement: + hosts: + - ceph-3 + - ceph-4 + - ceph-5 + service_id: default_drive_group + service_name: osd.default_drive_group + service_type: osd + ''')) + + def test_get_hosts_from_ceph_spec(self): + expected = {'ceph__admin': ['ceph-0', 'ceph-1', 'ceph-2'], + 'ceph_mon': ['ceph-0', 'ceph-1', 'ceph-2'], + 'ceph_mgr': ['ceph-0', 'ceph-1', 'ceph-2'], + 'ceph_osd': ['ceph-3', 'ceph-4', 'ceph-5'], + 'ceph_non_admin': ['ceph-3', 'ceph-4', 'ceph-5']} + + cfgfile = tempfile.NamedTemporaryFile() + for spec in self.specs: + with open(cfgfile.name, 'a') as f: + f.write('---\n') + f.write(yaml.safe_dump(spec)) + hosts = utils.get_host_groups_from_ceph_spec(cfgfile.name, + prefix='ceph_') + cfgfile.close() + + self.assertEqual(expected, hosts) + + def test_get_addr_from_ceph_spec(self): + expected = {'_admin': ['192.168.24.13', + '192.168.24.20', + '192.168.24.16'], + 'mon': ['192.168.24.13', + '192.168.24.20', + '192.168.24.16'], + 'mgr': ['192.168.24.13', + '192.168.24.20', + '192.168.24.16'], + 'osd': ['192.168.24.14', + '192.168.24.21', + '192.168.24.17']} + + cfgfile = tempfile.NamedTemporaryFile() + for 
spec in self.specs: + with open(cfgfile.name, 'a') as f: + f.write('---\n') + f.write(yaml.safe_dump(spec)) + hosts = utils.get_host_groups_from_ceph_spec(cfgfile.name, + key='addr', + get_non_admin=False) + cfgfile.close() + + self.assertEqual(expected, hosts) diff --git a/tripleoclient/tests/v2/overcloud_ceph/test_overcloud_ceph.py b/tripleoclient/tests/v2/overcloud_ceph/test_overcloud_ceph.py index 3cedcca33..a581f07eb 100644 --- a/tripleoclient/tests/v2/overcloud_ceph/test_overcloud_ceph.py +++ b/tripleoclient/tests/v2/overcloud_ceph/test_overcloud_ceph.py @@ -43,6 +43,8 @@ class TestOvercloudCephDeploy(fakes.FakePlaybookExecution): mock_get_ceph_networks): arglist = ['deployed-metal.yaml', '--yes', '--stack', 'overcloud', + '--skip-user-create', + '--cephadm-ssh-user', 'jimmy', '--output', 'deployed-ceph.yaml', '--container-namespace', 'quay.io/ceph', '--container-image', 'ceph', @@ -55,11 +57,14 @@ class TestOvercloudCephDeploy(fakes.FakePlaybookExecution): workdir=mock.ANY, playbook_dir=mock.ANY, verbosity=3, + skip_tags='cephadm_ssh_user', + reproduce_command=False, extra_vars={ "baremetal_deployed_path": mock.ANY, "deployed_ceph_tht_path": mock.ANY, "working_dir": mock.ANY, "stack_name": 'overcloud', + 'tripleo_cephadm_ssh_user': 'jimmy', 'tripleo_roles_path': mock.ANY, 'tripleo_cephadm_container_ns': 'quay.io/ceph', 'tripleo_cephadm_container_image': 'ceph', @@ -77,3 +82,165 @@ class TestOvercloudCephDeploy(fakes.FakePlaybookExecution): parsed_args = self.check_parser(self.cmd, arglist, []) self.assertRaises(osc_lib_exc.CommandError, self.cmd.take_action, parsed_args) + + +class TestOvercloudCephUserDisable(fakes.FakePlaybookExecution): + def setUp(self): + super(TestOvercloudCephUserDisable, self).setUp() + + # Get the command object to test + app_args = mock.Mock() + app_args.verbose_level = 1 + self.app.options = fakes.FakeOptions() + self.cmd = overcloud_ceph.OvercloudCephUserDisable(self.app, + app_args) + + @mock.patch('tripleoclient.utils.parse_ansible_inventory', + autospect=True, return_value=['ceph0', 'ceph1', 'compute0']) + @mock.patch('tripleoclient.utils.get_host_groups_from_ceph_spec', + autospect=True, return_value={'_admin': ['ceph0'], + 'non_admin': ['ceph1']}) + @mock.patch('tripleoclient.utils.TempDirs', autospect=True) + @mock.patch('os.path.abspath', autospect=True) + @mock.patch('os.path.exists', autospect=True) + @mock.patch('tripleoclient.utils.run_ansible_playbook', autospec=True) + def test_ceph_user_disable(self, mock_playbook, mock_abspath, + mock_path_exists, mock_tempdirs, + mock_get_host_groups_from_ceph_spec, + mock_parse_ansible_inventory): + arglist = ['ceph_spec.yaml', '--yes', + '--cephadm-ssh-user', 'ceph-admin', + '--stack', 'overcloud', + '--fsid', '7bdfa1a6-d606-562c-bbf7-05f17c35763e'] + parsed_args = self.check_parser(self.cmd, arglist, []) + self.cmd.take_action(parsed_args) + mock_playbook.assert_any_call( + playbook='disable_cephadm.yml', + inventory=mock.ANY, + limit_hosts=mock.ANY, + workdir=mock.ANY, + playbook_dir=mock.ANY, + verbosity=3, + reproduce_command=False, + extra_vars={ + "tripleo_cephadm_fsid": '7bdfa1a6-d606-562c-bbf7-05f17c35763e', + "tripleo_cephadm_action": 'disable' + } + ) + mock_playbook.assert_any_call( + playbook='ceph-admin-user-disable.yml', + inventory=mock.ANY, + limit_hosts='ceph0,ceph1', + workdir=mock.ANY, + playbook_dir=mock.ANY, + verbosity=3, + reproduce_command=False, + extra_vars={ + 'tripleo_cephadm_ssh_user': 'ceph-admin', + } + ) + + @mock.patch('os.path.abspath', autospect=True) + 
@mock.patch('os.path.exists', autospect=True) + def test_ceph_user_disable_no_yes(self, mock_abspath, + mock_path_exists): + arglist = ['ceph_spec.yaml', + '--cephadm-ssh-user', 'ceph-admin', + '--stack', 'overcloud', + '--fsid', '7bdfa1a6-d606-562c-bbf7-05f17c35763e'] + parsed_args = self.check_parser(self.cmd, arglist, []) + self.assertRaises(osc_lib_exc.CommandError, + self.cmd.take_action, parsed_args) + + @mock.patch('os.path.abspath', autospect=True) + @mock.patch('os.path.exists', autospect=True) + def test_ceph_user_disable_invalid_fsid(self, mock_abspath, + mock_path_exists): + arglist = ['ceph_spec.yaml', + '--cephadm-ssh-user', 'ceph-admin', + '--stack', 'overcloud', + '--fsid', 'invalid_fsid'] + parsed_args = self.check_parser(self.cmd, arglist, []) + self.assertRaises(osc_lib_exc.CommandError, + self.cmd.take_action, parsed_args) + + +class TestOvercloudCephUserEnable(fakes.FakePlaybookExecution): + def setUp(self): + super(TestOvercloudCephUserEnable, self).setUp() + + # Get the command object to test + app_args = mock.Mock() + app_args.verbose_level = 1 + self.app.options = fakes.FakeOptions() + self.cmd = overcloud_ceph.OvercloudCephUserEnable(self.app, + app_args) + + @mock.patch('tripleoclient.utils.parse_ansible_inventory', + autospect=True, return_value=['ceph0', 'ceph1', 'compute0']) + @mock.patch('tripleoclient.utils.get_host_groups_from_ceph_spec', + autospect=True, return_value={'_admin': ['ceph0'], + 'non_admin': ['ceph1']}) + @mock.patch('tripleoclient.utils.TempDirs', autospect=True) + @mock.patch('os.path.abspath', autospect=True) + @mock.patch('os.path.exists', autospect=True) + @mock.patch('tripleoclient.utils.run_ansible_playbook', autospec=True) + def test_ceph_user_enable_no_fsid(self, mock_playbook, mock_abspath, + mock_path_exists, mock_tempdirs, + mock_get_host_groups_from_ceph_spec, + mock_parse_ansible_inventory): + arglist = ['ceph_spec.yaml', + '--cephadm-ssh-user', 'ceph-admin', + '--stack', 'overcloud'] + parsed_args = self.check_parser(self.cmd, arglist, []) + self.cmd.take_action(parsed_args) + # only passes if the call is the most recent one + mock_playbook.assert_called_with( + playbook='ceph-admin-user-playbook.yml', + inventory=mock.ANY, + limit_hosts='ceph1,undercloud', + workdir=mock.ANY, + playbook_dir=mock.ANY, + verbosity=3, + reproduce_command=False, + extra_vars={ + "tripleo_admin_user": 'ceph-admin', + "distribute_private_key": False, + } + ) + + @mock.patch('tripleoclient.utils.parse_ansible_inventory', + autospect=True) + @mock.patch('tripleoclient.utils.get_host_groups_from_ceph_spec', + autospect=True) + @mock.patch('tripleoclient.utils.TempDirs', autospect=True) + @mock.patch('os.path.abspath', autospect=True) + @mock.patch('os.path.exists', autospect=True) + @mock.patch('tripleoclient.utils.run_ansible_playbook', autospec=True) + def test_ceph_user_enable_fsid(self, mock_playbook, mock_abspath, + mock_path_exists, mock_tempdirs, + mock_get_host_groups_from_ceph_spec, + mock_parse_ansible_inventory): + arglist = ['ceph_spec.yaml', + '--cephadm-ssh-user', 'ceph-admin', + '--stack', 'overcloud', + '--fsid', '7bdfa1a6-d606-562c-bbf7-05f17c35763e'] + parsed_args = self.check_parser(self.cmd, arglist, []) + self.cmd.take_action(parsed_args) + # ceph-admin-user-playbook.yml is not called when + # get_host_groups_from_ceph_spec returns empty lists + # that use case is covered in test_ceph_user_enable_no_fsid + mock_playbook.assert_called_with( + playbook='disable_cephadm.yml', + inventory=mock.ANY, + limit_hosts=mock.ANY, + 
workdir=mock.ANY, + playbook_dir=mock.ANY, + verbosity=3, + reproduce_command=False, + extra_vars={ + "tripleo_cephadm_fsid": '7bdfa1a6-d606-562c-bbf7-05f17c35763e', + "tripleo_cephadm_backend": 'cephadm', + "tripleo_cephadm_action": 'enable' + } + ) diff --git a/tripleoclient/utils.py b/tripleoclient/utils.py index 9ffcbbc79..1e2b83b1b 100644 --- a/tripleoclient/utils.py +++ b/tripleoclient/utils.py @@ -3105,3 +3105,37 @@ def get_ceph_networks(network_data_path, storage_net_map['ms_bind_ipv6'] = True return storage_net_map + + +def get_host_groups_from_ceph_spec(ceph_spec_path, prefix='', + key='hostname', get_non_admin=True): + """Get hosts per group based on labels in ceph_spec_path file + :param ceph_spec_path: the path to a ceph_spec.yaml file + :param (prefix) append a prefix of the group, e.g. 'ceph_' + :param (key) can be set to 'addr' to retrun IP, defaults to 'hostname' + :param (get_non_admin), get hosts without _admin label, defaults to True + :return: dict mapping each label to a hosts list + """ + hosts = {} + if get_non_admin: + non_admin_key = prefix + 'non_admin' + hosts[non_admin_key] = [] + + with open(ceph_spec_path, 'r') as stream: + try: + for spec in yaml.safe_load_all(stream): + if spec.get('service_type', None) == 'host' and \ + 'labels' in spec.keys(): + for label in spec['labels']: + group_key = prefix + label + if group_key not in hosts.keys(): + hosts[group_key] = [] + hosts[group_key].append(spec[key]) + if get_non_admin and \ + '_admin' not in spec['labels']: + hosts[non_admin_key].append(spec[key]) + except yaml.YAMLError as exc: + raise RuntimeError( + "yaml.safe_load_all(%s) returned '%s'" % (ceph_spec_path, exc)) + + return hosts diff --git a/tripleoclient/v2/overcloud_ceph.py b/tripleoclient/v2/overcloud_ceph.py index 5998c1204..7cbcaf28f 100644 --- a/tripleoclient/v2/overcloud_ceph.py +++ b/tripleoclient/v2/overcloud_ceph.py @@ -15,6 +15,7 @@ import logging import os +import uuid from osc_lib import exceptions as oscexc from osc_lib.i18n import _ @@ -25,9 +26,51 @@ from tripleoclient import constants from tripleoclient import utils as oooutils +def arg_parse_common(parser): + """Multiple classes below need these arguments added + """ + parser.add_argument('--cephadm-ssh-user', dest='cephadm_ssh_user', + help=_("Name of the SSH user used by cephadm. " + "Warning: if this option is used, it " + "must be used consistently for every " + "'openstack overcloud ceph' call. " + "Defaults to 'ceph-admin'. " + "(default=Env: CEPHADM_SSH_USER)"), + default=utils.env("CEPHADM_SSH_USER", + default="ceph-admin")) + + parser.add_argument('--stack', dest='stack', + help=_('Name or ID of heat stack ' + '(default=Env: OVERCLOUD_STACK_NAME)'), + default=utils.env('OVERCLOUD_STACK_NAME', + default='overcloud')) + parser.add_argument( + '--working-dir', action='store', + help=_('The working directory for the deployment where all ' + 'input, output, and generated files will be stored.\n' + 'Defaults to "$HOME/overcloud-deploy/"')) + + return parser + + +def ceph_hosts_in_inventory(ceph_hosts, ceph_spec, inventory): + """Raise command error if any ceph_hosts are not in the inventory + """ + all_host_objs = oooutils.parse_ansible_inventory(inventory, 'all') + all_hosts = list(map(lambda x: str(x), all_host_objs)) + for ceph_host in ceph_hosts['_admin'] + ceph_hosts['non_admin']: + if ceph_host not in all_hosts: + raise oscexc.CommandError( + "Ceph host '%s' from Ceph spec '%s' was " + "not found in Ansible inventory '%s' so " + "unable to modify that host via Ansible." 
+ % (ceph_host, ceph_spec, inventory)) + + class OvercloudCephDeploy(command.Command): log = logging.getLogger(__name__ + ".OvercloudCephDeploy") + auth_required = False def get_parser(self, prog_name): parser = super(OvercloudCephDeploy, self).get_parser(prog_name) @@ -46,16 +89,13 @@ class OvercloudCephDeploy(command.Command): help=_('Skip yes/no prompt before overwriting an ' 'existing output file ' '(assume yes).')) - parser.add_argument('--stack', dest='stack', - help=_('Name or ID of heat stack ' - '(default=Env: OVERCLOUD_STACK_NAME)'), - default=utils.env('OVERCLOUD_STACK_NAME', - default='overcloud')) - parser.add_argument( - '--working-dir', action='store', - help=_('The working directory for the deployment where all ' - 'input, output, and generated files will be stored.\n' - 'Defaults to "$HOME/overcloud-deploy/"')) + parser.add_argument('--skip-user-create', default=False, + action='store_true', + help=_("Do not create the cephadm SSH user. " + "This user is necessary to deploy but " + "may be created in a separate step via " + "'openstack overcloud ceph user enable'.")) + parser = arg_parse_common(parser) parser.add_argument('--roles-data', help=_( "Path to an alternative roles_data.yaml. " @@ -230,7 +270,6 @@ class OvercloudCephDeploy(command.Command): "working_dir": working_dir, "stack_name": parsed_args.stack, } - # optional paths to pass to playbook if parsed_args.roles_data: if not os.path.exists(parsed_args.roles_data): @@ -339,6 +378,15 @@ class OvercloudCephDeploy(command.Command): extra_vars['tripleo_cephadm_registry_username'] = \ parsed_args.registry_username + if parsed_args.skip_user_create: + skip_tags = 'cephadm_ssh_user' + else: + skip_tags = '' + + if parsed_args.cephadm_ssh_user: + extra_vars["tripleo_cephadm_ssh_user"] = \ + parsed_args.cephadm_ssh_user + # call the playbook with oooutils.TempDirs() as tmp: oooutils.run_ansible_playbook( @@ -348,4 +396,239 @@ class OvercloudCephDeploy(command.Command): playbook_dir=constants.ANSIBLE_TRIPLEO_PLAYBOOKS, verbosity=oooutils.playbook_verbosity(self=self), extra_vars=extra_vars, + reproduce_command=False, + skip_tags=skip_tags, ) + + +class OvercloudCephUserDisable(command.Command): + + log = logging.getLogger(__name__ + ".OvercloudCephUserDisable") + auth_required = False + + def get_parser(self, prog_name): + parser = super(OvercloudCephUserDisable, self).get_parser(prog_name) + parser.add_argument('ceph_spec', + metavar='', + help=_( + "Path to an existing Ceph spec file " + "which describes the Ceph cluster " + "where the cephadm SSH user will have " + "their public and private keys removed " + "and cephadm will be disabled. " + "Spec file is necessary to determine " + "which nodes to modify. " + "WARNING: Ceph cluster administration or " + "modification will no longer function.")) + parser.add_argument('-y', '--yes', default=False, action='store_true', + help=_('Skip yes/no prompt before disabling ' + 'cephadm and its SSH user. ' + '(assume yes).')) + parser = arg_parse_common(parser) + required = parser.add_argument_group('required named arguments') + required.add_argument('--fsid', + metavar='', required=True, + help=_("The FSID of the Ceph cluster to be " + "disabled. 
Required for disable option.")) + + return parser + + def take_action(self, parsed_args): + self.log.debug("take_action(%s)" % parsed_args) + + ceph_spec = os.path.abspath(parsed_args.ceph_spec) + + if not os.path.exists(ceph_spec): + raise oscexc.CommandError( + "Ceph spec file does not exist:" + " %s" % parsed_args.ceph_spec) + + overwrite = parsed_args.yes + if (not overwrite + and not oooutils.prompt_user_for_confirmation( + 'Are you sure you want to disable Ceph ' + 'cluster management [y/N]?', + self.log)): + raise oscexc.CommandError("Will not disable cephadm and delete " + "the cephadm SSH user :" + " %s. See the --yes parameter to " + "override this behavior. " % + parsed_args.cephadm_ssh_user) + else: + overwrite = True + + # use stack and working_dir to find inventory + if not parsed_args.working_dir: + working_dir = oooutils.get_default_working_dir( + parsed_args.stack) + else: + working_dir = os.path.abspath(parsed_args.working_dir) + oooutils.makedirs(working_dir) + + inventory = os.path.join(working_dir, + constants.TRIPLEO_STATIC_INVENTORY) + if not os.path.exists(inventory): + raise oscexc.CommandError( + "Inventory file not found in working directory: " + "%s. It should have been created by " + "'openstack overcloud node provision'." + % inventory) + ceph_hosts = oooutils.get_host_groups_from_ceph_spec(ceph_spec) + ceph_hosts_in_inventory(ceph_hosts, ceph_spec, inventory) + + if parsed_args.fsid: + try: + uuid.UUID(parsed_args.fsid) + except ValueError: + raise oscexc.CommandError( + "--fsid %s is not a valid UUID." + % parsed_args.fsid) + + if parsed_args.fsid: # if no FSID, then no ceph cluster to disable + # call the playbook to toggle cephadm w/ disable + # if tripleo_cephadm_backend isn't set it defaults to '' + extra_vars = { + "tripleo_cephadm_fsid": parsed_args.fsid, + "tripleo_cephadm_action": 'disable', + } + with oooutils.TempDirs() as tmp: + oooutils.run_ansible_playbook( + playbook='disable_cephadm.yml', + inventory=inventory, + workdir=tmp, + playbook_dir=constants.ANSIBLE_TRIPLEO_PLAYBOOKS, + verbosity=oooutils.playbook_verbosity(self=self), + extra_vars=extra_vars, + limit_hosts=ceph_hosts['_admin'][0], + reproduce_command=False, + ) + + # call the playbook to remove ssh_user_keys + extra_vars = { + "tripleo_cephadm_ssh_user": parsed_args.cephadm_ssh_user + } + if len(ceph_hosts['_admin']) > 0 or len(ceph_hosts['non_admin']) > 0: + with oooutils.TempDirs() as tmp: + oooutils.run_ansible_playbook( + playbook='ceph-admin-user-disable.yml', + inventory=inventory, + workdir=tmp, + playbook_dir=constants.ANSIBLE_TRIPLEO_PLAYBOOKS, + verbosity=oooutils.playbook_verbosity(self=self), + extra_vars=extra_vars, + limit_hosts=",".join(ceph_hosts['_admin'] + + ceph_hosts['non_admin']), + reproduce_command=False, + ) + + +class OvercloudCephUserEnable(command.Command): + + log = logging.getLogger(__name__ + ".OvercloudCephUserEnable") + auth_required = False + + def get_parser(self, prog_name): + parser = super(OvercloudCephUserEnable, self).get_parser(prog_name) + parser.add_argument('ceph_spec', + metavar='', + help=_( + "Path to an existing Ceph spec file " + "which describes the Ceph cluster " + "where the cephadm SSH user will be " + "created (if necessary) and have their " + "public and private keys installed. " + "Spec file is necessary to determine " + "which nodes to modify and if " + "a public or private key is required.")) + parser.add_argument('--fsid', + metavar='', required=False, + help=_("The FSID of the Ceph cluster to be " + "(re-)enabled. 
If the user disable " + "option has been used, the FSID may " + "be passed to the user enable option " + "so that cephadm will be re-enabled " + "for the Ceph cluster idenified " + "by the FSID.")) + parser = arg_parse_common(parser) + + return parser + + def take_action(self, parsed_args): + self.log.debug("take_action(%s)" % parsed_args) + + if parsed_args.fsid: + try: + uuid.UUID(parsed_args.fsid) + except ValueError: + raise oscexc.CommandError( + "--fsid %s is not a valid UUID." + % parsed_args.fsid) + + ceph_spec = os.path.abspath(parsed_args.ceph_spec) + + if not os.path.exists(ceph_spec): + raise oscexc.CommandError( + "Ceph spec file does not exist:" + " %s" % parsed_args.ceph_spec) + + # use stack and working_dir to find inventory + if not parsed_args.working_dir: + working_dir = oooutils.get_default_working_dir( + parsed_args.stack) + else: + working_dir = os.path.abspath(parsed_args.working_dir) + oooutils.makedirs(working_dir) + + inventory = os.path.join(working_dir, + constants.TRIPLEO_STATIC_INVENTORY) + if not os.path.exists(inventory): + raise oscexc.CommandError( + "Inventory file not found in working directory: " + "%s. It should have been created by " + "'openstack overcloud node provision'." + % inventory) + + # get ceph hosts from spec and make sure they're in the inventory + ceph_hosts = oooutils.get_host_groups_from_ceph_spec(ceph_spec) + ceph_hosts_in_inventory(ceph_hosts, ceph_spec, inventory) + + extra_vars = { + "tripleo_admin_user": parsed_args.cephadm_ssh_user, + "distribute_private_key": True + } + for limit_list in [ceph_hosts['_admin'], ceph_hosts['non_admin']]: + if len(limit_list) > 0: + # need to include the undercloud where the keys are generated + limit_list.append('undercloud') + with oooutils.TempDirs() as tmp: + oooutils.run_ansible_playbook( + playbook='ceph-admin-user-playbook.yml', + inventory=inventory, + workdir=tmp, + playbook_dir=constants.ANSIBLE_TRIPLEO_PLAYBOOKS, + verbosity=oooutils.playbook_verbosity(self=self), + extra_vars=extra_vars, + limit_hosts=",".join(limit_list), + reproduce_command=False, + ) + # _admin hosts are done now so don't distribute private key + extra_vars["distribute_private_key"] = False + + if parsed_args.fsid: # if no FSID, then no ceph cluster to disable + # Call the playbook to toggle cephadm w/ enable + extra_vars = { + "tripleo_cephadm_fsid": parsed_args.fsid, + "tripleo_cephadm_backend": 'cephadm', + "tripleo_cephadm_action": 'enable' + } + with oooutils.TempDirs() as tmp: + oooutils.run_ansible_playbook( + playbook='disable_cephadm.yml', + inventory=inventory, + workdir=tmp, + playbook_dir=constants.ANSIBLE_TRIPLEO_PLAYBOOKS, + verbosity=oooutils.playbook_verbosity(self=self), + extra_vars=extra_vars, + limit_hosts=ceph_hosts['_admin'][0], + reproduce_command=False, + )
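Reviewer note: the new utils.get_host_groups_from_ceph_spec() helper groups the hosts declared in a cephadm spec file by label, with an extra non_admin bucket for labelled hosts that lack the _admin label. Below is a minimal standalone sketch of that grouping behaviour for anyone who wants to try it outside the client; the SPEC string and the hosts_by_label() name are illustrative only and are not part of the patch, which reads the spec from a file path instead.

import yaml

SPEC = """\
---
addr: 192.168.24.13
hostname: ceph-0
labels: [_admin, mon, mgr]
service_type: host
---
addr: 192.168.24.14
hostname: ceph-3
labels: [osd]
service_type: host
"""

def hosts_by_label(spec_text, prefix='', key='hostname', get_non_admin=True):
    """Group host entries of a multi-document Ceph spec by label."""
    hosts = {}
    if get_non_admin:
        hosts[prefix + 'non_admin'] = []
    for doc in yaml.safe_load_all(spec_text):
        # only 'host' entries with labels contribute to the groups
        if doc.get('service_type') != 'host' or 'labels' not in doc:
            continue
        for label in doc['labels']:
            hosts.setdefault(prefix + label, []).append(doc[key])
        if get_non_admin and '_admin' not in doc['labels']:
            hosts[prefix + 'non_admin'].append(doc[key])
    return hosts

print(hosts_by_label(SPEC, prefix='ceph_'))
# -> {'ceph_non_admin': ['ceph-3'], 'ceph__admin': ['ceph-0'],
#     'ceph_mon': ['ceph-0'], 'ceph_mgr': ['ceph-0'], 'ceph_osd': ['ceph-3']}

The double underscore in 'ceph__admin' is expected: the 'ceph_' prefix is concatenated with the '_admin' label, which is why the unit test in test_utils.py asserts that key.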
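Reviewer note: OvercloudCephUserEnable distributes keys in two passes. Hosts carrying the _admin label (plus the undercloud, where the keys are generated) receive both the public and the private SSH key, while the remaining Ceph hosts receive only the public key. The sketch below only illustrates that ordering; plan_key_distribution() and the returned dicts are invented for the example, whereas the command itself drives ceph-admin-user-playbook.yml through run_ansible_playbook().

def plan_key_distribution(ceph_hosts, ssh_user='ceph-admin'):
    """Return the playbook invocations the enable flow would perform."""
    runs = []
    distribute_private_key = True
    for limit_list in (list(ceph_hosts.get('_admin', [])),
                       list(ceph_hosts.get('non_admin', []))):
        if limit_list:
            # the undercloud holds the generated keys, so it is always
            # included in the host limit
            runs.append({
                'playbook': 'ceph-admin-user-playbook.yml',
                'limit_hosts': ','.join(limit_list + ['undercloud']),
                'extra_vars': {
                    'tripleo_admin_user': ssh_user,
                    'distribute_private_key': distribute_private_key,
                },
            })
        # the _admin pass runs first; later passes get the public key only
        distribute_private_key = False
    return runs

for run in plan_key_distribution({'_admin': ['ceph0'],
                                  'non_admin': ['ceph1']}):
    print(run)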
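Reviewer note: both commands reuse the disable_cephadm.yml playbook to toggle cephadm itself and differ only in the extra_vars they pass: disable sends the FSID with action 'disable', while enable additionally sets tripleo_cephadm_backend to 'cephadm'. A small sketch of that difference, with cephadm_toggle_vars() as an illustrative helper name only:

def cephadm_toggle_vars(fsid, action):
    """Build the extra_vars passed to disable_cephadm.yml for a cluster."""
    extra_vars = {'tripleo_cephadm_fsid': fsid,
                  'tripleo_cephadm_action': action}
    if action == 'enable':
        # re-enabling also restores the cephadm backend; when disabling,
        # tripleo_cephadm_backend is left unset and defaults to ''
        extra_vars['tripleo_cephadm_backend'] = 'cephadm'
    return extra_vars

print(cephadm_toggle_vars('7bdfa1a6-d606-562c-bbf7-05f17c35763e', 'disable'))
print(cephadm_toggle_vars('7bdfa1a6-d606-562c-bbf7-05f17c35763e', 'enable'))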