
Merge "Introduce tripleo_cephadm role"

changes/69/776969/1
Zuul, 2 months ago, committed by Gerrit Code Review
parent commit ca3063c48b
27 changed files with 2724 additions and 11 deletions
  1. .pre-commit-config.yaml (+1, -0)
  2. doc/source/roles/role-tripleo_cephadm.rst (+117, -0)
  3. tripleo_ansible/ansible_plugins/modules/ceph_key.py (+748, -0)
  4. tripleo_ansible/ansible_plugins/modules/ceph_pool.py (+766, -0)
  5. tripleo_ansible/roles/tripleo_cephadm/defaults/main.yml (+36, -0)
  6. tripleo_ansible/roles/tripleo_cephadm/files/.gitkeep (+0, -0)
  7. tripleo_ansible/roles/tripleo_cephadm/files/ceph_spec.yaml (+18, -0)
  8. tripleo_ansible/roles/tripleo_cephadm/meta/main.yml (+42, -0)
  9. tripleo_ansible/roles/tripleo_cephadm/molecule/default/Dockerfile (+37, -0)
  10. tripleo_ansible/roles/tripleo_cephadm/molecule/default/converge.yml (+50, -0)
  11. tripleo_ansible/roles/tripleo_cephadm/molecule/default/mock_ceph_keys.yml (+109, -0)
  12. tripleo_ansible/roles/tripleo_cephadm/molecule/default/mock_ceph_mon_dump.json (+92, -0)
  13. tripleo_ansible/roles/tripleo_cephadm/molecule/default/molecule.yml (+48, -0)
  14. tripleo_ansible/roles/tripleo_cephadm/molecule/default/prepare.yml (+40, -0)
  15. tripleo_ansible/roles/tripleo_cephadm/molecule/default/verify.yml (+90, -0)
  16. tripleo_ansible/roles/tripleo_cephadm/tasks/apply_spec.yaml (+55, -0)
  17. tripleo_ansible/roles/tripleo_cephadm/tasks/bootstrap.yaml (+83, -0)
  18. tripleo_ansible/roles/tripleo_cephadm/tasks/ceph_cli.yaml (+27, -0)
  19. tripleo_ansible/roles/tripleo_cephadm/tasks/export.yaml (+80, -0)
  20. tripleo_ansible/roles/tripleo_cephadm/tasks/keys.yaml (+38, -0)
  21. tripleo_ansible/roles/tripleo_cephadm/tasks/main.yml (+2, -0)
  22. tripleo_ansible/roles/tripleo_cephadm/tasks/pools.yaml (+41, -0)
  23. tripleo_ansible/roles/tripleo_cephadm/tasks/pre.yaml (+113, -0)
  24. tripleo_ansible/roles/tripleo_cephadm/tasks/wait_for_expected_num_mons.yaml (+44, -0)
  25. tripleo_ansible/roles/tripleo_cephadm/templates/ceph.conf.j2 (+13, -0)
  26. tripleo_ansible/roles/tripleo_cephadm/templates/ceph_client.yaml.j2 (+14, -0)
  27. zuul.d/molecule.yaml (+20, -11)

.pre-commit-config.yaml (+1, -0)

@@ -13,6 +13,7 @@ repos:
- id: debug-statements
- id: check-yaml
files: .*\.(yaml|yml)$
args: [--allow-multiple-documents]
- repo: https://gitlab.com/pycqa/flake8
rev: 3.8.3
hooks:


doc/source/roles/role-tripleo_cephadm.rst (+117, -0)

@@ -0,0 +1,117 @@
======================
Role - tripleo_cephadm
======================
.. ansibleautoplugin::
   :role: tripleo_ansible/roles/tripleo_cephadm
About
~~~~~
An Ansible role for TripleO integration with Ceph clusters deployed with
`cephadm`_ and managed with Ceph `orchestrator`_.
This role is provided as part of the implementation of the `tripleo_ceph_spec`_.
It is an Ansible wrapper to call the Ceph tools `cephadm`_ and `orchestrator`_
and it contains the Ansible module `ceph_key`_ from `ceph-ansible`_.
Assumptions
~~~~~~~~~~~
- This role assumes it has an inventory with a single host, known as the
  `bootstrap_host`. An inventory generated by `tripleo-ansible-inventory`
  will have a `mons` group, so the first node in this group is a good
  candidate for this host.
- The `cephadm`_ binary must be installed on the `bootstrap_host`.
- Though there only needs to be one Ceph node in the inventory, `cephadm`_
  will configure the other servers over SSH. Thus, the following playbook
  should be run before any playbook which uses this role, in order to
  configure the `ceph-admin` user on the overcloud with the SSH keys that
  `cephadm`_ requires.
.. code-block:: bash

   ansible-playbook -i $INV \
       tripleo-ansible/tripleo_ansible/playbooks/cli-enable-ssh-admin.yaml \
       -e @ceph-admin.yml
Where `ceph-admin.yml` contains something like the following:
.. code-block:: YAML

   ---
   tripleo_admin_user: ceph-admin
   ssh_servers: "{{ groups['mons'] }}"
   distribute_private_key: true
The `ssh_servers` variable should be expanded to contain any other nodes
hosting Ceph, e.g. `osds`.
- A `cephadm-spec`_ file should be provided which references the Ceph services
  to be run on the other `ssh_servers`. The path to this file can be set with
  the `ceph_spec` variable; a minimal illustrative spec is sketched below.
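The following sketch only illustrates the general shape of such a spec; the
hostnames, addresses, and labels are placeholders, and this is not the
`ceph_spec.yaml` file shipped with the role.

.. code-block:: YAML

   ---
   # placeholder host entry; adjust hostname, addr, and labels to the environment
   service_type: host
   addr: 192.168.24.10
   hostname: oc0-ceph-0
   labels:
     - mon
     - osd
   ---
   service_type: mon
   placement:
     label: mon
   ---
   service_type: osd
   service_id: default_drive_group
   placement:
     label: osd
   data_devices:
     all: true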
Usage
~~~~~
Here is an example of a playbook which bootstraps the first Ceph monitor
and then applies a spec file to add other hosts. It then creates RBD pools
for Nova, Cinder, and Glance and a cephx keyring called `openstack` to access
those pools. Finally, it exports a file which can be passed as input to the
`tripleo_ceph_client` role so that an overcloud can be configured to use the
deployed Ceph cluster.
.. code-block:: YAML

   - name: Deploy Ceph with cephadm
     hosts: mons[0]
     vars:
       bootstrap_host: "{{ groups['mons'][0] }}"
       tripleo_cephadm_spec_on_bootstrap: false
       pools:
         - vms
         - volumes
         - images
     tasks:
       - name: Satisfy Ceph prerequisites
         import_role:
           name: tripleo_cephadm
           tasks_from: pre

       - name: Bootstrap Ceph
         import_role:
           name: tripleo_cephadm
           tasks_from: bootstrap

       - name: Apply Ceph spec
         import_role:
           name: tripleo_cephadm
           tasks_from: apply_spec
         when: not tripleo_cephadm_spec_on_bootstrap

       - name: Create Pools
         import_role:
           name: tripleo_cephadm
           tasks_from: pools

       - name: Create Keys
         import_role:
           name: tripleo_cephadm
           tasks_from: keys

       - name: Export configuration for tripleo_ceph_client
         import_role:
           name: tripleo_cephadm
           tasks_from: export
         vars:
           cephx_keys:
             - client.openstack
.. _tripleo_ceph_spec: https://specs.openstack.org/openstack/tripleo-specs/specs/wallaby/tripleo-ceph.html
.. _cephadm: https://docs.ceph.com/en/latest/cephadm/
.. _orchestrator: https://docs.ceph.com/en/latest/mgr/orchestrator/
.. _ceph_key: https://github.com/ceph/ceph-ansible/blob/master/library/ceph_key.py
.. _ceph-ansible: https://github.com/ceph/ceph-ansible/
.. _cephadm-spec: https://tracker.ceph.com/issues/44205

tripleo_ansible/ansible_plugins/modules/ceph_key.py (+748, -0)

@@ -0,0 +1,748 @@
#!/usr/bin/python3
# Copyright 2018, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Included from: https://github.com/ceph/ceph-ansible/blob/master/library/ceph_key.py
from __future__ import absolute_import, division, print_function
__metaclass__ = type
from ansible.module_utils.basic import AnsibleModule
import datetime
import json
import yaml
import os
import struct
import time
import base64
import socket
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = """
---
module: ceph_key
author: Sebastien Han <seb@redhat.com>
short_description: Manage Cephx key(s)
version_added: "2.6"
notes: []
description:
- Manage CephX creation, deletion and updates.
It can also list and get information about keyring(s).
requirements:
- None
options:
cluster:
description:
- The ceph cluster name.
required: false
type: str
default: ceph
name:
description:
- name of the CephX key
type: str
required: true
user:
description:
- entity used to perform operation.
It corresponds to the -n option (--name)
type: str
required: false
default: client.admin
user_key:
description:
- the path to the keyring corresponding to the
user being used. It corresponds to the -k
option (--keyring)
type: str
state:
description:
- If 'present' is used, the module creates a keyring
with the associated capabilities.
If 'present' is used and a secret is provided, the module
will always add the key, which means it will update
the keyring if the secret changes; the same goes for
the capabilities.
If 'absent' is used, the module will simply delete the keyring.
If 'list' is used, the module will list all the keys and will
return a json output.
If 'info' is used, the module will return in a json format the
description of a given keyring.
If 'generate_secret' is used, the module will simply output a cephx keyring.
required: false
type: str
choices: ['present', 'update', 'absent', 'list', 'info', 'fetch_initial_keys', 'generate_secret']
default: 'present'
caps:
description:
- CephX key capabilities
type: dict
required: false
secret:
description:
- keyring's secret value
required: false
type: str
import_key:
description:
- Whether or not to import the created keyring into Ceph.
This can be useful for someone that only wants to generate keyrings
but not add them into Ceph.
required: false
type: bool
default: true
dest:
description:
- Destination to write the keyring; can be a file or a directory.
required: false
type: str
default: '/etc/ceph/'
fetch_initial_keys:
description:
- Fetch client.admin and bootstrap key.
This is only needed for Nautilus and above.
Writes down to the filesystem the initial keys generated by the monitor.
This command can ONLY run from a monitor node.
required: false
type: str
default: 'false'
output_format:
description:
- The key output format when retrieving the information of an
entity.
required: false
type: str
default: 'json'
"""
EXAMPLES = '''
keys_to_create:
- { name: client.key, key: "AQAin8tUUK84ExAA/QgBtI7gEMWdmnvKBzlXdQ==", \
caps: { mon: "allow rwx", mds: "allow *" } , mode: "0600" }
- { name: client.cle, caps: { mon: "allow r", osd: "allow *" } , mode: "0600" }
caps:
mon: "allow rwx"
mds: "allow *"
- name: create ceph admin key
ceph_key:
name: client.admin
state: present
secret: AQAin8tU2DsKFBAAFIAzVTzkL3+gtAjjpQiomw==
caps:
mon: allow *
osd: allow *
mgr: allow *
mds: allow
mode: 0400
import_key: false
- name: create monitor initial keyring
ceph_key:
name: mon.
state: present
secret: AQAin8tUMICVFBAALRHNrV0Z4MXupRw4v9JQ6Q==
caps:
mon: allow *
dest: "/var/lib/ceph/tmp/"
import_key: false
- name: create cephx key
ceph_key:
name: "{{ keys_to_create }}"
user: client.bootstrap-rgw
user_key: /var/lib/ceph/bootstrap-rgw/ceph.keyring
state: present
caps: "{{ caps }}"
- name: create cephx key but don't import it in Ceph
ceph_key:
name: "{{ keys_to_create }}"
state: present
caps: "{{ caps }}"
import_key: false
- name: delete cephx key
ceph_key:
name: "my_key"
state: absent
- name: info cephx key
ceph_key:
name: "my_key""
state: info
- name: info cephx admin key (plain)
ceph_key:
name: client.admin
output_format: plain
state: info
register: client_admin_key
- name: list cephx keys
ceph_key:
state: list
- name: fetch cephx keys
ceph_key:
state: fetch_initial_keys
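# Illustrative addition (not among the upstream examples): generate a cephx
# secret without importing anything into the cluster; the secret is returned
# in the registered result's stdout
- name: generate a cephx secret
  ceph_key:
    name: client.generated
    state: generate_secret
  register: generated_secret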
'''
RETURN = '''# '''
CEPH_INITIAL_KEYS = ['client.admin',
'client.bootstrap-mds', 'client.bootstrap-mgr',
'client.bootstrap-osd', 'client.bootstrap-rbd',
'client.bootstrap-rbd-mirror', 'client.bootstrap-rgw']
def fatal(message, module):
'''
Report a fatal error and exit
'''
if module:
module.fail_json(msg=message, rc=1)
else:
raise(Exception(message))
def generate_secret():
'''
Generate a CephX secret
'''
key = os.urandom(16)
header = struct.pack('<hiih', 1, int(time.time()), 0, len(key))
secret = base64.b64encode(header + key)
return secret
def generate_caps(_type, caps):
'''
Generate CephX capabilities list
'''
caps_cli = []
for k, v in caps.items():
# makes sure someone didn't pass an empty var,
# we don't want to add an empty cap
if len(k) == 0:
continue
if _type == "ceph-authtool":
caps_cli.extend(["--cap"])
caps_cli.extend([k, v])
return caps_cli
def generate_ceph_cmd(cluster, args, user, user_key_path, container_image=None):
'''
Generate 'ceph' command line to execute
'''
if container_image:
binary = 'ceph'
cmd = container_exec(
binary, container_image)
else:
binary = ['ceph']
cmd = binary
base_cmd = [
'-n',
user,
'-k',
user_key_path,
'--cluster',
cluster,
'auth',
]
cmd.extend(base_cmd + args)
return cmd
# Start TripleO change
# Tripleo only needs ca_common module_utils for this module.
# Rather than add to tripleo-ansible's module_utils, insert 6 functions here
# https://github.com/ceph/ceph-ansible/blob/master/module_utils/ca_common.py
def container_exec(binary, container_image):
'''
Build the docker CLI to run a command inside a container
'''
container_binary = os.getenv('CEPH_CONTAINER_BINARY')
command_exec = [container_binary,
'run',
'--rm',
'--net=host',
'-v', '/etc/ceph:/etc/ceph:z',
'-v', '/var/lib/ceph/:/var/lib/ceph/:z',
'-v', '/var/log/ceph/:/var/log/ceph/:z',
'--entrypoint={}'.format(binary), container_image]
return command_exec
def is_containerized():
'''
Check if we are running on a containerized cluster
'''
if 'CEPH_CONTAINER_IMAGE' in os.environ:
container_image = os.getenv('CEPH_CONTAINER_IMAGE')
else:
container_image = None
return container_image
# End TripleO change
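# Illustrative usage note (not from the upstream module): when the target
# cluster is containerized, the calling Ansible task is expected to export
# CEPH_CONTAINER_IMAGE and CEPH_CONTAINER_BINARY so the two functions above
# wrap every command in a container run, e.g. (hypothetical values):
#
#   - name: create the openstack key inside the ceph container
#     ceph_key:
#       name: client.openstack
#       caps:
#         mon: "profile rbd"
#         osd: "profile rbd pool=vms"
#     environment:
#       CEPH_CONTAINER_IMAGE: docker.io/ceph/ceph:v15
#       CEPH_CONTAINER_BINARY: podman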
def generate_ceph_authtool_cmd(cluster, name, secret, caps, dest, container_image=None):
'''
Generate 'ceph-authtool' command line to execute
'''
if container_image:
binary = 'ceph-authtool'
cmd = container_exec(
binary, container_image)
else:
binary = ['ceph-authtool']
cmd = binary
base_cmd = [
'--create-keyring',
dest,
'--name',
name,
'--add-key',
secret,
]
cmd.extend(base_cmd)
cmd.extend(generate_caps("ceph-authtool", caps))
return cmd
def create_key(module, result, cluster, user, user_key_path, name, secret, caps,
import_key, dest, container_image=None):
'''
Create a CephX key
'''
cmd_list = []
if not secret:
secret = generate_secret()
if user == 'client.admin':
args = ['import', '-i', dest]
else:
args = ['get-or-create', name]
args.extend(generate_caps(None, caps))
args.extend(['-o', dest])
cmd_list.append(generate_ceph_authtool_cmd(
cluster, name, secret, caps, dest, container_image))
if import_key or user != 'client.admin':
cmd_list.append(generate_ceph_cmd(
cluster, args, user, user_key_path, container_image))
return cmd_list
def delete_key(cluster, user, user_key_path, name, container_image=None):
'''
Delete a CephX key
'''
cmd_list = []
args = [
'del',
name,
]
cmd_list.append(generate_ceph_cmd(
cluster, args, user, user_key_path, container_image))
return cmd_list
def get_key(cluster, user, user_key_path, name, dest, container_image=None):
'''
Get a CephX key (write on the filesystem)
'''
cmd_list = []
args = [
'get',
name,
'-o',
dest,
]
cmd_list.append(generate_ceph_cmd(
cluster, args, user, user_key_path, container_image))
return cmd_list
def info_key(cluster, name, user, user_key_path, output_format,
container_image=None):
'''
Get information about a CephX key
'''
cmd_list = []
args = [
'get',
name,
'-f',
output_format,
]
cmd_list.append(generate_ceph_cmd(
cluster, args, user, user_key_path, container_image))
return cmd_list
def list_keys(cluster, user, user_key_path, container_image=None):
'''
List all CephX keys
'''
cmd_list = []
args = [
'ls',
'-f',
'json',
]
cmd_list.append(generate_ceph_cmd(
cluster, args, user, user_key_path, container_image))
return cmd_list
def exec_commands(module, cmd_list):
'''
Execute command(s)
'''
for cmd in cmd_list:
rc, out, err = module.run_command(cmd)
if rc != 0:
return rc, cmd, out, err
return rc, cmd, out, err
def lookup_ceph_initial_entities(module, out):
'''
Lookup Ceph initial keys entries in the auth map
'''
# convert out to json, ansible returns a string...
try:
out_dict = json.loads(out)
except ValueError as e:
fatal("Could not decode 'ceph auth list' json "
" output: {}".format(e), module)
entities = []
if "auth_dump" in out_dict:
for key in out_dict["auth_dump"]:
for k, v in key.items():
if k == "entity":
if v in CEPH_INITIAL_KEYS:
entities.append(v)
else:
fatal("'auth_dump' key not present in json output:", module)
if len(entities) != len(CEPH_INITIAL_KEYS):
# must be missing in auth_dump, as if it were in CEPH_INITIAL_KEYS
# it'd be in entities from the above test. Report what's missing.
missing = []
for e in CEPH_INITIAL_KEYS:
if e not in entities:
missing.append(e)
fatal("initial keyring does not "
"contain keys: " + ' '.join(missing), module)
return entities
def build_key_path(cluster, entity):
'''
Build key path depending on the key type
'''
if "admin" in entity:
path = "/etc/ceph"
keyring_filename = cluster + "." + entity + ".keyring"
key_path = os.path.join(path, keyring_filename)
elif "bootstrap" in entity:
path = "/var/lib/ceph"
# bootstrap keys show up as 'client.bootstrap-osd'
# however the directory is called '/var/lib/ceph/bootstrap-osd'
# so we need to substring 'client.'
entity_split = entity.split('.')[1]
keyring_filename = cluster + ".keyring"
key_path = os.path.join(path, entity_split, keyring_filename)
else:
return None
return key_path
def run_module():
module = AnsibleModule(
argument_spec=yaml.safe_load(DOCUMENTATION)['options'],
supports_check_mode=True,
add_file_common_args=True,
)
file_args = module.load_file_common_arguments(module.params)
# Gather module parameters in variables
state = module.params['state']
name = module.params.get('name')
cluster = module.params.get('cluster')
caps = module.params.get('caps')
secret = module.params.get('secret')
import_key = module.params.get('import_key')
dest = module.params.get('dest')
user = module.params.get('user')
user_key = module.params.get('user_key')
output_format = module.params.get('output_format')
changed = False
result = dict(
changed=changed,
stdout='',
stderr='',
rc=0,
start='',
end='',
delta='',
)
if module.check_mode:
module.exit_json(**result)
startd = datetime.datetime.now()
# will return either the image name or None
container_image = is_containerized()
# Test if the key exists, if it does we skip its creation
# We only want to run this check when a key needs to be added
# There is no guarantee that any cluster is running and we don't need one
_secret = secret
_caps = caps
key_exist = 1
if not user_key:
user_key_filename = '{}.{}.keyring'.format(cluster, user)
user_key_dir = '/etc/ceph'
user_key_path = os.path.join(user_key_dir, user_key_filename)
else:
user_key_path = user_key
if (state in ["present", "update"]):
# if dest is not a directory, the user wants to change the file's name
# (e,g: /etc/ceph/ceph.mgr.ceph-mon2.keyring)
if not os.path.isdir(dest):
file_path = dest
else:
if 'bootstrap' in dest:
# Build a different path for bootstrap keys as they are stored
# as /var/lib/ceph/bootstrap-rbd/ceph.keyring
keyring_filename = cluster + '.keyring'
else:
keyring_filename = cluster + "." + name + ".keyring"
file_path = os.path.join(dest, keyring_filename)
file_args['path'] = file_path
if import_key:
_info_key = []
rc, cmd, out, err = exec_commands(
module, info_key(cluster, name, user, user_key_path,
output_format, container_image))
key_exist = rc
if not caps and key_exist != 0:
fatal("Capabilities must be provided when state "
"is 'present'", module)
if key_exist != 0 and secret is None and caps is None:
fatal("Keyring doesn't exist, you must provide "
"'secret' and 'caps'", module)
if key_exist == 0:
_info_key = json.loads(out)
if not secret:
secret = _info_key[0]['key']
_secret = _info_key[0]['key']
if not caps:
caps = _info_key[0]['caps']
_caps = _info_key[0]['caps']
if secret == _secret and caps == _caps:
if not os.path.isfile(file_path):
rc, cmd, out, err = exec_commands(module, get_key(cluster, user, user_key_path,
name, file_path, container_image))
result["rc"] = rc
if rc != 0:
result["stdout"] = "Couldn't fetch the key {0} at " \
"{1}.".format(name, file_path)
module.exit_json(**result)
result["stdout"] = "fetched the key {0} at " \
"{1}.".format(name, file_path)
result["stdout"] = "{0} already exists and doesn't " \
"need to be updated.".format(name)
result["rc"] = 0
module.set_fs_attributes_if_different(file_args, False)
module.exit_json(**result)
else:
if os.path.isfile(file_path) and not secret or not caps:
result["stdout"] = "{0} already exists in {1} you must provide " \
"secret *and* caps when import_key " \
"is {2}".format(name, dest, import_key)
result["rc"] = 0
module.exit_json(**result)
if (key_exist == 0 and (secret != _secret or caps != _caps)) or key_exist != 0: # noqa E501
rc, cmd, out, err = exec_commands(module, create_key(
module, result, cluster, user, user_key_path, name,
secret, caps, import_key, file_path, container_image))
if rc != 0:
result["stdout"] = "Couldn't create or update {0}".format(name)
result["stderr"] = err
module.exit_json(**result)
module.set_fs_attributes_if_different(file_args, False)
changed = True
elif state == "absent":
if key_exist == 0:
rc, cmd, out, err = exec_commands(
module, delete_key(cluster, user, user_key_path, name, container_image))
if rc == 0:
changed = True
else:
rc = 0
elif state == "info":
rc, cmd, out, err = exec_commands(
module, info_key(cluster, name, user, user_key_path,
output_format, container_image))
if rc != 0:
result["stdout"] = "skipped, since {0} does not exist".format(name)
result['rc'] = 0
module.exit_json(**result)
elif state == "list":
rc, cmd, out, err = exec_commands(
module, list_keys(cluster, user, user_key_path, container_image))
elif state == "fetch_initial_keys":
hostname = socket.gethostname().split('.', 1)[0]
user = "mon."
keyring_filename = cluster + "-" + hostname + "/keyring"
user_key_path = os.path.join("/var/lib/ceph/mon/", keyring_filename)
rc, cmd, out, err = exec_commands(
module, list_keys(cluster, user, user_key_path, container_image))
if rc != 0:
result["stdout"] = "failed to retrieve ceph keys"
result["sdterr"] = err
result['rc'] = 0
module.exit_json(**result)
entities = lookup_ceph_initial_entities(module, out)
output_format = "plain"
for entity in entities:
key_path = build_key_path(cluster, entity)
if key_path is None:
fatal("Failed to build key path, no entity yet?", module)
elif os.path.isfile(key_path):
# if the key is already on the filesystem
# there is no need to fetch it again
continue
extra_args = [
'-o',
key_path,
]
info_cmd = info_key(cluster, entity, user,
user_key_path, output_format, container_image)
# we use info_cmd[0] because info_cmd is an array made of an array
info_cmd[0].extend(extra_args)
rc, cmd, out, err = exec_commands(
module, info_cmd)
file_args = module.load_file_common_arguments(module.params)
file_args['path'] = key_path
module.set_fs_attributes_if_different(file_args, False)
elif state == "generate_secret":
out = generate_secret().decode()
cmd = ''
rc = 0
err = ''
changed = True
endd = datetime.datetime.now()
delta = endd - startd
result = dict(
cmd=cmd,
start=str(startd),
end=str(endd),
delta=str(delta),
rc=rc,
stdout=out.rstrip("\r\n"),
stderr=err.rstrip("\r\n"),
changed=changed,
)
if rc != 0:
module.fail_json(msg='non-zero return code', **result)
module.exit_json(**result)
def main():
run_module()
if __name__ == '__main__':
main()

tripleo_ansible/ansible_plugins/modules/ceph_pool.py (+766, -0)

@@ -0,0 +1,766 @@
#!/usr/bin/python3
# Copyright 2020, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Included from: https://github.com/ceph/ceph-ansible/blob/master/library/ceph_pool.py
from __future__ import absolute_import, division, print_function
__metaclass__ = type
from ansible.module_utils.basic import AnsibleModule
import datetime
import json
import yaml
import os
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = """
---
module: ceph_pool
author: Guillaume Abrioux <gabrioux@redhat.com>
short_description: Manage Ceph Pools
version_added: "2.8"
description:
- Manage Ceph pool(s) creation, deletion and updates.
options:
cluster:
description:
- The ceph cluster name.
required: false
default: ceph
type: str
name:
description:
- name of the Ceph pool
required: true
type: str
state:
description:
If 'present' is used, the module creates a pool if it doesn't exist
or updates it if it already exists.
If 'absent' is used, the module will simply delete the pool.
If 'list' is used, the module will return all details about the
existing pools. (json formatted).
required: false
type: str
choices: ['present', 'absent', 'list']
default: present
size:
description:
- set the replica size of the pool.
required: false
type: str
min_size:
description:
- set the min_size parameter of the pool.
required: false
type: str
pg_num:
description:
- set the pg_num of the pool.
required: false
type: str
pgp_num:
description:
- set the pgp_num of the pool.
required: false
type: str
pg_autoscale_mode:
description:
- set the pg autoscaler on the pool.
required: false
default: 'on'
type: str
target_size_ratio:
description:
- set the target_size_ratio on the pool
required: false
type: str
pool_type:
description:
- set the pool type, either 'replicated' or 'erasure'
required: false
default: 'replicated'
type: str
erasure_profile:
description:
- When pool_type = 'erasure', set the erasure profile of the pool
required: false
default: 'default'
type: str
rule_name:
description:
- Set the crush rule name assigned to the pool
required: false
default: 'replicated_rule'
type: str
expected_num_objects:
description:
- Set the expected_num_objects parameter of the pool.
required: false
default: '0'
application:
description:
- Set the pool application on the pool.
required: false
type: str
"""
EXAMPLES = '''
pools:
- { name: foo, size: 3, application: rbd, pool_type: 'replicated',
pg_autoscale_mode: 'on' }
- hosts: all
become: true
tasks:
- name: create a pool
ceph_pool:
name: "{{ item.name }}"
state: present
size: "{{ item.size }}"
application: "{{ item.application }}"
pool_type: "{{ item.pool_type }}"
pg_autoscale_mode: "{{ item.pg_autoscale_mode }}"
with_items: "{{ pools }}"
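# Illustrative addition (not among the upstream examples): remove a pool
- name: delete a pool
  ceph_pool:
    name: foo
    state: absent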
'''
RETURN = '''# '''
# Start TripleO change
# Tripleo only needs ca_common module_utils for this module.
# Rather than add to tripleo-ansible's module_utils, insert 6 functions here
# https://github.com/ceph/ceph-ansible/blob/master/module_utils/ca_common.py
def generate_ceph_cmd(sub_cmd, args, user_key=None,
cluster='ceph', user='client.admin',
container_image=None, interactive=False):
'''
Generate 'ceph' command line to execute
'''
if not user_key:
user_key = '/etc/ceph/{}.{}.keyring'.format(cluster, user)
cmd = pre_generate_ceph_cmd(container_image=container_image, interactive=interactive)
base_cmd = [
'-n',
user,
'-k',
user_key,
'--cluster',
cluster
]
base_cmd.extend(sub_cmd)
cmd.extend(base_cmd + args)
return cmd
def container_exec(binary, container_image, interactive=False):
'''
Build the docker CLI to run a command inside a container
'''
container_binary = os.getenv('CEPH_CONTAINER_BINARY')
command_exec = [container_binary, 'run']
if interactive:
command_exec.extend(['--interactive'])
command_exec.extend(['--rm',
'--net=host',
'-v', '/etc/ceph:/etc/ceph:z',
'-v', '/var/lib/ceph/:/var/lib/ceph/:z',
'-v', '/var/log/ceph/:/var/log/ceph/:z',
'--entrypoint={}'.format(binary), container_image])
return command_exec
def is_containerized():
'''
Check if we are running on a containerized cluster
'''
if 'CEPH_CONTAINER_IMAGE' in os.environ:
container_image = os.getenv('CEPH_CONTAINER_IMAGE')
else:
container_image = None
return container_image
def pre_generate_ceph_cmd(container_image=None, interactive=False):
'''
Generate ceph prefix command
'''
if container_image:
cmd = container_exec('ceph', container_image, interactive=interactive)
else:
cmd = ['ceph']
return cmd
def exec_command(module, cmd, stdin=None):
'''
Execute command(s)
'''
binary_data = False
if stdin:
binary_data = True
rc, out, err = module.run_command(cmd, data=stdin, binary_data=binary_data)
return rc, cmd, out, err
def exit_module(module, out, rc, cmd, err, startd, changed=False):
endd = datetime.datetime.now()
delta = endd - startd
result = dict(
cmd=cmd,
start=str(startd),
end=str(endd),
delta=str(delta),
rc=rc,
stdout=out.rstrip("\r\n"),
stderr=err.rstrip("\r\n"),
changed=changed,
)
module.exit_json(**result)
# End TripleO change
def check_pool_exist(cluster,
name,
user,
user_key,
output_format='json',
container_image=None):
'''
Check if a given pool exists
'''
args = ['stats', name, '-f', output_format]
cmd = generate_ceph_cmd(sub_cmd=['osd', 'pool'],
args=args,
cluster=cluster,
user=user,
user_key=user_key,
container_image=container_image)
return cmd
def generate_get_config_cmd(param,
cluster,
user,
user_key,
container_image=None):
_cmd = pre_generate_ceph_cmd(container_image=container_image)
args = [
'-n',
user,
'-k',
user_key,
'--cluster',
cluster,
'config',
'get',
'mon.*',
param
]
cmd = _cmd + args
return cmd
def get_application_pool(cluster,
name,
user,
user_key,
output_format='json',
container_image=None):
'''
Get application type enabled on a given pool
'''
args = ['application', 'get', name, '-f', output_format]
cmd = generate_ceph_cmd(sub_cmd=['osd', 'pool'],
args=args,
cluster=cluster,
user=user,
user_key=user_key,
container_image=container_image)
return cmd
def enable_application_pool(cluster,
name,
application,
user,
user_key,
container_image=None):
'''
Enable application on a given pool
'''
args = ['application', 'enable', name, application]
cmd = generate_ceph_cmd(sub_cmd=['osd', 'pool'],
args=args,
cluster=cluster,
user=user,
user_key=user_key,
container_image=container_image)
return cmd
def disable_application_pool(cluster,
name,
application,
user,
user_key,
container_image=None):
'''
Disable application on a given pool
'''
args = ['application', 'disable', name,
application, '--yes-i-really-mean-it']
cmd = generate_ceph_cmd(sub_cmd=['osd', 'pool'],
args=args,
cluster=cluster,
user=user,
user_key=user_key,
container_image=container_image)
return cmd
def get_pool_details(module,
cluster,
name,
user,
user_key,
output_format='json',
container_image=None):
'''
Get details about a given pool
'''
args = ['ls', 'detail', '-f', output_format]
cmd = generate_ceph_cmd(sub_cmd=['osd', 'pool'],
args=args,
cluster=cluster,
user=user,
user_key=user_key,
container_image=container_image)
rc, cmd, out, err = exec_command(module, cmd)
if rc == 0:
out = [p for p in json.loads(out.strip()) if p['pool_name'] == name][0]
_rc, _cmd, application_pool, _err = exec_command(module,
get_application_pool(cluster, # noqa: E501
name, # noqa: E501
user, # noqa: E501
user_key, # noqa: E501
container_image=container_image)) # noqa: E501
# This is a trick because "target_size_ratio" isn't present at the same level in the dict
# ie:
# {
# 'pg_num': 8,
# 'pgp_num': 8,
# 'pg_autoscale_mode': 'on',
# 'options': {
# 'target_size_ratio': 0.1
# }
# }
# If 'target_size_ratio' is present in 'options', we set it, this way we end up
# with a dict containing all needed keys at the same level.
if 'target_size_ratio' in out['options'].keys():
out['target_size_ratio'] = out['options']['target_size_ratio']
else:
out['target_size_ratio'] = None
application = list(json.loads(application_pool.strip()).keys())
if len(application) == 0:
out['application'] = ''
else:
out['application'] = application[0]
return rc, cmd, out, err
def compare_pool_config(user_pool_config, running_pool_details):
'''
Compare user input config pool details with current running pool details
'''
delta = {}
filter_keys = ['pg_num', 'pg_placement_num', 'size',
'pg_autoscale_mode', 'target_size_ratio']
for key in filter_keys:
if (str(running_pool_details[key]) != user_pool_config[key]['value'] and user_pool_config[key]['value']):
delta[key] = user_pool_config[key]
if (running_pool_details['application'] != user_pool_config['application']['value'] and user_pool_config['application']['value']):
delta['application'] = {}
delta['application']['new_application'] = user_pool_config['application']['value'] # noqa: E501
# to be improved (for update_pools()...)
delta['application']['value'] = delta['application']['new_application']
delta['application']['old_application'] = running_pool_details['application'] # noqa: E501
return delta
def list_pools(cluster,
user,
user_key,
details,
output_format='json',
container_image=None):
'''
List existing pools
'''
args = ['ls']
if details:
args.append('detail')
args.extend(['-f', output_format])
cmd = generate_ceph_cmd(sub_cmd=['osd', 'pool'],
args=args,
cluster=cluster,
user=user,
user_key=user_key,
container_image=container_image)
return cmd
def create_pool(cluster,
name,
user,
user_key,
user_pool_config,
container_image=None):
'''
Create a new pool
'''
args = ['create', user_pool_config['pool_name']['value'],
user_pool_config['type']['value']]
if user_pool_config['pg_autoscale_mode']['value'] != 'on':
args.extend(['--pg_num',
user_pool_config['pg_num']['value'],
'--pgp_num',
user_pool_config['pgp_num']['value']])
elif user_pool_config['target_size_ratio']['value']:
args.extend(['--target_size_ratio',
user_pool_config['target_size_ratio']['value']])
if user_pool_config['type']['value'] == 'replicated':
args.extend([user_pool_config['crush_rule']['value'],
'--expected_num_objects',
user_pool_config['expected_num_objects']['value'],
'--autoscale-mode',
user_pool_config['pg_autoscale_mode']['value']])
if (user_pool_config['size']['value'] and user_pool_config['type']['value'] == "replicated"):
args.extend(['--size', user_pool_config['size']['value']])
elif user_pool_config['type']['value'] == 'erasure':
args.extend([user_pool_config['erasure_profile']['value']])
if user_pool_config['crush_rule']['value']:
args.extend([user_pool_config['crush_rule']['value']])
args.extend(['--expected_num_objects',
user_pool_config['expected_num_objects']['value'],
'--autoscale-mode',
user_pool_config['pg_autoscale_mode']['value']])
cmd = generate_ceph_cmd(sub_cmd=['osd', 'pool'],
args=args,
cluster=cluster,
user=user,
user_key=user_key,
container_image=container_image)
return cmd
def remove_pool(cluster, name, user, user_key, container_image=None):
'''
Remove a pool
'''
args = ['rm', name, name, '--yes-i-really-really-mean-it']
cmd = generate_ceph_cmd(sub_cmd=['osd', 'pool'],
args=args,
cluster=cluster,
user=user,
user_key=user_key,
container_image=container_image)
return cmd
def update_pool(module, cluster, name,
user, user_key, delta, container_image=None):
'''
Update an existing pool
'''
report = ""
for key in delta.keys():
if key != 'application':
args = ['set',
name,
delta[key]['cli_set_opt'],
delta[key]['value']]
cmd = generate_ceph_cmd(sub_cmd=['osd', 'pool'],
args=args,
cluster=cluster,
user=user,
user_key=user_key,
container_image=container_image)
rc, cmd, out, err = exec_command(module, cmd)
if rc != 0:
return rc, cmd, out, err
else:
rc, cmd, out, err = exec_command(module, disable_application_pool(cluster, name, delta['application']['old_application'], user, user_key, container_image=container_image)) # noqa: E501
if rc != 0:
return rc, cmd, out, err
rc, cmd, out, err = exec_command(module, enable_application_pool(cluster, name, delta['application']['new_application'], user, user_key, container_image=container_image)) # noqa: E501
if rc != 0:
return rc, cmd, out, err
report = report + "\n" + "{} has been updated: {} is now {}".format(name, key, delta[key]['value']) # noqa: E501
out = report
return rc, cmd, out, err
def run_module():
module = AnsibleModule(
argument_spec=yaml.safe_load(DOCUMENTATION)['options'],
supports_check_mode=True,
)
# Gather module parameters in variables
cluster = module.params.get('cluster')
name = module.params.get('name')
state = module.params.get('state')
details = module.params.get('details')
size = module.params.get('size')
min_size = module.params.get('min_size')
pg_num = module.params.get('pg_num')
pgp_num = module.params.get('pgp_num')
pg_autoscale_mode = module.params.get('pg_autoscale_mode')
target_size_ratio = module.params.get('target_size_ratio')
application = module.params.get('application')
if (module.params.get('pg_autoscale_mode').lower() in
['true', 'on', 'yes']):
pg_autoscale_mode = 'on'
elif (module.params.get('pg_autoscale_mode').lower() in
['false', 'off', 'no']):
pg_autoscale_mode = 'off'
else:
pg_autoscale_mode = 'warn'
if module.params.get('pool_type') == '1':
pool_type = 'replicated'
elif module.params.get('pool_type') == '3':
pool_type = 'erasure'
else:
pool_type = module.params.get('pool_type')
if not module.params.get('rule_name'):
rule_name = 'replicated_rule' if pool_type == 'replicated' else None
else:
rule_name = module.params.get('rule_name')
erasure_profile = module.params.get('erasure_profile')
expected_num_objects = module.params.get('expected_num_objects')
user_pool_config = {
'pool_name': {'value': name},
'pg_num': {'value': pg_num, 'cli_set_opt': 'pg_num'},
'pgp_num': {'value': pgp_num, 'cli_set_opt': 'pgp_num'},
'pg_autoscale_mode': {'value': pg_autoscale_mode,
'cli_set_opt': 'pg_autoscale_mode'},
'target_size_ratio': {'value': target_size_ratio,
'cli_set_opt': 'target_size_ratio'},
'application': {'value': application},
'type': {'value': pool_type},
'erasure_profile': {'value': erasure_profile},
'crush_rule': {'value': rule_name, 'cli_set_opt': 'crush_rule'},
'expected_num_objects': {'value': expected_num_objects},
'size': {'value': size, 'cli_set_opt': 'size'},
'min_size': {'value': min_size}
}
if module.check_mode:
module.exit_json(
changed=False,
stdout='',
stderr='',
rc=0,
start='',
end='',
delta='',
)
startd = datetime.datetime.now()
changed = False
# will return either the image name or None
container_image = is_containerized()
user = "client.admin"
keyring_filename = cluster + '.' + user + '.keyring'
user_key = os.path.join("/etc/ceph/", keyring_filename)
if state == "present":
rc, cmd, out, err = exec_command(module,
check_pool_exist(cluster,
name,
user,
user_key,
container_image=container_image)) # noqa: E501
if rc == 0:
running_pool_details = get_pool_details(module,
cluster,
name,
user,
user_key,
container_image=container_image) # noqa: E501
user_pool_config['pg_placement_num'] = {'value': str(running_pool_details[2]['pg_placement_num']), 'cli_set_opt': 'pgp_num'} # noqa: E501
delta = compare_pool_config(user_pool_config,
running_pool_details[2])
if len(delta) > 0:
keys = list(delta.keys())
details = running_pool_details[2]
if details['erasure_code_profile'] and 'size' in keys:
del delta['size']
if details['pg_autoscale_mode'] == 'on':
delta.pop('pg_num', None)
delta.pop('pgp_num', None)
if len(delta) == 0:
out = "Skipping pool {}.\nUpdating either 'size' on an erasure-coded pool " \
"or 'pg_num'/'pgp_num' on a pg autoscaled pool is incompatible".format(name)
else:
rc, cmd, out, err = update_pool(module,
cluster,
name,
user,
user_key,
delta,
container_image=container_image) # noqa: E501
if rc == 0:
changed = True
else:
out = "Pool {} already exists and there is nothing to update.".format(name) # noqa: E501
else:
rc, cmd, out, err = exec_command(module,
create_pool(cluster,
name,
user,
user_key,
user_pool_config=user_pool_config, # noqa: E501
container_image=container_image)) # noqa: E501
if user_pool_config['application']['value']:
rc, _, _, _ = exec_command(module,
enable_application_pool(cluster,
name,
user_pool_config['application']['value'], # noqa: E501
user,
user_key,
container_image=container_image)) # noqa: E501
if user_pool_config['min_size']['value']:
# not implemented yet
pass
changed = True
elif state == "list":
rc, cmd, out, err = exec_command(module,
list_pools(cluster,
name, user,
user_key,
details,
container_image=container_image)) # noqa: E501
if rc != 0:
out = "Couldn't list pool(s) present on the cluster"
elif state == "absent":
rc, cmd, out, err = exec_command(module,
check_pool_exist(cluster,
name, user,
user_key,
container_image=container_image)) # noqa: E501
if rc == 0:
rc, cmd, out, err = exec_command(module,
remove_pool(cluster,
name,
user,
user_key,
container_image=container_image)) # noqa: E501
changed = True
else:
rc = 0
out = "Skipped, since pool {} doesn't exist".format(name)
exit_module(module=module, out=out, rc=rc, cmd=cmd, err=err, startd=startd,
changed=changed)
def main():
run_module()
if __name__ == '__main__':
main()

tripleo_ansible/roles/tripleo_cephadm/defaults/main.yml (+36, -0)

@@ -0,0 +1,36 @@
---
# defaults file for tripleo_cephadm
tripleo_cephadm_spec_on_bootstrap: false # not recommended due to https://tracker.ceph.com/issues/49277
tripleo_cephadm_ssh_user: ceph-admin
tripleo_cephadm_bin: /usr/sbin/cephadm
tripleo_cephadm_cluster: ceph
tripleo_cephadm_config_home: /etc/ceph
tripleo_cephadm_verbose: true
tripleo_cephadm_container_ns: "docker.io/ceph"
tripleo_cephadm_container_image: "ceph"
tripleo_cephadm_container_tag: "v15"
tripleo_cephadm_container_cli: "podman"
tripleo_cephadm_container_options: "--net=host --ipc=host"
tripleo_cephadm_admin_keyring: "{{ tripleo_cephadm_config_home }}/{{ tripleo_cephadm_cluster }}.client.admin.keyring"
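
The container-related defaults above presumably get combined by the role's
bootstrap tasks (not shown in this view) into a single image reference such as
docker.io/ceph/ceph:v15. A minimal sketch of overriding them from an
extra-vars file, assuming the defaults are consumed verbatim:

---
# hypothetical override file, e.g. passed with `-e @cephadm-overrides.yml`;
# the values are placeholders, not recommendations
tripleo_cephadm_container_ns: quay.io/ceph
tripleo_cephadm_container_tag: v16
tripleo_cephadm_container_cli: podman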