Ceph Ansible support
Add the initial playbooks for deploying Ceph with Ansible. This does not include the OpenStack changes needed to make services such as nova, glance, and cinder use Ceph; it only builds the Ceph cluster itself. The next patchset will handle the OpenStack integration.
DocImpact
Change-Id: Ie1697dde5f92e833652933a80f0004f31b641330
Partially-Implements: blueprint ceph-container
parent a83d84b383
commit c86e66c9dc
@@ -140,6 +140,7 @@ enable_nova: "yes"
enable_rabbitmq: "yes"

# Additional optional OpenStack services are specified here
enable_ceph: "no"
enable_cinder: "no"
enable_heat: "yes"
enable_horizon: "yes"
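Ceph stays disabled by default (enable_ceph: "no"), so a deployer has to opt in before the new role does anything. A minimal sketch of flipping the flag, assuming the usual /etc/kolla/globals.yml override file is in use (that path is an assumption, not something this change defines):

    # Assumed override file location; adjust to wherever your globals.yml lives.
    echo 'enable_ceph: "yes"' >> /etc/kolla/globals.yml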
@@ -54,6 +54,12 @@ control
[murano:children]
control

[ceph-mon:children]
control

[ceph-osd:children]
storage


# Additional control implemented here. These groups allow you to control which
# services run on which hosts at a per-service level.
@@ -62,6 +62,12 @@ control
[murano:children]
control

[ceph-mon:children]
control

[ceph-osd:children]
storage


# Additional control implemented here. These groups allow you to control which
# services run on which hosts at a per-service level.
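Both sample inventories simply attach the new groups to existing ones: ceph-mon is a child of control and ceph-osd a child of storage, so monitors land on controller hosts and OSDs on storage hosts. One way to sanity-check which hosts will receive each service (the inventory path is an assumption; use whichever inventory file you edited):

    ansible -i ansible/inventory/multinode ceph-mon --list-hosts
    ansible -i ansible/inventory/multinode ceph-osd --list-hosts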
189  ansible/library/bslurp.py  Normal file
@@ -0,0 +1,189 @@
#!/usr/bin/python

# Copyright 2015 Sam Yaple
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# This module has been relicensed from the source below:
# https://github.com/SamYaple/yaodu/blob/master/ansible/library/bslurp

DOCUMENTATION = '''
---
module: bslurp
short_description: Slurps a file from a remote node
description:
  - Used for fetching a binary blob containing the file, then pushing that
    file to other hosts.
options:
  src:
    description:
      - File to fetch. When dest is used, src is expected to be a str with data
    required: True
    type: str
  compress:
    description:
      - Compress file with zlib
    default: True
    type: bool
  dest:
    description:
      - Where to write out binary blob
    required: False
    type: str
  mode:
    description:
      - Destination file permissions
    default: '0644'
    type: str
  sha1:
    description:
      - sha1 hash of the underlying data
    default: None
    type: str
author: Sam Yaple
'''

EXAMPLES = '''
Distribute a file from a single host to many hosts:

- hosts: web_servers
  tasks:
    - name: Pull in web config
      bslurp: src="/path/to/file"
      register: file_data
      run_once: True
    - name: Push if changed
      bslurp:
        src: "{{ file_data.content }}"
        dest: "{{ file_data.source }}"
        mode: "{{ file_data.mode }}"
        sha1: "{{ file_data.sha1 }}"

Distribute multiple files from a single host to many hosts:

- hosts: web_servers
  tasks:
    - name: Pull in web config
      bslurp: src="{{ item }}"
      with_items:
        - "/path/to/file1"
        - "/path/to/file2"
        - "/path/to/file3"
      register: file_data
      run_once: True
    - name: Push if changed
      bslurp:
        src: "{{ item.content }}"
        dest: "{{ item.source }}"
        mode: "{{ item.mode }}"
        sha1: "{{ item.sha1 }}"
      with_items: file_data.results

Distribute a file to many hosts without compression; change permissions on dest:

- hosts: web_servers
  tasks:
    - name: Pull in web config
      bslurp: src="/path/to/file"
      register: file_data
      run_once: True
    - name: Push if changed
      bslurp:
        src: "{{ file_data.content }}"
        dest: "/new/path/to/file"
        mode: "0777"
        compress: False
        sha1: "{{ file_data.sha1 }}"
'''

import base64
import hashlib
import os
import sys
import zlib


def copy_from_host(module):
    compress = module.params.get('compress')
    src = module.params.get('src')

    if not os.path.exists(src):
        module.fail_json(msg="file not found: {}".format(src))
    if not os.access(src, os.R_OK):
        module.fail_json(msg="file is not readable: {}".format(src))

    mode = oct(os.stat(src).st_mode & 0777)

    with open(src, 'rb') as f:
        raw_data = f.read()

    sha1 = hashlib.sha1(raw_data).hexdigest()
    data = zlib.compress(raw_data) if compress else raw_data

    module.exit_json(content=base64.b64encode(data), sha1=sha1, mode=mode,
                     source=src)


def copy_to_host(module):
    compress = module.params.get('compress')
    dest = module.params.get('dest')
    mode = int(module.params.get('mode'), 0)
    sha1 = module.params.get('sha1')
    src = module.params.get('src')

    data = base64.b64decode(src)
    raw_data = zlib.decompress(data) if compress else data

    if sha1:
        if os.path.exists(dest):
            if os.access(dest, os.R_OK):
                with open(dest, 'rb') as f:
                    if hashlib.sha1(f.read()).hexdigest() == sha1:
                        module.exit_json(changed=False)
            else:
                module.exit_json(failed=True, changed=False,
                                 msg='file is not accessible: {}'.format(dest))

        if sha1 != hashlib.sha1(raw_data).hexdigest():
            module.exit_json(failed=True, changed=False,
                             msg='sha1 sum does not match data')

    with os.fdopen(os.open(dest, os.O_WRONLY | os.O_CREAT, mode), 'wb') as f:
        f.write(raw_data)

    module.exit_json(changed=True)


def main():
    module = AnsibleModule(
        argument_spec = dict(
            compress = dict(default=True, type='bool'),
            dest = dict(type='str'),
            mode = dict(default='0644', type='str'),
            sha1 = dict(default=None, type='str'),
            src = dict(required=True, type='str')
        )
    )

    dest = module.params.get('dest')

    try:
        if dest:
            copy_to_host(module)
        else:
            copy_from_host(module)
    except Exception as e:
        module.exit_json(failed=True, changed=True, msg=repr(e))

# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
    main()
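A quick way to exercise the module outside a playbook is an ad-hoc run against localhost, pointing Ansible at the library directory added here. With no dest it only fetches, so it simply prints the compressed, base64-encoded content plus its sha1 and mode; the target file is arbitrary:

    ansible localhost -c local -M ansible/library -m bslurp -a 'src=/etc/hostname'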
86  ansible/library/find_disks.py  Normal file
@@ -0,0 +1,86 @@
#!/usr/bin/python

# Copyright 2015 Sam Yaple
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# This module has been relicensed from the source below:
# https://github.com/SamYaple/yaodu/blob/master/ansible/library/ceph_osd_list

DOCUMENTATION = '''
---
module: find_disks
short_description: Return list of devices containing a specified label
description:
  - This will return a list of all devices with a GPT partition label with
    the name specified.
options:
  partition_name:
    description:
      - Partition name
    required: True
    type: str
author: Sam Yaple
'''

EXAMPLES = '''
- hosts: ceph-osd
  tasks:
    - name: Return all valid formatted devices with the name KOLLA_CEPH_OSD
      find_disks:
        partition_name: 'KOLLA_CEPH_OSD'
      register: osds
'''

import sys
import subprocess


def main():
    module = AnsibleModule(
        argument_spec = dict(
            partition_name = dict(required=True, type='str')
        )
    )

    partition_name = module.params.get('partition_name')

    try:
        # This should all really be done differently. Unfortunately there is no
        # decent python library for dealing with disks like we need to here.
        disks = subprocess.check_output("parted -l", shell=True).split('\n')
        ret = list()

        for line in disks:
            d = line.split(' ')
            if d[0] == 'Disk':
                dev = d[1][:-1]

            if line.find(partition_name) != -1:
                # This process returns an error code when no results return
                # We can ignore that, it is safe
                p = subprocess.Popen("blkid " + dev + "*", shell=True, stdout=subprocess.PIPE)
                fs_uuid = p.communicate()[0]
                # The dev doesn't need to have a uuid, will be '' otherwise
                if fs_uuid:
                    fs_uuid = fs_uuid.split('"')[1]
                ret.append({'device': dev, 'fs_uuid': fs_uuid})

        module.exit_json(disks=ret)
    except Exception as e:
        module.exit_json(failed=True, msg=repr(e))

# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
    main()
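find_disks keys off a GPT partition name, so a disk only becomes an OSD candidate after someone labels it. A hedged sketch of preparing a spare disk for the OSD bootstrap pass later in this patch (the device name is a placeholder, and this wipes the disk):

    # /dev/sdb is a placeholder; the name must match what bootstrap_osds.yml searches for.
    parted /dev/sdb -s -- mklabel gpt mkpart KOLLA_CEPH_OSD_BOOTSTRAP 1 -1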
24  ansible/roles/ceph/defaults/main.yml  Normal file
@@ -0,0 +1,24 @@
---
project_name: "ceph"


####################
# Docker
####################
ceph_mon_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-ceph-mon"
ceph_mon_tag: "{{ openstack_release }}"
ceph_mon_image_full: "{{ ceph_mon_image }}:{{ ceph_mon_tag }}"

ceph_osd_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-ceph-osd"
ceph_osd_tag: "{{ openstack_release }}"
ceph_osd_image_full: "{{ ceph_osd_image }}:{{ ceph_osd_tag }}"

ceph_data_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-data"
ceph_data_tag: "{{ openstack_release }}"
ceph_data_image_full: "{{ ceph_data_image }}:{{ ceph_data_tag }}"


####################
# Ceph
####################
osd_initial_weight: "1"
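These variables compose the full image references at run time. As a rough example, with docker_namespace=kollaglue, kolla_base_distro=centos, kolla_install_type=binary, openstack_release=latest and no private registry (all assumed values), ceph_mon_image_full would resolve to kollaglue/centos-binary-ceph-mon:latest, which could be pre-pulled:

    # Assumed variable values; substitute whatever your globals define.
    docker pull kollaglue/centos-binary-ceph-mon:latest
    docker pull kollaglue/centos-binary-ceph-osd:latest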
57  ansible/roles/ceph/tasks/bootstrap_mons.yml  Normal file
@@ -0,0 +1,57 @@
---
- name: Cleaning up temp file on localhost
  local_action: file path=/tmp/kolla_ceph_cluster state=absent
  changed_when: False
  always_run: True
  run_once: True

- name: Creating temp file on localhost
  local_action: copy content=None dest=/tmp/kolla_ceph_cluster mode=0600
  changed_when: False
  always_run: True
  run_once: True

# TODO(SamYaple): Improve failed_when check
- name: Checking if a previous cluster exists
  command: docker exec ceph_mon_data stat /etc/ceph/ceph.monmap
  register: exists
  changed_when: False
  failed_when: False
  always_run: True

- name: Writing hostname of host with existing cluster files to temp file
  local_action: copy content={{ ansible_hostname }} dest=/tmp/kolla_ceph_cluster mode=0600
  changed_when: False
  always_run: True
  when: exists.rc == 0

- name: Registering host from temp file
  set_fact:
    delegate_host: "{{ lookup('file', '/tmp/kolla_ceph_cluster') }}"

- name: Cleaning up temp file on localhost
  local_action: file path=/tmp/kolla_ceph_cluster state=absent
  changed_when: False
  always_run: True
  run_once: True

- name: Starting Ceph Monitor data container
  docker:
    docker_api_version: "{{ docker_api_version }}"
    net: host
    pull: "{{ docker_pull_policy }}"
    restart_policy: "{{ docker_restart_policy }}"
    restart_policy_retry: "{{ docker_restart_policy_retry }}"
    state: reloaded
    registry: "{{ docker_registry }}"
    username: "{{ docker_registry_username }}"
    password: "{{ docker_registry_password }}"
    insecure_registry: "{{ docker_insecure_registry }}"
    name: ceph_mon_data
    image: "{{ ceph_data_image_full }}"
    volumes:
      - "/etc/ceph/"
      - "/var/lib/ceph/"

- include: generate_cluster.yml
  when: delegate_host == 'None' and inventory_hostname == groups['ceph-mon'][0]
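The delegate_host dance above exists so reruns reuse an existing cluster instead of regenerating keys. The same check the play performs can be run by hand on any monitor host to see whether it would be picked as the delegate (a non-zero exit status means no prior cluster data was found):

    docker exec ceph_mon_data stat /etc/ceph/ceph.monmap; echo "exit status: $?"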
48  ansible/roles/ceph/tasks/bootstrap_osds.yml  Normal file
@@ -0,0 +1,48 @@
---
- name: Looking up disks to bootstrap for Ceph
  find_disks:
    partition_name: 'KOLLA_CEPH_OSD_BOOTSTRAP'
  register: osds_bootstrap
  when: inventory_hostname in groups['ceph-osd']

- name: Bootstrapping Ceph OSDs
  docker:
    docker_api_version: "{{ docker_api_version }}"
    net: host
    pull: "{{ docker_pull_policy }}"
    restart_policy: "no"
    state: reloaded
    registry: "{{ docker_registry }}"
    username: "{{ docker_registry_username }}"
    password: "{{ docker_registry_password }}"
    insecure_registry: "{{ docker_insecure_registry }}"
    privileged: True
    name: "bootstrap_osd_{{ item.0 }}"
    image: "{{ ceph_osd_image_full }}"
    volumes:
      - "{{ node_config_directory }}/ceph-osd/:/opt/kolla/ceph-osd/:ro"
      - "/dev/:/dev/"
    env:
      KOLLA_BOOTSTRAP:
      KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}"
      OSD_DEV: "{{ item.1.device }}"
      OSD_INITIAL_WEIGHT: "{{ osd_initial_weight }}"
  with_indexed_items: osds_bootstrap['disks']|default([])
  when: inventory_hostname in groups['ceph-osd']

# https://github.com/ansible/ansible-modules-core/pull/1031
- name: Waiting for bootstrap containers to exit
  command: docker wait "bootstrap_osd_{{ item.0 }}"
  register: bootstrap_result
  run_once: True
  failed_when: bootstrap_result.stdout != "0"
  with_indexed_items: osds_bootstrap['disks']|default([])
  when: inventory_hostname in groups['ceph-osd']

- name: Cleaning up bootstrap containers
  docker:
    name: "bootstrap_osd_{{ item.0 }}"
    image: "{{ ceph_osd_image_full }}"
    state: absent
  with_indexed_items: osds_bootstrap['disks']|default([])
  when: inventory_hostname in groups['ceph-osd']
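After the bootstrap containers exit, each disk should carry a KOLLA_CEPH_DATA partition plus a KOLLA_CEPH_JOURNAL partition (see the ceph-osd start.sh changes later in this commit). A quick manual check on an OSD host, with the device name again a placeholder:

    sgdisk -p /dev/sdb    # partition table should list KOLLA_CEPH_DATA and KOLLA_CEPH_JOURNAL
    blkid /dev/sdb1       # the data partition carries the XFS filesystem that start_osds.yml mounts by UUID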
22  ansible/roles/ceph/tasks/config.yml  Normal file
@@ -0,0 +1,22 @@
---
- include: ../../config.yml
  vars:
    service_name: "ceph-mon"
    config_source:
      - "roles/ceph/templates/ceph.conf.j2"
      - "/etc/kolla/config/ceph.conf"
    config_template_dest:
      - "{{ node_templates_directory }}/{{ service_name }}/ceph.conf_minimal"
      - "{{ node_templates_directory }}/{{ service_name }}/ceph.conf_augment"
    config_dest: "{{ node_config_directory }}/{{ service_name }}/ceph.conf"

- include: ../../config.yml
  vars:
    service_name: "ceph-osd"
    config_source:
      - "roles/ceph/templates/ceph.conf.j2"
      - "/etc/kolla/config/ceph.conf"
    config_template_dest:
      - "{{ node_templates_directory }}/{{ service_name }}/ceph.conf_minimal"
      - "{{ node_templates_directory }}/{{ service_name }}/ceph.conf_augment"
    config_dest: "{{ node_config_directory }}/{{ service_name }}/ceph.conf"
34  ansible/roles/ceph/tasks/distribute_keyrings.yml  Normal file
@@ -0,0 +1,34 @@
---
- name: Fetching Ceph keyrings
  command: docker exec ceph_mon fetch_ceph_keys.py
  register: ceph_files_json
  changed_when: "{{ (ceph_files_json.stdout | from_json).changed }}"
  failed_when: "{{ (ceph_files_json.stdout | from_json).failed }}"
  delegate_to: "{{ delegate_host }}"
  run_once: True

- name: Reading json from variable
  set_fact:
    ceph_files: "{{ (ceph_files_json.stdout | from_json) }}"

- name: Pushing Ceph keyring for OSDs
  bslurp:
    src: "{{ item.content }}"
    dest: "{{ node_config_directory }}/ceph-osd/{{ item.filename }}"
    mode: 0600
    sha1: "{{ item.sha1 }}"
  with_items:
    - "{{ ceph_files['ceph.client.admin.keyring'] }}"
  when: inventory_hostname in groups['ceph-osd']

- name: Pushing Ceph keyrings for Mons
  bslurp:
    src: "{{ item.content }}"
    dest: "{{ node_config_directory }}/ceph-mon/{{ item.filename }}"
    mode: 0600
    sha1: "{{ item.sha1 }}"
  with_items:
    - "{{ ceph_files['ceph.client.admin.keyring'] }}"
    - "{{ ceph_files['ceph.client.mon.keyring'] }}"
    - "{{ ceph_files['ceph.monmap'] }}"
  when: inventory_hostname in groups['ceph-mon']
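Since the keyrings travel as JSON on stdout, the handoff is easy to inspect by hand: run the same command the play delegates to the mon host and pretty-print the result (piping through python -m json.tool is purely for readability):

    docker exec ceph_mon fetch_ceph_keys.py | python -m json.tool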
30  ansible/roles/ceph/tasks/generate_cluster.yml  Normal file
@@ -0,0 +1,30 @@
---
- name: Generating Initial Ceph keyrings and monmap
  docker:
    detach: False
    docker_api_version: "{{ docker_api_version }}"
    net: host
    pull: "{{ docker_pull_policy }}"
    restart_policy: "no"
    state: reloaded
    registry: "{{ docker_registry }}"
    username: "{{ docker_registry_username }}"
    password: "{{ docker_registry_password }}"
    insecure_registry: "{{ docker_insecure_registry }}"
    name: ceph_mon
    image: "{{ ceph_mon_image_full }}"
    volumes: "{{ node_config_directory }}/ceph-mon/:/opt/kolla/ceph-mon/:ro"
    volumes_from:
      - "ceph_mon_data"
    env:
      KOLLA_BOOTSTRAP:
      KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}"
      MON_IP: "{{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}"

- name: Waiting a few seconds for the cluster to generate its keys
  command: sleep 3
  changed_when: False

- name: Setting host for cluster files
  set_fact:
    delegate_host: "{{ ansible_hostname }}"
16  ansible/roles/ceph/tasks/main.yml  Normal file
@@ -0,0 +1,16 @@
---
- include: config.yml

- include: bootstrap_mons.yml
  when: inventory_hostname in groups['ceph-mon']

- include: distribute_keyrings.yml

- include: start_mons.yml
  when: inventory_hostname in groups['ceph-mon']

- include: bootstrap_osds.yml
  when: inventory_hostname in groups['ceph-osd']

- include: start_osds.yml
  when: inventory_hostname in groups['ceph-osd']
22  ansible/roles/ceph/tasks/start_mons.yml  Normal file
@@ -0,0 +1,22 @@
---
- name: Starting ceph-mon container
  docker:
    docker_api_version: "{{ docker_api_version }}"
    net: host
    pull: "{{ docker_pull_policy }}"
    restart_policy: "{{ docker_restart_policy }}"
    restart_policy_retry: "{{ docker_restart_policy_retry }}"
    state: reloaded
    registry: "{{ docker_registry }}"
    username: "{{ docker_registry_username }}"
    password: "{{ docker_registry_password }}"
    insecure_registry: "{{ docker_insecure_registry }}"
    name: ceph_mon
    image: "{{ ceph_mon_image_full }}"
    volumes: "{{ node_config_directory }}/ceph-mon/:/opt/kolla/ceph-mon/:ro"
    volumes_from:
      - "ceph_mon_data"
    env:
      KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}"
      MON_IP: "{{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] }}"
  when: inventory_hostname in groups['ceph-mon']
50  ansible/roles/ceph/tasks/start_osds.yml  Normal file
@@ -0,0 +1,50 @@
---
- name: Looking up OSDs for Ceph
  find_disks:
    partition_name: 'KOLLA_CEPH_DATA'
  register: osds

- name: Mounting Ceph OSD volumes
  mount:
    src: "UUID={{ item.fs_uuid }}"
    fstype: xfs
    state: mounted
    name: "/var/lib/ceph/osd/{{ item.fs_uuid }}"
  with_items: osds.disks

- name: Gathering OSD IDs
  command: 'cat /var/lib/ceph/osd/{{ item.fs_uuid }}/whoami'
  with_items: osds.disks
  register: id
  changed_when: False
  failed_when: id.rc != 0

- name: Starting ceph-osds container
  docker:
    docker_api_version: "{{ docker_api_version }}"
    net: host
    pull: "{{ docker_pull_policy }}"
    restart_policy: "{{ docker_restart_policy }}"
    restart_policy_retry: "{{ docker_restart_policy_retry }}"
    state: reloaded
    registry: "{{ docker_registry }}"
    username: "{{ docker_registry_username }}"
    password: "{{ docker_registry_password }}"
    insecure_registry: "{{ docker_insecure_registry }}"
    pid: host
    privileged: True
    name: "ceph_osd_{{ item.0.stdout }}"
    image: "{{ ceph_osd_image_full }}"
    volumes:
      - "/var/lib/ceph/osd/{{ item.1.fs_uuid }}:/var/lib/ceph/osd/ceph-{{ item.0.stdout }}"
      - "{{ node_config_directory }}/ceph-osd/:/opt/kolla/ceph-osd/:ro"
      - "/dev/:/dev/"
    env:
      KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}"
      OSD_ID: "{{ item.0.stdout }}"
      OSD_DEV: "{{ item.1.device }}"
  with_together:
    - id.results
    - osds.disks
  when: inventory_hostname in groups['ceph-osd']
        and osds.disks
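Once the OSD containers are up, a rough health check is to list the containers and ask a monitor for cluster status; the ceph client being available inside the ceph_mon container is an assumption about the image contents:

    docker ps --filter name=ceph_    # expect ceph_mon, ceph_mon_data and one ceph_osd_<id> per disk
    docker exec ceph_mon ceph -s     # assumes the ceph CLI is present in the mon image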
9  ansible/roles/ceph/templates/ceph.conf.j2  Normal file
@@ -0,0 +1,9 @@
[global]
fsid = {{ ceph_cluster_fsid }}
mon initial members = {% for host in groups['ceph-mon'] %}{{ hostvars[host]['ansible_hostname'] }}{% if not loop.last %}, {% endif %}{% endfor %}

mon host = {% for host in groups['ceph-mon'] %}{{ hostvars[host]['ansible_' + api_interface]['ipv4']['address'] }}{% if not loop.last %}, {% endif %}{% endfor %}

auth cluster required = cephx
auth service required = cephx
auth client required = cephx
@@ -1,4 +1,8 @@
---
- hosts: [ceph-mon, ceph-osd]
  roles:
    - { role: ceph, tags: ceph, when: enable_ceph | bool }

- hosts: [haproxy, mariadb, rabbitmq, cinder-api, glance-api, keystone, nova-api, neutron-server, swift-proxy-server]
  roles:
    - { role: haproxy, tags: haproxy, when: enable_haproxy | bool }
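Because the role is tagged ceph and gated on enable_ceph, the Ceph portion can be run (or re-run) on its own. A hedged invocation sketch; the inventory and variable-file paths are assumptions about a typical Kolla layout, not something this patch defines:

    ansible-playbook -i ansible/inventory/multinode \
        -e @/etc/kolla/globals.yml -e @/etc/kolla/passwords.yml \
        ansible/site.yml --tags ceph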
@@ -15,3 +15,7 @@ RUN apt-get install -y --no-install-recommends \
    && apt-get clean

{% endif %}

RUN useradd --user-group ceph \
    && mkdir -p /home/ceph \
    && chown -R ceph: /home/ceph
@@ -3,5 +3,6 @@ MAINTAINER Kolla Project (https://launchpad.net/kolla)

COPY start.sh /
COPY config-external.sh /opt/kolla/
COPY fetch_ceph_keys.py /usr/bin/

CMD ["/start.sh"]
@@ -8,3 +8,33 @@ if [[ -f "$SOURCE" ]]; then
    chown ${OWNER}: $TARGET
    chmod 0644 $TARGET
fi

SOURCE="/opt/kolla/ceph-mon/ceph.client.admin.keyring"
TARGET="/etc/ceph/ceph.client.admin.keyring"
OWNER="ceph"

if [[ -f "$SOURCE" ]]; then
    cp $SOURCE $TARGET
    chown ${OWNER}: $TARGET
    chmod 0600 $TARGET
fi

SOURCE="/opt/kolla/ceph-mon/ceph.client.mon.keyring"
TARGET="/etc/ceph/ceph.client.mon.keyring"
OWNER="ceph"

if [[ -f "$SOURCE" ]]; then
    cp $SOURCE $TARGET
    chown ${OWNER}: $TARGET
    chmod 0600 $TARGET
fi

SOURCE="/opt/kolla/ceph-mon/ceph.monmap"
TARGET="/etc/ceph/ceph.monmap"
OWNER="ceph"

if [[ -f "$SOURCE" ]]; then
    cp $SOURCE $TARGET
    chown ${OWNER}: $TARGET
    chmod 0600 $TARGET
fi
67  docker/ceph/ceph-mon/fetch_ceph_keys.py  Executable file
@@ -0,0 +1,67 @@
#!/usr/bin/python

# Copyright 2015 Sam Yaple
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# This is a stripped down version of an ansible module I wrote in Yaodu to
# achieve the same goals we have for Kolla. I have relicensed it for Kolla
# https://github.com/SamYaple/yaodu/blob/master/ansible/library/bslurp

# Basically this module will fetch the admin and mon keyrings as well as the
# monmap file. It then hashes the content, compresses them, and finally it
# converts them to base64 to be safely transported around with ansible

import base64
import hashlib
import json
import os
import sys
import zlib


def json_exit(msg=None, failed=False, changed=False):
    if type(msg) is not dict:
        msg = {'msg': str(msg)}
    msg.update({'failed': failed, 'changed': changed})
    print(json.dumps(msg))
    sys.exit()


def read_file(filename):
    filename_path = os.path.join('/etc/ceph', filename)

    if not os.path.exists(filename_path):
        json_exit("file not found: {}".format(filename_path), failed=True)
    if not os.access(filename_path, os.R_OK):
        json_exit("file not readable: {}".format(filename_path), failed=True)

    with open(filename_path, 'rb') as f:
        raw_data = f.read()

    return {'content': base64.b64encode(zlib.compress(raw_data)),
            'sha1': hashlib.sha1(raw_data).hexdigest(),
            'filename': filename}


def main():
    admin_keyring = 'ceph.client.admin.keyring'
    mon_keyring = 'ceph.client.mon.keyring'
    monmap = 'ceph.monmap'

    files = [admin_keyring, mon_keyring, monmap]
    json_exit({filename: read_file(filename) for filename in files})


if __name__ == '__main__':
    main()
12  docker/ceph/ceph-mon/start.sh  Normal file → Executable file
@@ -3,11 +3,11 @@
set -o errexit

CMD="/usr/bin/ceph-mon"
ARGS="-d -i ${MON_NAME} --public-addr ${MON_IP}:6789"
ARGS="-d -i $(hostname) --public-addr ${MON_IP}:6789"

# Setup common paths
KEYRING_ADMIN="/etc/ceph/ceph.admin.keyring"
KEYRING_MON="/etc/ceph/ceph.mon.keyring"
KEYRING_ADMIN="/etc/ceph/ceph.client.admin.keyring"
KEYRING_MON="/etc/ceph/ceph.client.mon.keyring"
MONMAP="/etc/ceph/ceph.monmap"
MON_DIR="/var/lib/ceph/mon/ceph-$(hostname)"

@@ -21,7 +21,7 @@ set_configs
# of the KOLLA_BOOTSTRAP variable being set, including empty.
if [[ "${!KOLLA_BOOTSTRAP[@]}" ]]; then
    # Lookup our fsid from the ceph.conf
    FSID="$(awk '/^fsid/ {print $3; exit}' ${ceph_conf})"
    FSID="$(awk '/^fsid/ {print $3; exit}' /etc/ceph/ceph.conf)"

    # Generating initial keyrings and monmap
    ceph-authtool --create-keyring "${KEYRING_MON}" --gen-key -n mon. --cap mon 'allow *'

@@ -29,8 +29,8 @@ if [[ "${!KOLLA_BOOTSTRAP[@]}" ]]; then
    ceph-authtool "${KEYRING_MON}" --import-keyring "${KEYRING_ADMIN}"
    monmaptool --create --add "$(hostname)" "${MON_IP}" --fsid "${FSID}" "${MONMAP}"

    # TODO(SamYaple): Return json parsible output to ansible
    exit 0
    echo "Sleeping until keys are fetched"
    /bin/sleep infinity
fi

# This section runs on every mon that does not have a keyring already.
@@ -8,3 +8,13 @@ if [[ -f "$SOURCE" ]]; then
    chown ${OWNER}: $TARGET
    chmod 0644 $TARGET
fi

SOURCE="/opt/kolla/ceph-osd/ceph.client.admin.keyring"
TARGET="/etc/ceph/ceph.client.admin.keyring"
OWNER="ceph"

if [[ -f "$SOURCE" ]]; then
    cp $SOURCE $TARGET
    chown ${OWNER}: $TARGET
    chmod 0600 $TARGET
fi
42  docker/ceph/ceph-osd/start.sh  Normal file → Executable file
@@ -1,9 +1,7 @@
#!/bin/bash
set -o xtrace
set -o errexit

CMD="/usr/bin/ceph-osd"
ARGS="-f -d -i ${OSD_ID} --osd-journal ${OSD_DIR}/journal -k ${OSD_DIR}/keyring"

# Loading common functions.
source /opt/kolla/kolla-common.sh

@@ -13,23 +11,45 @@ set_configs
# Bootstrap and exit if KOLLA_BOOTSTRAP variable is set. This catches all cases
# of the KOLLA_BOOTSTRAP variable being set, including empty.
if [[ "${!KOLLA_BOOTSTRAP[@]}" ]]; then
    # Creating a new label for the disk
    parted "${OSD_DEV}" -s -- mklabel gpt
    JOURNAL_PARTUUID=$(uuidgen)

    # Formatting disk for ceph
    sgdisk --zap-all -- "${OSD_DEV}"
    sgdisk --new=2:1M:5G --change-name=2:KOLLA_CEPH_JOURNAL --typecode=2:45B0969E-9B03-4F30-B4C6-B4B80CEFF106 --partition-guid=2:${JOURNAL_PARTUUID} --mbrtogpt -- "${OSD_DEV}"
    sgdisk --largest-new=1 --change-name=1:KOLLA_CEPH_DATA --typecode=1:4FBD7E29-9D25-41B8-AFD0-062C0CEFF05D -- "${OSD_DEV}"
    # This command may throw errors that we can safely ignore
    partprobe || true

    # We look up the appropriate device path with partition.
    OSD_PARTITION="$(ls ${OSD_DEV}* | egrep ${OSD_DEV}p?1)"
    JOURNAL_PARTITION="${OSD_PARTITION%?}2"

    # Preparing the OSD for use with Ceph
    ceph-disk prepare "${OSD_DEV}"
    OSD_ID="$(ceph osd create)"
    OSD_DIR="/var/lib/ceph/osd/ceph-${OSD_ID}"
    mkdir -p "${OSD_DIR}"
    mount "${OSD_DEV}1" "${OSD_DIR}"
    ceph-osd -i "${OSD_ID}" --mkfs --mkkey
    ceph auth add "osd.${OSD_ID}" osd 'allow *' mon 'allow proflie osd' -i "${OSD_DIR}/keyring"
    mkfs.xfs -f "${OSD_PARTITION}"
    mount "${OSD_PARTITION}" "${OSD_DIR}"

    # Adding osd to crush map
    # This will throw an error about no key existing. That is normal. It then
    # creates the key in the next step.
    ceph-osd -i "${OSD_ID}" --mkfs --osd-journal="${JOURNAL_PARTITION}" --mkkey
    ceph auth add "osd.${OSD_ID}" osd 'allow *' mon 'allow profile osd' -i "${OSD_DIR}/keyring"
    umount "${OSD_PARTITION}"

    # These commands only need to be run once per host but are safe to run
    # repeatedly. This can be improved later or if any problems arise.
    ceph osd crush add-bucket "$(hostname)" host
    ceph osd crush move "$(hostname)" root=default

    # Adding osd to crush map
    ceph osd crush add "${OSD_ID}" "${OSD_INITIAL_WEIGHT}" host="$(hostname)"
    exit 0
fi

# We look up the appropriate journal since we cannot rely on symlinks
JOURNAL_PARTITION="$(ls ${OSD_DEV}* | egrep ${OSD_DEV}p?2)"
OSD_DIR="/var/lib/ceph/osd/ceph-${OSD_ID}"
CMD="/usr/bin/ceph-osd"
ARGS="-f -d -i ${OSD_ID} --osd-journal ${JOURNAL_PARTITION} -k ${OSD_DIR}/keyring"

exec $CMD $ARGS
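The egrep ${OSD_DEV}p?1 lookup exists because partition naming differs by device type: /dev/sdb gets /dev/sdb1, while devices such as /dev/nvme0n1 insert a "p" (e.g. /dev/nvme0n1p1). A small illustration of what the expression matches (device names are examples only):

    OSD_DEV=/dev/sdb;      ls ${OSD_DEV}* | egrep "${OSD_DEV}p?1"   # matches /dev/sdb1
    OSD_DEV=/dev/nvme0n1;  ls ${OSD_DEV}* | egrep "${OSD_DEV}p?1"   # matches /dev/nvme0n1p1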
@@ -40,7 +40,6 @@ RUN curl https://bootstrap.pypa.io/get-pip.py -o get-pip.py \
        python-tuskarclient \
        python-zaqarclient \
        python-openstackclient \
        MySQL-python \
        numpy
        MySQL-python

{% endif %}
0  etc/kolla/config/ceph.conf  Normal file
@@ -3,6 +3,12 @@
# Ansible vault for locking down the secrets properly.


###################
# Ceph options
####################
ceph_cluster_fsid: "5fba2fbc-551d-11e5-a8ce-01ef4c5cf93c"


###################
# Database options
####################

@@ -46,6 +52,7 @@ heat_domain_admin_password: "password"
murano_database_password: "password"
murano_keystone_password: "password"


####################
# RabbitMQ options
####################
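The fsid shipped here is only a sample value; every real deployment should generate its own cluster id before the first deploy, for example with uuidgen (the same tool the OSD bootstrap already uses for journal GUIDs):

    uuidgen    # paste the result into ceph_cluster_fsid before deploying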