Remove ceph block device support

Certain Ceph mentions are left in globals.yml.j2, as that file
needs syncing with kolla-ansible contents anyway
(these are all comments).

Change-Id: I05e9c6223583e9bb5dc0020edc0b56990275093c
Story: 2007295
Task: 38766
Radosław Piliszek 2020-02-23 21:32:26 +01:00 committed by Mark Goddard
parent 5ee815b0e9
commit e7d7daa531
48 changed files with 14 additions and 775 deletions

View File

@@ -1,8 +0,0 @@
---
- name: Ensure Ceph disks are tagged
hosts: overcloud
tags:
- kolla-ceph
roles:
- role: kolla-ceph
when: kolla_enable_ceph | bool

View File

@@ -1,7 +0,0 @@
---
###############################################################################
# OpenStack Ceph configuration.
# Ansible host pattern matching hosts on which Ceph storage services
# are deployed. The default is to use hosts in the 'storage' group.
ceph_hosts: "storage"

View File

@@ -19,7 +19,6 @@ compute_default_network_interfaces: >
{{ ([admin_oc_net_name,
internal_net_name,
storage_net_name,
-ceph_storage_net_name,
tunnel_net_name] +
(external_net_names if kolla_enable_neutron_provider_networks | bool else [])) | reject('none') | unique | list }}
@@ -110,16 +109,6 @@ compute_lvm_group_data_lv_docker_volumes_size: 75%VG
# Filesystem for docker volumes LVM backing volume. ext4 allows for shrinking.
compute_lvm_group_data_lv_docker_volumes_fs: ext4
-###############################################################################
-# Compute node Ceph configuration.
-# List of Ceph disks.
-# The format is a list of dict like :
-# - { osd: "/dev/sdb", journal: "/dev/sdc" }
-# - { osd: "/dev/sdd" }
-# Journal variable is not mandatory.
-compute_ceph_disks: []
###############################################################################
# Compute node sysctl configuration.
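For context, the expression that remains builds the interface list by filtering unset network names and deduplicating. A minimal Python sketch of how ``reject('none') | unique | list`` resolves, using hypothetical network names:

.. code-block:: python

   # Sketch of the Jinja pipeline above with hypothetical network names;
   # reject('none') drops unset (None) names, unique keeps first occurrences.
   admin_oc_net_name = "admin_net"
   internal_net_name = "internal_net"
   storage_net_name = "storage_net"
   tunnel_net_name = None  # unset network, filtered out by reject('none')
   external_net_names = ["external_net", "storage_net"]  # duplicate on purpose
   kolla_enable_neutron_provider_networks = True

   names = ([admin_oc_net_name, internal_net_name, storage_net_name,
             tunnel_net_name] +
            (external_net_names
             if kolla_enable_neutron_provider_networks else []))

   result = []
   for name in names:
       if name is not None and name not in result:
           result.append(name)

   print(result)  # ['admin_net', 'internal_net', 'storage_net', 'external_net']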

View File

@@ -24,7 +24,6 @@ controller_default_network_interfaces: >
inspection_net_name,
internal_net_name,
storage_net_name,
-ceph_storage_net_name,
swift_storage_net_name,
cleaning_net_name] | reject('none') | unique | list }}
@@ -121,16 +120,6 @@ controller_lvm_group_data_lv_docker_volumes_size: 75%VG
# Filesystem for docker volumes LVM backing volume. ext4 allows for shrinking.
controller_lvm_group_data_lv_docker_volumes_fs: ext4
-###############################################################################
-# Controller node Ceph configuration.
-# List of Ceph disks.
-# The format is a list of dict like :
-# - { osd: "/dev/sdb", journal: "/dev/sdc" }
-# - { osd: "/dev/sdd" }
-# Journal variable is not mandatory.
-controller_ceph_disks: []
###############################################################################
# Controller node sysctl configuration.

View File

@@ -118,8 +118,6 @@ overcloud_container_image_regex_map:
enabled: "{{ kolla_enable_blazar | bool }}"
- regex: ceilometer
enabled: "{{ kolla_enable_ceilometer | bool }}"
-- regex: ceph
-enabled: "{{ kolla_enable_ceph | bool }}"
- regex: cinder
enabled: "{{ kolla_enable_cinder | bool }}"
- regex: cloudkitty
@@ -346,7 +344,6 @@ kolla_enable_aodh: "no"
kolla_enable_barbican: "no"
kolla_enable_blazar: "no"
kolla_enable_central_logging: "no"
-kolla_enable_ceph: "no"
kolla_enable_ceilometer: "no"
# The chrony container is disabled by default on CentOS 7 because we enable an
# NTP daemon on the host. On CentOS 8 the chrony container is enabled by
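The regex map in the first hunk drives which container images are considered for a service, gated on the corresponding enable flag. A small sketch of that selection, with hypothetical image names:

.. code-block:: python

   # Sketch of filtering image names against a (regex, enabled) map like the
   # one above; the image names are hypothetical.
   import re

   regex_map = [
       {"regex": "ceilometer", "enabled": True},
       {"regex": "cinder", "enabled": False},
   ]

   images = ["kolla/centos-source-ceilometer-central",
             "kolla/centos-source-cinder-volume"]

   selected = [image for image in images
               if any(entry["enabled"] and re.search(entry["regex"], image)
                      for entry in regex_map)]
   print(selected)  # ['kolla/centos-source-ceilometer-central']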

View File

@@ -49,12 +49,6 @@ storage_net_name: 'storage_net'
# Name of the network used to carry storage management traffic.
storage_mgmt_net_name: 'storage_mgmt_net'
-# Name of the network used to carry ceph storage data traffic.
-ceph_storage_net_name: "{{ storage_net_name }}"
-# Name of the network used to carry ceph storage management traffic.
-ceph_storage_mgmt_net_name: "{{ storage_mgmt_net_name }}"
# Name of the network used to carry swift storage data traffic.
swift_storage_net_name: "{{ storage_net_name }}"
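The removed defaults chained the Ceph network names to the storage networks through Jinja references, so an override of ``storage_net_name`` carried through unless the Ceph names were set explicitly. A tiny sketch of that lazy defaulting, with hypothetical values:

.. code-block:: python

   # Lazy defaulting sketch: the dependent name follows later overrides of
   # storage_net_name, as a Jinja reference would. Values are hypothetical.
   config = {"storage_net_name": "storage_net"}

   def ceph_storage_net_name():
       return config.get("ceph_storage_net_name", config["storage_net_name"])

   config["storage_net_name"] = "example2"
   print(ceph_storage_net_name())  # example2 - the default tracks the override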

View File

@@ -13,10 +13,6 @@ storage_bootstrap_user: "{{ lookup('env', 'USER') }}"
storage_network_interfaces: >
{{ (storage_default_network_interfaces +
storage_extra_network_interfaces +
-([ceph_storage_net_name]
-if storage_needs_ceph_network else []) +
-([ceph_storage_mgmt_net_name]
-if storage_needs_ceph_mgmt_network else []) +
([swift_storage_net_name]
if storage_needs_swift_network else []) +
([swift_storage_replication_net_name]
@@ -32,15 +28,6 @@ storage_default_network_interfaces: >
# List of extra networks to which storage nodes are attached.
storage_extra_network_interfaces: []
-# Whether this host requires access to Ceph networks.
-storage_needs_ceph_network: >-
-{{ kolla_enable_ceph | bool and
-inventory_hostname in query('inventory_hostnames', ceph_hosts) }}
-storage_needs_ceph_mgmt_network: >-
-{{ kolla_enable_ceph | bool and
-inventory_hostname in query('inventory_hostnames', ceph_hosts) }}
# Whether this host requires access to Swift networks.
storage_needs_swift_network: >-
{{ kolla_enable_swift | bool and
@@ -134,16 +121,6 @@ storage_lvm_group_data_lv_docker_volumes_size: 75%VG
# Filesystem for docker volumes LVM backing volume. ext4 allows for shrinking.
storage_lvm_group_data_lv_docker_volumes_fs: ext4
-###############################################################################
-# Storage node Ceph configuration.
-# List of Ceph disks.
-# The format is a list of dict like :
-# - { osd: "/dev/sdb", journal: "/dev/sdc" }
-# - { osd: "/dev/sdd" }
-# Journal variable is not mandatory.
-storage_ceph_disks: []
###############################################################################
# Storage node sysctl configuration.
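The removed ``storage_needs_ceph_network`` variables attached the Ceph networks only on hosts matched by the ``ceph_hosts`` pattern, mirroring the Swift logic that remains. A rough Python equivalent of that conditional attachment, with hypothetical inventory data:

.. code-block:: python

   # Rough equivalent of the conditional network attachment above; the
   # inventory data is a hypothetical stand-in for Ansible's
   # query('inventory_hostnames', ...) lookup.
   kolla_enable_swift = True
   swift_hosts = {"storage0", "storage1"}
   inventory_hostname = "storage0"

   storage_default_network_interfaces = ["admin_oc_net", "internal_net",
                                         "storage_net", "storage_mgmt_net"]
   storage_extra_network_interfaces = []
   swift_storage_net_name = "storage_net"

   storage_needs_swift_network = (kolla_enable_swift and
                                  inventory_hostname in swift_hosts)

   interfaces = (storage_default_network_interfaces +
                 storage_extra_network_interfaces +
                 ([swift_storage_net_name]
                  if storage_needs_swift_network else []))

   # Mirror reject('none') | unique | list from the Jinja expression.
   unique = []
   for net in interfaces:
       if net is not None and net not in unique:
           unique.append(net)
   print(unique)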

View File

@@ -1,6 +0,0 @@
---
###############################################################################
# Compute node Ceph configuration.
# List of Ceph disks.
ceph_disks: "{{ compute_ceph_disks }}"

View File

@@ -1,6 +0,0 @@
---
###############################################################################
# Controller node Ceph configuration.
# List of Ceph disks.
ceph_disks: "{{ controller_ceph_disks }}"

View File

@@ -1,6 +0,0 @@
---
###############################################################################
# Storage node Ceph configuration.
# List of Ceph disks.
ceph_disks: "{{ storage_ceph_disks }}"

View File

@@ -33,16 +33,6 @@
kolla_cluster_interface: "{{ storage_mgmt_net_name | net_interface | replace('-', '_') }}"
when: storage_mgmt_net_name in network_interfaces
-- name: Set Ceph storage network interface
-set_fact:
-kolla_ceph_storage_interface: "{{ ceph_storage_net_name | net_interface | replace('-', '_') }}"
-when: ceph_storage_net_name in network_interfaces
-- name: Set Ceph cluster network interface
-set_fact:
-kolla_ceph_cluster_interface: "{{ ceph_storage_mgmt_net_name | net_interface | replace('-', '_') }}"
-when: ceph_storage_mgmt_net_name in network_interfaces
- name: Set Swift storage network interface
set_fact:
kolla_swift_storage_interface: "{{ swift_storage_net_name | net_interface | replace('-', '_') }}"
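Each of these tasks resolves a Kayobe network name to the host's interface for that network and replaces dashes with underscores, which lines up with how Ansible names per-interface facts (for example, facts for ``br-storage`` appear under ``ansible_br_storage``). A small sketch of the normalisation, with the ``net_interface`` lookup stubbed out by hypothetical data:

.. code-block:: python

   # Sketch of the interface name normalisation above; the network-to-
   # interface mapping is a hypothetical stand-in for the net_interface
   # filter.
   network_interfaces = {
       "storage_net": "br-storage",
       "storage_mgmt_net": "bond0",
   }

   def kolla_interface(net_name):
       # "{{ net_name | net_interface | replace('-', '_') }}"
       return network_interfaces[net_name].replace("-", "_")

   print(kolla_interface("storage_net"))  # br_storage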

View File

@@ -106,7 +106,6 @@
with_items:
- { name: blazar, file: blazar.conf }
- { name: ceilometer, file: ceilometer.conf }
-- { name: ceph, file: ceph.conf }
- { name: cinder, file: cinder.conf }
- { name: cloudkitty, file: cloudkitty.conf }
- { name: designate, file: designate.conf }
@@ -215,7 +214,6 @@
# Extra free-form user-provided configuration.
kolla_extra_blazar: "{{ kolla_extra_config.blazar | default }}"
kolla_extra_ceilometer: "{{ kolla_extra_config.ceilometer | default }}"
-kolla_extra_ceph: "{{ kolla_extra_config.ceph | default }}"
kolla_extra_cinder: "{{ kolla_extra_config.cinder | default }}"
kolla_extra_cloudkitty: "{{ kolla_extra_config.cloudkitty | default }}"
kolla_extra_designate: "{{ kolla_extra_config.designate | default }}"

View File

@@ -120,8 +120,6 @@ kolla_overcloud_inventory_pass_through_host_vars:
- "kolla_api_interface"
- "kolla_storage_interface"
- "kolla_cluster_interface"
-- "kolla_ceph_storage_interface"
-- "kolla_ceph_cluster_interface"
- "kolla_swift_storage_interface"
- "kolla_swift_replication_interface"
- "kolla_provision_interface"
@@ -141,8 +139,6 @@ kolla_overcloud_inventory_pass_through_host_vars_map:
kolla_api_interface: "api_interface"
kolla_storage_interface: "storage_interface"
kolla_cluster_interface: "cluster_interface"
-kolla_ceph_storage_interface: "ceph_storage_interface"
-kolla_ceph_cluster_interface: "ceph_cluster_interface"
kolla_swift_storage_interface: "swift_storage_interface"
kolla_swift_replication_interface: "swift_replication_interface"
kolla_provision_interface: "provision_interface"
@@ -249,8 +245,6 @@ kolla_openstack_logging_debug:
#kolla_enable_blazar:
#kolla_enable_ceilometer:
#kolla_enable_central_logging:
-#kolla_enable_ceph:
-#kolla_enable_ceph_rgw:
#kolla_enable_cinder:
#kolla_enable_cinder_backend_iscsi:
#kolla_enable_cinder_backend_lvm:

View File

@@ -152,9 +152,6 @@ control
[ironic:children]
control
-[ceph:children]
-control
[magnum:children]
control

View File

@@ -89,25 +89,6 @@ neutron
[ironic-neutron-agent:children]
neutron
-# Ceph
-[ceph-mds:children]
-ceph
-[ceph-mgr:children]
-ceph
-[ceph-nfs:children]
-ceph
-[ceph-mon:children]
-ceph
-[ceph-rgw:children]
-ceph
-[ceph-osd:children]
-storage
# Cinder
[cinder-api:children]
cinder

View File

@@ -26,8 +26,6 @@
kolla_provision_interface: "eth8"
kolla_inspector_dnsmasq_interface: "eth9"
kolla_tunnel_interface: "eth10"
-kolla_ceph_storage_interface: "eth11"
-kolla_ceph_cluster_interface: "eth12"
kolla_swift_storage_interface: "eth13"
kolla_swift_replication_interface: "eth14"
@@ -42,7 +40,6 @@
kolla_neutron_external_interfaces: "eth4,eth5"
kolla_neutron_bridge_names: "br0,br1"
kolla_tunnel_interface: "eth6"
-kolla_ceph_storage_interface: "eth7"
- name: Create a temporary directory
tempfile:
@@ -129,8 +126,6 @@
kolla_enable_blazar: True
kolla_enable_ceilometer: True
kolla_enable_central_logging: True
-kolla_enable_ceph: True
-kolla_enable_ceph_rgw: True
kolla_enable_chrony: True
kolla_enable_cinder: True
kolla_enable_cinder_backend_hnas_iscsi: True
@@ -266,8 +261,6 @@
#enable_blazar: True
#enable_ceilometer: True
#enable_central_logging: True
-#enable_ceph: True
-#enable_ceph_rgw: True
#enable_chrony: True
#enable_cinder: True
#enable_cinder_backend_iscsi: True
@@ -355,8 +348,6 @@
- kolla_external_vip_interface
- storage_interface
- cluster_interface
-- ceph_storage_interface
-- ceph_cluster_interface
- swift_storage_interface
- swift_replication_interface
- provision_interface
@@ -494,8 +485,6 @@
api_interface: "eth2"
storage_interface: "eth3"
cluster_interface: "eth4"
-ceph_storage_interface: "eth11"
-ceph_cluster_interface: "eth12"
swift_storage_interface: "eth13"
swift_replication_interface: "eth14"
provision_interface: "eth8"
@@ -511,7 +500,6 @@
network_interface: "eth0"
api_interface: "eth2"
storage_interface: "eth3"
-ceph_storage_interface: "eth7"
tunnel_interface: "eth6"
neutron_external_interface: "eth4,eth5"
neutron_bridge_name: "br0,br1"

View File

@@ -79,11 +79,6 @@ kolla_feature_flags:
- ceilometer_ipmi
- cells
- central_logging
-- ceph
-- ceph_mds
-- ceph_rgw
-- ceph_nfs
-- ceph_dashboard
- chrony
- cinder
- cinder_backup

View File

@@ -1,4 +0,0 @@
---
# List of Ceph disks.
ceph_disks: []

View File

@@ -1,85 +0,0 @@
---
- name: Ensure required packages are installed
package:
name: parted
state: present
become: True
when: ceph_disks | length > 0
- name: Check the presence of a partition on the OSD disks
become: True
parted:
device: "{{ item.osd }}"
with_items: "{{ ceph_disks }}"
register: "disk_osd_info"
- name: Check the presence of a partition on the journal disks
become: True
parted:
device: "{{ item.journal }}"
with_items: "{{ ceph_disks }}"
register: "disk_journal_info"
when:
- item.journal is defined
- name: Fail if the Ceph OSD disks have already a partition
fail:
msg: >
The physical disk {{ item.item }} already has a partition.
Ensure that each disk in 'ceph_disks' does not have any partitions.
with_items: "{{ disk_osd_info.results }}"
when:
- item.partitions | length > 0
- not item.partitions.0.name.startswith('KOLLA_CEPH')
loop_control:
label: "{{item.item}}"
- name: Fail if the Ceph journal disks have already a partition
fail:
msg: >
The physical disk {{ item.item }} already has a partition.
Ensure that each disk in 'ceph_disks' does not have any partitions.
with_items: "{{ disk_journal_info.results }}"
when:
- item is not skipped
- item.partitions | length > 0
- not item.partitions.0.name.startswith('KOLLA_CEPH')
loop_control:
label: "{{item.item}}"
- name: Create tag partition for Ceph OSD
become: True
parted:
device: "{{ item.item.osd }}"
number: 1
label: gpt
name: "{{ part_label }}"
state: present
with_items: "{{ disk_osd_info.results }}"
when: item.partitions | length == 0
loop_control:
label: "{{item.item}}"
vars:
part_label: "{% if item.item.journal is defined %}{{ part_label_with_journal }}{% else %}KOLLA_CEPH_OSD_BOOTSTRAP{% endif %}"
part_label_with_journal: "KOLLA_CEPH_OSD_BOOTSTRAP_{{ (osd_id | hash('md5'))[:9] }}"
osd_id: "{{ item.item.osd | basename }}{{ ansible_hostname }}"
- name: Create tag partition for Ceph external journal
become: True
parted:
device: "{{ item.item.journal }}"
number: 1
label: gpt
name: "{{ part_label }}"
state: present
with_items: "{{ disk_journal_info.results }}"
when:
- item is not skipped
- item.partitions | length == 0
loop_control:
label: "{{item.item}}"
vars:
part_label: "KOLLA_CEPH_OSD_BOOTSTRAP_{{ (osd_id | hash('md5'))[:9] }}_J"
osd_id: "{{ item.item.osd | basename }}{{ ansible_hostname }}"
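The partition labels written by this role are derived from the OSD device basename and the host name. A standalone sketch of that derivation, using a hypothetical device and host (an OSD without an external journal instead gets the plain ``KOLLA_CEPH_OSD_BOOTSTRAP`` label):

.. code-block:: python

   # Standalone sketch of the label derivation in the vars above; the
   # device and hostname are hypothetical.
   import hashlib

   def bootstrap_label(osd_device, hostname, journal=False):
       # osd_id: "{{ item.item.osd | basename }}{{ ansible_hostname }}"
       osd_id = osd_device.rsplit("/", 1)[-1] + hostname
       # (osd_id | hash('md5'))[:9]
       digest = hashlib.md5(osd_id.encode()).hexdigest()[:9]
       return "KOLLA_CEPH_OSD_BOOTSTRAP_" + digest + ("_J" if journal else "")

   print(bootstrap_label("/dev/sdb", "storage0"))        # OSD data label
   print(bootstrap_label("/dev/sdb", "storage0", True))  # journal label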

View File

@@ -1,6 +0,0 @@
---
# NOTE: Use import_tasks here, since tags are not applied to tasks included via
# include_tasks.
- import_tasks: config.yml
tags:
- config

View File

@@ -1,14 +0,0 @@
---
- import_playbook: test-no-journal.yml
- import_playbook: test-journal.yml
- import_playbook: test-bootstrapped-journal.yml
- import_playbook: test-data-journal.yml
- hosts: localhost
connection: local
tasks:
- name: Fail if any tests failed
fail:
msg: >
Test failures: {{ test_failures }}
when: test_failures is defined

View File

@@ -1,114 +0,0 @@
---
# Test case with an OSD and external journal that have already been tagged by
# kayobe with the kolla-ansible bootstrap label, but have not yet been
# converted to use the in-use label.
- hosts: localhost
connection: local
tasks:
- name: Allocate a temporary file for a fake OSD
tempfile:
register: osd_tempfile
- name: Allocate a temporary file for a fake journal
tempfile:
register: journal_tempfile
- name: Allocate a fake OSD file
command: fallocate -l 10M {{ osd_tempfile.path }}
- name: Allocate a fake journal file
command: fallocate -l 10M {{ journal_tempfile.path }}
- name: Create tag partition for the fake OSD
become: True
parted:
device: "{{ osd_tempfile.path }}"
number: 1
label: gpt
name: "{{ part_label }}"
state: present
vars:
part_label: "KOLLA_CEPH_OSD_BOOTSTRAP_{{ (osd_id | hash('md5'))[:9] }}"
osd_id: "{{ osd_tempfile.path | basename }}{{ ansible_hostname }}"
- name: Create tag partition for the fake journal
become: True
parted:
device: "{{ journal_tempfile.path }}"
number: 1
label: gpt
name: "{{ part_label }}"
state: present
vars:
part_label: "KOLLA_CEPH_OSD_BOOTSTRAP_{{ (osd_id | hash('md5'))[:9] }}_J"
osd_id: "{{ osd_tempfile.path | basename }}{{ ansible_hostname }}"
- block:
- name: Test the kolla-ceph role
include_role:
name: ../../kolla-ceph
vars:
ceph_disks:
- osd: "{{ osd_tempfile.path }}"
journal: "{{ journal_tempfile.path }}"
- name: Get name of fake OSD partition
parted:
device: "{{ osd_tempfile.path }}"
register: "disk_osd_info"
become: True
- name: Validate number of OSD partitions
assert:
that: disk_osd_info.partitions | length == 1
msg: >
Number of OSD partitions is not correct. Expected 1,
actual {{ disk_osd_info.partitions | length }}
- name: Validate OSD tag is present
assert:
that: "disk_osd_info.partitions.0.name == expected"
msg: >
Name of OSD partition is not correct. Expected {{ expected }},
actual {{ disk_osd_info.partitions.0.name }}.
vars:
expected: "{{ 'KOLLA_CEPH_OSD_BOOTSTRAP_' ~ ((osd_tempfile.path | basename ~ ansible_hostname) | hash('md5'))[:9] }}"
- name: Get name of fake journal partition
parted:
device: "{{ journal_tempfile.path }}"
register: "disk_journal_info"
become: True
- name: Validate number of journal partitions
assert:
that: disk_journal_info.partitions | length == 1
msg: >
Number of journal partitions is not correct. Expected 1,
actual {{ disk_journal_info.partitions | length }}
- name: Validate journal tag is present
assert:
that: "disk_journal_info.partitions.0.name == expected"
msg: >
Name of journal partition is not correct. Expected {{ expected }},
actual {{ disk_journal_info.partitions.0.name }}.
vars:
expected: "{{ 'KOLLA_CEPH_OSD_BOOTSTRAP_' ~ (( osd_tempfile.path | basename ~ ansible_hostname) | hash('md5'))[:9] ~ '_J' }}"
always:
- name: Remove the fake OSD file
file:
name: "{{ osd_tempfile.path }}"
state: absent
- name: Remove the fake journal file
file:
name: "{{ journal_tempfile.path }}"
state: absent
rescue:
- name: Flag that a failure occurred
set_fact:
test_failures: "{{ test_failures | default(0) | int + 1 }}"

View File

@@ -1,113 +0,0 @@
---
# Test case with an OSD and external journal that have been converted by
# kolla-ansible to use the in-use label.
- hosts: localhost
connection: local
tasks:
- name: Allocate a temporary file for a fake OSD
tempfile:
register: osd_tempfile
- name: Allocate a temporary file for a fake journal
tempfile:
register: journal_tempfile
- name: Allocate a fake OSD file
command: fallocate -l 10M {{ osd_tempfile.path }}
- name: Allocate a fake journal file
command: fallocate -l 10M {{ journal_tempfile.path }}
- name: Create tag partition for the fake OSD
become: True
parted:
device: "{{ osd_tempfile.path }}"
number: 1
label: gpt
name: "{{ part_label }}"
state: present
vars:
part_label: "KOLLA_CEPH_DATA_{{ (osd_id | hash('md5'))[:9]}}"
osd_id: "{{ (osd_tempfile.path | basename ~ ansible_hostname) }}"
- name: Create tag partition for the fake journal
become: True
parted:
device: "{{ journal_tempfile.path }}"
number: 1
label: gpt
name: "{{ part_label }}"
state: present
vars:
part_label: "KOLLA_CEPH_DATA_{{ (osd_id | hash('md5'))[:9] }}_J"
osd_id: "{{ (osd_tempfile.path | basename ~ ansible_hostname) }}"
- block:
- name: Test the kolla-ceph role
include_role:
name: ../../kolla-ceph
vars:
ceph_disks:
- osd: "{{ osd_tempfile.path }}"
journal: "{{ journal_tempfile.path }}"
- name: Get name of fake OSD partition
parted:
device: "{{ osd_tempfile.path }}"
register: "disk_osd_info"
become: True
- name: Validate number of OSD partitions
assert:
that: disk_osd_info.partitions | length == 1
msg: >
Number of OSD partitions is not correct. Expected 1,
actual {{ disk_osd_info.partitions | length }}
- name: Validate OSD tag is present
assert:
that: "disk_osd_info.partitions.0.name == expected"
msg: >
Name of OSD partition is not correct. Expected {{ expected }},
actual {{ disk_osd_info.partitions.0.name }}.
vars:
expected: "{{ 'KOLLA_CEPH_DATA_' ~ ((osd_tempfile.path | basename ~ ansible_hostname)| hash('md5'))[:9] }}"
- name: Get name of fake journal partition
parted:
device: "{{ journal_tempfile.path }}"
register: "disk_journal_info"
become: True
- name: Validate number of journal partitions
assert:
that: disk_journal_info.partitions | length == 1
msg: >
Number of journal partitions is not correct. Expected 1,
actual {{ disk_journal_info.partitions | length }}
- name: Validate journal tag is present
assert:
that: "disk_journal_info.partitions.0.name == expected"
msg: >
Name of journal partition is not correct. Expected {{ expected }},
actual {{ disk_journal_info.partitions.0.name }}.
vars:
expected: "{{ 'KOLLA_CEPH_DATA_' ~ ((osd_tempfile.path | basename ~ ansible_hostname)| hash('md5'))[:9] ~ '_J' }}"
always:
- name: Remove the fake OSD file
file:
name: "{{ osd_tempfile.path }}"
state: absent
- name: Remove the fake journal file
file:
name: "{{ journal_tempfile.path }}"
state: absent
rescue:
- name: Flag that a failure occurred
set_fact:
test_failures: "{{ test_failures | default(0) | int + 1 }}"

View File

@@ -1,89 +0,0 @@
---
# Test case with an OSD and external journal that have not yet been tagged by
# kayobe with the kolla-ansible bootstrap label.
- hosts: localhost
connection: local
tasks:
- name: Allocate a temporary file for a fake OSD
tempfile:
register: osd_tempfile
- name: Allocate a temporary file for a fake journal
tempfile:
register: journal_tempfile
- name: Allocate a fake OSD file
command: fallocate -l 10M {{ osd_tempfile.path }}
- name: Allocate a fake journal file
command: fallocate -l 10M {{ journal_tempfile.path }}
- block:
- name: Test the kolla-ceph role
include_role:
name: ../../kolla-ceph
vars:
ceph_disks:
- osd: "{{ osd_tempfile.path }}"
journal: "{{ journal_tempfile.path }}"
- name: Get name of fake OSD partition
parted:
device: "{{ osd_tempfile.path }}"
register: "disk_osd_info"
become: True
- name: Validate number of OSD partitions
assert:
that: disk_osd_info.partitions | length == 1
msg: >
Number of OSD partitions is not correct. Expected 1,
actual {{ disk_osd_info.partitions | length }}
- name: Validate OSD tag is present
assert:
that: "disk_osd_info.partitions.0.name == expected"
msg: >
Name of OSD partition is not correct. Expected {{ expected }},
actual {{ disk_osd_info.partitions.0.name }}.
vars:
expected: "{{ 'KOLLA_CEPH_OSD_BOOTSTRAP_' ~ ((osd_tempfile.path | basename ~ ansible_hostname)| hash('md5'))[:9] }}"
- name: Get name of fake journal partition
parted:
device: "{{ journal_tempfile.path }}"
register: "disk_journal_info"
become: True
- name: Validate number of journal partitions
assert:
that: disk_journal_info.partitions | length == 1
msg: >
Number of journal partitions is not correct. Expected 1,
actual {{ disk_journal_info.partitions | length }}
- name: Validate journal tag is present
assert:
that: "disk_journal_info.partitions.0.name == expected"
msg: >
Name of journal partition is not correct. Expected {{ expected }},
actual {{ disk_journal_info.partitions.0.name }}.
vars:
expected: "{{ 'KOLLA_CEPH_OSD_BOOTSTRAP_' ~ ((osd_tempfile.path | basename ~ ansible_hostname)| hash('md5'))[:9] ~ '_J' }}"
always:
- name: Remove the fake OSD file
file:
name: "{{ osd_tempfile.path }}"
state: absent
- name: Remove the fake journal file
file:
name: "{{ journal_tempfile.path }}"
state: absent
rescue:
- name: Flag that a failure occurred
set_fact:
test_failures: "{{ test_failures | default(0) | int + 1 }}"

View File

@@ -1,50 +0,0 @@
---
# Test case with an OSD and no external journal that has not yet been tagged by
# kayobe with the kolla-ansible bootstrap label.
- hosts: localhost
connection: local
tasks:
- name: Allocate a temporary file for a fake OSD
tempfile:
register: tempfile
- name: Allocate a fake OSD file
command: fallocate -l 10M {{ tempfile.path }}
- block:
- name: Test the kolla-ceph role
include_role:
name: ../../kolla-ceph
vars:
ceph_disks:
- osd: "{{ tempfile.path }}"
- name: Get name of fake partition
parted:
device: "{{ tempfile.path }}"
register: "disk_osd_info"
become: True
- name: Validate number of partition
assert:
that: disk_osd_info.partitions | length == 1
msg: >
Number of partition is not correct.
- name: Validate OSD tag is present
assert:
that: "disk_osd_info.partitions.0.name == 'KOLLA_CEPH_OSD_BOOTSTRAP'"
msg: >
Name of partition is not correct.
always:
- name: Remove the fake OSD file
file:
name: "{{ tempfile.path }}"
state: absent
rescue:
- name: Flag that a failure occurred
set_fact:
test_failures: "{{ test_failures | default(0) | int + 1 }}"

View File

@@ -35,15 +35,6 @@ kolla_enable_ceilometer:
# Free form extra configuration to append to ceilometer.conf.
kolla_extra_ceilometer:
-###############################################################################
-# ceph configuration.
-# Whether to enable ceph.
-kolla_enable_ceph:
-# Free form extra configuration to append to ceph.conf.
-kolla_extra_ceph:
###############################################################################
# cinder configuration.

View File

@@ -37,8 +37,7 @@ def test_service_config_directory(host, path):
@pytest.mark.parametrize(
'path',
-['ceph',
-'cinder',
+['cinder',
'cloudkitty',
'designate',
'glance',

View File

@@ -22,10 +22,6 @@ provisioner:
kolla_extra_ceilometer: |
[extra-ceilometer.conf]
foo=bar
-kolla_enable_ceph: true
-kolla_extra_ceph: |
-[extra-ceph.conf]
-foo=bar
kolla_enable_cinder: true
kolla_extra_cinder: |
[extra-cinder.conf]

View File

@@ -27,8 +27,7 @@ testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
@pytest.mark.parametrize(
'path',
-['ceph',
-'cinder',
+['cinder',
'cloudkitty',
'designate',
'fluentd/filter',
@@ -61,8 +60,7 @@ def test_service_config_directory(host, path):
@pytest.mark.parametrize(
'path',
-['ceph.conf',
-'cinder.conf',
+['cinder.conf',
'cloudkitty.conf',
'designate.conf',
'galera.cnf',

View File

@@ -15,7 +15,6 @@
with_items:
- { src: blazar.conf.j2, dest: blazar.conf, enabled: "{{ kolla_enable_blazar }}" }
- { src: ceilometer.conf.j2, dest: ceilometer.conf, enabled: "{{ kolla_enable_ceilometer }}" }
-- { src: ceph.conf.j2, dest: ceph.conf, enabled: "{{ kolla_enable_ceph }}" }
- { src: cinder.conf.j2, dest: cinder.conf, enabled: "{{ kolla_enable_cinder }}" }
- { src: cloudkitty.conf.j2, dest: cloudkitty.conf, enabled: "{{ kolla_enable_cloudkitty }}" }
- { src: designate.conf.j2, dest: designate.conf, enabled: "{{ kolla_enable_designate }}" }

View File

@@ -1,9 +0,0 @@
# {{ ansible_managed }}
{% if kolla_extra_ceph %}
#######################
# Extra configuration
#######################
{{ kolla_extra_ceph }}
{% endif %}
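For illustration, the removed template renders to a commented header plus the user-provided extra configuration. A minimal sketch rendering it with the Jinja2 library, reusing the ``foo=bar`` extra config from the molecule test data above:

.. code-block:: python

   # Minimal sketch rendering the removed ceph.conf.j2 with Jinja2.
   from jinja2 import Template

   template = Template(
       "# {{ ansible_managed }}\n"
       "{% if kolla_extra_ceph %}\n"
       "#######################\n"
       "# Extra configuration\n"
       "#######################\n"
       "{{ kolla_extra_ceph }}\n"
       "{% endif %}"
   )
   print(template.render(ansible_managed="Ansible managed",
                         kolla_extra_ceph="[extra-ceph.conf]\nfoo=bar"))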

View File

@@ -24,11 +24,6 @@ kolla_openstack_custom_config:
dest: "{{ kolla_node_custom_config_path }}/ceilometer"
patterns: "*"
enabled: "{{ kolla_enable_ceilometer }}"
-# Ceph.
-- src: "{{ kolla_extra_config_path }}/ceph"
-dest: "{{ kolla_node_custom_config_path }}/ceph"
-patterns: "*"
-enabled: "{{ kolla_enable_ceph }}"
# Cinder.
- src: "{{ kolla_extra_config_path }}/cinder"
dest: "{{ kolla_node_custom_config_path }}/cinder"

View File

@@ -674,32 +674,3 @@ Ansible's containers do), but may be necessary when building images.
Docker's live restore feature can be configured via
``docker_daemon_live_restore``, although it is disabled by default due to
issues observed.
-Ceph Block Devices
-==================
-*tags:*
-| ``kolla-ceph``
-If using Kolla Ansible to deploy Ceph, some preparation of block devices is
-required. The list of disks to configure for use by Ceph is specified via
-``ceph_disks``. This is mapped to the following variables:
-* ``compute_ceph_disks``
-* ``controller_ceph_disks``
-* ``storage_ceph_disks``
-The format of the variable is a list of dict/mapping objects. Each mapping
-should contain an ``osd`` item that defines the full path to a block device to
-use for data. Optionally, each mapping may contain a ``journal`` item that
-specifies the full path to a block device to use for journal data.
-The following example defines two OSDs for use by controllers, one of which has
-a journal:
-.. code-block:: yaml
-   :caption: ``controller.yml``
-
-   controller_ceph_disks:
-     - osd: /dev/sdb
-     - osd: /dev/sdc
-       journal: /dev/sdd
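To make the removed format concrete, a short validator sketch for a ``ceph_disks`` style list; this helper is illustrative only and was never part of Kayobe:

.. code-block:: python

   # Illustrative validator for the ceph_disks format documented above.
   def validate_ceph_disks(ceph_disks):
       for disk in ceph_disks:
           if "osd" not in disk:
               raise ValueError("each entry needs an 'osd' block device path")
           unknown = set(disk) - {"osd", "journal"}  # journal is optional
           if unknown:
               raise ValueError("unexpected keys: %s" % unknown)

   validate_ceph_disks([
       {"osd": "/dev/sdb"},
       {"osd": "/dev/sdc", "journal": "/dev/sdd"},
   ])
   print("ceph_disks list is well formed")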

View File

@@ -427,8 +427,6 @@ which files are supported.
``blazar/*`` Extended Blazar configuration.
``ceilometer.conf`` Ceilometer configuration.
``ceilometer/*`` Extended Ceilometer configuration.
-``ceph.conf`` Ceph configuration.
-``ceph/*`` Extended Ceph configuration.
``cinder.conf`` Cinder configuration.
``cinder/*`` Extended Cinder configuration.
``cloudkitty.conf`` CloudKitty configuration.

View File

@@ -506,12 +506,6 @@ Storage network (``storage_net_name``)
Name of the network used to carry storage data traffic.
Storage management network (``storage_mgmt_net_name``)
Name of the network used to carry storage management traffic.
-Ceph storage network (``ceph_storage_net_name``)
-Name of the network used to carry Ceph storage data traffic.
-Defaults to the storage network (``storage_net_name``).
-Ceph storage management network (``ceph_storage_mgmt_net_name``)
-Name of the network used to carry storage management traffic.
-Defaults to the storage management network (``storage_mgmt_net_name``)
Swift storage network (``swift_storage_net_name``)
Name of the network used to carry Swift storage data traffic.
Defaults to the storage network (``storage_net_name``).
@@ -544,8 +538,6 @@ To configure network roles in a system with two networks, ``example1`` and
external_net_name: example2
storage_net_name: example2
storage_mgmt_net_name: example2
-ceph_storage_net_name: example2
-ceph_storage_mgmt_net_name: example2
swift_storage_net_name: example2
swift_replication_net_name: example2
inspection_net_name: example2
@@ -789,8 +781,8 @@ By default, the storage hosts are attached to the following networks:
* storage network
* storage management network
-In addition, if Ceph or Swift is enabled, they can also be attached to the Ceph and Swift
-mangagment and replication networks.
+In addition, if Swift is enabled, they can also be attached to the Swift
+management and replication networks.
Virtualised Compute Hosts
-------------------------

View File

@@ -59,7 +59,6 @@ hosts in the ``monitoring`` group.
``ansible_user`` Username with which to access the host via SSH.
``bootstrap_user`` Username with which to access the host before
``ansible_user`` is configured.
-``ceph_disks`` List of Ceph disks.
``lvm_groups`` List of LVM volume groups to configure. See
`mrlesmithjr.manage-lvm role
<https://galaxy.ansible.com/mrlesmithjr/manage-lvm/>`_
@@ -189,7 +188,6 @@ providing the necessary variables for a control plane host.
ansible_user: "{{ kayobe_ansible_user }}"
bootstrap_user: "{{ controller_bootstrap_user }}"
-ceph_disks: "{{ controller_ceph_disks }}"
lvm_groups: "{{ controller_lvm_groups }}"
mdadm_arrays: "{{ controller_mdadm_arrays }}"
network_interfaces: "{{ controller_network_host_network_interfaces }}"

View File

@@ -1,11 +0,0 @@
---
###############################################################################
# OpenStack Ceph configuration.
# Ansible host pattern matching hosts on which Ceph storage services
# are deployed. The default is to use hosts in the 'storage' group.
#ceph_hosts:
###############################################################################
# Dummy variable to allow Ansible to accept this file.
workaround_ansible_issue_8743: yes

View File

@@ -89,16 +89,6 @@
# Filesystem for docker volumes LVM backing volume. ext4 allows for shrinking.
#compute_lvm_group_data_lv_docker_volumes_fs:
-###############################################################################
-# Compute node Ceph configuration.
-# List of Ceph disks.
-# The format is a list of dict like :
-# - { osd: "/dev/sdb", journal: "/dev/sdc" }
-# - { osd: "/dev/sdd" }
-# Journal variable is not mandatory.
-#compute_ceph_disks:
###############################################################################
# Compute node sysctl configuration.

View File

@@ -92,16 +92,6 @@
# Filesystem for docker volumes LVM backing volume. ext4 allows for shrinking.
#controller_lvm_group_data_lv_docker_volumes_fs:
-###############################################################################
-# Controller node Ceph configuration.
-# List of Ceph disks.
-# The format is a list of dict like :
-# - { osd: "/dev/sdb", journal: "/dev/sdc" }
-# - { osd: "/dev/sdd" }
-# Journal variable is not mandatory.
-#controller_ceph_disks:
###############################################################################
# Controller node sysctl configuration.

View File

@@ -22,11 +22,6 @@
# storage_net_bridge_ports:
# storage_net_bond_slaves:
-# Ceph storage network IP information.
-# ceph_storage_net_interface:
-# ceph_storage_net_bridge_ports:
-# ceph_storage_net_bond_slaves:
###############################################################################
# Dummy variable to allow Ansible to accept this file.
workaround_ansible_issue_8743: yes

View File

@@ -27,11 +27,6 @@
# storage_net_bridge_ports:
# storage_net_bond_slaves:
-# Storage network IP information.
-# ceph_storage_net_interface:
-# ceph_storage_net_bridge_ports:
-# ceph_storage_net_bond_slaves:
# Storage management network IP information.
# swift_storage_net_interface:
# swift_storage_net_bridge_ports:
# swift_storage_net_bond_slaves:

View File

@@ -22,16 +22,6 @@
# storage_mgmt_net_bridge_ports:
# storage_mgmt_net_bond_slaves:
-# Ceph storage network IP information.
-# ceph_storage_net_interface:
-# ceph_storage_net_bridge_ports:
-# ceph_storage_net_bond_slaves:
-# Ceph storage management network IP information.
-# ceph_storage_mgmt_net_interface:
-# ceph_storage_mgmt_net_bridge_ports:
-# ceph_storage_mgmt_net_bond_slaves:
# Swift storage network IP information.
# swift_storage_net_interface:
# swift_storage_net_bridge_ports:

View File

@@ -181,10 +181,6 @@
#kolla_enable_cadf_notifications:
#kolla_enable_ceilometer:
#kolla_enable_central_logging:
-#kolla_enable_ceph:
-#kolla_enable_ceph_mds:
-#kolla_enable_ceph_nfs:
-#kolla_enable_ceph_rgw:
# The chrony container is disabled by default on CentOS 7 because we enable an
# NTP daemon on the host. On CentOS 8 the chrony container is enabled by
# default because the NTP daemon is not supported. Setting this to true will

View File

@@ -45,12 +45,6 @@
# Name of the network used to carry storage management traffic.
#storage_mgmt_net_name:
-# Name of the network used to carry ceph storage data traffic.
-#ceph_storage_net_name:
-# Name of the network used to carry ceph storage management traffic.
-#ceph_storage_mgmt_net_name:
# Name of the network used to carry swift storage data traffic.
#swift_storage_net_name:

View File

@@ -18,11 +18,6 @@
# List of extra networks to which storage nodes are attached.
#storage_extra_network_interfaces:
-# Whether this host requires access to Ceph networks.
-#storage_needs_ceph_network:
-#storage_needs_ceph_mgmt_network:
# Whether this host requires access to Swift networks.
#storage_needs_swift_network:
@@ -99,16 +94,6 @@
# Filesystem for docker volumes LVM backing volume. ext4 allows for shrinking.
#storage_lvm_group_data_lv_docker_volumes_fs:
-###############################################################################
-# Storage node Ceph configuration.
-# List of Ceph disks.
-# The format is a list of dict like :
-# - { osd: "/dev/sdb", journal: "/dev/sdc" }
-# - { osd: "/dev/sdd" }
-# Journal variable is not mandatory.
-#storage_ceph_disks:
###############################################################################
# Storage node sysctl configuration.

View File

@@ -977,7 +977,7 @@ class OvercloudHostConfigure(KollaAnsibleMixin, KayobeAnsibleMixin, VaultMixin,
# Further kayobe playbooks.
playbooks = _build_playbook_list(
"pip", "kolla-target-venv", "kolla-host",
-"docker", "ceph-block-devices", "swift-block-devices")
+"docker", "swift-block-devices")
self.run_kayobe_playbooks(parsed_args, playbooks,
extra_vars=extra_vars, limit="overcloud")

View File

@@ -1156,8 +1156,6 @@ class TestCase(unittest.TestCase):
"ansible", "kolla-target-venv.yml"),
utils.get_data_files_path("ansible", "kolla-host.yml"),
utils.get_data_files_path("ansible", "docker.yml"),
-utils.get_data_files_path(
-"ansible", "ceph-block-devices.yml"),
utils.get_data_files_path(
"ansible", "swift-block-devices.yml"),
],
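Read together with the CLI hunk above, the test expectations suggest that ``_build_playbook_list`` maps bare playbook names to ``.yml`` paths under the Kayobe data files. A minimal sketch consistent with those expectations; the data files root here is a hypothetical stand-in for ``kayobe.utils.get_data_files_path``:

.. code-block:: python

   # Minimal sketch consistent with the test expectations above.
   import os

   DATA_FILES_ROOT = "/opt/kayobe/share/kayobe"  # hypothetical root

   def get_data_files_path(*paths):
       return os.path.join(DATA_FILES_ROOT, *paths)

   def _build_playbook_list(*playbooks):
       return [get_data_files_path("ansible", p + ".yml") for p in playbooks]

   print(_build_playbook_list("docker", "swift-block-devices"))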

View File

@@ -0,0 +1,8 @@
---
upgrade:
- |
Drops support for Kolla Ceph deployment.
Kayobe follows the upstream decision of Kolla and Kolla Ansible.
Please use other means of Ceph deployment.
Please note that Ceph backends will still work when using an
external Ceph cluster.