CEPH support for 2-node configuration

In order to enable OpenStack's helm charts on StarlingX we need
distributed persistent storage for Kubernetes that leverages
our existing configurations.

Changes made:
- allow CEPH to be configured in a 2-node setup
  with a single floating monitor.
- floating monitor is managed by SM.
- the CEPH monitor filesystem is DRBD replicated between
  the two controller nodes
- add ceph crushmap for two node setup; both controllers are
  in the same group and redundancy is created between the
  two nodes
- only replication 2 is supported

Change-Id: Ic97b9fafa752a40befe395be2cafd3096010cc5b
Co-Authored-By: Stefan Dinescu <stefan.dinescu@windriver.com>
Depends-On: I8f9ea4798070e08171ad73da39821bc20b7af231
Story: 2002844
Task: 26878
Signed-off-by: Stefan Dinescu <stefan.dinescu@windriver.com>
This commit is contained in:
Ovidiu Poncea
2018-11-15 11:44:37 +00:00
committed by Stefan Dinescu
parent 977112e99e
commit 4b004e1d49
17 changed files with 383 additions and 103 deletions

View File

@@ -25,7 +25,6 @@ class openstack::cinder::params (
$initial_cinder_lvm_config_flag = "${::platform::params::config_path}/.initial_cinder_lvm_config_complete",
$initial_cinder_ceph_config_flag = "${::platform::params::config_path}/.initial_cinder_ceph_config_complete",
$node_cinder_lvm_config_flag = '/etc/platform/.node_cinder_lvm_config_complete',
$node_cinder_ceph_config_flag = '/etc/platform/.node_cinder_ceph_config_complete',
) {
$cinder_disk = regsubst($cinder_device, '-part\d+$', '')
@@ -75,16 +74,8 @@ class openstack::cinder::params (
} else {
$is_initial_cinder_ceph = false
}
# Check if we should configure/reconfigure cinder LVM for this node.
# True in case of node reinstalls etc.
if str2bool($::is_node_cinder_ceph_config) {
$is_node_cinder_ceph = true
} else {
$is_node_cinder_ceph = false
}
} else {
$is_initial_cinder_ceph = false
$is_node_cinder_ceph = false
}
# Cinder needs to be running on initial configuration of either Ceph or LVM
@@ -727,12 +718,6 @@ class openstack::cinder::post
}
}
if $is_node_cinder_ceph {
file { $node_cinder_ceph_config_flag:
ensure => present
}
}
# cinder-api needs to be running in order to apply the cinder manifest,
# however, it needs to be stopped/disabled to allow SM to manage the service.
# To allow for the transition it must be explicitly stopped. Once puppet

View File

@@ -0,0 +1,7 @@
# Fact "is_node_ceph_configured": true when the per-node Ceph
# configuration flag file has been written on this host.
Facter.add("is_node_ceph_configured") do
  setcode { File.exist?('/etc/platform/.node_ceph_configured') }
end

View File

@@ -1,7 +0,0 @@
# Fact "is_node_cinder_ceph_config": true while the cinder Ceph
# completion flag file is absent, i.e. this node still needs
# its cinder Ceph configuration applied.
Facter.add("is_node_cinder_ceph_config") do
  setcode { !File.exist?('/etc/platform/.node_cinder_ceph_config_complete') }
end

View File

@@ -8,6 +8,9 @@ class platform::ceph::params(
$mon_fs_type = 'ext4',
$mon_fs_options = ' ',
$mon_mountpoint = '/var/lib/ceph/mon',
$floating_mon_host = undef,
$floating_mon_ip = undef,
$floating_mon_addr = undef,
$mon_0_host = undef,
$mon_0_ip = undef,
$mon_0_addr = undef,
@@ -35,6 +38,7 @@ class platform::ceph::params(
$restapi_public_addr = undef,
$configure_ceph_mon_info = false,
$ceph_config_ready_path = '/var/run/.ceph_started',
$node_ceph_configured_flag = '/etc/platform/.node_ceph_configured',
) { }
@@ -44,10 +48,17 @@ class platform::ceph
$system_mode = $::platform::params::system_mode
$system_type = $::platform::params::system_type
if $service_enabled or $configure_ceph_mon_info {
if $system_type == 'All-in-one' and 'simplex' in $system_mode {
# Allow 1 node configurations to work with a single monitor
$mon_initial_members = $mon_0_host
# Set the minimum set of monitors that form a valid cluster
if $system_type == 'All-in-one' {
if $system_mode == 'simplex' {
# 1 node configuration, a single monitor is available
$mon_initial_members = $mon_0_host
} else {
# 2 node configuration, we have a floating monitor
$mon_initial_members = $floating_mon_host
}
} else {
# Multinode, any 2 monitors form a cluster
$mon_initial_members = undef
}
@@ -58,21 +69,31 @@ class platform::ceph
} ->
ceph_config {
"mon/mon clock drift allowed": value => ".1";
"mon.${mon_0_host}/host": value => $mon_0_host;
"mon.${mon_0_host}/mon_addr": value => $mon_0_addr;
"client.restapi/public_addr": value => $restapi_public_addr;
}
if $system_type == 'All-in-one' {
# 1 and 2 node configurations have a single monitor
if 'duplex' in $system_mode {
# Floating monitor, running on active controller.
Class['::ceph'] ->
ceph_config {
"mon.${mon_1_host}/host": value => $mon_1_host;
"mon.${mon_1_host}/mon_addr": value => $mon_1_addr;
"mon.${floating_mon_host}/host": value => $floating_mon_host;
"mon.${floating_mon_host}/mon_addr": value => $floating_mon_addr;
}
} else {
# Simplex case, a single monitor binded to the controller.
Class['::ceph'] ->
ceph_config {
"mon.${mon_0_host}/host": value => $mon_0_host;
"mon.${mon_0_host}/mon_addr": value => $mon_0_addr;
}
}
} else {
# Multinode has 3 monitors.
Class['::ceph'] ->
ceph_config {
"mon.${mon_0_host}/host": value => $mon_0_host;
"mon.${mon_0_host}/mon_addr": value => $mon_0_addr;
"mon.${mon_1_host}/host": value => $mon_1_host;
"mon.${mon_1_host}/mon_addr": value => $mon_1_addr;
"mon.${mon_2_host}/host": value => $mon_2_host;
@@ -86,44 +107,79 @@ class platform::ceph
}
class platform::ceph::post {
include ::platform::ceph::params
class platform::ceph::post
inherits ::platform::ceph::params {
# Enable ceph process recovery after all configuration is done
file { $::platform::ceph::params::ceph_config_ready_path:
file { $ceph_config_ready_path:
ensure => present,
content => '',
owner => 'root',
group => 'root',
mode => '0644',
}
if $service_enabled {
file { $node_ceph_configured_flag:
ensure => present
}
}
}
class platform::ceph::monitor
inherits ::platform::ceph::params {
$system_mode = $::platform::params::system_mode
$system_type = $::platform::params::system_type
if $service_enabled {
if $system_type == 'All-in-one' and 'duplex' in $system_mode {
if str2bool($::is_controller_active) {
# Ceph mon is configured on a DRBD partition, on the active controller,
# when 'ceph' storage backend is added in sysinv.
# Then SM takes care of starting ceph after manifests are applied.
$configure_ceph_mon = true
} else {
$configure_ceph_mon = false
}
} else {
# Simplex, multinode. Ceph is pmon managed.
$configure_ceph_mon = true
}
}
else {
$configure_ceph_mon = false
}
if $configure_ceph_mon {
file { '/var/lib/ceph':
ensure => 'directory',
owner => 'root',
group => 'root',
mode => '0755',
} ->
}
platform::filesystem { $mon_lv_name:
lv_name => $mon_lv_name,
lv_size => $mon_lv_size,
mountpoint => $mon_mountpoint,
fs_type => $mon_fs_type,
fs_options => $mon_fs_options,
} -> Class['::ceph']
if $system_type == 'All-in-one' and 'duplex' in $system_mode {
# ensure DRBD config is complete before enabling the ceph monitor
Drbd::Resource <| |> -> Class['::ceph']
} else {
File['/var/lib/ceph'] ->
platform::filesystem { $mon_lv_name:
lv_name => $mon_lv_name,
lv_size => $mon_lv_size,
mountpoint => $mon_mountpoint,
fs_type => $mon_fs_type,
fs_options => $mon_fs_options,
} -> Class['::ceph']
file { "/etc/pmon.d/ceph.conf":
ensure => link,
target => "/etc/ceph/ceph.conf.pmon",
owner => 'root',
group => 'root',
mode => '0640',
file { "/etc/pmon.d/ceph.conf":
ensure => link,
target => "/etc/ceph/ceph.conf.pmon",
owner => 'root',
group => 'root',
mode => '0640',
}
}
# ensure configuration is complete before creating monitors
@@ -131,9 +187,7 @@ class platform::ceph::monitor
# Start service on AIO SX and on active controller
# to allow in-service configuration.
$system_mode = $::platform::params::system_mode
$system_type = $::platform::params::system_type
if str2bool($::is_controller_active) or ($system_type == 'All-in-one' and $system_mode == 'simplex') {
if str2bool($::is_controller_active) or $system_type == 'All-in-one' {
$service_ensure = "running"
} else {
$service_ensure = "stopped"
@@ -146,19 +200,53 @@ class platform::ceph::monitor
service_ensure => $service_ensure,
}
if $::hostname == $mon_0_host {
ceph::mon { $mon_0_host:
public_addr => $mon_0_ip,
if $system_type == 'All-in-one' and 'duplex' in $system_mode {
ceph::mon { $floating_mon_host:
public_addr => $floating_mon_ip,
}
}
elsif $::hostname == $mon_1_host {
ceph::mon { $mon_1_host:
public_addr => $mon_1_ip,
if (str2bool($::is_controller_active) and
str2bool($::is_initial_cinder_ceph_config) and
!str2bool($::is_standalone_controller)) {
# When we configure ceph after both controllers are active,
# we need to stop the monitor, unmount the monitor partition
# and set the drbd role to secondary, so that the handoff to
# SM is done properly once we swact to the standby controller.
# TODO: Remove this once SM supports in-service config reload.
Ceph::Mon <| |> ->
exec { "Stop Ceph monitor":
command =>"/etc/init.d/ceph stop mon",
onlyif => "/etc/init.d/ceph status mon",
logoutput => true,
} ->
exec { "umount ceph-mon partition":
command => "umount $mon_mountpoint",
onlyif => "mount | grep -q $mon_mountpoint",
logoutput => true,
} ->
exec { 'Set cephmon secondary':
command => "drbdadm secondary drbd-cephmon",
unless => "drbdadm role drbd-cephmon | egrep '^Secondary'",
logoutput => true,
}
}
} else {
if $::hostname == $mon_0_host {
ceph::mon { $mon_0_host:
public_addr => $mon_0_ip,
}
}
}
elsif $::hostname == $mon_2_host {
ceph::mon { $mon_2_host:
public_addr => $mon_2_ip,
elsif $::hostname == $mon_1_host {
ceph::mon { $mon_1_host:
public_addr => $mon_1_ip,
}
}
elsif $::hostname == $mon_2_host {
ceph::mon { $mon_2_host:
public_addr => $mon_2_ip,
}
}
}
}

View File

@@ -394,6 +394,64 @@ class platform::drbd::dockerdistribution ()
}
}
class platform::drbd::cephmon::params (
$device = '/dev/drbd9',
$lv_name = 'ceph-mon-lv',
$mountpoint = '/var/lib/ceph/mon',
$port = '7788',
$resource_name = 'drbd-cephmon',
$vg_name = 'cgts-vg',
) {}
class platform::drbd::cephmon ()
inherits ::platform::drbd::cephmon::params {
include ::platform::ceph::params
$system_mode = $::platform::params::system_mode
$system_type = $::platform::params::system_type
#TODO: This will change once we remove the native cinder service
if (str2bool($::is_initial_config_primary) or
(str2bool($::is_controller_active) and str2bool($::is_initial_cinder_ceph_config))
){
# Active controller, first time configuration.
$drbd_primary = true
$drbd_initial = true
$drbd_automount = true
} elsif str2bool($::is_standalone_controller){
# Active standalone controller, successive reboots.
$drbd_primary = true
$drbd_initial = undef
$drbd_automount = true
} else {
# Node unlock, reboot or standby configuration
# Do not mount ceph
$drbd_primary = undef
$drbd_initial = undef
$drbd_automount = undef
}
if ($::platform::ceph::params::service_enabled and
$system_type == 'All-in-one' and 'duplex' in $system_mode) {
platform::drbd::filesystem { $resource_name:
vg_name => $vg_name,
lv_name => $lv_name,
lv_size => $::platform::ceph::params::mon_lv_size,
port => $port,
device => $device,
mountpoint => $mountpoint,
resync_after => undef,
manage_override => true,
ha_primary_override => $drbd_primary,
initial_setup_override => $drbd_initial,
automount_override => $drbd_automount,
} -> Class['::ceph']
}
}
class platform::drbd(
$service_enable = false,
$service_ensure = 'stopped',
@@ -427,6 +485,7 @@ class platform::drbd(
include ::platform::drbd::patch_vault
include ::platform::drbd::etcd
include ::platform::drbd::dockerdistribution
include ::platform::drbd::cephmon
# network changes need to be applied prior to DRBD resources
Anchor['platform::networking'] ->
@@ -498,3 +557,8 @@ class platform::drbd::dockerdistribution::runtime {
include ::platform::drbd::params
include ::platform::drbd::dockerdistribution
}
class platform::drbd::cephmon::runtime {
include ::platform::drbd::params
include ::platform::drbd::cephmon
}

View File

@@ -13,6 +13,7 @@ class platform::sm
$region_config = $::platform::params::region_config
$region_2_name = $::platform::params::region_2_name
$system_mode = $::platform::params::system_mode
$system_type = $::platform::params::system_type
include ::platform::network::pxeboot::params
if $::platform::network::pxeboot::params::interface_name {
@@ -79,6 +80,11 @@ class platform::sm
$dockerdistribution_fs_device = $::platform::drbd::dockerdistribution::params::device
$dockerdistribution_fs_directory = $::platform::drbd::dockerdistribution::params::mountpoint
include ::platform::drbd::cephmon::params
$cephmon_drbd_resource = $::platform::drbd::cephmon::params::resource_name
$cephmon_fs_device = $::platform::drbd::cephmon::params::device
$cephmon_fs_directory = $::platform::drbd::cephmon::params::mountpoint
include ::openstack::keystone::params
$keystone_api_version = $::openstack::keystone::params::api_version
$keystone_identity_uri = $::openstack::keystone::params::identity_uri
@@ -1376,7 +1382,46 @@ class platform::sm
}
if $ceph_configured {
# Ceph-Rest-API
if $system_type == 'All-in-one' and 'duplex' in $system_mode {
exec { 'Provision Cephmon FS in SM (service-group-member cephmon-fs)':
command => "sm-provision service-group-member controller-services cephmon-fs",
} ->
exec { 'Provision Cephmon FS in SM (service cephmon-fs)':
command => "sm-provision service cephmon-fs",
} ->
exec { 'Provision Cephmon DRBD in SM (service-group-member drbd-cephmon':
command => "sm-provision service-group-member controller-services drbd-cephmon",
} ->
exec { 'Provision Cephmon DRBD in SM (service drbd-cephmon)':
command => "sm-provision service drbd-cephmon",
} ->
exec { 'Configure Cephmon DRBD':
command => "sm-configure service_instance drbd-cephmon drbd-cephmon:${hostunit} \"drbd_resource=${cephmon_drbd_resource}\"",
} ->
exec { 'Configure Cephmon FileSystem':
command => "sm-configure service_instance cephmon-fs cephmon-fs \"device=${cephmon_fs_device},directory=${cephmon_fs_directory},options=noatime,nodiratime,fstype=ext4,check_level=20\"",
} ->
exec { 'Configure cephmon':
command => "sm-configure service_instance ceph-mon ceph-mon \"\"",
} ->
exec { 'Provision cephmon (service-group-member)':
command => "sm-provision service-group-member controller-services ceph-mon",
} ->
exec { 'Provision cephmon (service)':
command => "sm-provision service ceph-mon",
} ->
exec { 'Configure ceph-osd':
command => "sm-configure service_instance ceph-osd ceph-osd \"\"",
} ->
exec { 'Provision ceph-osd (service-group-member)':
command => "sm-provision service-group-member storage-services ceph-osd",
} ->
exec { 'Provision ceph-osd (service)':
command => "sm-provision service ceph-osd",
}
}
# Ceph-Rest-Api
exec { 'Provision Ceph-Rest-Api (service-domain-member storage-services)':
command => "sm-provision service-domain-member controller storage-services",
} ->