From 3422f3c244cd80c7c207bbde9dcc706214c2f1dc Mon Sep 17 00:00:00 2001
From: Guillaume Thouvenin
Date: Mon, 9 Mar 2015 15:37:49 +0100
Subject: [PATCH] Create disk partitions on Ubuntu.

Now we create a partition /dev/sdb1 instead of using the whole disk.
Also, as we need to reboot the node after modifying the disk partitions,
we moved from fpb 1.0.0 to fpb 2.0.0.

Change-Id: I8b5bdde546858d1e4ad9fc30719415453a7268ab
---
 .../puppet/manifests/setup_disks.pp                | 10 ++--
 .../puppet/manifests/setup_esdir.pp                | 21 +++++++
 .../disk_management/files/add_partition.sh         | 41 +++++++++++++
 .../files/add_partition_on_raid.sh                 | 39 ------------
 .../lib/facter/unallocated_pvs.rb                  | 26 ++++----
 .../modules/disk_management/manifests/init.pp      | 60 ++++---------------
 .../disk_management/manifests/params.pp            |  5 ++
 .../disk_management/manifests/partition.pp         | 32 ++--------
 environment_config.yaml                            |  7 +--
 metadata.yaml                                      | 11 +++-
 tasks.yaml                                         | 14 +++++
 11 files changed, 130 insertions(+), 136 deletions(-)
 create mode 100644 deployment_scripts/puppet/manifests/setup_esdir.pp
 create mode 100644 deployment_scripts/puppet/modules/disk_management/files/add_partition.sh
 delete mode 100644 deployment_scripts/puppet/modules/disk_management/files/add_partition_on_raid.sh
 create mode 100644 deployment_scripts/puppet/modules/disk_management/manifests/params.pp

diff --git a/deployment_scripts/puppet/manifests/setup_disks.pp b/deployment_scripts/puppet/manifests/setup_disks.pp
index 0b62736..79f11d1 100644
--- a/deployment_scripts/puppet/manifests/setup_disks.pp
+++ b/deployment_scripts/puppet/manifests/setup_disks.pp
@@ -3,11 +3,11 @@ $fuel_settings = parseyaml(file('/etc/astute.yaml'))
 if $fuel_settings['elasticsearch_kibana']['node_name'] == $fuel_settings['user_node_name'] {
 
   $disks = regsubst($fuel_settings['elasticsearch_kibana']['dedicated_disks'], '([a-z]+)', '/dev/\1', 'G')
+  $array_disks = split($disks, ',')
 
-  class { 'disk_management':
-    disks     => split($disks, ','),
-    directory => $fuel_settings['elasticsearch_kibana']['data_dir'],
-    lv_name   => "es",
-    vg_name   => "data",
+  class { 'disk_management': }
+
+  disk_management::partition { $array_disks:
+    require => Class['disk_management']
   }
 }
diff --git a/deployment_scripts/puppet/manifests/setup_esdir.pp b/deployment_scripts/puppet/manifests/setup_esdir.pp
new file mode 100644
index 0000000..1660877
--- /dev/null
+++ b/deployment_scripts/puppet/manifests/setup_esdir.pp
@@ -0,0 +1,21 @@
+$fuel_settings = parseyaml(file('/etc/astute.yaml'))
+
+if $fuel_settings['elasticsearch_kibana']['node_name'] == $fuel_settings['user_node_name'] {
+
+  $directory = $fuel_settings['elasticsearch_kibana']['data_dir']
+  $disks = split($::unallocated_pvs, ',')
+
+  validate_array($disks)
+
+  if empty($disks) {
+    file { $directory:
+      ensure => "directory",
+    }
+  } else {
+    disk_management::lvm_fs { $directory:
+      disks   => $disks,
+      lv_name => "es",
+      vg_name => "data",
+    }
+  }
+}
diff --git a/deployment_scripts/puppet/modules/disk_management/files/add_partition.sh b/deployment_scripts/puppet/modules/disk_management/files/add_partition.sh
new file mode 100644
index 0000000..529331d
--- /dev/null
+++ b/deployment_scripts/puppet/modules/disk_management/files/add_partition.sh
@@ -0,0 +1,41 @@
+#!/bin/bash
+
+# Use this script if you want to allocate a new partition.
+# Ubuntu and CentOS are not configured the same way by Fuel: CentOS sets up
+# RAID 1 with /boot on all disks, so we need to deal with that.
+
+# $1 -> The disk (example: "/dev/sdb")
+
+set -eux
+
+DISK=$1
+PARTED="$(which parted 2>/dev/null) -s -m"
+
+if ${PARTED} ${DISK} p | grep -q "unrecognised disk label"; then
+  # We need to create a new label
+  ${PARTED} ${DISK} mklabel gpt
+fi
+
+# We take the free space at the end of the disk.
+FREESPACE=$(${PARTED} ${DISK} unit s p free | grep "free" | tail -1 | awk -F: '{print $2, $3}')
+if [[ -z "${FREESPACE}" ]]; then
+  echo "Failed to find free space"
+  exit 1
+fi
+
+# If you create a partition on a mounted disk, this command returns 1,
+# so we need a different way to catch the error.
+if ${PARTED} ${DISK} unit s mkpart primary ${FREESPACE} | grep -q "^Error"; then
+  echo "Failed to create a new primary partition"
+  exit 1
+fi
+
+# Get the ID of the partition and set its LVM flag.
+# As with partition creation, if you run this command on a mounted FS the
+# kernel fails to re-read the partition table and the command returns 1
+# even in case of success.
+PARTID=$(${PARTED} ${DISK} p | tail -1 | awk -F: {'print $1'})
+if ${PARTED} ${DISK} set ${PARTID} lvm on | grep -q "^Error"; then
+  echo "Failed to set the lvm flag on partition ${PARTID}."
+  exit 1
+fi
diff --git a/deployment_scripts/puppet/modules/disk_management/files/add_partition_on_raid.sh b/deployment_scripts/puppet/modules/disk_management/files/add_partition_on_raid.sh
deleted file mode 100644
index e10fdd4..0000000
--- a/deployment_scripts/puppet/modules/disk_management/files/add_partition_on_raid.sh
+++ /dev/null
@@ -1,39 +0,0 @@
-#!/bin/bash
-
-# Use this script if you want to allocate a new partition that is already used
-# in RAID. It is the case for example with the current deployment of CentOS.
-
-# $1 is the disk (for example: /dev/sdc)
-# $2 is the raid : default is "/dev/md0"
-
-set -eux
-
-DISK=$1
-RAID=${2:-/dev/md0}
-
-MDADM=$(which mdadm 2>/dev/null)
-PARTED=$(which parted 2>/dev/null)
-PARTPROBE=$(which partprobe 2>/dev/null)
-
-function add_new_partition {
-    FREESPACE=$(${PARTED} "$1" unit s p free | grep "Free Space" | awk '{print $1, $2}')
-    if [[ -z "${FREESPACE}" ]]
-    then
-        echo "Failed to find free space"
-        exit 1
-    fi
-
-    ${PARTED} -s -- $1 unit s mkpart primary ${FREESPACE} &> /dev/null
-}
-
-# Get the partition involved into RAID.
-PARTITION=$(${MDADM} -D ${RAID} | grep "active" | grep ${DISK} | awk '{print $7}')
-
-# Remove the partition from RAID.
-$MDADM $RAID --fail $PARTITION --remove $PARTITION &>/dev/null
-
-# Create a new partition
-add_new_partition $DISK
-
-# Add the partition that belongs to the raid.
-$MDADM --add $RAID $PARTITION
diff --git a/deployment_scripts/puppet/modules/disk_management/lib/facter/unallocated_pvs.rb b/deployment_scripts/puppet/modules/disk_management/lib/facter/unallocated_pvs.rb
index 7182afa..876d5b2 100644
--- a/deployment_scripts/puppet/modules/disk_management/lib/facter/unallocated_pvs.rb
+++ b/deployment_scripts/puppet/modules/disk_management/lib/facter/unallocated_pvs.rb
@@ -6,19 +6,21 @@ devices = Dir.entries('/sys/block/').select do |d|
   File.exist?( "/sys/block/#{ d }/device" )
 end
 
-devices.each do |device|
-  device = "/dev/#{ device }"
-  # Filter only partitions flagged as LVM
-  lvm_partitions = Facter::Util::Resolution.exec(
-    "parted -s -m #{ device } print 2>/dev/null").scan(/^(\d+):.+:lvm;$/).flatten
-  lvm_partitions.each do |x|
-    # Filter only partitions which haven't been created yet
-    pvs = Facter::Util::Resolution.exec(
-      "pvs --noheadings #{ device }#{ x } 2>/dev/null")
-    if pvs.nil? then
-      unallocated_pvs.push("#{ device }#{ x }")
+if Facter::Util::Resolution.which("parted") and Facter::Util::Resolution.which('pvs') then
+  devices.each do |device|
+    device = "/dev/#{ device }"
+    # Filter only partitions flagged as LVM
+    lvm_partitions = Facter::Util::Resolution.exec(
+      "parted -s -m #{ device } print 2>/dev/null").scan(/^(\d+):.+:lvm;$/).flatten
+    lvm_partitions.each do |x|
+      # Filter only partitions which haven't been created yet
+      pvs = Facter::Util::Resolution.exec(
+        "pvs --noheadings #{ device }#{ x } 2>/dev/null")
+      if pvs.nil? then
+        unallocated_pvs.push("#{ device }#{ x }")
+      end
     end
   end
 end
 
-Facter.add("unallocated_pvs") { setcode { unallocated_pvs } }
+Facter.add("unallocated_pvs") { setcode { unallocated_pvs.sort.join(',') } }
diff --git a/deployment_scripts/puppet/modules/disk_management/manifests/init.pp b/deployment_scripts/puppet/modules/disk_management/manifests/init.pp
index feccae6..6bdb256 100644
--- a/deployment_scripts/puppet/modules/disk_management/manifests/init.pp
+++ b/deployment_scripts/puppet/modules/disk_management/manifests/init.pp
@@ -1,53 +1,19 @@
-# == Class: disk_management
-#
-# The disk_management class will create a logical volume above the disks
-# given as parameter and mount the direcory on this volume.
-#
-# === Parameters
-#
-# [*disks*]
-#   The disks to use to create the physical volumes.
-#
-# [*directory*]
-#   The name of the directory that will be mount on created logical volumes.
-#
-# === Examples
-#
-#  class { 'disk_management':
-#    disks     => ['/dev/sdb', '/dev/sdc'],
-#    directory => "/data",
-#  }
-#
-# === Authors
-#
-# Guillaume Thouvenin regsubst($disks, '/dev/([a-z]+)', '/dev/\14', 'G'),
-    Ubuntu => $disks
+  package { 'parted':
+    ensure => installed,
   }
 
-  disk_management::partition { $disks:
+  file { $script_location:
+    ensure  => 'file',
+    source  => $puppet_source,
+    owner   => 'root',
+    group   => 'root',
+    mode    => '0700',
+    require => Package['parted'],
   }
-
-  disk_management::lvm_fs { $directory:
-    disks   => $usedisks,
-    lv_name => $lv_name,
-    vg_name => $vg_name,
-    require => Disk_management::Partition[$disks],
-  }
-
 }
diff --git a/deployment_scripts/puppet/modules/disk_management/manifests/params.pp b/deployment_scripts/puppet/modules/disk_management/manifests/params.pp
new file mode 100644
index 0000000..2f4ab39
--- /dev/null
+++ b/deployment_scripts/puppet/modules/disk_management/manifests/params.pp
@@ -0,0 +1,5 @@
+class disk_management::params {
+  $script = "add_partition.sh"
+  $puppet_source = "puppet:///modules/disk_management/${script}"
+  $script_location = "/usr/local/bin/${script}"
+}
diff --git a/deployment_scripts/puppet/modules/disk_management/manifests/partition.pp b/deployment_scripts/puppet/modules/disk_management/manifests/partition.pp
index 41485d4..aba5ea0 100644
--- a/deployment_scripts/puppet/modules/disk_management/manifests/partition.pp
+++ b/deployment_scripts/puppet/modules/disk_management/manifests/partition.pp
@@ -1,32 +1,12 @@
 define disk_management::partition {
-  $disk = $title
-  $script = "/usr/local/bin/add_partition_on_raid.sh"
-  $cmd = "${script} ${disk}"
+  include disk_management::params
 
-  case $::osfamily {
-    'RedHat': {
-      # CentOS deploys /boot into a RAID on all available disks. So in
-      # this case we need to create a new partition instead of using the whole
-      # disks as we do for Debian family.
+
+  $disk = $title
+  $script = $disk_management::params::script_location
+  $cmd = "${script} ${disk}"
 
-      package { 'parted':
-        ensure => installed,
-      }
-
-      file { $script:
-        ensure  => 'file',
-        source  => 'puppet:///modules/disk_management/add_partition_on_raid.sh',
-        owner   => 'root',
-        group   => 'root',
-        mode    => '0700',
-        require => Package['parted'],
-      }
-
-      exec { 'run_script':
-        command => $cmd,
-        require => File[$script],
-      }
-    }
+  exec { $title:
+    command => $cmd,
   }
 }
diff --git a/environment_config.yaml b/environment_config.yaml
index e3b593b..52497b6 100644
--- a/environment_config.yaml
+++ b/environment_config.yaml
@@ -8,14 +8,11 @@ attributes:
     type: "text"
 
   dedicated_disks:
-    value: 'sdb'
+    value: ''
     label: 'Dedicated disks'
-    description: 'Comma-separated list of disk devices used to store Elasticsearch data (for instance "sdb,sdc")'
+    description: 'Comma-separated list of disk devices used to store Elasticsearch data (for instance "sda,sdb"). Leave it empty to use "/"'
    weight: 20
     type: "text"
-    regex:
-      source: '\S'
-      error: "Invalid disk name"
 
   # Parameter hidden in the UI on purpose
   data_dir:
diff --git a/metadata.yaml b/metadata.yaml
index 64b6008..8c6eefd 100644
--- a/metadata.yaml
+++ b/metadata.yaml
@@ -3,11 +3,18 @@ name: elasticsearch_kibana
 # Human-readable name for your plugin
 title: The Elasticsearch-Kibana Server Plugin
 # Plugin version
-version: 6.1.0
+version: '6.1.0'
 # Description
 description: Deploy Elasticsearch server and the Kibana web interface.
 # Required fuel version
 fuel_version: ['6.1']
+# Licences
+licenses: ['Apache License Version 2.0']
+# Specify author or company name
+authors: ['Mirantis Inc.']
+# A link to the plugin homepage
+homepage: 'https://github.com/stackforge/fuel-plugin-elasticsearch-kibana'
+groups: []
 
 # The plugin is compatible with releases in the list
 releases:
@@ -23,4 +30,4 @@ releases:
     repository_path: repositories/centos
 
 # Version of plugin package
-package_version: '1.0.0'
+package_version: '2.0.0'
diff --git a/tasks.yaml b/tasks.yaml
index 304c8b4..fb94b74 100644
--- a/tasks.yaml
+++ b/tasks.yaml
@@ -14,6 +14,20 @@
     puppet_modules: puppet/modules
     timeout: 600
 
+- role: ['base-os']
+  stage: post_deployment
+  type: reboot
+  parameters:
+    timeout: 600
+
+- role: ['base-os']
+  stage: post_deployment
+  type: puppet
+  parameters:
+    puppet_manifest: puppet/manifests/setup_esdir.pp
+    puppet_modules: puppet/modules
+    timeout: 600
+
 - role: ['base-os']
   stage: post_deployment
   type: puppet
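
For reviewers: a minimal sketch of how to check the result on a deployed Elasticsearch node once this patch is applied; it is not part of the patch itself. The "data" volume group and "es" logical volume names come from setup_esdir.pp above, while the script name (check_es_disk.sh), the disk, and the data directory are placeholders for whatever was configured through the plugin settings (dedicated_disks and the hidden data_dir).

    #!/bin/bash
    # check_es_disk.sh <disk> <data_dir> -- hypothetical helper, e.g.:
    #   ./check_es_disk.sh /dev/sdb /opt/es-data
    set -eu

    DISK=$1
    DATA_DIR=$2

    # add_partition.sh should have left an LVM-flagged partition on the disk.
    parted -s -m "${DISK}" print | grep ':lvm;$'

    # setup_esdir.pp should have created the "data" VG holding the "es" LV...
    pvs --noheadings -o pv_name,vg_name | grep -w data
    lvs --noheadings -o lv_name data | grep -w es

    # ...and mounted the Elasticsearch data directory on it.
    findmnt --target "${DATA_DIR}"

Each command exits non-zero if the expected piece is missing, so the script stops at the first failed check.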