Implement blueprint: add separate role for cinder-vmware
Add separate role for cinder with vmdk backend for vmware-related clouds. Change-Id: Id09b09115b16b45e599ff417c83c05d213acc5ce Implements: blueprint cinder-vmdk-role
This commit is contained in:
parent
acd7dfb5f9
commit
ecd964da30
|
@ -52,6 +52,15 @@
|
|||
strategy:
|
||||
type: parallel
|
||||
|
||||
# Deployment group for the cinder-vmware role: nodes that run the
# cinder-volume service with the VMware VMDK backend. Deployed in
# parallel, only after the controller and cinder groups are finished.
- id: cinder-vmware
  type: group
  role: [cinder-vmware]
  requires: [controller, cinder]
  required_for: [deploy_end]
  parameters:
    strategy:
      type: parallel
|
||||
|
||||
- id: compute
|
||||
type: group
|
||||
role: [compute]
|
||||
|
|
|
@ -0,0 +1,27 @@
|
|||
notice('MODULAR: cinder-vmware.pp')

# vCenter connection settings come from Hiera (empty hash when absent).
$vcenter_hash = hiera('vcenter', {})
# TODO Now $vcenter_hash['computes'] has only one value. This condition may be
# changed in 7.0 release. After that generalization of processing may be
# required.
$cmp = $vcenter_hash['computes'][0]
$vmware_host_ip           = $cmp['vc_host']
$vmware_host_username     = $cmp['vc_user']
$vmware_host_password     = $cmp['vc_password']
$vmware_availability_zone = $cmp['availability_zone_name']
$vmware_clusters          = $cmp['vc_cluster']
$nodes_hash               = hiera('nodes', {})
$roles                    = node_roles($nodes_hash, hiera('uid'))

# Configure the VMDK cinder-volume backend only on nodes that actually
# carry the cinder-vmware role.
if (member($roles, 'cinder-vmware')) {
  class {'vmware::cinder':
    vmware_host_ip            => $vmware_host_ip,
    vmware_host_username      => $vmware_host_username,
    vmware_host_password      => $vmware_host_password,
    vmware_cluster            => $vmware_clusters,
    # FIX: these two values previously referenced $vmware_availabiilty_zone
    # (misspelled, hence undefined), so the configured availability zone
    # was silently dropped and the class defaults were used instead.
    storage_availability_zone => $vmware_availability_zone,
    default_availability_zone => $vmware_availability_zone,
    debug                     => hiera('debug', true)
  }
}
|
|
@ -0,0 +1,43 @@
|
|||
require 'test/unit'
|
||||
|
||||
# Build a snapshot of the host's process table.
# Returns a Hash keyed by PID; each value holds :pid, :ppid, :cmd and a
# :children array of child PIDs. The result is memoized in $process_tree,
# so the `ps` command is executed at most once per run.
def process_tree
  return $process_tree if $process_tree
  $process_tree = {}
  `ps haxo pid,ppid,cmd`.split("\n").each do |line|
    fields = line.split
    pid  = fields.shift.to_i
    ppid = fields.shift.to_i
    cmd  = fields.join(' ')

    # Create (or reuse) this process's entry, then fill it in.
    entry = ($process_tree[pid] ||= { :children => [] })
    entry[:ppid] = ppid
    entry[:pid]  = pid
    entry[:cmd]  = cmd

    next if ppid == 0
    # Register this PID with its parent; a stub parent entry is created
    # when the parent has not been seen in the listing yet.
    parent = ($process_tree[ppid] ||= { :children => [], :cmd => '' })
    parent[:children] << pid
  end
  $process_tree
end
|
||||
|
||||
# Post-deployment check: at least one cinder-volume process must be
# running with a VMDK backend config from /etc/cinder/cinder.d.
class CinderVmwarePostTest < Test::Unit::TestCase

  def test_process
    running = process_tree.any? do |_pid, info|
      info[:cmd].include? "/etc/cinder/cinder.d/vmware"
    end
    assert running, "Process cinder-volume --config /etc/cinder/cinder.d/vmware-N.conf is not running!"
  end

end
|
|
@ -0,0 +1,23 @@
|
|||
require 'hiera'
|
||||
require 'test/unit'
|
||||
|
||||
# Lazily construct and memoize a Hiera instance configured from the
# node's puppet hiera.yaml.
def hiera
  $hiera ||= Hiera.new(:config => '/etc/puppet/hiera.yaml')
end
|
||||
|
||||
# Fetch and memoize this node's role list via hiera.
def roles
  $roles ||= hiera.lookup('roles', nil, {})
end
|
||||
|
||||
|
||||
# Pre-deployment check: hiera must provide a role list for this node
# and that list must contain the cinder-vmware role.
class CinderVmwarePreTest < Test::Unit::TestCase

  def test_roles
    node_roles = roles
    assert node_roles, 'Could not get the roles data!'
    assert node_roles.is_a?(Array), 'Incorrect roles data!'
    # include? is equivalent to the old find_index check: find_index
    # returns an Integer (0 included, which is truthy in Ruby) or nil.
    assert node_roles.include?("cinder-vmware"), 'Wrong role for this node!'
  end

end
|
|
@ -8,3 +8,30 @@
|
|||
puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/vmware/vcenter.pp
|
||||
puppet_modules: /etc/puppet/modules
|
||||
timeout: 3600
|
||||
|
||||
|
||||
# Apply the vmware compute manifest on compute nodes after the base
# compute role; runs only for vCenter-backed environments.
- id: vmware-compute
  type: puppet
  groups: [compute]
  required_for: [deploy_end]
  requires: [top-role-compute]
  condition: "settings:common.libvirt_type.value == 'vcenter' or settings:common.use_vcenter.value == true"
  parameters:
    puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/vmware/compute.pp
    puppet_modules: /etc/puppet/modules
    timeout: 3600
|
||||
|
||||
# Deploy the VMDK cinder-volume backend on cinder-vmware nodes once the
# base cinder role is in place; runs only for vCenter-backed environments.
# test_pre checks the node's role assignment before applying the manifest;
# test_post verifies the resulting cinder-volume process is running.
- id: top-role-cinder-vmware
  type: puppet
  groups: [cinder-vmware]
  required_for: [deploy_end]
  requires: [top-role-cinder]
  condition: "settings:common.libvirt_type.value == 'vcenter' or settings:common.use_vcenter.value == true"
  parameters:
    puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/vmware/cinder-vmware.pp
    puppet_modules: /etc/puppet/modules
    timeout: 3600
    test_pre:
      cmd: ruby /etc/puppet/modules/osnailyfacter/modular/vmware/cinder-vmware_pre.rb
    test_post:
      cmd: ruby /etc/puppet/modules/osnailyfacter/modular/vmware/cinder-vmware_post.rb
||||
|
|
|
@ -0,0 +1,36 @@
|
|||
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

# Class: vmware::cinder
#
# Configures cinder-volume services with the VMware VMDK backend, one
# service instance per vSphere cluster listed in $vmware_cluster.
#
# Parameters (all fed into the vmware/cinder-volume.conf.erb template):
#   [*vmware_host_ip*]       - vCenter server address.
#   [*vmware_host_username*] - vCenter user.
#   [*vmware_host_password*] - vCenter password.
#   [*vmware_cluster*]       - cluster name(s); split into per-cluster
#                              resources via vmware_index().
#   [*storage_availability_zone*] / [*default_availability_zone*]
#                            - cinder AZ settings for the backend.
#   [*debug*]                - cinder debug logging flag.
# NOTE(review): $init_package is accepted but not referenced in this
# class body — presumably consumed elsewhere; confirm before removing.
class vmware::cinder(
  $vmware_host_ip                     = '1.2.3.4',
  $vmware_host_username               = 'administrator@vsphere.local',
  $vmware_host_password               = '',
  $vmware_volume_folder               = 'cinder-volumes',
  $vmware_wsdl_location               = '',
  $vmware_api_retry_count             = 10,
  $vmware_host_version                = '',
  $vmware_image_transfer_timeout_secs = 7200,
  $vmware_max_objects_retrieval       = 100,
  $vmware_task_poll_interval          = 5,
  $vmware_tmp_dir                     = '/tmp',
  $storage_availability_zone          = 'vcenter',
  $default_availability_zone          = 'vcenter',
  $vmware_cluster                     = '',
  $init_package                       = 'cinder-vmware-init',
  $debug                              = false,
)
{
  # One vmware::cinder::vmdk instance per cluster index.
  $vsphere_clusters = vmware_index($vmware_cluster)
  create_resources(vmware::cinder::vmdk, $vsphere_clusters)
}
|
|
@ -0,0 +1,130 @@
|
|||
# This type creates cinder-volume service for provided vSphere cluster (cluster
# that is formed of ESXi hosts and is managed by vCenter server).
#
# Parameters:
#   [*index*]           - numeric suffix distinguishing this backend's
#                         config file, log file and service instance.
#   [*cinder_conf_dir*] - directory for per-backend cinder config files.
#   [*cinder_log_dir*]  - directory for per-backend cinder-volume logs.
define vmware::cinder::vmdk(
  $index           = '0',
  $cinder_conf_dir = '/etc/cinder/cinder.d',
  $cinder_log_dir  = '/var/log/cinder',
)
{

  include cinder::params

  $cinder_volume_conf   = "${cinder_conf_dir}/vmware-${index}.conf"
  $cinder_volume_log    = "${cinder_log_dir}/vmware-${index}.log"
  $cinder_conf          = $::cinder::params::cinder_conf
  $cinder_volume_vmware = "${::cinder::params::volume_service}-vmware"

  # Per-backend config directory and file. The defined() guards keep
  # multiple vmdk instances from declaring duplicate resources.
  if ! defined(File[$cinder_conf_dir]) {
    file { $cinder_conf_dir:
      ensure => directory,
      owner  => 'cinder',
      group  => 'cinder',
      mode   => '0750'
    }
  }

  if ! defined (File[$cinder_volume_conf]) {
    file { $cinder_volume_conf:
      ensure  => present,
      content => template('vmware/cinder-volume.conf.erb'),
      mode    => '0600',
      owner   => 'cinder',
      group   => 'cinder',
    }
  }

  File[$cinder_conf_dir]->File[$cinder_volume_conf]

  # One cinder-volume service instance per vSphere cluster.
  if ! defined(Service["cinder_volume_vmware_${index}"]) {
    service { "cinder_volume_vmware_${index}":
      ensure => running,
      name   => "${cinder_volume_vmware}-${index}",
      enable => true
    }
  }

  case $::osfamily {
    'RedHat': {
      # SysV init: symlink the shared init script per instance and pass
      # instance-specific options through /etc/sysconfig.
      $src_init = $cinder_volume_vmware
      $dst_init = '/etc/init.d'
      $cinder_volume_vmware_init = "${dst_init}/${cinder_volume_vmware}"
      $init_link = "${cinder_volume_vmware_init}-${index}"
      if ! defined(File[$init_link]) {
        file { $init_link:
          ensure => link,
          target => $cinder_volume_vmware_init
        }
      }

      $cinder_volume_default = "/etc/sysconfig/${cinder_volume_vmware}-${index}"
      if ! defined(File[$cinder_volume_default]){
        file { $cinder_volume_default:
          ensure  => present,
          content => "OPTIONS='--config-file=${cinder_conf} \
--config-file=${cinder_volume_conf}'",
        }
      }
      # Restart the service whenever its options file changes.
      File[$cinder_volume_default]~>
      Service["cinder_volume_vmware_${index}"]
    }
    'Debian': {
      # Upstart: per-instance job file linked to the shared one, plus an
      # /etc/init.d compatibility symlink and an initctl reload.
      $cinder_volume_default = "/etc/default/${cinder_volume_vmware}-${index}"
      $src_init = "${cinder_volume_vmware}.conf"
      $dst_init = '/etc/init'
      if ! defined(File[$cinder_volume_default]) {
        file { $cinder_volume_default:
          ensure  => present,
          content => "CINDER_VOLUME_OPTS='--config-file=${cinder_conf} \
--config-file=${cinder_volume_conf} --log-file=${cinder_volume_log}'",
        }
      }

      $cinder_volume_vmware_init = "${dst_init}/${cinder_volume_vmware}.conf"
      $init_link = "/etc/init/${cinder_volume_vmware}-${index}.conf"
      if ! defined(File[$init_link]) {
        file { $init_link:
          ensure => link,
          target => $cinder_volume_vmware_init
        }
      }

      $upstart_link = "/etc/init.d/${cinder_volume_vmware}-${index}"
      if ! defined(File[$upstart_link]) {
        file { $upstart_link:
          ensure => link,
          target => '/etc/init.d/cinder-volume'
        }
      }

      $init_reload_cmd = '/sbin/initctl reload-configuration'
      $init_reload = 'initctl reload-configuration'
      if ! defined(Exec[$init_reload]) {
        exec { $init_reload:
          command => $init_reload_cmd,
          path    => [ '/bin', '/sbin', '/usr/bin', '/usr/sbin' ]
        }
      }

      # Options change -> reload upstart config -> restart the instance.
      File[$cinder_volume_default]~>
      Exec[$init_reload]->
      Service["cinder_volume_vmware_${index}"]
    }
    default: {
      # FIX: fail is a function, not a resource type. The previous
      # `fail { "...": }` form declared a nonexistent resource type and
      # aborted with a confusing "invalid resource type" error instead
      # of the intended message.
      fail("Unsupported OS family (${::osfamily})")
    }
  }

  # Copy the packaged init file into place before the per-instance link
  # is created, and ensure both exist before the defaults file.
  $cmd = "cp /usr/share/cinder/${src_init} ${dst_init}"
  if ! defined(Exec[$src_init]) {
    exec {$src_init:
      command => $cmd,
      path    => [ '/bin', '/sbin', '/usr/bin', '/usr/sbin' ]
    }
  }

  Exec[$src_init]->
  File[$init_link]->
  File[$cinder_volume_default]

}
|
|
@ -0,0 +1,21 @@
|
|||
<%# cinder-volume config for one VMDK backend instance. All values come
    from class vmware::cinder parameters; @index is the per-cluster index
    supplied by vmware::cinder::vmdk. The optional wsdl_location and
    host_version keys are emitted only when non-empty. -%>
[DEFAULT]
host=<%= @index %>
storage_availability_zone=<%= scope.lookupvar('vmware::cinder::storage_availability_zone')%>
default_availability_zone=<%= scope.lookupvar('vmware::cinder::default_availability_zone')%>
volume_driver=cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver
debug=<%= scope.lookupvar('vmware::cinder::debug')%>
vmware_host_ip=<%= scope.lookupvar('vmware::cinder::vmware_host_ip')%>
vmware_host_password=<%= scope.lookupvar('vmware::cinder::vmware_host_password')%>
vmware_host_username=<%= scope.lookupvar('vmware::cinder::vmware_host_username')%>
vmware_volume_folder=<%= scope.lookupvar('vmware::cinder::vmware_volume_folder')%>
<% if !scope.lookupvar('vmware::cinder::vmware_wsdl_location').empty? -%>
vmware_wsdl_location=<%= scope.lookupvar('vmware::cinder::vmware_wsdl_location')%>
<% end -%>
vmware_api_retry_count=<%= scope.lookupvar('vmware::cinder::vmware_api_retry_count')%>
<% if !scope.lookupvar('vmware::cinder::vmware_host_version').empty? -%>
vmware_host_version=<%= scope.lookupvar('vmware::cinder::vmware_host_version')%>
<% end -%>
vmware_image_transfer_timeout_secs=<%= scope.lookupvar('vmware::cinder::vmware_image_transfer_timeout_secs')%>
vmware_max_objects_retrieval=<%= scope.lookupvar('vmware::cinder::vmware_max_objects_retrieval')%>
vmware_task_poll_interval=<%= scope.lookupvar('vmware::cinder::vmware_task_poll_interval')%>
vmware_tmp_dir=<%= scope.lookupvar('vmware::cinder::vmware_tmp_dir')%>
|
Loading…
Reference in New Issue