Merge pull request #593 from xarses/ceph-fuel-3

Sync CEPH working branch part 3
Vladimir Kuklin 2013-10-07 11:40:48 -07:00
commit 2620d514be
38 changed files with 839 additions and 752 deletions

View File

@@ -2,16 +2,19 @@ Facter.add("osd_devices_list") do
setcode do
# Use any filesystem labeled "cephosd" as an osd
devs = %x{blkid -o list | awk '{if ($3 == "cephosd") print $1}'}.split("\n")
journal = %x{blkid -o list | awk '{if ($3 == "cephjournal") print $4}'}.strip
journal = %x{blkid -o list | awk '{if ($3 == "cephjournal") print $1}'}.split("\n")
output = []
devs.collect! do |d|
if journal == ''
d
else
part = d.split('/')[-1]
"#{d}:#{journal}/#{part}-journal"
end
if journal.length > 0
ratio = (devs.length * 1.0 / journal.length).ceil
ratio = ratio > 1 ? ratio : 1
devs.each_slice(ratio) { |s|
j = journal.shift
output << s.map{|d| "#{d}:#{j}"}
}
else
output = devs
end
devs.join(" ")
output.join(" ")
end
end
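
Note: the new logic pairs OSD devices with journal devices by slicing the OSD list evenly across however many "cephjournal"-labeled devices exist, instead of deriving every OSD's journal path from a single journal filesystem as the old code did. A standalone Ruby sketch of the pairing (device names are hypothetical):

# Sketch of the slicing above, outside of Facter, with made-up devices.
devs    = ['/dev/sdb1', '/dev/sdc1', '/dev/sdd1', '/dev/sde1']
journal = ['/dev/sdf1', '/dev/sdg1']
output  = []

if journal.length > 0
  # Each journal device serves ceil(devs / journals) OSD devices.
  ratio = (devs.length * 1.0 / journal.length).ceil
  ratio = ratio > 1 ? ratio : 1
  devs.each_slice(ratio) do |s|
    j = journal.shift
    output << s.map { |d| "#{d}:#{j}" }
  end
else
  output = devs
end

puts output.join(' ')
# => /dev/sdb1:/dev/sdf1 /dev/sdc1:/dev/sdf1 /dev/sdd1:/dev/sdg1 /dev/sde1:/dev/sdg1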

View File

@@ -1,27 +0,0 @@
Puppet::Type.type(:cinder_config).provide(
:ini_setting,
:parent => Puppet::Type.type(:ini_setting).provider(:ruby)
) do
def section
resource[:name].split('/', 2).first
end
def setting
resource[:name].split('/', 2).last
end
def separator
'='
end
def self.file_path
'/etc/cinder/cinder.conf'
end
# added for backwards compatibility with older versions of inifile
def file_path
self.class.file_path
end
end

View File

@@ -1,27 +0,0 @@
Puppet::Type.type(:glance_api_config).provide(
:ini_setting,
:parent => Puppet::Type.type(:ini_setting).provider(:ruby)
) do
def section
resource[:name].split('/', 2).first
end
def setting
resource[:name].split('/', 2).last
end
def separator
'='
end
def self.file_path
'/etc/glance/glance-api.conf'
end
# this needs to be removed. This has been replaced with the class method
def file_path
self.class.file_path
end
end

View File

@@ -1,42 +0,0 @@
Puppet::Type.newtype(:cinder_config) do
ensurable
newparam(:name, :namevar => true) do
desc 'Section/setting name to manage from /etc/cinder/cinder.conf'
newvalues(/\S+\/\S+/)
end
newproperty(:value) do
desc 'The value of the setting to be defined.'
munge do |value|
value = value.to_s.strip
value.capitalize! if value =~ /^(true|false)$/i
value
end
def is_to_s( currentvalue )
if resource.secret?
return '[old secret redacted]'
else
return currentvalue
end
end
def should_to_s( newvalue )
if resource.secret?
return '[new secret redacted]'
else
return newvalue
end
end
end
newparam(:secret, :boolean => true) do
desc 'Whether to hide the value from Puppet logs. Defaults to `false`.'
newvalues(:true, :false)
defaultto false
end
end

View File

@@ -1,19 +0,0 @@
Puppet::Type.newtype(:glance_api_config) do
ensurable
newparam(:name, :namevar => true) do
desc 'Section/setting name to manage from glance-api.conf'
newvalues(/\S+\/\S+/)
end
newproperty(:value) do
desc 'The value of the setting to be defined.'
munge do |value|
value = value.to_s.strip
value.capitalize! if value =~ /^(true|false)$/i
value
end
end
end

View File

@@ -1,3 +1,4 @@
# configure apt sources for Ceph
class ceph::apt (
$release = 'cuttlefish'
) {
@@ -19,9 +20,9 @@ class ceph::apt (
location => "http://ceph.com/debian-${release}/",
}
apt::source { 'radosgw-apache2':
location => "http://gitbuilder.ceph.com/apache2-deb-precise-x86_64-basic/ref/master/",
location => 'http://gitbuilder.ceph.com/apache2-deb-precise-x86_64-basic/ref/master/',
}
apt::source { 'radosgw-fastcgi':
location => "http://gitbuilder.ceph.com/libapache-mod-fastcgi-deb-precise-x86_64-basic/ref/master/",
location => 'http://gitbuilder.ceph.com/libapache-mod-fastcgi-deb-precise-x86_64-basic/ref/master/',
}
}

View File

@@ -1,56 +0,0 @@
#ceph::cinder will set up cinder parts if detected on the system
class ceph::cinder (
$volume_driver = $::ceph::volume_driver,
$rbd_pool = $::ceph::rbd_pool,
$glance_api_version = $::ceph::glance_api_version,
$rbd_user = $::ceph::rbd_user,
$rbd_secret_uuid = $::ceph::rbd_secret_uuid,
) {
if str2bool($::cinder_conf) {
exec {'Copy configs':
command => "scp -r ${::ceph::primary_mon}:/etc/ceph/* /etc/ceph/",
require => Package['ceph'],
returns => 0,
}
Cinder_config<||> ~> Service["${::ceph::params::service_cinder_volume}" ]
File_line<||> ~> Service["${::ceph::params::service_cinder_volume}"]
cinder_config {
'DEFAULT/volume_driver': value => $volume_driver;
'DEFAULT/rbd_pool': value => $rbd_pool;
'DEFAULT/glance_api_version': value => $glance_api_version;
'DEFAULT/rbd_user': value => $rbd_user;
'DEFAULT/rbd_secret_uuid': value => $rbd_secret_uuid;
}
file { "${::ceph::params::service_cinder_volume_opts}":
ensure => 'present',
} -> file_line { 'cinder-volume.conf':
path => "${::ceph::params::service_cinder_volume_opts}",
line => 'export CEPH_ARGS="--id volumes"',
}
if ! defined(Class['cinder::volume']) {
service { "${::ceph::params::service_cinder_volume}":
ensure => 'running',
enable => true,
hasstatus => true,
hasrestart => true,
}
}
exec { 'Create keys for pool volumes':
command => 'ceph auth get-or-create client.volumes > /etc/ceph/ceph.client.volumes.keyring',
before => File['/etc/ceph/ceph.client.volumes.keyring'],
creates => '/etc/ceph/ceph.client.volumes.keyring',
require => [Package['ceph'], Exec['Copy configs']],
notify => Service["${::ceph::params::service_cinder_volume}"],
returns => 0,
}
file { '/etc/ceph/ceph.client.volumes.keyring':
owner => cinder,
group => cinder,
require => Exec['Create keys for pool volumes'],
mode => '0600',
}
}
}

View File

@@ -0,0 +1,64 @@
# create new conf on primary Ceph MON, pull conf on all other nodes
class ceph::conf {
if $::hostname == $::ceph::primary_mon {
exec {'ceph-deploy new':
command => "ceph-deploy new ${::hostname}:${::internal_address}",
cwd => '/etc/ceph',
logoutput => true,
creates => ['/etc/ceph/ceph.conf'],
}
# link is necessary to work around http://tracker.ceph.com/issues/6281
file {'/root/ceph.conf':
ensure => link,
target => '/etc/ceph/ceph.conf',
}
file {'/root/ceph.mon.keyring':
ensure => link,
target => '/etc/ceph/ceph.mon.keyring',
}
ceph_conf {
'global/auth_supported': value => $::ceph::auth_supported;
'global/osd_journal_size': value => $::ceph::osd_journal_size;
'global/osd_mkfs_type': value => $::ceph::osd_mkfs_type;
'global/osd_pool_default_size': value => $::ceph::osd_pool_default_size;
'global/osd_pool_default_min_size': value => $::ceph::osd_pool_default_min_size;
'global/osd_pool_default_pg_num': value => $::ceph::osd_pool_default_pg_num;
'global/osd_pool_default_pgp_num': value => $::ceph::osd_pool_default_pgp_num;
'global/cluster_network': value => $::ceph::cluster_network;
'global/public_network': value => $::ceph::public_network;
}
Exec['ceph-deploy new'] ->
File['/root/ceph.conf'] -> File['/root/ceph.mon.keyring'] ->
Ceph_conf <||>
} else {
exec {'ceph-deploy config pull':
command => "ceph-deploy --overwrite-conf config pull ${::ceph::primary_mon}",
creates => '/root/ceph.conf',
}
exec {'ceph-deploy gatherkeys remote':
command => "ceph-deploy gatherkeys ${::ceph::primary_mon}",
creates => ['/root/ceph.bootstrap-mds.keyring',
'/root/ceph.bootstrap-osd.keyring',
'/root/ceph.client.admin.keyring',
'/root/ceph.mon.keyring',
],
}
exec {'ceph-deploy init config':
command => "ceph-deploy --overwrite-conf config push ${::hostname}",
creates => '/etc/ceph/ceph.conf',
}
Exec['ceph-deploy config pull'] ->
Exec['ceph-deploy gatherkeys remote'] ->
Exec['ceph-deploy init config']
}
}

View File

@@ -1,11 +0,0 @@
#Ceph::deploy will install mds server if invoked
class ceph::deploy (
) {
if $mds_server {
exec { 'ceph-deploy-s4':
command => "ceph-deploy mds create ${mds_server}",
require => Class['c_osd'],
logoutput => true,
}
}
}

View File

@@ -1,46 +0,0 @@
#ceph::glance will configure glance parts if present on the system
class ceph::glance (
$default_store = $::ceph::default_store,
$rbd_store_user = $::ceph::rbd_store_user,
$rbd_store_pool = $::ceph::rbd_store_pool,
$show_image_direct_url = $::ceph::show_image_direct_url,
) {
if str2bool($::glance_api_conf) {
exec {'Copy config':
command => "scp -r ${::ceph::primary_mon}:/etc/ceph/* /etc/ceph/",
require => Package['ceph'],
returns => 0,
}
if ! defined('glance::backend::ceph') {
package {['python-ceph']:
ensure => latest,
}
glance_api_config {
'DEFAULT/default_store': value => $default_store;
'DEFAULT/rbd_store_user': value => $rbd_store_user;
'DEFAULT/rbd_store_pool': value => $rbd_store_pool;
'DEFAULT/show_image_direct_url': value => $show_image_direct_url;
}~> Service["${::ceph::params::service_glance_api}"]
service { "${::ceph::params::service_glance_api}":
ensure => 'running',
enable => true,
hasstatus => true,
hasrestart => true,
}
}
exec { 'Create keys for pool images':
command => 'ceph auth get-or-create client.images > /etc/ceph/ceph.client.images.keyring',
before => File['/etc/ceph/ceph.client.images.keyring'],
creates => '/etc/ceph/ceph.client.images.keyring',
require => [Package['ceph'], Exec['Copy config']],
notify => Service["${::ceph::params::service_glance_api}"],
returns => 0,
}
file { '/etc/ceph/ceph.client.images.keyring':
owner => glance,
group => glance,
require => Exec['Create keys for pool images'],
mode => '0600',
}
}
}

View File

@@ -1,154 +1,115 @@
#ceph will install ceph parts
# ceph configuration and resource relations
class ceph (
#General settings
# General settings
$cluster_node_address = $::ipaddress, #This should be the cluster service address
$primary_mon = $::hostname, #This should be the first controller
$ceph_pools = [ 'volumes', 'images' ],
$osd_devices = split($::osd_devices_list, " "),
#ceph.conf Global settings
$cinder_pool = 'volumes',
$glance_pool = 'images',
$osd_devices = split($::osd_devices_list, ' '),
$use_ssl = false,
$use_rgw = false,
# ceph.conf Global settings
$auth_supported = 'cephx',
$osd_journal_size = '2048',
$osd_mkfs_type = 'xfs',
$osd_pool_default_size = '2',
$osd_pool_default_min_size = '1',
#TODO: calculate PG numbers
# TODO: calculate PG numbers
$osd_pool_default_pg_num = '100',
$osd_pool_default_pgp_num = '100',
$cluster_network = "${::storage_network_range}",
$public_network = "${::management_network_range}",
#RadosGW settings
$host = $::hostname,
$keyring_path = '/etc/ceph/keyring.radosgw.gateway',
$cluster_network = $::storage_network_range,
$public_network = $::management_network_range,
# RadosGW settings
$rgw_host = $::hostname,
$rgw_port = '6780',
$rgw_keyring_path = '/etc/ceph/keyring.radosgw.gateway',
$rgw_socket_path = '/tmp/radosgw.sock',
$log_file = '/var/log/ceph/radosgw.log',
$user = 'www-data',
$rgw_log_file = '/var/log/ceph/radosgw.log',
$rgw_user = $::ceph::params::user_httpd,
$rgw_keystone_url = "${cluster_node_address}:5000",
$rgw_keystone_admin_token = 'nova',
$rgw_keystone_token_cache_size = '10',
$rgw_keystone_accepted_roles = undef, #TODO: find a default value for this
$rgw_keystone_accepted_roles = '_member_, Member, admin, swiftoperator',
$rgw_keystone_revocation_interval = '60',
$rgw_data = '/var/lib/ceph/rados',
$rgw_dns_name = $::hostname,
$rgw_data = '/var/lib/ceph/radosgw',
$rgw_dns_name = "*.${::domain}",
$rgw_print_continue = 'false',
$nss_db_path = '/etc/ceph/nss',
#Cinder settings
$rgw_nss_db_path = '/etc/ceph/nss',
# Keystone settings
$rgw_pub_ip = $cluster_node_address,
$rgw_adm_ip = $cluster_node_address,
$rgw_int_ip = $cluster_node_address,
# Cinder settings
$volume_driver = 'cinder.volume.drivers.rbd.RBDDriver',
$rbd_pool = 'volumes',
$glance_api_version = '2',
$rbd_user = 'volumes',
#TODO: generate rbd_secret_uuid
# TODO: generate rbd_secret_uuid
$rbd_secret_uuid = 'a5d0dd94-57c4-ae55-ffe0-7e3732a24455',
#Glance settings
$default_store = 'rbd',
# Glance settings
$glance_backend = 'ceph',
$rbd_store_user = 'images',
$rbd_store_pool = 'images',
$show_image_direct_url = 'True',
#Keystone settings
$rgw_pub_ip = "${cluster_node_address}",
$rgw_adm_ip = "${cluster_node_address}",
$rgw_int_ip = "${cluster_node_address}",
) {
Exec { path => [ '/bin/', '/sbin/' , '/usr/bin/', '/usr/sbin/' ],
cwd => '/root',
}
ceph_conf {
'global/auth supported': value => $auth_supported;
'global/osd journal size': value => $osd_journal_size;
'global/osd mkfs type': value => $osd_mkfs_type;
'global/osd pool default size': value => $osd_pool_default_size;
'global/osd pool default min size': value => $osd_pool_default_min_size;
'global/osd pool default pg num': value => $osd_pool_default_pg_num;
'global/osd pool default pgp num': value => $osd_pool_default_pgp_num;
'global/cluster network': value => $cluster_network;
'global/public network': value => $public_network;
'client.radosgw.gateway/host': value => $host;
'client.radosgw.gateway/keyring': value => $keyring_path;
'client.radosgw.gateway/rgw socket path': value => $rgw_socket_path;
'client.radosgw.gateway/log file': value => $log_file;
'client.radosgw.gateway/user': value => $user;
'client.radosgw.gateway/rgw keystone url': value => $rgw_keystone_url;
'client.radosgw.gateway/rgw keystone admin token': value => $rgw_keystone_admin_token;
'client.radosgw.gateway/rgw keystone accepted roles': value => $rgw_keystone_accepted_roles;
'client.radosgw.gateway/rgw keystone token cache size': value => $rgw_keystone_token_cache_size;
'client.radosgw.gateway/rgw keystone revocation interval': value => $rgw_keystone_revocation_interval;
'client.radosgw.gateway/rgw data': value => $rgw_data;
'client.radosgw.gateway/rgw dns name': value => $rgw_dns_name;
'client.radosgw.gateway/rgw print continue': value => $rgw_print_continue;
'client.radosgw.gateway/nss db path': value => $nss_db_path;
}
Ceph_conf {require => Exec['ceph-deploy init config']}
#RE-enable this if not using fuelweb iso with Ceph packages
#include 'ceph::yum'
include 'ceph::params'
include 'ceph::ssh'
#TODO: this should be pulled back into existing modules for setting up ssh-key
#TODO: OR need to at least generate the key
#Prepare nodes for further actions
#TODO: add ceph service
if $::hostname == $::ceph::primary_mon {
exec { 'ceph-deploy init config':
command => "ceph-deploy new ${::hostname}:${::internal_address}",
cwd => '/etc/ceph',
require => Package['ceph-deploy', 'ceph'],
logoutput => true,
creates => ['/etc/ceph/ceph.conf'],
} -> file {'/root/ceph.conf':
#link is necessary to work around http://tracker.ceph.com/issues/6281
ensure => link,
target => '/etc/ceph/ceph.conf',
} -> file {'/root/ceph.mon.keyring':
ensure => link,
target => '/etc/ceph/ceph.mon.keyring',
} -> Ceph_conf <||>
} else {
exec {'ceph-deploy config pull':
command => "ceph-deploy --overwrite-conf config pull ${::ceph::primary_mon}",
require => Package['ceph-deploy', 'ceph'],
creates => '/root/ceph.conf',
}
exec {'ceph-deploy gatherkeys remote':
command => "ceph-deploy gatherkeys ${::ceph::primary_mon}",
require => [Exec['ceph-deploy config pull']],
creates => ['/root/ceph.bootstrap-mds.keyring',
'/root/ceph.bootstrap-osd.keyring',
'/root/ceph.admin.keyring',
'/root/ceph.mon.keyring'
],
}
exec {'ceph-deploy init config':
command => "ceph-deploy --overwrite-conf config push ${::hostname}",
require => [Exec['ceph-deploy gatherkeys remote']],
creates => '/etc/ceph/ceph.conf',
# Re-enable ceph::yum if not using a Fuel iso with Ceph packages
#include ceph::yum
include ceph::ssh
include ceph::params
include ceph::conf
Class[['ceph::ssh', 'ceph::params']] -> Class['ceph::conf']
if $::fuel_settings['role'] =~ /controller|ceph/ {
service {'ceph':
ensure => 'running',
enable => true,
}
}
case $::fuel_settings['role'] {
'primary-controller', 'controller', 'ceph-mon': {
class {['ceph::glance', 'ceph::cinder', 'ceph::nova_compute']: }
class {'ceph::mon':
} -> Class[['ceph::glance',
'ceph::cinder',
'ceph::nova_compute',
#'ceph::keystone', #ceph::keystone is currently disabled
]]
#include ceph::keystone #Keystone is currently disabled
}
#TODO: remove cinder from this list.
#This will still NOOP on cinder if $::osd_device_list is empty
'ceph-osd', 'cinder': {
class {'ceph::osd': }
}
'ceph-mds': {
class {'ceph::deploy': }
}
'compute': {
class {'ceph::nova_compute': }
}
default: {
#TODO: this is probably too aggressive
include ceph::cinder, ceph::nova_compute
}
}
include ceph::mon
Class['ceph::conf'] ->
Class['ceph::mon'] ->
Service['ceph']
if ($::ceph::use_rgw) {
include ceph::libnss, ceph::keystone, ceph::radosgw
Class['ceph::mon'] ->
Class['ceph::libnss'] ->
Class[['ceph::keystone', 'ceph::radosgw']] ~>
Service['ceph']
}
}
'ceph-osd': {
if ! empty($osd_devices) {
include ceph::osd
Class['ceph::conf'] -> Class['ceph::osd'] -> Service['ceph']
}
}
'compute': {
include ceph::nova_compute
Class['ceph::conf'] ->
Class['ceph::nova_compute'] ~>
Service[$::ceph::params::service_nova_compute]
}
'ceph-mds': { include ceph::mds }
default: {}
}
}

View File

@@ -1,44 +1,44 @@
#ceph::keystone will configure keystone with ceph parts
class ceph::keystone (
$pub_ip = $::ceph::rgw_pub_ip,
$adm_ip = $::ceph::rgw_adm_ip,
$int_ip = $::ceph::rgw_int_ip,
$directory = '/etc/ceph/nss',
$pub_ip = $::ceph::rgw_pub_ip,
$adm_ip = $::ceph::rgw_adm_ip,
$int_ip = $::ceph::rgw_int_ip,
$rgw_port = $::ceph::rgw_port,
$use_ssl = $::ceph::use_ssl,
$directory = $::ceph::rgw_nss_db_path,
) {
if str2bool($::keystone_conf) {
package { 'libnss3-tools' :
ensure => 'latest'
}
file { "${directory}":
ensure => "directory",
require => Package['ceph'],
}
exec {"creating OpenSSL certificates":
command => "openssl x509 -in /etc/keystone/ssl/certs/ca.pem -pubkey \
| certutil -d ${directory} -A -n ca -t 'TCu,Cu,Tuw' && openssl x509 \
-in /etc/keystone/ssl/certs/signing_cert.pem -pubkey | certutil -A -d \
${directory} -n signing_cert -t 'P,P,P'",
require => [File["${directory}"], Package['libnss3-tools']]
if ($use_ssl) {
exec {'creating OpenSSL certificates':
command => "openssl x509 -in /etc/keystone/ssl/certs/ca.pem -pubkey | \
certutil -d ${directory} -A -n ca -t 'TCu,Cu,Tuw' && openssl x509 \
-in /etc/keystone/ssl/certs/signing_cert.pem -pubkey | \
certutil -A -d ${directory} -n signing_cert -t 'P,P,P'",
require => [File[$directory], Package[$::ceph::params::package_libnss]]
} ->
exec {"copy OpenSSL certificates":
command => "scp -r /etc/ceph/nss/* ${rados_GW}:/etc/ceph/nss/ && ssh ${rados_GW} '/etc/init.d/radosgw restart'",
}
keystone_service { 'swift':
ensure => present,
type => 'object-store',
description => 'Openstack Object-Store Service',
notify => Service['keystone'],
}
keystone_endpoint { "RegionOne/swift":
ensure => present,
public_url => "http://${pub_ip}/swift/v1",
admin_url => "http://${adm_ip}/swift/v1",
internal_url => "http://${int_ip}/swift/v1",
notify => Service['keystone'],
exec {'copy OpenSSL certificates':
command => "scp -r ${directory}/* ${::ceph::primary_mon}:${directory} && \
ssh ${::ceph::primary_mon} '/etc/init.d/radosgw restart'",
}
}
keystone_service {'swift':
ensure => present,
type => 'object-store',
description => 'Openstack Object-Store Service',
}
keystone_endpoint {'swift':
ensure => present,
region => 'RegionOne',
public_url => "http://${pub_ip}:${rgw_port}/swift/v1",
admin_url => "http://${adm_ip}:${rgw_port}/swift/v1",
internal_url => "http://${int_ip}:${rgw_port}/swift/v1",
}
if ! defined(Class['keystone']) {
service { 'keystone':
enable => true,
ensure => 'running',
enable => true,
}
}
}

View File

@@ -0,0 +1,13 @@
# set up the OS-specific libnss package for Ceph
class ceph::libnss {
package {$::ceph::params::package_libnss:
ensure => 'latest',
}
file {$::ceph::rgw_nss_db_path:
ensure => 'directory',
mode => '0755',
require => Package['ceph']
}
}

View File

@@ -0,0 +1,11 @@
# Ceph::mds will install mds server if invoked
class ceph::mds (
) {
if $::mds_server {
exec { 'ceph-deploy mds create':
command => "ceph-deploy mds create ${::mds_server}",
logoutput => true,
}
}
}

View File

@@ -1,6 +1,5 @@
#ceph::mon will install the ceph-mon
# setup Ceph monitors
class ceph::mon {
include c_pools
firewall {'010 ceph-mon allow':
chain => 'INPUT',
@@ -9,50 +8,43 @@ class ceph::mon {
action => accept,
}
exec { 'ceph-deploy deploy monitors':
exec {'ceph-deploy mon create':
command => "ceph-deploy mon create ${::hostname}:${::internal_address}",
logoutput => true,
require => [Exec['ceph-deploy init config'],
],
#TODO: need method to update mon_nodes in ceph.conf
unless => 'ceph -s',
# TODO: need method to update mon_nodes in ceph.conf
}
exec { 'ceph-deploy gatherkeys':
command => "ceph-deploy gatherkeys ${::hostname}",
exec {'Wait for Ceph quorum':
# this can be replaced with "ceph mon status mon.$::host" for Dumpling
command => 'ps ax|grep -vq ceph-create-keys',
returns => 0,
tries => 60, #This is necessary to prevent race, mon must establish
tries => 60, # This is necessary to prevent a race: mon must establish
# a quorum before it can generate keys; this was observed to take up to 15 seconds
# Keys must exist prior to other commands running
try_sleep => 1,
require => [Firewall['010 ceph-mon allow'],
Exec['ceph-deploy deploy monitors']],
}
File {
require => Exec['ceph-deploy gatherkeys']
exec {'ceph-deploy gatherkeys':
command => "ceph-deploy gatherkeys ${::hostname}",
creates => ['/root/ceph.bootstrap-mds.keyring',
'/root/ceph.bootstrap-osd.keyring',
'/root/ceph.client.admin.keyring',
],
}
file { '/root/ceph.bootstrap-osd.keyring':
}
file { '/root/ceph.bootstrap-mds.keyring':
}
file { '/root/ceph.client.admin.keyring':
}
file { '/root/ceph.client.mon.keyring':
}
#c_pools is used to loop through the list of $::ceph::ceph_pools
class c_pools {
define int {
exec { "Creating pool ${name}":
command => "ceph osd pool create ${name} ${::ceph::osd_pool_default_pg_num} ${::ceph::osd_pool_default_pgp_num}",
require => Exec['ceph-deploy gatherkeys'],
logoutput => true,
}
# creates the named OSD pool
define osd_pool {
exec { "Creating pool ${name}":
command => "ceph osd pool create ${name} ${::ceph::osd_pool_default_pg_num} ${::ceph::osd_pool_default_pgp_num}",
logoutput => true,
}
int { $::ceph::ceph_pools: }
}
exec { 'CLIENT AUTHENTICATION':
#DO NOT SPLIT ceph auth command lines See http://tracker.ceph.com/issues/3279
command => "ceph auth get-or-create client.${::ceph::ceph_pools[0]} mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=${::ceph::ceph_pools[0]}, allow rx pool=${::ceph::ceph_pools[1]}' && \
ceph auth get-or-create client.${::ceph::ceph_pools[1]} mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=${::ceph::ceph_pools[1]}'",
require => Class['c_pools'],
logoutput => true,
}
}
osd_pool {[$::ceph::cinder_pool, $::ceph::glance_pool]: }
Firewall['010 ceph-mon allow'] ->
Exec['ceph-deploy mon create'] ->
Exec['Wait for Ceph quorum'] ->
Exec['ceph-deploy gatherkeys'] ->
Osd_pool <||>
}

View File

@@ -1,37 +1,23 @@
#ceph::nova_compute will configure the nova_compute parts if present
# configure the nova_compute parts if present
class ceph::nova_compute (
$rbd_secret_uuid = $::ceph::rbd_secret_uuid
) {
if $::fuel_settings['role'] == "compute" {
exec {'Copy conf':
command => "scp -r ${::ceph::primary_mon}:/etc/ceph/* /etc/ceph/",
require => Package['ceph'],
returns => [0,1],
}
file { '/tmp/secret.xml':
#TODO: use mktemp
content => template('ceph/secret.erb')
}
exec { 'Set value':
#TODO: clean this command up
command => 'virsh secret-set-value --secret $( \
virsh secret-define --file /tmp/secret.xml | \
egrep -o "[0-9a-fA-F]{8}(-[0-9a-fA-F]{4}){3}-[0-9a-fA-F]{12}") \
--base64 $(ceph auth get-key client.volumes) && \
rm /tmp/secret.xml',
require => [File['/tmp/secret.xml'],
Package ['ceph'],
Exec['Copy conf']],
returns => [0,1],
}
if ! defined('nova::compute') {
service {"${::ceph::params::service_nova_compute}":
ensure => "running",
enable => true,
hasstatus => true,
hasrestart => true,
subscribe => Exec['Set value']
}
}
file {'/root/secret.xml':
content => template('ceph/secret.erb')
}
exec {'Set Ceph RBD secret for Nova':
# TODO: clean this command up
command => 'virsh secret-set-value --secret $( \
virsh secret-define --file /root/secret.xml | \
egrep -o "[0-9a-fA-F]{8}(-[0-9a-fA-F]{4}){3}-[0-9a-fA-F]{12}") \
--base64 $(ceph auth get-key client.volumes) && \
rm /root/secret.xml',
require => File['/root/secret.xml'],
returns => [0,1],
}
File['/root/secret.xml'] ->
Exec['Set Ceph RBD secret for Nova']
}

View File

@@ -1,9 +1,7 @@
#Ceph::osd will prepare and online devices in $::ceph::osd_devices
# prepare and bring online the devices listed in $::ceph::osd_devices
class ceph::osd (
$devices = join(prefix($::ceph::osd_devices, "${::hostname}:"), " "),
$devices = join(prefix($::ceph::osd_devices, "${::hostname}:"), ' '),
){
if ! empty($::ceph::osd_devices) {
firewall {'011 ceph-osd allow':
chain => 'INPUT',
dport => '6800-7100',
@@ -12,26 +10,29 @@ class ceph::osd (
}
exec { 'ceph-deploy osd prepare':
#ceph-deploy osd prepare is ensuring there is a filesystem on the
# ceph-deploy osd prepare is ensuring there is a filesystem on the
# disk according to the args passed to ceph.conf (above).
#timeout: It has a long timeout because of the format taking forever.
# A reasonable amount of time would be around 300 times the length
# of $osd_nodes. Right now it's 0 to prevent puppet from aborting it.
#
# It has a long timeout because of the format taking forever. A
# reasonable amount of time would be around 300 times the length of
# $osd_nodes. Right now it's 0 to prevent puppet from aborting it.
command => "ceph-deploy osd prepare ${devices}",
returns => 0,
timeout => 0, #TODO: make this something reasonable
tries => 2, #This is necessary because of race for mon creating keys
timeout => 0, # TODO: make this something reasonable
tries => 2, # This is necessary because of race for mon creating keys
try_sleep => 1,
require => [Exec['ceph-deploy init config'],
Firewall['011 ceph-osd allow'],
],
logoutput => true,
unless => "grep -q '^${ $::ceph::osd_devices[0] }' /proc/mounts",
}
exec { 'ceph-deploy osd activate':
command => "ceph-deploy osd activate ${devices}",
returns => 0,
require => Exec['ceph-deploy osd prepare'],
logoutput => true,
}
}
}
Firewall['011 ceph-osd allow'] ->
Exec['ceph-deploy osd prepare'] ->
Exec['ceph-deploy osd activate']
}
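
For reference, ceph-deploy takes one HOST:DISK[:JOURNAL] argument per OSD, so the $devices parameter above expands the facter output into that form. A Ruby sketch of what prefix() plus join() produce (hostname and devices are hypothetical):

# Emulate puppet's prefix()/join() for the ceph-deploy argument list.
osd_devices = ['/dev/sdb1:/dev/sdf1', '/dev/sdc1:/dev/sdg1']
devices = osd_devices.map { |d| "node-1:#{d}" }.join(' ')
puts devices
# => node-1:/dev/sdb1:/dev/sdf1 node-1:/dev/sdc1:/dev/sdg1
# i.e. "ceph-deploy osd prepare node-1:/dev/sdb1:/dev/sdf1 node-1:/dev/sdc1:/dev/sdg1"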

View File

@@ -1,4 +1,4 @@
#These are per-OS parameters and should be considered static
# These are per-OS parameters and should be considered static
class ceph::params {
case $::osfamily {
@@ -8,27 +8,54 @@ class ceph::params {
$service_glance_api = 'openstack-glance-api'
$service_glance_registry = 'openstack-glance-registry'
$service_nova_compute = 'openstack-nova-compute'
#RadosGW
$service_httpd = 'httpd'
$package_httpd = 'httpd'
$user_httpd = 'apache'
$package_libnss = 'nss-tools'
$service_radosgw = 'ceph-radosgw'
$package_radosgw = 'ceph-radosgw'
$package_modssl = 'mod_ssl'
$package_fastcgi = 'mod_fastcgi'
$dir_httpd_conf = '/etc/httpd/conf/'
$dir_httpd_sites = '/etc/httpd/conf.d/'
$dir_httpd_ssl = '/etc/httpd/ssl/'
package { ['ceph', 'redhat-lsb-core','ceph-deploy', 'pushy',]:
ensure => latest,
}
file {'/etc/sudoers.d/ceph':
content => "#This is required for ceph-deploy\nDefaults !requiretty\n"
content => "# This is required for ceph-deploy\nDefaults !requiretty\n"
}
}
'Debian': {
$service_cinder_volume = 'cinder-volume'
$service_cinder_volume_opts = '/etc/init/cinder-volume.conf'
$service_glance_api = 'glance-api'
$service_glance_registry = 'glance-registry'
$service_nova_compute = 'nova-compute'
#RadosGW
$service_httpd = 'apache2'
$package_httpd = 'apache2'
$user_httpd = 'www-data'
$package_libnss = 'libnss3-tools'
$service_radosgw = 'radosgw'
$package_radosgw = 'radosgw'
$package_fastcgi = 'libapache2-mod-fastcgi'
$package_modssl = ''
$dir_httpd_conf = '/etc/httpd/conf/'
$dir_httpd_sites = '/etc/apache2/sites-available/'
$dir_httpd_ssl = '/etc/apache2/ssl/'
package { ['ceph','ceph-deploy', 'pushy', ]:
package { ['ceph','ceph-deploy', 'python-pushy', ]:
ensure => latest,
}
}
default: {
fail("Unsupported osfamily: ${::osfamily} operatingsystem: ${::operatingsystem}, module ${module_name} only support osfamily RedHat and Debian")
}
}
}
}

View File

@@ -1,70 +1,156 @@
# enable an Apache module
define apache::loadmodule () {
exec { "/usr/sbin/a2enmod $name" :
exec { "/usr/sbin/a2enmod ${name}" :
unless => "/bin/readlink -e /etc/apache2/mods-enabled/${name}.load",
notify => Service[apache2]
notify => Service['httpd']
}
}
define ceph::radosgw (
# deploys Ceph radosgw as an Apache FastCGI application
class ceph::radosgw (
$keyring_path = '/etc/ceph/keyring.radosgw.gateway',
$apache2_ssl = '/etc/apache2/ssl/',
$httpd_ssl = $::ceph::params::dir_httpd_ssl,
$radosgw_auth_key = 'client.radosgw.gateway',
# RadosGW settings
$rgw_host = $::ceph::rgw_host,
$rgw_port = $::ceph::rgw_port,
$rgw_keyring_path = $::ceph::rgw_keyring_path,
$rgw_socket_path = $::ceph::rgw_socket_path,
$rgw_log_file = $::ceph::rgw_log_file,
$rgw_user = $::ceph::rgw_user,
$rgw_keystone_url = $::ceph::rgw_keystone_url,
$rgw_keystone_admin_token = $::ceph::rgw_keystone_admin_token,
$rgw_keystone_token_cache_size = $::ceph::rgw_keystone_token_cache_size,
$rgw_keystone_accepted_roles = $::ceph::rgw_keystone_accepted_roles,
$rgw_keystone_revocation_interval = $::ceph::rgw_keystone_revocation_interval,
$rgw_data = $::ceph::rgw_data,
$rgw_dns_name = $::ceph::rgw_dns_name,
$rgw_print_continue = $::ceph::rgw_print_continue,
$rgw_nss_db_path = $::ceph::rgw_nss_db_path,
$use_ssl = $::ceph::use_ssl,
) {
package { ["apache2", "libapache2-mod-fastcgi", 'libnss3-tools', 'radosgw']:
ensure => "latest",
$dir_httpd_root = '/var/www/radosgw'
package { [$::ceph::params::package_radosgw,
$::ceph::params::package_fastcgi,
$::ceph::params::package_modssl
]:
ensure => 'latest',
}
apache::loadmodule{["rewrite", "fastcgi", "ssl"]: }
service { 'radosgw':
ensure => 'running',
name => $::ceph::params::service_radosgw,
enable => true,
}
file {'/etc/apache2/httpd.conf':
ensure => "present",
content => "ServerName ${fqdn}",
notify => Service["apache2"],
require => Package["apache2"],
if !(defined('horizon') or
defined($::ceph::params::package_httpd) or
defined($::ceph::params::service_httpd) ) {
package {$::ceph::params::package_httpd:
ensure => 'latest',
}
service { 'httpd':
ensure => 'running',
name => $::ceph::params::service_httpd,
enable => true,
}
}
file {["${apache2_ssl}", '/var/lib/ceph/radosgw/ceph-radosgw.gateway', '/var/lib/ceph/radosgw', '/etc/ceph/nss']:
ensure => "directory",
mode => 755,
# All files need to be owned by the rgw / http user.
File {
owner => $rgw_user,
group => $rgw_user,
}
exec {"generate SSL certificate on $name":
command => "openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout ${apache2_ssl}apache.key -out ${apache2_ssl}apache.crt -subj '/C=RU/ST=Russia/L=Saratov/O=Mirantis/OU=CA/CN=localhost'",
returns => [0,1],
ceph_conf {
'client.radosgw.gateway/host': value => $rgw_host;
'client.radosgw.gateway/keyring': value => $keyring_path;
'client.radosgw.gateway/rgw_socket_path': value => $rgw_socket_path;
'client.radosgw.gateway/log_file': value => $rgw_log_file;
'client.radosgw.gateway/user': value => $rgw_user;
'client.radosgw.gateway/rgw_keystone_url': value => $rgw_keystone_url;
'client.radosgw.gateway/rgw_keystone_admin_token': value => $rgw_keystone_admin_token;
'client.radosgw.gateway/rgw_keystone_accepted_roles': value => $rgw_keystone_accepted_roles;
'client.radosgw.gateway/rgw_keystone_token_cache_size': value => $rgw_keystone_token_cache_size;
'client.radosgw.gateway/rgw_keystone_revocation_interval': value => $rgw_keystone_revocation_interval;
'client.radosgw.gateway/rgw_data': value => $rgw_data;
'client.radosgw.gateway/rgw_dns_name': value => $rgw_dns_name;
'client.radosgw.gateway/rgw_print_continue': value => $rgw_print_continue;
}
file { "/etc/apache2/sites-available/rgw.conf":
# TODO: CentOS conversion
# apache::loadmodule{['rewrite', 'fastcgi', 'ssl']: }
# file {"${::ceph::params::dir_httpd_conf}/httpd.conf":
# ensure => 'present',
# content => "ServerName ${fqdn}",
# notify => Service['httpd'],
# require => Package[$::ceph::params::package_httpd],
# }
file {[$::ceph::params::dir_httpd_ssl,
"${::ceph::rgw_data}/ceph-radosgw.gateway",
$::ceph::rgw_data,
$dir_httpd_root,
]:
ensure => 'directory',
mode => '0755',
}
if ($use_ssl) {
exec {"generate SSL certificate on ${name}":
command => "openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout ${httpd_ssl}apache.key -out ${httpd_ssl}apache.crt -subj '/C=RU/ST=Russia/L=Saratov/O=Mirantis/OU=CA/CN=localhost'",
returns => [0,1],
}
ceph_conf{
'client.radosgw.gateway/nss db path': value => $rgw_nss_db_path;
}
}
file { "${::ceph::params::dir_httpd_sites}/rgw.conf":
content => template('ceph/rgw.conf.erb'),
notify => Service["apache2"],
require => Package["apache2"],
}
Exec {require => File["/etc/apache2/sites-available/rgw.conf"]}
exec {'a2ensite rgw.conf':}
exec {'a2dissite default':}
file { "/var/www/s3gw.fcgi":
file { "${dir_httpd_root}/s3gw.fcgi":
content => template('ceph/s3gw.fcgi.erb'),
notify => Service["apache2"],
require => Package["apache2"],
mode => "+x",
mode => '0755',
}
exec { "ceph-create-radosgw-keyring-on $name":
exec { "ceph-create-radosgw-keyring-on ${name}":
command => "ceph-authtool --create-keyring ${keyring_path}",
require => Package['ceph'],
} ->
file { "${keyring_path}":
mode => "+r",
} ->
exec { "ceph-generate-key-on $name":
creates => $keyring_path,
}
file { $keyring_path: mode => '0640', }
exec { "ceph-generate-key-on ${name}":
command => "ceph-authtool ${keyring_path} -n ${radosgw_auth_key} --gen-key",
require => Package["apache2"],
} ->
exec { "ceph-add-capabilities-to-the-key-on $name":
}
exec { "ceph-add-capabilities-to-the-key-on ${name}":
command => "ceph-authtool -n ${radosgw_auth_key} --cap osd 'allow rwx' --cap mon 'allow rw' ${keyring_path}",
require => Package["apache2"],
} ->
exec { "ceph-add-to-ceph-keyring-entries-on $name":
}
exec { "ceph-add-to-ceph-keyring-entries-on ${name}":
command => "ceph -k /etc/ceph/ceph.client.admin.keyring auth add ${radosgw_auth_key} -i ${keyring_path}",
require => Package["apache2"],
}
service { "apache2":
enable => true,
ensure => "running",
}
Ceph_conf <||> ->
Package[[$::ceph::params::package_httpd,
$::ceph::params::package_radosgw,]] ->
File[["${::ceph::params::dir_httpd_sites}/rgw.conf",
$::ceph::params::dir_httpd_ssl,
"${::ceph::rgw_data}/ceph-radosgw.gateway",
$::ceph::rgw_data,
$dir_httpd_root,]] ->
Exec["ceph-create-radosgw-keyring-on ${name}"] ->
File[$keyring_path] ->
Exec["ceph-generate-key-on ${name}"] ->
Exec["ceph-add-capabilities-to-the-key-on ${name}"] ->
Exec["ceph-add-to-ceph-keyring-entries-on ${name}"] ~>
Service['httpd'] ~>
Service['radosgw']
}

View File

@@ -1,27 +1,10 @@
# generate and install SSH keys for Ceph
class ceph::ssh {
$server_package = 'openssh-server'
$client_package = $::osfamily ? {
'RedHat' => 'openssh-clients',
'Debian' => 'openssh-client',
default => 'openssh-clients',
}
$ssh_config = '/root/.ssh/config'
$private_key = '/var/lib/astute/ceph/ceph'
$public_key = '/var/lib/astute/ceph/ceph.pub'
if !defined(Package[$server_package]) {
package { $server_package :
ensure => installed,
}
}
if !defined(Package[$client_package]) {
package { $client_package :
ensure => installed,
}
}
install_ssh_keys {'root_ssh_keys_for_ceph':
ensure => present,
user => 'root',
@@ -40,5 +23,4 @@ class ceph::ssh {
}
Install_ssh_keys['root_ssh_keys_for_ceph'] -> File[$ssh_config]
Package[$server_package] -> Package[$client_package] -> Install_ssh_keys['root_ssh_keys_for_ceph']
}

View File

@@ -1,14 +1,15 @@
# configure yum repos for Ceph
class ceph::yum (
$release = 'cuttlefish'
)
{
yumrepo { 'ext-epel-6.8':
descr => 'External EPEL 6.8',
name => 'ext-epel-6.8',
baseurl => absent,
gpgcheck => '0',
gpgkey => absent,
mirrorlist => 'https://mirrors.fedoraproject.org/metalink?repo=epel-6&arch=$basearch',
name => 'ext-epel-6.8',
baseurl => absent,
gpgcheck => '0',
gpgkey => absent,
mirrorlist => 'https://mirrors.fedoraproject.org/metalink?repo=epel-6&arch=$basearch',
}
yumrepo { 'ext-ceph':
@@ -30,40 +31,40 @@ class ceph::yum (
}
#fuel repos
yumrepo { 'centos-base':
descr => 'Mirantis-CentOS-Base',
name => 'base',
baseurl => 'http://download.mirantis.com/centos-6.4',
gpgcheck => '1',
gpgkey => 'file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-6',
mirrorlist => absent,
}
yumrepo { 'openstack-epel-fuel-grizzly':
descr => 'Mirantis OpenStack grizzly Custom Packages',
baseurl => 'http://download.mirantis.com/epel-fuel-grizzly-3.1',
gpgcheck => '1',
gpgkey => 'http://download.mirantis.com/epel-fuel-grizzly-3.1/mirantis.key',
mirrorlist => absent,
}
# completely disable additional out-of-box repos
yumrepo { 'extras':
descr => 'CentOS-$releasever - Extras',
mirrorlist => 'http://mirrorlist.centos.org/?release=$releasever&arch=$basearch&repo=extras',
gpgcheck => '1',
baseurl => absent,
gpgkey => 'file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-6',
enabled => '0',
}
yumrepo { 'updates':
descr => 'CentOS-$releasever - Updates',
mirrorlist => 'http://mirrorlist.centos.org/?release=$releasever&arch=$basearch&repo=updates',
gpgcheck => '1',
baseurl => absent,
gpgkey => 'file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-6',
enabled => '0',
}
# Fuel repos
yumrepo { 'centos-base':
descr => 'Mirantis-CentOS-Base',
name => 'base',
baseurl => 'http://download.mirantis.com/centos-6.4',
gpgcheck => '1',
gpgkey => 'file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-6',
mirrorlist => absent,
}
}
yumrepo { 'openstack-epel-fuel-grizzly':
descr => 'Mirantis OpenStack grizzly Custom Packages',
baseurl => 'http://download.mirantis.com/epel-fuel-grizzly-3.1',
gpgcheck => '1',
gpgkey => 'http://download.mirantis.com/epel-fuel-grizzly-3.1/mirantis.key',
mirrorlist => absent,
}
# completely disable additional out-of-box repos
yumrepo { 'extras':
descr => 'CentOS-$releasever - Extras',
mirrorlist => 'http://mirrorlist.centos.org/?release=$releasever&arch=$basearch&repo=extras',
gpgcheck => '1',
baseurl => absent,
gpgkey => 'file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-6',
enabled => '0',
}
yumrepo { 'updates':
descr => 'CentOS-$releasever - Updates',
mirrorlist => 'http://mirrorlist.centos.org/?release=$releasever&arch=$basearch&repo=updates',
gpgcheck => '1',
baseurl => absent,
gpgkey => 'file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-6',
enabled => '0',
}
}

View File

@@ -1,14 +1,15 @@
FastCgiExternalServer /var/www/s3gw.fcgi -socket /tmp/radosgw.sock
Listen *:<%= @rgw_port %>
FastCgiExternalServer <%= @dir_httpd_root %>/s3gw.fcgi -socket /tmp/radosgw.sock
<VirtualHost *:80>
<VirtualHost *:<%= @rgw_port %>>
ServerName <%= @fqdn %>
DocumentRoot /var/www
DocumentRoot <%= @dir_httpd_root %>
RewriteEngine On
RewriteRule ^/([a-zA-Z0-9-_.]*)([/]?.*) /s3gw.fcgi?page=$1&params=$2&%{QUERY_STRING} [E=HTTP_AUTHORIZATION:%{HTTP:Authorization},L]
<IfModule mod_fastcgi.c>
<Directory /var/www>
<Directory <%= @dir_httpd_root %>>
Options +ExecCGI
AllowOverride All
SetHandler fastcgi-script

View File

@@ -15,8 +15,13 @@ Puppet::Type.type(:cinder_config).provide(
'='
end
def file_path
def self.file_path
'/etc/cinder/cinder.conf'
end
# added for backwards compatibility with older versions of inifile
def file_path
self.class.file_path
end
end

View File

@@ -9,10 +9,34 @@ Puppet::Type.newtype(:cinder_config) do
newproperty(:value) do
desc 'The value of the setting to be defined.'
munge do |v|
v.to_s.strip
munge do |value|
value = value.to_s.strip
value.capitalize! if value =~ /^(true|false)$/i
value
end
def is_to_s( currentvalue )
if resource.secret?
return '[old secret redacted]'
else
return currentvalue
end
end
def should_to_s( newvalue )
if resource.secret?
return '[new secret redacted]'
else
return newvalue
end
end
end
newparam(:secret, :boolean => true) do
desc 'Whether to hide the value from Puppet logs. Defaults to `false`.'
newvalues(:true, :false)
defaultto false
end
end
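
The munge above normalizes boolean-looking strings, so 'TRUE', 'true', and ' true ' all serialize as 'True' in the managed config file. A standalone Ruby sketch of the same transformation:

# Sketch of the munge: strip, then capitalize bare true/false values.
def munge_value(value)
  value = value.to_s.strip
  value.capitalize! if value =~ /^(true|false)$/i
  value
end

munge_value(' TRUE ')  # => "True"
munge_value('false')   # => "False"
munge_value('rbd')     # => "rbd" (non-boolean values pass through)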

View File

@@ -30,6 +30,7 @@ if $cinder_rate_limits {
Cinder_config<||> ~> Service['cinder-api']
Cinder_config<||> ~> Exec['cinder-manage db_sync']
Cinder_api_paste_ini<||> ~> Service['cinder-api']
Exec['cinder-manage db_sync'] -> Service['cinder-api']
if $enabled {
$ensure = 'running'

View File

@@ -30,6 +30,7 @@ class cinder::scheduler (
Cinder_config<||> ~> Service['cinder-scheduler']
Cinder_config<||> ~> Exec['cinder-manage db_sync']
Cinder_api_paste_ini<||> ~> Service['cinder-scheduler']
Exec['cinder-manage db_sync'] -> Service['cinder-scheduler']

View File

@@ -33,6 +33,7 @@ class cinder::volume (
Cinder_config<||> ~> Service['cinder-volume']
Cinder_config<||> ~> Exec['cinder-manage db_sync']
Cinder_api_paste_ini<||> ~> Service['cinder-volume']
Exec['cinder-manage db_sync'] -> Service['cinder-volume']
if $enabled {
$ensure = 'running'

View File

@@ -0,0 +1,55 @@
class cinder::volume::ceph (
$volume_driver = $::ceph::volume_driver,
$rbd_pool = $::ceph::rbd_pool,
$glance_api_version = $::ceph::glance_api_version,
$rbd_user = $::ceph::rbd_user,
$rbd_secret_uuid = $::ceph::rbd_secret_uuid,
) {
require ::ceph
Exec { path => [ '/bin/', '/sbin/' , '/usr/bin/', '/usr/sbin/' ],
cwd => '/root',
}
Cinder_config<||> ~> Service['cinder-volume']
File_line<||> ~> Service['cinder-volume']
# TODO: this needs to be re-worked to follow https://wiki.openstack.org/wiki/Cinder-multi-backend
cinder_config {
'DEFAULT/volume_driver': value => $volume_driver;
'DEFAULT/rbd_pool': value => $rbd_pool;
'DEFAULT/glance_api_version': value => $glance_api_version;
'DEFAULT/rbd_user': value => $rbd_user;
'DEFAULT/rbd_secret_uuid': value => $rbd_secret_uuid;
}
# TODO: convert to cinder params
file {$::ceph::params::service_cinder_volume_opts:
ensure => 'present',
} -> file_line {'cinder-volume.conf':
path => $::ceph::params::service_cinder_volume_opts,
line => "export CEPH_ARGS='--id ${::ceph::cinder_pool}'",
}
exec {'Create Cinder Ceph client ACL':
# DO NOT SPLIT ceph auth command lines! See http://tracker.ceph.com/issues/3279
command => "ceph auth get-or-create client.${::ceph::cinder_pool} mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=${::ceph::cinder_pool}, allow rx pool=${::ceph::glance_pool}'",
logoutput => true,
}
$cinder_keyring = "/etc/ceph/ceph.client.${::ceph::cinder_pool}.keyring"
exec {'Create keys for the Cinder pool':
command => "ceph auth get-or-create client.${::ceph::cinder_pool} > ${cinder_keyring}",
before => File[$cinder_keyring],
creates => $cinder_keyring,
require => Exec['Create Cinder Ceph client ACL'],
notify => Service['cinder-volume'],
returns => 0,
}
file {$cinder_keyring:
owner => cinder,
group => cinder,
require => Exec['Create keys for the Cinder pool'],
mode => '0600',
}
}

View File

@@ -426,6 +426,13 @@ class PreseedPManager(object):
return self._early.append(command)
return self._early
def _getlabel(self, label):
if not label:
return ""
# XFS will refuse to format a partition if the
# disk label is > 12 characters.
return " -L {0} ".format(label[:12])
def pcount(self, disk_id, increment=0):
if ((self._pcount.get(disk_id, 0) == 0 and increment == 1) or
(self._pcount.get(disk_id, 0) >= 5)):
@@ -472,7 +479,7 @@ class PreseedPManager(object):
self.recipe("1 1 -1 linux-swap method{ swap } format{ } .")
self.late("sed -i /$(blkid -s UUID -o value {0}7)/d /target/etc/fstab".format(self.disks[0]))
self.late("swapoff {0}7".format(self.disks[0]))
self.late("parted {0} rm 7".format(self.disks[0]))
self.late("parted {0} rm 7".format(self.disks[0]), True)
def _parttype(self, n):
if n == 1:
@@ -489,32 +496,33 @@ class PreseedPManager(object):
pcount = self.pcount("/dev/%s" % disk["name"], 1)
tabmount = part["mount"] if part["mount"] != "swap" else "none"
if pcount == 1:
self.late("parted -s /dev/{0} mklabel msdos".format(disk["name"]))
self.late("parted -s /dev/{0} mklabel gpt".format(disk["name"]), True)
self.late("parted -a none -s /dev/{0} "
"unit {4} mkpart {1} {2} {3}".format(
disk["name"],
self._parttype(pcount),
self.psize("/dev/%s" % disk["name"]),
self.psize("/dev/%s" % disk["name"], part["size"] * self.factor),
self.unit))
self.unit), True)
if pcount == 1:
self.late("parted -a none -s /dev/{0} unit {1} "
"mkpart extended {2} {3}".format(
disk["name"],
self.unit,
end_size,
disk["size"]))
disk["size"]), True)
self.late("hdparm -z /dev/{0}".format(disk["name"]))
if not part.get("file_system", "xfs") in ("swap", None, "none"):
self.late("mkfs.{0} $(basename `readlink -f /dev/{1}`)"
"{2}".format(part.get("file_system", "xfs"),
disk["name"], pcount))
disk_label = self._getlabel(part.get("disk_label"))
self.late("mkfs.{0} -f $(readlink -f /dev/{1})"
"{2} {3}".format(part.get("file_system", "xfs"),
disk["name"], pcount, disk_label))
if not part["mount"] in (None, "none", "swap"):
self.late("mkdir -p /target{0}".format(part["mount"]))
if not part["mount"] in (None, "none"):
self.late("echo 'UUID=$(blkid -s UUID -o value "
"$(basename `readlink -f /dev/{0}`){1}) "
"$(readlink -f /dev/{0}){1}) "
"{2} {3} {4} 0 0'"
" >> /target/etc/fstab"
"".format(
@@ -534,21 +542,21 @@ class PreseedPManager(object):
begin_size = self.psize("/dev/%s" % disk["name"])
end_size = self.psize("/dev/%s" % disk["name"], pv["size"] * self.factor)
if pcount == 1:
self.late("parted -s /dev/{0} mklabel msdos".format(disk["name"]))
self.late("parted -s /dev/{0} mklabel gpt".format(disk["name"]), True)
self.late("parted -a none -s /dev/{0} "
"unit {4} mkpart {1} {2} {3}".format(
disk["name"],
self._parttype(pcount),
begin_size,
end_size,
self.unit))
self.unit), True)
if pcount == 1:
self.late("parted -a none -s /dev/{0} unit {1} "
"mkpart extended {2} {3}".format(
disk["name"],
self.unit,
end_size,
disk["size"]))
disk["size"]), True)
self.late("hdparm -z /dev/{0}".format(disk["name"]))
self.late("pvcreate /dev/{0}{1}".format(disk["name"], pcount))
if not devices_dict.get(pv["vg"]):

View File

@@ -15,8 +15,13 @@ Puppet::Type.type(:glance_api_config).provide(
'='
end
def file_path
def self.file_path
'/etc/glance/glance-api.conf'
end
# added for backwards compatibility with older versions of inifile
def file_path
self.class.file_path
end
end

View File

@@ -9,10 +9,11 @@ Puppet::Type.newtype(:glance_api_config) do
newproperty(:value) do
desc 'The value of the setting to be defined.'
munge do |v|
v.to_s.strip
munge do |value|
value = value.to_s.strip
value.capitalize! if value =~ /^(true|false)$/i
value
end
end
end

View File

@@ -6,14 +6,43 @@ class glance::backend::ceph(
$show_image_direct_url = $::ceph::show_image_direct_url,
) inherits glance::api {
require ::ceph
Exec { path => [ '/bin/', '/sbin/' , '/usr/bin/', '/usr/sbin/' ],
cwd => '/root',
}
package {'python-ceph':
ensure => latest,
}
glance_api_config {
'DEFAULT/default_store': value => $default_store;
'DEFAULT/rbd_store_user': value => $rbd_store_user;
'DEFAULT/rbd_store_pool': value => $rbd_store_pool;
'DEFAULT/show_image_direct_url': value => $show_image_direct_url;
'DEFAULT/default_store': value => $default_store;
'DEFAULT/rbd_store_user': value => $rbd_store_user;
'DEFAULT/rbd_store_pool': value => $rbd_store_pool;
'DEFAULT/show_image_direct_url': value => $show_image_direct_url;
}~> Service[$::ceph::params::service_glance_api]
exec {'Create Glance Ceph client ACL':
# DO NOT SPLIT ceph auth command lines! See http://tracker.ceph.com/issues/3279
command => "ceph auth get-or-create client.${::ceph::glance_pool} mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=${::ceph::glance_pool}'",
logoutput => true,
}
$glance_keyring = "/etc/ceph/ceph.client.${::ceph::glance_pool}.keyring"
exec {'Create keys for the Glance pool':
command => "ceph auth get-or-create client.${::ceph::glance_pool} > ${$glance_keyring}",
before => File[$glance_keyring],
creates => $glance_keyring,
require => Exec['Create Glance Ceph client ACL'],
notify => Service["${::ceph::params::service_glance_api}"],
returns => 0,
}
file {$glance_keyring:
owner => glance,
group => glance,
require => Exec['Create keys for the Glance pool'],
mode => '0600',
}
}

View File

@@ -187,17 +187,6 @@ class horizon(
],
before => Service['httpd'],
}
if $use_syslog {
file {'/etc/httpd/conf.d/openstack-dashboard.conf':
ensure => present,
} ->
file_line { "enable_syslog":
path => "/etc/httpd/conf.d/openstack-dashboard.conf",
line => 'ErrorLog syslog:local1',
before => Service['httpd'],
require => [Package["$::horizon::params::http_service", "$::horizon::params::http_modwsgi"]],
}
}
}
'Debian': {
A2mod {

View File

@@ -6,13 +6,13 @@ module Util
class IniFile
def section_regex
/^\s*\[([\w\d\.\\\/\-\:]+)\]\s*$/
/^\s*\[([\w\d\.\\\/\-\:]+)\]\s*$/
end
def setting_regex
/^(\s*)([\w\d\.\\\/\-]+)(\s*=\s*)([\S\s]*\S)\s*$/
/^(\s*)([\w\d\.\\\/\-\s]*[\w\d\.\\\/\-])([ \t]*=[ \t]*)([\S\s]*?)\s*$/
end
def commented_setting_regex
/^(\s*)[#;]+(\s*)([\w\d\.\\\/\-]+)(\s*=\s*)([\S\s]*\S)\s*$/
/^(\s*)[#;]+(\s*)([\w\d\.\\\/\-]+)([ \t]*=[ \t]*)([\S\s]*?)\s*$/
end
def initialize(path, key_val_separator = ' = ')
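
The widened setting_regex matters for ceph.conf, whose option names can contain spaces ('osd pool default size', 'nss db path' appear elsewhere in this commit); the old pattern only allowed unbroken tokens before '='. A quick Ruby check using the two patterns copied from above:

old_re = /^(\s*)([\w\d\.\\\/\-]+)(\s*=\s*)([\S\s]*\S)\s*$/
new_re = /^(\s*)([\w\d\.\\\/\-\s]*[\w\d\.\\\/\-])([ \t]*=[ \t]*)([\S\s]*?)\s*$/

line = 'osd journal size = 2048'
p old_re.match(line)      # => nil -- keys with spaces never matched
p new_re.match(line)[2]   # => "osd journal size"
p new_re.match(line)[4]   # => "2048"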

View File

@@ -101,10 +101,17 @@ class openstack::cinder(
package_ensure => $::openstack_version['cinder'],
enabled => true,
}
class { 'cinder::volume::iscsi':
iscsi_ip_address => $iscsi_bind_host,
physical_volume => $physical_volume,
volume_group => $volume_group,
case $manage_volumes {
true, 'iscsi': {
class { 'cinder::volume::iscsi':
iscsi_ip_address => $iscsi_bind_host,
physical_volume => $physical_volume,
volume_group => $volume_group,
}
}
'ceph': {
class {'cinder::volume::ceph': }
}
}
}
}

View File

@@ -94,6 +94,21 @@ if $::fuel_settings['nodes'] {
$verbose = $::fuel_settings['verbose']
$debug = $::fuel_settings['debug']
### Storage Settings ###
# Determine if any ceph parts have been asked for.
# This ensures that monitors are set up on controllers, even if there are
# no ceph-osd roles in the deployment
if (filter_nodes($::fuel_settings['nodes'], 'role', 'ceph-osd') or
$::fuel_settings['storage']['volumes_ceph'] or
$::fuel_settings['storage']['images_ceph'] or
$::fuel_settings['storage']['objects_ceph']
) {
$use_ceph = true
} else {
$use_ceph = false
}
### Syslog ###
# Enable error messages reporting to rsyslog. Rsyslog must be installed in this case.

View File

@@ -35,6 +35,7 @@ $cinder_hash = $::fuel_settings['cinder']
$access_hash = $::fuel_settings['access']
$nodes_hash = $::fuel_settings['nodes']
$mp_hash = $::fuel_settings['mp']
$storage_hash = $::fuel_settings['storage']
$network_manager = "nova.network.manager.${novanetwork_params['network_manager']}"
if !$rabbit_hash['user'] {
@@ -47,12 +48,6 @@ if ! $::use_quantum {
}
$floating_hash = {}
if !$::fuel_settings['swift_partition'] {
$swift_partition = '/var/lib/glance/node'
}
##CALCULATED PARAMETERS
@@ -107,25 +102,72 @@ $controller_nodes = sort(values($controller_internal_addresses))
$controller_node_public = $::fuel_settings['public_vip']
$controller_node_address = $::fuel_settings['management_vip']
$mountpoints = filter_hash($mp_hash,'point')
$swift_proxies = $controller_storage_addresses
$quantum_metadata_proxy_shared_secret = $quantum_params['metadata_proxy_shared_secret']
$quantum_gre_bind_addr = $::internal_address
$swift_local_net_ip = $::storage_address
$cinder_iscsi_bind_addr = $::storage_address
#TODO: awoodward fix static $use_ceph
if ($::use_ceph) {
$primary_mons = filter_nodes($nodes_hash,'role','primary-controller')
$primary_mon = $primary_mons[0]['name']
# Determine who should get the volume service
if ($::fuel_settings['role'] == 'cinder' or
$storage_hash['volumes_lvm']
) {
$manage_volumes = 'iscsi'
} elsif ($storage_hash['volumes_ceph']) {
$manage_volumes = 'ceph'
} else {
$manage_volumes = false
}
}
#Determine who should be the default backend
if ($storage_hash['images_ceph']) {
$glance_backend = 'ceph'
class {'ceph':
primary_mon => $primary_mon,
cluster_node_address => $controller_node_address,
}
} else {
$glance_backend = 'swift'
}
if ($use_ceph) {
$primary_mons = $controllers
$primary_mon = $controllers[0]['name']
class {'ceph':
primary_mon => $primary_mon,
cluster_node_address => $controller_node_public,
use_rgw => $storage_hash['objects_ceph'],
use_ssl => false,
glance_backend => $glance_backend,
}
}
#Test to determine if swift should be enabled
if ($storage_hash['objects_swift'] or
! $storage_hash['images_ceph']
) {
$use_swift = true
} else {
$use_swift = false
}
if ($use_swift){
if !$::fuel_settings['swift_partition'] {
$swift_partition = '/var/lib/glance/node'
}
$swift_proxies = $controller_storage_addresses
$swift_local_net_ip = $::storage_address
$master_swift_proxy_nodes = filter_nodes($nodes_hash,'role','primary-controller')
$master_swift_proxy_ip = $master_swift_proxy_nodes[0]['internal_address']
#$master_hostname = $master_swift_proxy_nodes[0]['name']
$swift_loopback = false
if $::fuel_settings['role'] == 'primary-controller' {
$primary_proxy = true
} else {
$primary_proxy = false
}
}
$network_config = {
'vlan_start' => $vlan_start,
}
@@ -140,29 +182,16 @@ if !$::fuel_settings['debug']
$debug = false
}
if $::fuel_settings['role'] == 'primary-controller' {
$primary_proxy = true
} else {
$primary_proxy = false
}
if $::fuel_settings['role'] == 'primary-controller' {
$primary_controller = true
} else {
$primary_controller = false
}
$master_swift_proxy_nodes = filter_nodes($nodes_hash,'role','primary-controller')
$master_swift_proxy_ip = $master_swift_proxy_nodes[0]['internal_address']
#$master_hostname = $master_swift_proxy_nodes[0]['name']
#HARDCODED PARAMETERS
$multi_host = true
$manage_volumes = false
#Moved to CEPH if block
#$glance_backend = 'swift'
$quantum_netnode_on_cnt = true
$swift_loopback = false
$mirror_type = 'external'
Exec { logoutput => true }
@@ -227,7 +256,7 @@ class compact_controller (
cinder_iscsi_bind_addr => $cinder_iscsi_bind_addr,
cinder_db_password => $cinder_hash[db_password],
cinder_volume_group => "cinder",
manage_volumes => $is_cinder_node,
manage_volumes => $manage_volumes,
galera_nodes => $controller_nodes,
custom_mysql_setup_class => $custom_mysql_setup_class,
mysql_skip_name_resolve => true,
@@ -245,14 +274,6 @@ class compact_controller (
}
class { 'swift::keystone::auth':
password => $swift_hash[user_password],
public_address => $::fuel_settings['public_vip'],
internal_address => $::fuel_settings['management_vip'],
admin_address => $::fuel_settings['management_vip'],
}
}
class virtual_ips () {
cluster::virtual_ips { $vip_keys:
vips => $vips,
@@ -265,67 +286,74 @@ class virtual_ips () {
/controller/ : {
include osnailyfacter::test_controller
$swift_zone = $node[0]['swift_zone']
class { '::cluster': stage => 'corosync_setup' } ->
class { 'virtual_ips':
stage => 'corosync_setup'
}
include ::haproxy::params
class { 'cluster::haproxy':
global_options => merge($::haproxy::params::global_options, {'log' => "/dev/log local0"}),
defaults_options => merge($::haproxy::params::defaults_options, {'mode' => 'http'}),
stage => 'cluster_head',
}
class { '::cluster': stage => 'corosync_setup' } ->
class { 'virtual_ips':
stage => 'corosync_setup'
}
include ::haproxy::params
class { 'cluster::haproxy':
global_options => merge($::haproxy::params::global_options, {'log' => "/dev/log local0"}),
defaults_options => merge($::haproxy::params::defaults_options, {'mode' => 'http'}),
stage => 'cluster_head',
}
class { compact_controller: }
class { 'openstack::swift::storage_node':
storage_type => $swift_loopback,
loopback_size => '5243780',
storage_mnt_base_dir => $swift_partition,
storage_devices => $mountpoints,
swift_zone => $swift_zone,
swift_local_net_ip => $storage_address,
master_swift_proxy_ip => $master_swift_proxy_ip,
sync_rings => ! $primary_proxy,
syslog_log_level => $syslog_log_level,
debug => $debug ? { 'true' => true, true => true, default=> false },
verbose => $verbose ? { 'true' => true, true => true, default=> false },
}
if $primary_proxy {
ring_devices {'all': storages => $controllers }
}
class { 'openstack::swift::proxy':
swift_user_password => $swift_hash[user_password],
swift_proxies => $controller_internal_addresses,
primary_proxy => $primary_proxy,
controller_node_address => $::fuel_settings['management_vip'],
swift_local_net_ip => $swift_local_net_ip,
master_swift_proxy_ip => $master_swift_proxy_ip,
syslog_log_level => $syslog_log_level,
debug => $debug ? { 'true' => true, true => true, default=> false },
verbose => $verbose ? { 'true' => true, true => true, default=> false },
if ($use_swift) {
$swift_zone = $node[0]['swift_zone']
class { 'openstack::swift::storage_node':
storage_type => $swift_loopback,
loopback_size => '5243780',
storage_mnt_base_dir => $swift_partition,
storage_devices => $mountpoints,
swift_zone => $swift_zone,
swift_local_net_ip => $storage_address,
master_swift_proxy_ip => $master_swift_proxy_ip,
sync_rings => ! $primary_proxy,
syslog_log_level => $syslog_log_level,
debug => $debug ? { 'true' => true, true => true, default=> false },
verbose => $verbose ? { 'true' => true, true => true, default=> false },
}
if $primary_proxy {
ring_devices {'all': storages => $controllers }
}
class { 'openstack::swift::proxy':
swift_user_password => $swift_hash[user_password],
swift_proxies => $controller_internal_addresses,
primary_proxy => $primary_proxy,
controller_node_address => $::fuel_settings['management_vip'],
swift_local_net_ip => $swift_local_net_ip,
master_swift_proxy_ip => $master_swift_proxy_ip,
syslog_log_level => $syslog_log_level,
debug => $debug ? { 'true' => true, true => true, default=> false },
verbose => $verbose ? { 'true' => true, true => true, default=> false },
}
if $storage_hash['objects_swift'] {
class { 'swift::keystone::auth':
password => $swift_hash[user_password],
public_address => $::fuel_settings['public_vip'],
internal_address => $::fuel_settings['management_vip'],
admin_address => $::fuel_settings['management_vip'],
}
}
}
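The keystone endpoint for Swift is now registered only when objects_swift is set in the storage hash, and the whole storage/proxy block is gated on $use_swift, which is derived elsewhere in the manifest. A plausible derivation, shown purely as a hedged sketch (the exact expression is not part of this hunk):
# Hypothetical sketch: deriving $use_swift from the storage settings hash.
$use_swift = $storage_hash['objects_swift'] ? {
  true    => true,
  default => false,
}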
#TODO: PUT this configuration stanza into nova class
nova_config { 'DEFAULT/start_guests_on_host_boot': value => $::fuel_settings['start_guests_on_host_boot'] }
nova_config { 'DEFAULT/use_cow_images': value => $::fuel_settings['use_cow_images'] }
nova_config { 'DEFAULT/compute_scheduler_driver': value => $::fuel_settings['compute_scheduler_driver'] }
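As the TODO above says, this stanza belongs inside a nova class; a minimal sketch of such a wrapper (the class name and parameter plumbing are hypothetical, not part of this commit):
# Hypothetical wrapper class for the nova_config overrides above.
class osnailyfacter::nova_overrides (
  $start_guests_on_host_boot = $::fuel_settings['start_guests_on_host_boot'],
  $use_cow_images            = $::fuel_settings['use_cow_images'],
  $compute_scheduler_driver  = $::fuel_settings['compute_scheduler_driver'],
) {
  nova_config {
    'DEFAULT/start_guests_on_host_boot': value => $start_guests_on_host_boot;
    'DEFAULT/use_cow_images':            value => $use_cow_images;
    'DEFAULT/compute_scheduler_driver':  value => $compute_scheduler_driver;
  }
}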
#TODO: fix this so it doesn't break ceph
if $::hostname == $::fuel_settings['last_controller'] {
class { 'openstack::img::cirros':
os_username => shellescape($access_hash[user]),
os_password => shellescape($access_hash[password]),
os_tenant_name => shellescape($access_hash[tenant]),
os_auth_url => "http://${::fuel_settings['management_vip']}:5000/v2.0/",
img_name => "TestVM",
stage => 'glance-image',
if !($::use_ceph) {
if $::hostname == $::fuel_settings['last_controller'] {
class { 'openstack::img::cirros':
os_username => shellescape($access_hash[user]),
os_password => shellescape($access_hash[password]),
os_tenant_name => shellescape($access_hash[tenant]),
os_auth_url => "http://${::fuel_settings['management_vip']}:5000/v2.0/",
img_name => "TestVM",
stage => 'glance-image',
}
}
Class[glance::api] -> Class[openstack::img::cirros]
Class[openstack::swift::storage_node] -> Class[openstack::img::cirros]
Class[openstack::swift::proxy] -> Class[openstack::img::cirros]
Service[swift-proxy] -> Class[openstack::img::cirros]
}
if ! $::use_quantum {
nova_floating_range{ $floating_ips_range:
@ -339,10 +367,8 @@ class virtual_ips () {
}
Class[nova::api] -> Nova_floating_range <| |>
}
if defined(Class['ceph']){
Class['openstack::controller'] -> Class['ceph::glance']
Class['glance::api'] -> Class['ceph::glance']
Class['openstack::controller'] -> Class['ceph::cinder']
if ($use_ceph){
Class['openstack::controller'] -> Class['ceph']
}
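Note the guard change here: the ordering constraint is now keyed off the use_ceph flag instead of defined(Class['ceph']), which in Puppet is parse-order dependent and can silently skip the constraint. A short contrast, illustrative only:
# Parse-order dependent: true only if Class['ceph'] was already declared
# at this point in catalog compilation.
if defined(Class['ceph']) {
  Class['openstack::controller'] -> Class['ceph']
}
# Deterministic: keyed off the same flag that controls declaring the class.
if $use_ceph {
  Class['openstack::controller'] -> Class['ceph']
}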
#ADDONS START
@ -410,7 +436,7 @@ class virtual_ips () {
verbose => $verbose ? { 'true' => true, true => true, default => false },
cinder_volume_group => "cinder",
vnc_enabled => true,
manage_volumes => $cinder ? { false => $manage_volumes, default =>$is_cinder_node },
manage_volumes => $manage_volumes,
nova_user_password => $nova_hash[user_password],
cache_server_ip => $controller_nodes,
service_endpoint => $::fuel_settings['management_vip'],
@ -430,7 +456,7 @@ class virtual_ips () {
state_path => $nova_hash[state_path],
}
if defined(Class['ceph']){
if ($::use_ceph){
Class['openstack::compute'] -> Class['ceph']
}
@ -467,7 +493,7 @@ class virtual_ips () {
rabbit_host => false,
rabbit_nodes => $::fuel_settings['management_vip'],
volume_group => 'cinder',
manage_volumes => true,
manage_volumes => $manage_volumes,
enabled => true,
auth_host => $::fuel_settings['management_vip'],
iscsi_bind_host => $storage_address,

View File

@ -32,6 +32,7 @@ $swift_hash = $::fuel_settings['swift']
$cinder_hash = $::fuel_settings['cinder']
$access_hash = $::fuel_settings['access']
$nodes_hash = $::fuel_settings['nodes']
$storage_hash = $::fuel_settings['storage']
$vlan_start = $novanetwork_params['vlan_start']
$network_manager = "nova.network.manager.${novanetwork_params['network_manager']}"
$network_size = $novanetwork_params['network_size']
@ -105,19 +106,37 @@ if !$::fuel_settings['debug']
$debug = false
}
#TODO: awoodward fix static $use_ceph
if ($::use_ceph) {
$primary_mons = $controller
$primary_mon = $controller[0]['name']
# Determine who should get the volume service
if ($::fuel_settings['role'] == 'cinder' or
$storage_hash['volumes_lvm']
) {
$manage_volumes = 'iscsi'
} elsif ($storage_hash['volumes_ceph']) {
$manage_volumes = 'ceph'
} else {
$manage_volumes = false
}
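The branch above resolves $manage_volumes to 'iscsi', 'ceph', or false. A standalone smoke test of the same logic, runnable with puppet apply (the input values are made up for illustration):
# Illustrative smoke test of the $manage_volumes selection logic.
$test_storage = { 'volumes_lvm' => false, 'volumes_ceph' => true }
$test_role    = 'controller'
if ($test_role == 'cinder' or $test_storage['volumes_lvm']) {
  $test_manage_volumes = 'iscsi'
} elsif ($test_storage['volumes_ceph']) {
  $test_manage_volumes = 'ceph'
} else {
  $test_manage_volumes = false
}
notice("manage_volumes = ${test_manage_volumes}")  # -> ceph for these inputs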
#Determine who should be the default backend
if ($storage_hash['images_ceph']) {
$glance_backend = 'ceph'
class {'ceph':
primary_mon => $primary_mon,
cluster_node_address => $controller_node_address,
}
} else {
$glance_backend = 'file'
}
if ($use_ceph) {
$primary_mons = $controller
$primary_mon = $controller[0]['name']
class {'ceph':
primary_mon => $primary_mon,
cluster_node_address => $controller_node_public,
use_rgw => $storage_hash['objects_ceph'],
use_ssl => false,
glance_backend => $glance_backend,
}
}
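With this change the ceph class takes the radosgw and backend decisions as parameters: use_rgw driven by objects_ceph, plus the $glance_backend computed above. How the class consumes use_rgw internally is not shown in this diff; a hedged sketch of one plausible shape (the subclass name and body are assumptions):
# Hypothetical internals: how a use_rgw toggle might be consumed.
class ceph (
  $primary_mon,
  $cluster_node_address,
  $use_rgw        = false,
  $use_ssl        = false,
  $glance_backend = 'ceph',
) {
  if $use_rgw {
    include ceph::radosgw   # assumed subclass providing the RADOS gateway
  }
}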
case $::fuel_settings['role'] {
"controller" : {
include osnailyfacter::test_controller
@ -173,7 +192,7 @@ if ($::use_ceph) {
cinder_db_password => $cinder_hash[db_password],
cinder_iscsi_bind_addr => $cinder_iscsi_bind_addr,
cinder_volume_group => "cinder",
manage_volumes => $::fuel_settings['cinder'] ? { false => $manage_volumes, default =>$is_cinder_node },
manage_volumes => $manage_volumes,
use_syslog => true,
syslog_log_level => $syslog_log_level,
syslog_log_facility_glance => $syslog_log_facility_glance,
@ -247,14 +266,16 @@ if ($::use_ceph) {
# require => Class[glance::api],
# }
#TODO: fix this so it doesn't break ceph
class { 'openstack::img::cirros':
os_username => shellescape($access_hash[user]),
os_password => shellescape($access_hash[password]),
os_tenant_name => shellescape($access_hash[tenant]),
img_name => "TestVM",
stage => 'glance-image',
if !($use_ceph) {
class { 'openstack::img::cirros':
os_username => shellescape($access_hash[user]),
os_password => shellescape($access_hash[password]),
os_tenant_name => shellescape($access_hash[tenant]),
img_name => "TestVM",
stage => 'glance-image',
}
Class[glance::api] -> Class[openstack::img::cirros]
}
Class[glance::api] -> Class[openstack::img::cirros]
if !$::use_quantum {
nova_floating_range{ $floating_ips_range:
@ -269,10 +290,8 @@ if ($::use_ceph) {
Class[nova::api] -> Nova_floating_range <| |>
}
if defined(Class['ceph']){
Class['openstack::controller'] -> Class['ceph::glance']
Class['glance::api'] -> Class['ceph::glance']
Class['openstack::controller'] -> Class['ceph::cinder']
if ($use_ceph){
Class['openstack::controller'] -> Class['ceph']
}
#ADDONS START
@ -348,7 +367,7 @@ if ($::use_ceph) {
cinder_db_password => $cinder_hash[db_password],
cinder_iscsi_bind_addr => $cinder_iscsi_bind_addr,
cinder_volume_group => "cinder",
manage_volumes => $cinder ? { false => $manage_volumes, default =>$is_cinder_node },
manage_volumes => $manage_volumes,
db_host => $controller_node_address,
debug => $debug ? { 'true' => true, true => true, default => false },
verbose => $verbose ? { 'true' => true, true => true, default => false },
@ -365,7 +384,7 @@ if ($::use_ceph) {
nova_config { 'DEFAULT/use_cow_images': value => $::fuel_settings['use_cow_images'] }
nova_config { 'DEFAULT/compute_scheduler_driver': value => $::fuel_settings['compute_scheduler_driver'] }
if defined(Class['ceph']){
if ($use_ceph){
Class['openstack::compute'] -> Class['ceph']
}
}
@ -392,7 +411,7 @@ if ($::use_ceph) {
qpid_user => $rabbit_hash[user],
qpid_nodes => [$controller_node_address],
volume_group => 'cinder',
manage_volumes => true,
manage_volumes => $manage_volumes,
enabled => true,
bind_host => $bind_host,
auth_host => $controller_node_address,