Revert "Ceph: fix PG count number"
This reverts commit 81220692c3
.
Closes-Bug: #1517115
Change-Id: I67e1ec503650f5ad76a3e0471eb5508dee48248f
This commit is contained in:
parent
81220692c3
commit
f31ab77143
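For orientation: the reverted commit sized each Ceph pool from a per-pool map, storage_hash['per_pool_pg_nums'], while this revert restores a single flat value (storage_hash['pg_num'] or $osd_pool_default_pg_num) for every pool and drops the pre-creation of the ".rgw" pool. A minimal Puppet sketch of the two styles, using names from the diff below (the example values are assumptions taken from the test fixtures):

    # Per-pool sizing -- the approach this commit removes:
    $per_pool_pg_nums = $storage_hash['per_pool_pg_nums']
    ceph::pool { $glance_pool:
      pg_num  => $per_pool_pg_nums[$glance_pool],   # e.g. images: 256
      pgp_num => $per_pool_pg_nums[$glance_pool],
    }

    # Flat sizing -- the approach this commit restores:
    ceph::pool { $glance_pool:
      pg_num  => $storage_hash['pg_num'],           # e.g. 128
      pgp_num => $storage_hash['pg_num'],
    }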
@@ -24,7 +24,7 @@ class ceph (
   $cluster_network = undef,
   $public_network = undef,

-  # ceph.conf osd settings
+  #ceph.conf osd settings
   $osd_max_backfills = '1',
   $osd_recovery_max_active = '1',

@@ -57,9 +57,6 @@ class ceph (
   $rgw_adm_ip = $cluster_node_address,
   $rgw_int_ip = $cluster_node_address,

-  $rgw_large_pool_name = ".rgw",
-  $rgw_large_pool_pg_nums = 512,
-
   # Cinder settings
   $volume_driver = 'cinder.volume.drivers.rbd.RBDDriver',
   $glance_api_version = '2',

@@ -35,8 +35,6 @@ class ceph::radosgw (
   $rgw_keystone_accepted_roles = $::ceph::rgw_keystone_accepted_roles,
   $rgw_keystone_revocation_interval = $::ceph::rgw_keystone_revocation_interval,
   $rgw_nss_db_path = $::ceph::rgw_nss_db_path,
-  $rgw_large_pool_name = $::ceph::rgw_large_pool_name,
-  $rgw_large_pool_pg_nums = $::ceph::rgw_large_pool_pg_nums,
   $pub_ip = $::ceph::rgw_pub_ip,
   $adm_ip = $::ceph::rgw_adm_ip,
   $int_ip = $::ceph::rgw_int_ip,

@@ -47,9 +45,9 @@ class ceph::radosgw (
   $syslog_level = $::ceph::syslog_log_level,
 ) {

-  $keyring_path = "/etc/ceph/keyring.${rgw_id}"
-  $radosgw_auth_key = "client.${rgw_id}"
-  $dir_httpd_root = '/var/www/radosgw'
+  $keyring_path = "/etc/ceph/keyring.${rgw_id}"
+  $radosgw_auth_key = "client.${rgw_id}"
+  $dir_httpd_root = '/var/www/radosgw'

   package { [$::ceph::params::package_radosgw,
     $::ceph::params::package_fastcgi,

@@ -216,11 +214,6 @@ class ceph::radosgw (
     creates => $keyring_path
   }

-  exec { "Create ${rgw_large_pool_name} pool":
-    command => "ceph -n ${radosgw_auth_key} osd pool create ${rgw_large_pool_name} ${rgw_large_pool_pg_nums} ${rgw_large_pool_pg_nums}",
-    unless  => "rados lspools | grep '^${rgw_large_pool_name}$'",
-  }
-
   file { $keyring_path: mode => '0640', }

   Ceph_conf <||> ->

@@ -240,7 +233,6 @@ class ceph::radosgw (
   Exec["ceph create ${radosgw_auth_key}"] ->
   Exec["Populate ${radosgw_auth_key} keyring"] ->
   File[$keyring_path] ->
-  Exec["Create ${rgw_large_pool_name} pool"] ->
   Firewall['012 RadosGW allow'] ~>
   Service ['httpd']
 }

@@ -44,8 +44,6 @@ if $use_ceph {
   $ceph_cluster_network = get_network_role_property('ceph/replication', 'network')
   $ceph_public_network  = get_network_role_property('ceph/public', 'network')

-  $per_pool_pg_nums = $storage_hash['per_pool_pg_nums']
-
   class {'ceph':
     primary_mon => $primary_mon,
     mon_hosts   => keys($mon_address_map),

@@ -75,8 +73,8 @@ if $use_ceph {
     user          => $compute_user,
     acl           => "mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=${cinder_pool}, allow rx pool=${glance_pool}, allow rwx pool=${compute_pool}'",
     keyring_owner => 'nova',
-    pg_num        => $per_pool_pg_nums[$compute_pool],
-    pgp_num       => $per_pool_pg_nums[$compute_pool],
+    pg_num        => $storage_hash['pg_num'],
+    pgp_num       => $storage_hash['pg_num'],
   }

   include ceph::nova_compute

@@ -18,31 +18,29 @@ Exec { path => [ '/bin/', '/sbin/' , '/usr/bin/', '/usr/sbin/' ],
        cwd  => '/root',
 }

-$per_pool_pg_nums = $storage_hash['per_pool_pg_nums']
-
 # DO NOT SPLIT ceph auth command lines! See http://tracker.ceph.com/issues/3279
 ceph::pool {$glance_pool:
   user          => $glance_user,
   acl           => "mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=${glance_pool}'",
   keyring_owner => 'glance',
-  pg_num        => $per_pool_pg_nums[$glance_pool],
-  pgp_num       => $per_pool_pg_nums[$glance_pool],
+  pg_num        => $osd_pool_default_pg_num,
+  pgp_num       => $osd_pool_default_pg_num,
 }

 ceph::pool {$cinder_pool:
   user          => $cinder_user,
   acl           => "mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=${cinder_pool}, allow rx pool=${glance_pool}'",
   keyring_owner => 'cinder',
-  pg_num        => $per_pool_pg_nums[$cinder_pool],
-  pgp_num       => $per_pool_pg_nums[$cinder_pool],
+  pg_num        => $osd_pool_default_pg_num,
+  pgp_num       => $osd_pool_default_pg_num,
 }

 ceph::pool {$cinder_backup_pool:
   user          => $cinder_backup_user,
   acl           => "mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=${cinder_backup_pool}, allow rx pool=${cinder_pool}'",
   keyring_owner => 'cinder',
-  pg_num        => $per_pool_pg_nums[$cinder_backup_pool],
-  pgp_num       => $per_pool_pg_nums[$cinder_backup_pool],
+  pg_num        => $osd_pool_default_pg_num,
+  pgp_num       => $osd_pool_default_pg_num,
 }

 Ceph::Pool[$glance_pool] -> Ceph::Pool[$cinder_pool] -> Ceph::Pool[$cinder_backup_pool]

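The ceph::pool define invoked above is not part of this diff. As a hedged sketch of the interface these call sites assume (parameter names mirror the call sites; the body is a guess modeled on the exec pattern visible in the radosgw hunk, not the actual fuel-library implementation):

    define ceph::pool (
      $user          = $name,
      $acl           = undef,
      $keyring_owner = $user,
      $pg_num        = 64,   # placement groups; the value this commit stops tuning per pool
      $pgp_num       = 64,   # PGs for placement; conventionally kept equal to pg_num
    ) {
      # Idempotent create: skip if the pool already exists.
      exec { "create pool ${name}":
        command => "ceph osd pool create ${name} ${pg_num} ${pgp_num}",
        unless  => "rados lspools | grep '^${name}$'",
        path    => ['/bin', '/usr/bin'],
      }
    }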
@@ -7,7 +7,6 @@ $keystone_hash = hiera('keystone', {})
 $management_vip = hiera('management_vip')
 $service_endpoint = hiera('service_endpoint')
 $public_ssl_hash = hiera('public_ssl')
-$radosgw_large_pool_name = ".rgw"
 $mon_address_map = get_node_to_ipaddr_map_by_network_role(hiera_hash('ceph_monitor_nodes'), 'ceph/public')

 if ($storage_hash['volumes_ceph'] or

@@ -91,8 +90,6 @@ if $use_ceph and $storage_hash['objects_ceph'] {
     rgw_keystone_accepted_roles      => '_member_, Member, admin, swiftoperator',
     rgw_keystone_revocation_interval => '1000000',
     rgw_nss_db_path                  => '/etc/ceph/nss',
-    rgw_large_pool_name              => $radosgw_large_pool_name,
-    rgw_large_pool_pg_nums           => $storage_hash[$radosgw_large_pool_name],

     #rgw Log settings
     use_syslog => hiera('use_syslog', true),

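Note the removed rgw_large_pool_pg_nums line indexes the storage hash directly by the pool name (".rgw"); in the fixtures below that key sits under per_pool_pg_nums, so a defensive form of the same lookup might look like this (a hedged sketch: pick() is from puppetlabs-stdlib, and the final default of 128 is an assumption):

    $storage_hash            = hiera_hash('storage', {})
    $radosgw_large_pool_name = ".rgw"
    $per_pool_pg_nums        = $storage_hash['per_pool_pg_nums']
    # Prefer the per-pool entry; fall back to the flat pg_num, then 128.
    $rgw_large_pool_pg_nums  = pick($per_pool_pg_nums[$radosgw_large_pool_name],
                                    $storage_hash['pg_num'], 128)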
@@ -693,13 +693,6 @@ storage:
   objects_ceph: false
   osd_pool_size: '2'
   pg_num: 128
-  per_pool_pg_nums:
-    default_pg_num: 128
-    cinder_volume: 2048
-    compute: 1024
-    backups: 512
-    ".rgw": 512
-    images: 256
   volumes_ceph: false
   volumes_lvm: true
   storage_network_range: 192.168.1.0/24

@@ -667,13 +667,6 @@ storage:
   objects_ceph: true
   osd_pool_size: '2'
   pg_num: 256
-  per_pool_pg_nums:
-    default_pg_num: 256
-    cinder_volume: 2048
-    compute: 1024
-    backups: 512
-    ".rgw": 512
-    images: 256
   volumes_ceph: true
   volumes_lvm: false
   storage_network_range: 192.168.1.0/24

@@ -667,13 +667,6 @@ storage:
   objects_ceph: true
   osd_pool_size: '2'
   pg_num: 256
-  per_pool_pg_nums:
-    default_pg_num: 256
-    cinder_volume: 2048
-    compute: 1024
-    backups: 512
-    ".rgw": 512
-    images: 256
   volumes_ceph: true
   volumes_lvm: false
   storage_network_range: 192.168.1.0/24

@@ -688,13 +688,6 @@ storage:
   objects_ceph: true
   osd_pool_size: '2'
   pg_num: 256
-  per_pool_pg_nums:
-    default_pg_num: 256
-    cinder_volume: 2048
-    compute: 1024
-    backups: 512
-    ".rgw": 512
-    images: 256
   volumes_ceph: true
   volumes_lvm: false
   storage_network_range: 192.168.1.0/24

@@ -668,13 +668,6 @@ storage:
   objects_ceph: true
   osd_pool_size: '2'
   pg_num: 256
-  per_pool_pg_nums:
-    default_pg_num: 256
-    cinder_volume: 2048
-    compute: 1024
-    backups: 512
-    ".rgw": 512
-    images: 256
   volumes_ceph: true
   volumes_lvm: false
   storage_network_range: 192.168.1.0/24

@@ -658,13 +658,6 @@ storage:
   objects_ceph: false
   osd_pool_size: '2'
   pg_num: 128
-  per_pool_pg_nums:
-    default_pg_num: 128
-    cinder_volume: 2048
-    compute: 1024
-    backups: 512
-    ".rgw": 512
-    images: 256
   volumes_ceph: false
   volumes_lvm: true
   storage_network_range: 192.168.1.0/24

@@ -679,13 +679,6 @@ storage:
   objects_ceph: false
   osd_pool_size: '2'
   pg_num: 128
-  per_pool_pg_nums:
-    default_pg_num: 128
-    cinder_volume: 2048
-    compute: 1024
-    backups: 512
-    ".rgw": 512
-    images: 256
   volumes_ceph: false
   volumes_lvm: true
   storage_network_range: 192.168.1.0/24

@@ -679,13 +679,6 @@ storage:
   objects_ceph: false
   osd_pool_size: '2'
   pg_num: 128
-  per_pool_pg_nums:
-    default_pg_num: 128
-    cinder_volume: 2048
-    compute: 1024
-    backups: 512
-    ".rgw": 512
-    images: 256
   volumes_ceph: false
   volumes_lvm: true
   storage_network_range: 192.168.1.0/24

@@ -679,13 +679,6 @@ storage:
   objects_ceph: false
   osd_pool_size: '2'
   pg_num: 128
-  per_pool_pg_nums:
-    default_pg_num: 128
-    cinder_volume: 2048
-    compute: 1024
-    backups: 512
-    ".rgw": 512
-    images: 256
   volumes_ceph: false
   volumes_lvm: true
   storage_network_range: 192.168.1.0/24

@@ -552,13 +552,6 @@ storage:
   objects_ceph: false
   osd_pool_size: '2'
   pg_num: 128
-  per_pool_pg_nums:
-    default_pg_num: 128
-    cinder_volume: 2048
-    compute: 1024
-    backups: 512
-    ".rgw": 512
-    images: 256
   volumes_ceph: false
   volumes_lvm: true
   storage_network_range: 192.168.1.0/24

@@ -552,13 +552,6 @@ storage:
   objects_ceph: false
   osd_pool_size: '2'
   pg_num: 128
-  per_pool_pg_nums:
-    default_pg_num: 128
-    cinder_volume: 2048
-    compute: 1024
-    backups: 512
-    ".rgw": 512
-    images: 256
   volumes_ceph: false
   volumes_lvm: true
   storage_network_range: 192.168.1.0/24

@@ -318,13 +318,6 @@ storage:
   images_vcenter: true
   osd_pool_size: '2'
   pg_num: 128
-  per_pool_pg_nums:
-    default_pg_num: 128
-    cinder_volume: 2048
-    compute: 1024
-    backups: 512
-    ".rgw": 512
-    images: 256
   images_ceph: false
   metadata:
     weight: 60

@@ -317,13 +317,6 @@ storage:
   images_vcenter: false
   osd_pool_size: '2'
   pg_num: 128
-  per_pool_pg_nums:
-    default_pg_num: 128
-    cinder_volume: 2048
-    compute: 1024
-    backups: 512
-    ".rgw": 512
-    images: 256
   images_ceph: false
   metadata:
     weight: 60

@@ -317,13 +317,6 @@ storage:
   images_vcenter: false
   osd_pool_size: '2'
   pg_num: 128
-  per_pool_pg_nums:
-    default_pg_num: 128
-    cinder_volume: 2048
-    compute: 1024
-    backups: 512
-    ".rgw": 512
-    images: 256
   images_ceph: false
   metadata:
     weight: 60

@@ -317,13 +317,6 @@ storage:
   images_vcenter: false
   osd_pool_size: '2'
   pg_num: 128
-  per_pool_pg_nums:
-    default_pg_num: 128
-    cinder_volume: 2048
-    compute: 1024
-    backups: 512
-    ".rgw": 512
-    images: 256
   images_ceph: false
   metadata:
     weight: 60

@@ -317,13 +317,6 @@ storage:
   images_vcenter: false
   osd_pool_size: '2'
   pg_num: 128
-  per_pool_pg_nums:
-    default_pg_num: 128
-    cinder_volume: 2048
-    compute: 1024
-    backups: 512
-    ".rgw": 512
-    images: 256
   images_ceph: false
   metadata:
     weight: 60

@@ -317,13 +317,6 @@ storage:
   images_vcenter: false
   osd_pool_size: '2'
   pg_num: 128
-  per_pool_pg_nums:
-    default_pg_num: 128
-    cinder_volume: 2048
-    compute: 1024
-    backups: 512
-    ".rgw": 512
-    images: 256
   images_ceph: false
   metadata:
     weight: 60

@@ -7,7 +7,7 @@ describe manifest do
   storage_hash = Noop.hiera 'storage'
   ceph_monitor_nodes = Noop.hiera 'ceph_monitor_nodes'

-  if (storage_hash['images_ceph'] or storage_hash['objects_ceph'])
+  if (storage_hash['images_ceph'] or storage_hash['objects_ceph'] or storage_hash['objects_ceph'])
     it { should contain_class('ceph').with(
       'mon_hosts' => ceph_monitor_nodes.keys,
       'osd_pool_default_size' => storage_hash['osd_pool_size'],

@@ -19,8 +19,8 @@ describe manifest do
   it { should contain_class('ceph::conf') }

   it { should contain_ceph__pool('compute').with(
-    'pg_num'  => storage_hash['per_pool_pg_nums']['compute'],
-    'pgp_num' => storage_hash['per_pool_pg_nums']['compute'],)
+    'pg_num'  => storage_hash['pg_num'],
+    'pgp_num' => storage_hash['pg_num'],)
   }

   it { should contain_ceph__pool('compute').that_requires('Class[ceph::conf]') }

@@ -6,31 +6,29 @@ describe manifest do
   shared_examples 'catalog' do
     storage_hash = Noop.hiera 'storage'

     if (storage_hash['images_ceph'] or storage_hash['objects_ceph'])
-      it { should contain_ceph__pool('images').with(
-        'pg_num'  => storage_hash['per_pool_pg_nums']['images'],
-        'pgp_num' => storage_hash['per_pool_pg_nums']['images'],)
-      }
-      it { should contain_ceph__pool('volumes').with(
-        'pg_num'  => storage_hash['per_pool_pg_nums']['volumes'],
-        'pgp_num' => storage_hash['per_pool_pg_nums']['volumes'],)
-      }
-      it { should contain_ceph__pool('backups').with(
-        'pg_num'  => storage_hash['per_pool_pg_nums']['backups'],
-        'pgp_num' => storage_hash['per_pool_pg_nums']['backups'],)
-      }
+      it { should contain_ceph__pool('images').with(
+        'pg_num'  => storage_hash['pg_num'],
+        'pgp_num' => storage_hash['pg_num'],)
+      }
+      it { should contain_ceph__pool('volumes').with(
+        'pg_num'  => storage_hash['pg_num'],
+        'pgp_num' => storage_hash['pg_num'],)
+      }
+      it { should contain_ceph__pool('backups').with(
+        'pg_num'  => storage_hash['pg_num'],
+        'pgp_num' => storage_hash['pg_num'],)
+      }

-      if storage_hash['volumes_ceph']
-        it { should contain_ceph__pool('volumes').that_notifies('Service[cinder-volume]') }
-        it { should contain_ceph__pool('backups').that_notifies('Service[cinder-backup]') }
-        it { should contain_service('cinder-volume') }
-        it { should contain_service('cinder-backup') }
-      end
+      if storage_hash['volumes_ceph']
+        it { should contain_ceph__pool('volumes').that_notifies('Service[cinder-volume]') }
+        it { should contain_ceph__pool('backups').that_notifies('Service[cinder-backup]') }
+        it { should contain_service('cinder-volume') }
+        it { should contain_service('cinder-backup') }
+      end

-      if storage_hash['images_ceph']
-        it { should contain_ceph__pool('images').that_notifies('Service[glance-api]') }
-        it { should contain_service('glance-api') }
-      end
+      if storage_hash['images_ceph']
+        it { should contain_ceph__pool('images').that_notifies('Service[glance-api]') }
+        it { should contain_service('glance-api') }
+      end

     end

@@ -7,7 +7,7 @@ describe manifest do
   storage_hash = Noop.hiera 'storage'
   ceph_monitor_nodes = Noop.hiera 'ceph_monitor_nodes'

-  if (storage_hash['images_ceph'] or storage_hash['objects_ceph'])
+  if (storage_hash['images_ceph'] or storage_hash['objects_ceph'] or storage_hash['objects_ceph'])
     it { should contain_class('ceph::radosgw').with(
       'primary_mon' => ceph_monitor_nodes.keys[0],
     )