AIO-SX Regression: Memory usage exceed alarms appeared during test run
- Reduce both CPU and memory footprint for AIO simplex by reducing the
  number of workers across the board.
- In addition:
  a. Reduce ceilometer workers to 1, as the traffic generated from the
     maximum number of VMs in simplex (20) does not warrant more than
     one worker. Increase the batch timeout to reduce load.
  b. Change the keystone workers factor from 1.5 to 1, which further
     reduces the number of keystone workers for simplex; the number of
     requests does not warrant more workers than other services
     (see the sketch below).
  c. Reduce the number of nova api metadata workers to half of the
     nova api compute workers for both AIO and standard configs, based
     on LO data.

AIO installations, simplex sanity and sanity nightly all passed.
Sanity and parallel VM/volume launch timings also improved.
Ceilometer messages were cleared in a timely manner.

Change-Id: Idfd09d53af834276bc5f8e26853690ae3bf9b139
Signed-off-by: Scott Little <scott.little@windriver.com>
Depends-On: https://review.openstack.org/578134
parent 63c8e73baa
commit df9b97901c
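The keystone change described in item (b) of the commit message is not part
of the hunks excerpted below. As a rough illustration only (the class,
resource, and option names here are assumptions, not taken from this
commit's diff), dropping the 1.5 factor amounts to deriving the keystone
worker count directly from the platform engineering value:

    # Illustrative sketch only -- not the actual change in this commit.
    # Before: workers scaled up by a factor of 1.5; after: use the
    # platform eng_workers value directly.
    class openstack::keystone::params (
      $api_workers = $::platform::params::eng_workers,
    ) {
      # Hypothetical keystone_config entry; the real option keystone uses
      # for its worker count may differ by release.
      keystone_config {
        'DEFAULT/public_workers': value => $api_workers;
      }
    }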
@@ -118,13 +118,19 @@ class openstack::ceilometer::collector {
       notification_workers => $::platform::params::eng_workers_by_2,
     }
 
+    if $::platform::params::system_type == 'All-in-one' {
+      $batch_timeout = 25
+    } else {
+      $batch_timeout = 5
+    }
+
     # FIXME(mpeters): generic parameter can be moved to the puppet module
     ceilometer_config {
       'DEFAULT/csv_location': value => "${ceilometer_directory_csv}";
       'DEFAULT/csv_location_strict': value => true;
       'service_credentials/interface': value => 'internalURL';
       'notification/batch_size': value => 100;
-      'notification/batch_timeout': value => 5;
+      'notification/batch_timeout': value => $batch_timeout;
     }
   }
 
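For reference, on an All-in-one node the ceilometer_config resource above
should render roughly the following in ceilometer.conf (a sketch of the
expected result, not captured from a running system):

    [notification]
    batch_size = 100
    batch_timeout = 25   # 5 on non-AIO systems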
@@ -608,7 +608,7 @@ class openstack::nova::api::services
       sync_db               => $::platform::params::init_database,
       sync_db_api           => $::platform::params::init_database,
       osapi_compute_workers => $::platform::params::eng_workers,
-      metadata_workers      => $::platform::params::eng_workers,
+      metadata_workers      => $::platform::params::eng_workers_by_2,
     }
   }
 
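The effect of deriving metadata_workers from eng_workers_by_2 instead of
eng_workers, worked through for two assumed configurations (the formulas
come from the platform::params hunk below; the hardware figures are
illustrative):

    Standard controller, 16 physical cores, memory cap not binding:
        osapi_compute_workers = eng_workers      = min(20, mem_cap, max(16, 2))   = 16
        metadata_workers      = eng_workers_by_2 = min(20, mem_cap, max(16/2, 2)) = 8

    AIO simplex (small footprint): eng_workers = 2, eng_workers_by_2 = 1,
        so nova-api runs 2 compute-API workers and a single metadata worker.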
@@ -34,41 +34,50 @@ class platform::params (
   $phys_core_count = 0 + $::physical_core_count
   $plat_res_mem = 0 + $::platform_res_mem
 
 
   # Engineering parameters common to openstack services:
 
   # max number of workers
   $eng_max_workers = 20
+  # min number of workers
+  $eng_min_workers = 1
   # total system memory per worker
   $eng_worker_mb = 2000
   # memory headroom per worker (e.g., buffers, cached)
   $eng_overhead_mb = 1000
   # number of workers we can support based on memory
-  if $::personality == 'controller' and str2bool($::is_compute_subfunction) {
-    # Controller memory available for small footprint
-    # Consistent with sysinv get_platform_reserved_memory()
-    if str2bool($::is_virtual) {
-      $eng_controller_mem = 6000
+  if $system_type == 'All-in-one' {
+    # Controller memory available for AIO
+    # Consistent with sysinv get_platform_reserved_memory()
+    $eng_controller_mem = 10500
+    if $system_mode == 'simplex' or ($phys_core_count <= 8 and $plat_res_mem < 14500) or str2bool($::is_virtual) {
+      $small_footprint = true
     } else {
-      #If we have a reduced footprint xeon-d and if the platform memory
-      #has not been increased by the user to the standard 14.5GB we use a
-      #lowered worker count to save memory
-      if $phys_core_count <= 8 and $plat_res_mem < 14500 {
-        $eng_controller_mem = 7000
-      } else {
-        $eng_controller_mem = 10500
-      }
+      # For AIO duplex, keep $eng_workers at 3 for now
+      $small_footprint = false
     }
   } else {
+    $small_footprint = false
     $eng_controller_mem = $::memorysize_mb
   }
 
   $eng_workers_mem = floor($eng_controller_mem) / ($eng_worker_mb + $eng_overhead_mb)
 
   # number of workers per service
-  $eng_workers = min($eng_max_workers, $eng_workers_mem, max($phys_core_count, 2))
-  $eng_workers_by_2 = min($eng_max_workers, $eng_workers_mem, max($phys_core_count/2, 2))
-  $eng_workers_by_4 = min($eng_max_workers, $eng_workers_mem, max($phys_core_count/4, 2))
-  $eng_workers_by_5 = min($eng_max_workers, $eng_workers_mem, max($phys_core_count/5, 2))
-  $eng_workers_by_6 = min($eng_max_workers, $eng_workers_mem, max($phys_core_count/6, 2))
+  if $small_footprint {
+    # Limit eng_workers and its derivatives to 2 and 1 respectively
+    # for simplex, Xeon-D and AIO in virtual box.
+    $eng_workers = 2
+    $eng_workers_by_2 = $eng_min_workers
+    $eng_workers_by_4 = $eng_min_workers
+    $eng_workers_by_5 = $eng_min_workers
+    $eng_workers_by_6 = $eng_min_workers
+  } else {
+    $eng_workers = min($eng_max_workers, $eng_workers_mem, max($phys_core_count, 2))
+    $eng_workers_by_2 = min($eng_max_workers, $eng_workers_mem, max($phys_core_count/2, 2))
+    $eng_workers_by_4 = min($eng_max_workers, $eng_workers_mem, max($phys_core_count/4, 2))
+    $eng_workers_by_5 = min($eng_max_workers, $eng_workers_mem, max($phys_core_count/5, 2))
+    $eng_workers_by_6 = min($eng_max_workers, $eng_workers_mem, max($phys_core_count/6, 2))
+  }
 
   $init_database = (str2bool($::is_initial_config_primary) or $controller_upgrade)
   $init_keystone = (str2bool($::is_initial_config_primary) or $controller_upgrade)
 
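To make the sizing above concrete, a worked example using the All-in-one
values from the hunk (the duplex core count is an assumption chosen for
illustration):

    eng_workers_mem = floor(10500) / (2000 + 1000) = 3   (integer division)

    small_footprint = true (simplex, Xeon-D, or virtual):
        eng_workers = 2,  eng_workers_by_2 .. eng_workers_by_6 = 1

    small_footprint = false (e.g. AIO duplex with 10 physical cores):
        eng_workers      = min(20, 3, max(10, 2))   = 3
        eng_workers_by_2 = min(20, 3, max(10/2, 2)) = 3

This is consistent with the in-code comment that AIO duplex keeps
$eng_workers at 3 for now.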