From 90be973d60d833f8ead3b9b1c29e1fce31b4cc7c Mon Sep 17 00:00:00 2001 From: Emilien Macchi Date: Thu, 1 May 2014 22:10:46 +0200 Subject: [PATCH] loadbalancer/refacto: fix unit tests bug #237 --- manifests/loadbalancer/binding.pp | 18 +- spec/classes/cloud_loadbalancer_spec.rb | 216 ++++++++++++------------ 2 files changed, 118 insertions(+), 116 deletions(-) diff --git a/manifests/loadbalancer/binding.pp b/manifests/loadbalancer/binding.pp index 8965fbbc..7f3308b9 100644 --- a/manifests/loadbalancer/binding.pp +++ b/manifests/loadbalancer/binding.pp @@ -20,7 +20,7 @@ define cloud::loadbalancer::binding ( $httpchk = undef ){ -include cloud::loadbalancer + include cloud::loadbalancer # join all VIP together $vip_public_ip_array = any2array($::cloud::loadbalancer::vip_public_ip) @@ -37,25 +37,27 @@ include cloud::loadbalancer if ! $::cloud::loadbalancer::vip_internal_ip and ! $::cloud::loadbalancer::vip_public_ip { fail('vip_public_ip and vip_internal_ip are both set to false, no binding is possible.') } + $all_vip_array = split($all_vip, ',') + # when we do not want binding if ($ip == false) { notice("no HAproxy binding for ${name} has been enabled.") } else { # when we want both internal & public binding if ($ip == true) { - $listen_ip_real = $all_vip + $listen_ip_real = $all_vip_array } else { # when binding is specified in parameter - if ($ip in $all_vip) { + if ($ip in $all_vip_array) { $listen_ip_real = $ip } else { fail("${ip} is not part of VIP pools.") } - cloud::loadbalancer::listen_http { $name: - ports => $port, - httpchk => $httpchk, - listen_ip => $listen_ip_real; - } + } + cloud::loadbalancer::listen_http { $name : + ports => $port, + httpchk => $httpchk, + listen_ip => [$listen_ip_real]; } } diff --git a/spec/classes/cloud_loadbalancer_spec.rb b/spec/classes/cloud_loadbalancer_spec.rb index 3bd5cc75..741253a4 100644 --- a/spec/classes/cloud_loadbalancer_spec.rb +++ b/spec/classes/cloud_loadbalancer_spec.rb @@ -20,57 +20,48 @@ require 'spec_helper' describe 'cloud::loadbalancer' do - let :default_params do - { :ceilometer_api => true, - :cinder_api => true, - :glance_api => true, - :neutron_api => true, - :heat_api => true, - :heat_cfn_api => true, - :heat_cloudwatch_api => true, - :nova_api => true, - :ec2_api => true, - :metadata_api => true, - :swift_api => true, - :keystone_api_admin => true, - :keystone_api => true, - :horizon => true, - :horizon_ssl => false, - :spice => true, - :haproxy_auth => 'root:secrete', - :keepalived_state => 'BACKUP', - :keepalived_priority => 50, - :keepalived_public_interface => 'eth0', - :keepalived_public_ipvs => ['10.0.0.1', '10.0.0.2'], - :horizon_port => '80', - :spice_port => '6082', - :vip_public_ip => '10.0.0.1', - :vip_internal_ip => false, - :galera_ip => '10.0.0.2', - :ks_ceilometer_public_port => '8777', - :ks_nova_public_port => '8774', - :ks_ec2_public_port => '8773', - :ks_metadata_public_port => '8777', - :ks_glance_api_public_port => '9292', - :ks_glance_registry_internal_port => '9191', - :ks_swift_public_port => '8080', - :ks_keystone_public_port => '5000', - :ks_keystone_admin_port => '35357', - :ks_cinder_public_port => '8776', - :ks_neutron_public_port => '9696', - :ks_heat_public_port => '8004', - :ks_heat_cfn_public_port => '8000', - :ks_heat_cloudwatch_public_port => '8003' } - end - - let :params do - { } - end - shared_examples_for 'openstack loadbalancer' do - let :p do - default_params.merge(params) + let :params do + { :ceilometer_api => true, + :cinder_api => true, + :glance_api => true, + :neutron_api => true, + 
:heat_api => true, + :heat_cfn_api => true, + :heat_cloudwatch_api => true, + :nova_api => true, + :ec2_api => true, + :metadata_api => true, + :swift_api => true, + :keystone_api_admin => true, + :keystone_api => true, + :horizon => true, + :horizon_ssl => false, + :spice => true, + :haproxy_auth => 'root:secrete', + :keepalived_state => 'BACKUP', + :keepalived_priority => 50, + :keepalived_public_interface => 'eth0', + :keepalived_public_ipvs => ['10.0.0.1', '10.0.0.2'], + :horizon_port => '80', + :spice_port => '6082', + :vip_public_ip => '10.0.0.1', + :galera_ip => '10.0.0.2', + :ks_ceilometer_public_port => '8777', + :ks_nova_public_port => '8774', + :ks_ec2_public_port => '8773', + :ks_metadata_public_port => '8777', + :ks_glance_api_public_port => '9292', + :ks_glance_registry_internal_port => '9191', + :ks_swift_public_port => '8080', + :ks_keystone_public_port => '5000', + :ks_keystone_admin_port => '35357', + :ks_cinder_public_port => '8776', + :ks_neutron_public_port => '9696', + :ks_heat_public_port => '8004', + :ks_heat_cfn_public_port => '8000', + :ks_heat_cloudwatch_public_port => '8003' } end it 'configure haproxy server' do @@ -81,36 +72,6 @@ describe 'cloud::loadbalancer' do should contain_class('keepalived') end # configure keepalived server - context 'configure an OpenStack service haproxy listen with public binding only' do - before do - params.merge!( - :keystone_api => '10.0.0.2', - :vip_public_ip => '10.0.0.2', - :keepalived_public_ipvs => ['10.0.0.2'], - :vip_internal_ip => false, - ) - end - it { should contain_haproxy__listen('keystone_api_cluster').with( - :ipaddress => ['10.0.0.2'], - :ports => '5000' - )} - end - - context 'configure an OpenStack service haproxy listen with both public and internal binding' do - before :each do - params.merge!( - :nova_api => true, - :vip_public_ip => '10.0.0.2', - :vip_internal_ip => '192.168.0.1', - :keepalived_internal_ipvs => ['192.168.0.1', '192.168.0.2'] - ) - end - it { should contain_haproxy__listen('nova_api_cluster').with( - :ipaddress => ['10.0.0.2','192.168.0.1'], - :ports => '8774' - )} - end - context 'configure an internal VIP' do before do params.merge!(:keepalived_internal_ipvs => ['192.168.0.1']) @@ -121,7 +82,7 @@ describe 'cloud::loadbalancer' do 'virtual_ips' => ['192.168.0.1 dev eth1'], 'track_script' => ['haproxy'], 'state' => 'BACKUP', - 'priority' => '50', + 'priority' => params[:keepalived_priority], 'notify_master' => '"/etc/init.d/haproxy start"', 'notify_backup' => '"/etc/init.d/haproxy stop"', }) @@ -143,28 +104,25 @@ describe 'cloud::loadbalancer' do 'virtual_ips' => ['192.168.0.2 dev eth3'], 'track_script' => ['haproxy'], 'state' => 'BACKUP', - 'priority' => p[:keepalived_priority], + 'priority' => params[:keepalived_priority], 'notify_master' => '"/etc/init.d/haproxy start"', 'notify_backup' => '"/etc/init.d/haproxy stop"', }) end end - context 'when keepalived and HAproxy are in backup' do + context 'configure keepalived in backup' do it 'configure vrrp_instance with BACKUP state' do should contain_keepalived__instance('1').with({ - 'interface' => p[:keepalived_public_interface], + 'interface' => params[:keepalived_public_interface], 'virtual_ips' => ['10.0.0.1 dev eth0', '10.0.0.2 dev eth0'], 'track_script' => ['haproxy'], - 'state' => p[:keepalived_state], - 'priority' => p[:keepalived_priority], + 'state' => params[:keepalived_state], + 'priority' => params[:keepalived_priority], 'notify_master' => '"/etc/init.d/haproxy start"', 'notify_backup' => '"/etc/init.d/haproxy stop"', }) end # 
configure vrrp_instance with BACKUP state - it 'configure haproxy server without service managed' do - should contain_class('haproxy').with(:service_manage => false) - end # configure haproxy server end # configure keepalived in backup context 'configure keepalived in master' do @@ -173,17 +131,14 @@ describe 'cloud::loadbalancer' do end it 'configure vrrp_instance with MASTER state' do should contain_keepalived__instance('1').with({ - 'interface' => p[:keepalived_public_interface], + 'interface' => params[:keepalived_public_interface], 'track_script' => ['haproxy'], 'state' => 'MASTER', - 'priority' => p[:keepalived_priority], + 'priority' => params[:keepalived_priority], 'notify_master' => '"/etc/init.d/haproxy start"', 'notify_backup' => '"/etc/init.d/haproxy stop"', }) end - it 'configure haproxy server with service managed' do - should contain_class('haproxy').with(:service_manage => false) - end # configure haproxy server end # configure keepalived in master context 'configure logrotate file' do @@ -202,7 +157,7 @@ describe 'cloud::loadbalancer' do )} end # configure monitor haproxy listen - context 'configure galera haproxy listen' do + context 'configure monitor haproxy listen' do it { should contain_haproxy__listen('galera_cluster').with( :ipaddress => params[:galera_ip], :ports => '3306', @@ -216,6 +171,64 @@ describe 'cloud::loadbalancer' do )} end # configure monitor haproxy listen + # test backward compatibility + context 'configure OpenStack binding on public network only' do + it { should contain_haproxy__listen('spice_cluster').with( + :ipaddress => [params[:vip_public_ip]], + :ports => '6082' + )} + end + + context 'configure OpenStack binding on both public and internal networks' do + before do + params.merge!( + :nova_api => true, + :galera_ip => '172.16.0.1', + :vip_public_ip => '172.16.0.1', + :vip_internal_ip => '192.168.0.1', + :keepalived_public_ipvs => ['172.16.0.1', '172.16.0.2'], + :keepalived_internal_ipvs => ['192.168.0.1', '192.168.0.2'] + ) + end + it { should contain_haproxy__listen('nova_api_cluster').with( + :ipaddress => ['172.16.0.1', '192.168.0.1'], + :ports => '8774' + )} + end + + context 'disable an OpenStack service binding' do + before do + params.merge!(:metadata_api => false) + end + it { should_not contain_haproxy__listen('metadata_api_cluster') } + end + + context 'should fail to configure OpenStack binding when vip_public_ip and vip_internal_ip are missing' do + before do + params.merge!( + :nova_api => true, + :galera_ip => '172.16.0.1', + :vip_public_ip => false, + :vip_internal_ip => false, + :keepalived_public_ipvs => ['172.16.0.1', '172.16.0.2'] + ) + end + it_raises 'a Puppet::Error', /vip_public_ip and vip_internal_ip are both set to false, no binding is possible./ + end + + context 'should fail to configure OpenStack binding when given VIP is not in the VIP pool list' do + before do + params.merge!( + :nova_api => '10.0.0.1', + :galera_ip => '172.16.0.1', + :vip_public_ip => '172.16.0.1', + :vip_internal_ip => false, + :keepalived_public_ipvs => ['172.16.0.1', '172.16.0.2'] + ) + end + it_raises 'a Puppet::Error', /10.0.0.1 is not part of VIP pools./ + end + context 'with a public OpenStack VIP not in the keepalived VIP list' do before do params.merge!( @@ -236,20 +249,6 @@ describe 'cloud::loadbalancer' do it_raises 'a Puppet::Error', /vip_internal_ip should be part of keepalived_internal_ipvs./ end - context 'with non-valid OpenStack VIP' do - before do - params.merge!( - :vip_public_ip => '172.16.0.1', - :vip_internal_ip => 
'192.168.0.1', - :galera_ip => '192.168.0.1', - :keepalived_internal_ipvs => ['192.168.0.1'], - :keepalived_public_ipvs => ['172.16.0.1'], - :keystone_api => '10.0.0.1' - ) - end - it_raises 'a Puppet::Error', /10.0.0.1 is not part of VIP pools./ - end - context 'with a Galera VIP not in the keepalived VIP list' do before do params.merge!( @@ -261,6 +260,7 @@ describe 'cloud::loadbalancer' do end it_raises 'a Puppet::Error', /galera_ip should be part of keepalived_public_ipvs or keepalived_internal_ipvs./ end + end # shared:: openstack loadbalancer context 'on Debian platforms' do
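
Note on the refactored binding logic (illustrative only; the resource titles, ports and VIP addresses below are assumptions, not taken from this patch): after this change, cloud::loadbalancer::binding distinguishes three cases for its ip parameter — false disables the binding, true binds on every VIP known to cloud::loadbalancer, and an explicit address must belong to the VIP pools or the catalog fails with "is not part of VIP pools". A minimal sketch of the three modes, declared by hand for illustration (in the module these bindings are presumably created by cloud::loadbalancer itself when the matching service parameters are enabled):

    # bind on all public and internal VIPs ($ip == true)
    cloud::loadbalancer::binding { 'nova_api_cluster':
      ip   => true,
      port => '8774',
    }

    # bind on one specific VIP; it must be part of the VIP pools
    cloud::loadbalancer::binding { 'keystone_api_cluster':
      ip   => '172.16.0.1',
      port => '5000',
    }

    # no binding at all ($ip == false)
    cloud::loadbalancer::binding { 'metadata_api_cluster':
      ip   => false,
      port => '8777',
    }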