From 7dc4db06ba50531b52caa636dcb277c85dbdbb36 Mon Sep 17 00:00:00 2001
From: Dan Bode
Date: Tue, 25 Jun 2013 16:57:03 -0700
Subject: [PATCH] Initial commit for swift support

- adds swift node definitions in Vagrant
- adds ssh to Puppetfile
- adds swift nodes to site.pp
- adds swift data to hiera
---
 .gitignore                      |  1 +
 Puppetfile                      |  4 +-
 Vagrantfile                     | 50 ++++++++++++++++++-
 hiera_data/openstack.yaml       |  7 +++
 hiera_data/swift-storage01.yaml |  1 +
 hiera_data/swift-storage02.yaml |  1 +
 hiera_data/swift-storage03.yaml |  1 +
 hiera_data/swift_storage.yaml   |  6 +++
 manifests/site.pp               | 24 ++++++++-
 tests/basic_test.sh             | 87 +++++++++++++++++++--------------
 tests/swift.sh                  | 48 ++++++++++++++++++
 11 files changed, 191 insertions(+), 39 deletions(-)
 create mode 100644 hiera_data/swift-storage01.yaml
 create mode 100644 hiera_data/swift-storage02.yaml
 create mode 100644 hiera_data/swift-storage03.yaml
 create mode 100644 hiera_data/swift_storage.yaml
 create mode 100644 tests/swift.sh

diff --git a/.gitignore b/.gitignore
index 4d0d574..9e3989f 100644
--- a/.gitignore
+++ b/.gitignore
@@ -4,3 +4,4 @@ vendor
 .tmp
 .vagrant
 *.log*
+hiera_data/jenkins.yaml
diff --git a/Puppetfile b/Puppetfile
index ab0d45a..31717f6 100644
--- a/Puppetfile
+++ b/Puppetfile
@@ -54,6 +54,7 @@ mod 'CiscoSystems/coi', :git => "#{base_url}/bodepd/puppet-COI", :ref => 'master
 # no existing downstream module
 mod 'puppetlabs/postgresql', :git => "#{base_url}/puppetlabs/puppetlabs-postgresql", :ref => 'master'
 mod 'puppetlabs/puppetdb', :git => "#{base_url}/puppetlabs/puppetlabs-puppetdb", :ref => 'master'
+mod 'ripienaar/ruby-puppetdb', :git => 'git://github.com/ripienaar/ruby-puppetdb'
 # do I really need this firewall module?
 mod 'puppetlabs/firewall', :git => "#{base_url}/puppetlabs/puppetlabs-firewall", :ref => 'master'
 # stephenrjohnson
@@ -98,7 +99,8 @@ mod 'CiscoSystems/inifile', :git => "#{base_url}/CiscoSystems/puppet-inifile", :
 # upstream is saz
 mod 'CiscoSystems/memcached', :git => "#{base_url}/CiscoSystems/puppet-memcached", :ref => branch_name

-#mod 'CiscoSystems/ssh', :git => "#{base_url}/CiscoSystems/puppet-ssh", :ref => branch_name
+# this uses master b/c the grizzly branch does not exist
+mod 'CiscoSystems/ssh', :git => "#{base_url}/CiscoSystems/puppet-ssh", :ref => 'master'

 # upstream is duritong
 mod 'CiscoSystems/sysctl', :git => "#{base_url}/CiscoSystems/puppet-sysctl", :ref => branch_name
diff --git a/Vagrantfile b/Vagrantfile
index a2b06bf..ccbbba6 100644
--- a/Vagrantfile
+++ b/Vagrantfile
@@ -111,7 +111,7 @@ def run_puppet_agent(

   options = ["--certname #{node_name}", '-t', '--pluginsync']
   if v_config[:verbose]
-    options = options + ['--verbose', '--trace', '--debug', '--show_diff']
+    options = options + ['--trace', '--debug', '--show_diff']
   end

   config.vm.provision(:puppet_server) do |puppet|
@@ -263,4 +263,52 @@ Vagrant::Config.run do |config|
     )
   end

+  config.vm.define :swift_proxy do |config|
+    configure_openstack_node(
+      config,
+      'swift-proxy01',
+      512,
+      'precise64',
+      '41',
+      apt_cache_proxy,
+      v_config
+    )
+  end
+
+  config.vm.define :swift_storage_1 do |config|
+    configure_openstack_node(
+      config,
+      'swift-storage01',
+      512,
+      'precise64',
+      '51',
+      apt_cache_proxy,
+      v_config
+    )
+  end
+
+  config.vm.define :swift_storage_2 do |config|
+    configure_openstack_node(
+      config,
+      'swift-storage02',
+      512,
+      'precise64',
+      '52',
+      apt_cache_proxy,
+      v_config
+    )
+  end
+
+  config.vm.define :swift_storage_3 do |config|
+    configure_openstack_node(
+      config,
+      'swift-storage03',
+      512,
+      'precise64',
+      '53',
+      apt_cache_proxy,
+      v_config
+    )
+  end
+
 end
diff --git a/hiera_data/openstack.yaml b/hiera_data/openstack.yaml
index c42b69e..2302603 100644
--- a/hiera_data/openstack.yaml
+++ b/hiera_data/openstack.yaml
@@ -5,6 +5,7 @@
 # controller information
 controller_node_internal: 192.168.242.10
 controller_node_public: 192.168.242.10
+controller_node_address: 192.168.242.10
 controller_hostname: control-server

 # information for package repos
@@ -36,3 +37,9 @@
 admin_password: Cisco123
 keystone_admin_token: keystone_admin_token
 verbose: false
+
+# swift info
+swift: true
+swift_user_password: swift_user_pass
+swift_hash: super_secret_swift_hash
+swift_public_address: 192.168.242.41
diff --git a/hiera_data/swift-storage01.yaml b/hiera_data/swift-storage01.yaml
new file mode 100644
index 0000000..becca3e
--- /dev/null
+++ b/hiera_data/swift-storage01.yaml
@@ -0,0 +1 @@
+swift_zone: 1
diff --git a/hiera_data/swift-storage02.yaml b/hiera_data/swift-storage02.yaml
new file mode 100644
index 0000000..5902f66
--- /dev/null
+++ b/hiera_data/swift-storage02.yaml
@@ -0,0 +1 @@
+swift_zone: 2
diff --git a/hiera_data/swift-storage03.yaml b/hiera_data/swift-storage03.yaml
new file mode 100644
index 0000000..88db4b5
--- /dev/null
+++ b/hiera_data/swift-storage03.yaml
@@ -0,0 +1 @@
+swift_zone: 3
diff --git a/hiera_data/swift_storage.yaml b/hiera_data/swift_storage.yaml
new file mode 100644
index 0000000..506948b
--- /dev/null
+++ b/hiera_data/swift_storage.yaml
@@ -0,0 +1,6 @@
+storage_type: loopback
+storage_devices:
+  - 1
+  - 2
+  - 3
+swift_local_net_ip: %{ipaddress_eth3}
diff --git a/manifests/site.pp b/manifests/site.pp
index fb80e13..434b8a0 100644
--- a/manifests/site.pp
+++ b/manifests/site.pp
@@ -28,15 +28,35 @@ node build-server {
 }

 node /control-server/ {
-  $role = 'openstack'
+
+  $role           = 'openstack'
   $openstack_role = 'controller'
   include coi::roles::controller
+
 }

 node /compute-server\d+/ {
+
   $role = 'openstack'
   $openstack_role = 'compute'
   include coi::roles::compute
+
+}
+
+node /swift-proxy\d+/ {
+
+  $role = 'openstack'
+  $openstack_role = 'swift_proxy'
+  include coi::roles::swift_proxy
+
+}
+
+node /swift-storage\d+/ {
+
+  $role = 'openstack'
+  $openstack_role = 'swift_storage'
+  include coi::roles::swift_storage
+
 }

 # cache node that we use for testing so that we do not have to always reinstall
 #
 #
 node /cache/ {
+
   include coi::roles::cache
+
 }
diff --git a/tests/basic_test.sh b/tests/basic_test.sh
index 529795e..2200907 100755
--- a/tests/basic_test.sh
+++ b/tests/basic_test.sh
@@ -7,9 +7,6 @@
 set -e
 set -u

-# pull in functions that test multi-node
-source tests/multi_node.sh
-
 ret=0
 datestamp=`date "+%Y%m%d%H%M%S"`

@@ -35,7 +32,8 @@ fi
 # install modules
 export module_install_method=librarian
 if [ $module_install_method = 'librarian' ]; then
-  librarian-puppet install --clean --verbose
+  #librarian-puppet install --clean --verbose
+  librarian-puppet install --verbose
 else
   # eventually, this could do something like install packages
   echo 'librarian is the only supported install method'
@@ -70,41 +68,58 @@ if [ -n "${openstack_package_repo:-}" ]; then
   fi
 fi

-# clean up old vms from previous tests
-destroy_multi_node_vms
-
-# deploy the vms for a multi-node deployment
-deploy_multi_node_vms
-
-vagrant ssh build -c 'sudo /tmp/test_nova.sh;exit $?'
-vagrant ssh build -c 'ping -c 2 172.16.2.129;exit $?'
+if [ "${test_type:-}" = 'swift' ]; then
+
+  source tests/swift.sh
+
+  destroy_swift
+  deploy_swift_multi
+
+  vagrant ssh build -c 'ruby /tmp/swift_test_file.rb;exit $?'

-if [ $? -eq 0 ]
-  then
-  echo "##########################"
-  echo " Test Passed!"
-  echo "OVS ON CONTROL:" >> control.log.$datestamp
-  vagrant ssh control_basevm -c 'sudo ovs-vsctl show;exit $?' >> control.log.$datestamp
-  echo "OVS ON COMPUTE:" >> compute.log.$datestamp
-  vagrant ssh compute_basevm -c 'sudo ovs-vsctl show;exit $?' >> compute.log.$datestamp
-  mv build.log.$datestamp build.log.$datestamp.success
-  mv control.log.$datestamp control.log.$datestamp.success
-  mv compute.log.$datestamp compute.log.$datestamp.success
-  ret=0
 else
-  echo "##########################"
-  echo "Ping failed to reach VM :("
-  echo "OVS ON CONTROL:" >> control.log.$datestamp
-  vagrant ssh control_basevm -c 'sudo ovs-vsctl show;exit $?' >> control.log.$datestamp
-  echo "OVS ON COMPUTE:" >> compute.log.$datestamp
-  vagrant ssh compute_basevm -c 'sudo ovs-vsctl show' >> compute.log.$datestamp
-  vagrant ssh control_basevm -c 'sudo service quantum-plugin-openvswitch-agent restart'
-  sleep 2
-  echo "OVS ON CONTROL AFTER AGENT RESTART:" >> control.log.$datestamp
-  vagrant ssh control_basevm -c 'sudo ovs-vsctl show;exit $?' >> control.log.$datestamp
-  mv build.log.$datestamp build.log.$datestamp.failed
-  mv control.log.$datestamp control.log.$datestamp.failed
-  mv compute.log.$datestamp compute.log.$datestamp.failed
-  ret=1
+
+  # pull in functions that test multi-node
+  source tests/multi_node.sh
+
+  # perform a multi-node openstack installation test by default
+  # clean up old vms from previous tests
+  destroy_multi_node_vms
+
+  # deploy the vms for a multi-node deployment
+  deploy_multi_node_vms
+
+  vagrant ssh build -c 'sudo /tmp/test_nova.sh;exit $?'
+  vagrant ssh build -c 'ping -c 2 172.16.2.129;exit $?'
+
+  if [ $? -eq 0 ]
+    then
+    echo "##########################"
+    echo " Test Passed!"
+    echo "OVS ON CONTROL:" >> control.log.$datestamp
+    vagrant ssh control_basevm -c 'sudo ovs-vsctl show;exit $?' >> control.log.$datestamp
+    echo "OVS ON COMPUTE:" >> compute.log.$datestamp
+    vagrant ssh compute_basevm -c 'sudo ovs-vsctl show;exit $?' >> compute.log.$datestamp
+    mv build.log.$datestamp build.log.$datestamp.success
+    mv control.log.$datestamp control.log.$datestamp.success
+    mv compute.log.$datestamp compute.log.$datestamp.success
+    ret=0
+  else
+    echo "##########################"
+    echo "Ping failed to reach VM :("
+    echo "OVS ON CONTROL:" >> control.log.$datestamp
+    vagrant ssh control_basevm -c 'sudo ovs-vsctl show;exit $?' >> control.log.$datestamp
+    echo "OVS ON COMPUTE:" >> compute.log.$datestamp
+    vagrant ssh compute_basevm -c 'sudo ovs-vsctl show' >> compute.log.$datestamp
+    vagrant ssh control_basevm -c 'sudo service quantum-plugin-openvswitch-agent restart'
+    sleep 2
+    echo "OVS ON CONTROL AFTER AGENT RESTART:" >> control.log.$datestamp
+    vagrant ssh control_basevm -c 'sudo ovs-vsctl show;exit $?' >> control.log.$datestamp
+    mv build.log.$datestamp build.log.$datestamp.failed
+    mv control.log.$datestamp control.log.$datestamp.failed
+    mv compute.log.$datestamp compute.log.$datestamp.failed
+    ret=1
+  fi
 fi
+
 exit $ret
diff --git a/tests/swift.sh b/tests/swift.sh
new file mode 100644
index 0000000..44f2621
--- /dev/null
+++ b/tests/swift.sh
@@ -0,0 +1,48 @@
+#!/bin/bash
+#
+# specifies things that are specific to the
+# vagrant multi-node deployment scenario
+#
+
+function destroy_swift() {
+  for i in build-server control-server swift-proxy01 swift-storage01 swift-storage02 swift-storage03 ; do
+    if VBoxManage list vms | grep $i; then
+      VBoxManage controlvm $i poweroff || true
+      # this sleep statement is to fix an issue where
+      # machines are still in a locked state after the
+      # controlvm poweroff command should be completed
+      sleep 1
+      VBoxManage unregistervm $i --delete
+    fi
+  done
+  clean_swift_certs
+}
+
+function clean_swift_certs() {
+  if VBoxManage list vms | grep build-server ; then
+    vagrant ssh build -c 'sudo bash -c "export RUBYLIB=/etc/puppet/modules-0/ruby-puppetdb/lib/; puppet query node --only-active --deactivate --puppetdb_host=build-server.domain.name --puppetdb_port=8081 --config=/etc/puppet/puppet.conf --ssldir=/var/lib/puppet/ssl --certname=build-server.domain.name || true"'
+
+    vagrant ssh build -c 'sudo bash -c "rm /var/lib/puppet/ssl/*/swift*;rm /var/lib/puppet/ssl/ca/signed/swift* || true"'
+  fi
+}
+
+function deploy_swift_multi() {
+  # build a cache vm if one does not already exist
+  for i in cache build control_basevm; do
+    if ! VBoxManage list vms | grep $i ; then
+      vagrant up $i 2>&1 | tee -a $i.log.$datestamp
+    fi
+  done
+
+  for i in swift_storage_1 swift_storage_2 swift_storage_3 ; do
+    # this first pass does not succeed
+    vagrant up $i 2>&1 | tee -a $i.log.$datestamp || true
+  done
+
+  vagrant up swift_proxy 2>&1 | tee -a swift_proxy.log.$datestamp
+
+  for i in swift_storage_1 swift_storage_2 swift_storage_3 ; do
+    vagrant provision $i 2>&1 | tee -a $i.log.$datestamp
+  done
+
+}
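
With this change, tests/basic_test.sh picks the scenario from the test_type environment variable: 'swift' sources tests/swift.sh and exercises the new proxy/storage nodes, while any other value (or leaving it unset) falls through to the existing multi-node nova test. A minimal invocation sketch, assuming the script is run from the repository root (it sources tests/swift.sh and tests/multi_node.sh by relative path):

  # swift scenario added by this patch
  test_type=swift ./tests/basic_test.sh

  # default: the original multi-node nova/quantum test
  ./tests/basic_test.sh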