Initial commit for swift support
- adds swift node definitions to the Vagrantfile
- adds ssh to the Puppetfile
- adds swift nodes to site.pp
- adds swift data to hiera
.gitignore: 1 change (vendored)
@@ -4,3 +4,4 @@ vendor
 .tmp
 .vagrant
 *.log*
+hiera_data/jenkins.yaml

Puppetfile

@@ -54,6 +54,7 @@ mod 'CiscoSystems/coi', :git => "#{base_url}/bodepd/puppet-COI", :ref => 'master
 # no existing downstream module
 mod 'puppetlabs/postgresql', :git => "#{base_url}/puppetlabs/puppetlabs-postgresql", :ref => 'master'
 mod 'puppetlabs/puppetdb', :git => "#{base_url}/puppetlabs/puppetlabs-puppetdb", :ref => 'master'
+mod 'ripienaar/ruby-puppetdb', :git => 'git://github.com/ripienaar/ruby-puppetdb'
 # do I really need this firewall module?
 mod 'puppetlabs/firewall', :git => "#{base_url}/puppetlabs/puppetlabs-firewall", :ref => 'master'
 # stephenrjohnson

@@ -98,7 +99,8 @@ mod 'CiscoSystems/inifile', :git => "#{base_url}/CiscoSystems/puppet-inifile", :
 
 # upstream is saz
 mod 'CiscoSystems/memcached', :git => "#{base_url}/CiscoSystems/puppet-memcached", :ref => branch_name
-#mod 'CiscoSystems/ssh', :git => "#{base_url}/CiscoSystems/puppet-ssh", :ref => branch_name
+# this uses master b/c the grizzly branch does not exist
+mod 'CiscoSystems/ssh', :git => "#{base_url}/CiscoSystems/puppet-ssh", :ref => 'master'
 
 # upstream is duritong
 mod 'CiscoSystems/sysctl', :git => "#{base_url}/CiscoSystems/puppet-sysctl", :ref => branch_name
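
With the Puppetfile updated, the modules need to be re-resolved; a minimal sketch, using the same librarian-puppet call the test script below settles on:

    librarian-puppet install --verbose
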
Vagrantfile: 50 changes (vendored)
@@ -111,7 +111,7 @@ def run_puppet_agent(
   options = ["--certname #{node_name}", '-t', '--pluginsync']
 
   if v_config[:verbose]
-    options = options + ['--verbose', '--trace', '--debug', '--show_diff']
+    options = options + ['--trace', '--debug', '--show_diff']
   end
 
   config.vm.provision(:puppet_server) do |puppet|

@@ -263,4 +263,52 @@ Vagrant::Config.run do |config|
     )
   end
+
+  config.vm.define :swift_proxy do |config|
+    configure_openstack_node(
+      config,
+      'swift-proxy01',
+      512,
+      'precise64',
+      '41',
+      apt_cache_proxy,
+      v_config
+    )
+  end
+
+  config.vm.define :swift_storage_1 do |config|
+    configure_openstack_node(
+      config,
+      'swift-storage01',
+      512,
+      'precise64',
+      '51',
+      apt_cache_proxy,
+      v_config
+    )
+  end
+
+  config.vm.define :swift_storage_2 do |config|
+    configure_openstack_node(
+      config,
+      'swift-storage02',
+      512,
+      'precise64',
+      '52',
+      apt_cache_proxy,
+      v_config
+    )
+  end
+
+  config.vm.define :swift_storage_3 do |config|
+    configure_openstack_node(
+      config,
+      'swift-storage03',
+      512,
+      'precise64',
+      '53',
+      apt_cache_proxy,
+      v_config
+    )
+  end
+
 end
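
The swift VMs can also be brought up by hand; a rough sketch of the order that deploy_swift_multi in tests/swift.sh automates (the storage nodes need a second provisioning pass once the proxy is up, likely because the ring data is not yet available on their first run):

    for i in swift_storage_1 swift_storage_2 swift_storage_3; do vagrant up $i || true; done
    vagrant up swift_proxy
    for i in swift_storage_1 swift_storage_2 swift_storage_3; do vagrant provision $i; done
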
@@ -5,6 +5,7 @@
 # controller information
 controller_node_internal: 192.168.242.10
 controller_node_public: 192.168.242.10
+controller_node_address: 192.168.242.10
 controller_hostname: control-server
 
 # information for package repos

@@ -36,3 +37,9 @@ admin_password: Cisco123
 keystone_admin_token: keystone_admin_token
 
 verbose: false
+
+# swift info
+swift: true
+swift_user_password: swift_user_pass
+swift_hash: super_secret_swift_hash
+swift_public_address: 192.168.242.41
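
A quick sanity check of the new keys; a sketch assuming the hiera CLI is available and a hiera.yaml that points at hiera_data/ (both assumptions, neither is shown in this commit):

    hiera -c hiera.yaml swift_hash              # super_secret_swift_hash
    hiera -c hiera.yaml swift_public_address    # 192.168.242.41
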
hiera_data/swift-storage01.yaml: 1 change (Normal file)
@@ -0,0 +1 @@
+swift_zone: 1
hiera_data/swift-storage02.yaml: 1 change (Normal file)
@@ -0,0 +1 @@
+swift_zone: 2
hiera_data/swift-storage03.yaml: 1 change (Normal file)
@@ -0,0 +1 @@
+swift_zone: 3
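
Each storage node picks up its ring zone from the yaml file named after its certname; a sketch of the lookup, assuming the hierarchy includes a per-certname level such as hiera_data/%{certname}.yaml (not shown in this commit):

    hiera -c hiera.yaml swift_zone certname=swift-storage02    # 2
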
hiera_data/swift_storage.yaml: 6 changes (Normal file)
@@ -0,0 +1,6 @@
+storage_type: loopback
+storage_devices:
+  - 1
+  - 2
+  - 3
+swift_local_net_ip: %{ipaddress_eth3}
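
storage_type: loopback backs each listed device with a file mounted through a loop device instead of a real disk; roughly what that amounts to (paths and size here are illustrative, the actual setup is done by the puppet swift modules):

    dd if=/dev/zero of=/srv/loopback-device/2 bs=1M count=1024
    mkfs.xfs -f /srv/loopback-device/2
    mkdir -p /srv/node/2 && mount -o loop,noatime /srv/loopback-device/2 /srv/node/2
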
site.pp

@@ -28,15 +28,35 @@ node build-server {
 }
 
 node /control-server/ {
-  $role = 'openstack'
+
+  $role = 'openstack'
+  $openstack_role = 'controller'
   include coi::roles::controller
 
 }
 
 node /compute-server\d+/ {
+
   $role = 'openstack'
+  $openstack_role = 'compute'
   include coi::roles::compute
 
 }
 
+node /swift-proxy\d+/ {
+
+  $role = 'openstack'
+  $openstack_role = 'swift_proxy'
+  include coi::roles::swift_proxy
+
+}
+
+node /swift-storage\d+/ {
+
+  $role = 'openstack'
+  $openstack_role = 'swift_storage'
+  include coi::roles::swift_storage
+
+}
+
 # cache node that we use for testing so that we do not have to always reinstall

@@ -45,5 +65,7 @@ node /compute-server\d+/ {
 #
 #
 node /cache/ {
+
   include coi::roles::cache
+
 }
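
The node blocks match on certname, so the VM names configured in the Vagrantfile select their roles here; an illustrative check of the mapping:

    for n in swift-proxy01 swift-storage01 swift-storage02 swift-storage03; do
      echo $n | grep -E 'swift-(proxy|storage)[0-9]+'    # matched by the new node regexes
    done
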
@@ -7,9 +7,6 @@
 set -e
 set -u
 
-# pull in functions that test multi-node
-source tests/multi_node.sh
-
 ret=0
 datestamp=`date "+%Y%m%d%H%M%S"`
 
@@ -35,7 +32,8 @@ fi
 # install modules
 export module_install_method=librarian
 if [ $module_install_method = 'librarian' ]; then
-  librarian-puppet install --clean --verbose
+  #librarian-puppet install --clean --verbose
+  librarian-puppet install --verbose
 else
   # eventually, this could do something like install packages
   echo 'librarian is the only supported install method'

@@ -70,41 +68,58 @@ if [ -n "${openstack_package_repo:-}" ]; then
fi
fi

# clean up old vms from previous tests
destroy_multi_node_vms
if [ "${test_type:-}" = 'swift' ]; then

# deploy the vms for a multi-node deployment
deploy_multi_node_vms
source tests/swift.sh

vagrant ssh build -c 'sudo /tmp/test_nova.sh;exit $?'
vagrant ssh build -c 'ping -c 2 172.16.2.129;exit $?'
destroy_swift
deploy_swift_multi

vagrant ssh build -c 'ruby /tmp/swift_test_file.rb;exit $?'

if [ $? -eq 0 ]
then
echo "##########################"
echo " Test Passed!"
echo "OVS ON CONTROL:" >> control.log.$datestamp
vagrant ssh control_basevm -c 'sudo ovs-vsctl show;exit $?' >> control.log.$datestamp
echo "OVS ON COMPUTE:" >> compute.log.$datestamp
vagrant ssh compute_basevm -c 'sudo ovs-vsctl show;exit $?' >> compute.log.$datestamp
mv build.log.$datestamp build.log.$datestamp.success
mv control.log.$datestamp control.log.$datestamp.success
mv compute.log.$datestamp compute.log.$datestamp.success
ret=0
else
echo "##########################"
echo "Ping failed to reach VM :("
echo "OVS ON CONTROL:" >> control.log.$datestamp
vagrant ssh control_basevm -c 'sudo ovs-vsctl show;exit $?' >> control.log.$datestamp
echo "OVS ON COMPUTE:" >> compute.log.$datestamp
vagrant ssh compute_basevm -c 'sudo ovs-vsctl show' >> compute.log.$datestamp
vagrant ssh control_basevm -c 'sudo service quantum-plugin-openvswitch-agent restart'
sleep 2
echo "OVS ON CONTROL AFTER AGENT RESTART:" >> control.log.$datestamp
vagrant ssh control_basevm -c 'sudo ovs-vsctl show;exit $?' >> control.log.$datestamp
mv build.log.$datestamp build.log.$datestamp.failed
mv control.log.$datestamp control.log.$datestamp.failed
mv compute.log.$datestamp compute.log.$datestamp.failed
ret=1

# pull in functions that test multi-node
source tests/multi_node.sh

# perform a multi-node openstack installation test by default
# clean up old vms from previous tests
destroy_multi_node_vms

# deploy the vms for a multi-node deployment
deploy_multi_node_vms

vagrant ssh build -c 'sudo /tmp/test_nova.sh;exit $?'
vagrant ssh build -c 'ping -c 2 172.16.2.129;exit $?'

if [ $? -eq 0 ]
then
echo "##########################"
echo " Test Passed!"
echo "OVS ON CONTROL:" >> control.log.$datestamp
vagrant ssh control_basevm -c 'sudo ovs-vsctl show;exit $?' >> control.log.$datestamp
echo "OVS ON COMPUTE:" >> compute.log.$datestamp
vagrant ssh compute_basevm -c 'sudo ovs-vsctl show;exit $?' >> compute.log.$datestamp
mv build.log.$datestamp build.log.$datestamp.success
mv control.log.$datestamp control.log.$datestamp.success
mv compute.log.$datestamp compute.log.$datestamp.success
ret=0
else
echo "##########################"
echo "Ping failed to reach VM :("
echo "OVS ON CONTROL:" >> control.log.$datestamp
vagrant ssh control_basevm -c 'sudo ovs-vsctl show;exit $?' >> control.log.$datestamp
echo "OVS ON COMPUTE:" >> compute.log.$datestamp
vagrant ssh compute_basevm -c 'sudo ovs-vsctl show' >> compute.log.$datestamp
vagrant ssh control_basevm -c 'sudo service quantum-plugin-openvswitch-agent restart'
sleep 2
echo "OVS ON CONTROL AFTER AGENT RESTART:" >> control.log.$datestamp
vagrant ssh control_basevm -c 'sudo ovs-vsctl show;exit $?' >> control.log.$datestamp
mv build.log.$datestamp build.log.$datestamp.failed
mv control.log.$datestamp control.log.$datestamp.failed
mv compute.log.$datestamp compute.log.$datestamp.failed
ret=1
fi
fi

exit $ret
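
Condensed, the new flow appears to make the swift path and the original multi-node path the two arms of the test_type conditional, with the OVS log-collection block setting ret in either case; a sketch:

    if [ "${test_type:-}" = 'swift' ]; then
      source tests/swift.sh
      destroy_swift
      deploy_swift_multi
      vagrant ssh build -c 'ruby /tmp/swift_test_file.rb;exit $?'
    else
      # pull in functions that test multi-node
      source tests/multi_node.sh
      destroy_multi_node_vms
      deploy_multi_node_vms
      vagrant ssh build -c 'sudo /tmp/test_nova.sh;exit $?'
      vagrant ssh build -c 'ping -c 2 172.16.2.129;exit $?'
    fi
    # ... success/failure logging sets ret ...
    exit $ret
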
tests/swift.sh: 48 changes (Normal file)
@@ -0,0 +1,48 @@
+#!/bin/bash
+#
+# specifies things that are specific to the
+# vagrant multi-node deployment scenario
+#
+
+function destroy_swift() {
+  for i in build-server control-server swift-proxy01 swift-storage01 swift-storage02 swift-storage03 ; do
+    if VBoxManage list vms | grep $i; then
+      VBoxManage controlvm $i poweroff || true
+      # this sleep statement is to fix an issue where
+      # machines are still in a locked state after the
+      # controlvm poweroff command should be completed
+      sleep 1
+      VBoxManage unregistervm $i --delete
+    fi
+  done
+  clean_swift_certs
+}
+
+function clean_swift_certs() {
+  if VBoxManage list vms | grep build-server ; then
+    vagrant ssh build -c 'sudo bash -c "export RUBYLIB=/etc/puppet/modules-0/ruby-puppetdb/lib/; puppet query node --only-active --deactivate --puppetdb_host=build-server.domain.name --puppetdb_port=8081 --config=/etc/puppet/puppet.conf --ssldir=/var/lib/puppet/ssl --certname=build-server.domain.name || true"'
+
+    vagrant ssh build -c 'sudo bash -c "rm /var/lib/puppet/ssl/*/swift*;rm /var/lib/puppet/ssl/ca/signed/swift* || true"'
+  fi
+}
+
+function deploy_swift_multi() {
+  # build a cache vm if one does not already exist
+  for i in cache build control_basevm; do
+    if ! VBoxManage list vms | grep $i ; then
+      vagrant up $i 2>&1 | tee -a $i.log.$datestamp
+    fi
+  done
+
+  for i in swift_storage_1 swift_storage_2 swift_storage_3 ; do
+    # this first pass does not succeed
+    vagrant up $i 2>&1 | tee -a $i.log.$datestamp || true
+  done
+
+  vagrant up swift_proxy 2>&1 | tee -a swift_proxy.log.$datestamp
+
+  for i in swift_storage_1 swift_storage_2 swift_storage_3 ; do
+    vagrant provision $i 2>&1 | tee -a $i.log.$datestamp
+  done
+
+}
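
For a manual run outside the test script, the helpers can be sourced directly; a sketch (deploy_swift_multi expects $datestamp to be set, as the test script does):

    source tests/swift.sh
    datestamp=`date "+%Y%m%d%H%M%S"`
    destroy_swift
    deploy_swift_multi
    vagrant ssh build -c 'ruby /tmp/swift_test_file.rb;exit $?'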