rename Quantum ==> Neutron, stage #1

Sergey Vasilenko 2013-10-23 17:02:49 +04:00
parent 0b23df48f6
commit 13e417a327
104 changed files with 2052 additions and 6998 deletions

View File

@ -10,7 +10,7 @@ class cluster {
$unicast_addresses = undef $unicast_addresses = undef
} }
#todo: move half of openstack::corosync to this module, another half -- to quantum #todo: move half of openstack::corosync to this module, another half -- to Neutron
if defined(Stage['corosync_setup']) { if defined(Stage['corosync_setup']) {
class {'openstack::corosync': class {'openstack::corosync':
bind_address => $internal_address, bind_address => $internal_address,
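A hedged aside on the guard above: openstack::corosync is only pulled in when a 'corosync_setup' run stage has been declared elsewhere in the catalog. Assuming Pacemaker/Corosync do get configured that way, cluster health can be checked from the shell with the standard tools (these commands are not taken from this commit):

    # One-shot Pacemaker cluster status, then Corosync ring status
    crm_mon -1
    corosync-cfgtool -s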

View File

@ -163,7 +163,7 @@ LOGGING = {
'level': 'DEBUG', 'level': 'DEBUG',
'propagate': False 'propagate': False
}, },
'quantumclient': { 'neutronclient': {
'handlers':<% if use_syslog -%> ['syslog']<% else -%> ['file']<% end -%>, 'handlers':<% if use_syslog -%> ['syslog']<% else -%> ['file']<% end -%>,
'level': 'DEBUG', 'level': 'DEBUG',
'propagate': False 'propagate': False

View File

@ -40,7 +40,7 @@ python-mimeparse==0.1.4
python-muranoclient==0.2 python-muranoclient==0.2
python-novaclient==2.12.0 python-novaclient==2.12.0
python-savannaclient==0.2.2 python-savannaclient==0.2.2
python-quantumclient==2.2.3 python-neutronclient==2.2.3
requests==1.2.3 requests==1.2.3
setuptools-git==1.0 setuptools-git==1.0
simplegeneric==0.8.1 simplegeneric==0.8.1
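The renamed client pin is python-neutronclient. A quick way to verify the pin resolves, assuming a plain pip environment rather than this repository's own build tooling:

    pip install python-neutronclient==2.2.3
    pip show python-neutronclient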

View File

@ -3,3 +3,4 @@ pkg/
.DS_Store .DS_Store
metadata.json metadata.json
coverage/ coverage/
spec/fixtures

View File

@ -1,12 +1,10 @@
name 'puppet-quantum' name 'puppet-neutron-ha'
version '0.2.2' version '0.2.2'
author 'Joe Topjian,Endre Karlson,Emilien Macchi,Dan Bode' author 'Joe Topjian,Endre Karlson,Emilien Macchi,Dan Bode'
license 'Apache License 2.0' license 'Apache License 2.0'
project_page 'https://github.com/EmilienM/openstack-quantum-puppet'
source 'https://github.com/EmilienM/openstack-quantum-puppet'
summary 'Puppet module for OpenStack Quantum'
description 'Puppet module to install and configure the OpenStack network service, Quantum' description 'Puppet module to install and configure the OpenStack network service, Quantum'
dependency 'puppetlabs/mysql', '>=0.3.0' dependency 'puppetlabs/mysql', '>=0.3.0'
dependency 'cprice404/inifile', '>=0.9.0' dependency 'cprice404/inifile', '>=0.9.0'
dependency 'puppetlabs/stdlib', '>=2.4.0' dependency 'puppetlabs/stdlib', '>=2.4.0'
dependency 'xenolog/l23network', '>=0.0.0'

View File

@ -0,0 +1,4 @@
Puppet module for OpenStack Neutron HA implementation
=====================================================
(Work under development)

View File

@ -1,28 +1,28 @@
class { 'quantum': class { 'neutron':
rabbit_password => '1', rabbit_password => '1',
verbose => 'True', verbose => 'True',
debug => 'True', debug => 'True',
rabbit_host => '172.18.66.112', rabbit_host => '172.18.66.112',
} }
class { 'quantum::server': class { 'neutron::server':
auth_password => '1', auth_password => '1',
auth_host => '172.18.66.112', auth_host => '172.18.66.112',
auth_tenant => 'service', auth_tenant => 'service',
} }
class { 'quantum::plugins::ovs': class { 'neutron::plugins::ovs':
sql_connection => "mysql://root:1@172.18.66.112/ovs_quantum" , sql_connection => "mysql://root:1@172.18.66.112/ovs_neutron" ,
tenant_network_type => 'gre', tenant_network_type => 'gre',
enable_tunneling => true, enable_tunneling => true,
} }
class { 'quantum::agents::dhcp': class { 'neutron::agents::dhcp':
debug => 'True', debug => 'True',
use_namespaces => 'False', use_namespaces => 'False',
} }
class { 'quantum::agents::l3': class { 'neutron::agents::l3':
debug => 'True', debug => 'True',
auth_url => 'http://172.18.66.112:5000/v2.0', auth_url => 'http://172.18.66.112:5000/v2.0',
auth_password => '1', auth_password => '1',
@ -30,7 +30,7 @@
metadata_ip => '172.18.66.112', metadata_ip => '172.18.66.112',
} }
class { 'quantum::agents::ovs': class { 'neutron::agents::ovs':
enable_tunneling => 'True', enable_tunneling => 'True',
local_ip => $::ipaddress_eth2, local_ip => $::ipaddress_eth2,
} }
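The manifest above is a standalone usage example. One hedged way to try it on a single node, assuming the module is on the modulepath; the paths and file name are placeholders, not taken from this commit:

    # Preview the catalog first, then apply for real
    puppet apply --modulepath=/etc/puppet/modules --noop examples/neutron.pp
    puppet apply --modulepath=/etc/puppet/modules examples/neutron.pp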

View File

@ -1,19 +1,19 @@
diff --git a/quantum/common/exceptions.py b/quantum/common/exceptions.py diff --git a/neutron/common/exceptions.py b/neutron/common/exceptions.py
index c99c254..e24f7bc 100644 index c99c254..e24f7bc 100644
--- a/quantum/common/exceptions.py --- a/neutron/common/exceptions.py
+++ b/quantum/common/exceptions.py +++ b/neutron/common/exceptions.py
@@ -235,3 +235,7 @@ class InvalidSharedSetting(QuantumException): @@ -235,3 +235,7 @@ class InvalidSharedSetting(NeutronException):
class InvalidExtenstionEnv(QuantumException): class InvalidExtenstionEnv(NeutronException):
message = _("Invalid extension environment: %(reason)s") message = _("Invalid extension environment: %(reason)s")
+ +
+class DBError(Error): +class DBError(Error):
+ message = _("Database error") + message = _("Database error")
+ +
diff --git a/quantum/db/api.py b/quantum/db/api.py diff --git a/neutron/db/api.py b/neutron/db/api.py
index 238a9f9..737c748 100644 index 238a9f9..737c748 100644
--- a/quantum/db/api.py --- a/neutron/db/api.py
+++ b/quantum/db/api.py +++ b/neutron/db/api.py
@@ -20,12 +20,16 @@ @@ -20,12 +20,16 @@
import logging import logging
import time import time
@ -26,8 +26,8 @@ index 238a9f9..737c748 100644
+from sqlalchemy.exc import OperationalError +from sqlalchemy.exc import OperationalError
from sqlalchemy.orm import sessionmaker, exc from sqlalchemy.orm import sessionmaker, exc
from quantum.db import model_base from neutron.db import model_base
+from quantum.common.exceptions import DBError +from neutron.common.exceptions import DBError
LOG = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
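The hunks above appear to come from a patch file carried in the module: they import SQLAlchemy's OperationalError and add a DBError exception, presumably so db/api.py can translate transient database failures; the body of that change is truncated here. A hedged sketch of applying such a bundled patch to an installed neutron tree (the patch file name and site-packages path are placeholders):

    cd /usr/lib/python2.7/dist-packages
    patch -p1 --dry-run < /etc/puppet/modules/neutron/files/db-reconnect.patch && \
        patch -p1 < /etc/puppet/modules/neutron/files/db-reconnect.patch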

View File

@ -1,9 +1,9 @@
#!/bin/bash #!/bin/bash
# #
# #
# OpenStack DHCP Service (quantum-dhcp-agent) # OpenStack DHCP Service (neutron-dhcp-agent)
# #
# Description: Manages an OpenStack DHCP Service (quantum-dhcp-agent) process as an HA resource # Description: Manages an OpenStack DHCP Service (neutron-dhcp-agent) process as an HA resource
# #
# Authors: Emilien Macchi # Authors: Emilien Macchi
# Mainly inspired by the Nova Network resource agent written by Emilien Macchi & Sebastien Han # Mainly inspired by the Nova Network resource agent written by Emilien Macchi & Sebastien Han
@ -34,14 +34,14 @@
PATH=/sbin:/usr/sbin:/bin:/usr/bin PATH=/sbin:/usr/sbin:/bin:/usr/bin
OCF_RESKEY_binary_default="quantum-dhcp-agent" OCF_RESKEY_binary_default="neutron-dhcp-agent"
OCF_RESKEY_config_default="/etc/quantum/quantum.conf" OCF_RESKEY_config_default="/etc/neutron/neutron.conf"
OCF_RESKEY_plugin_config_default="/etc/quantum/dhcp_agent.ini" OCF_RESKEY_plugin_config_default="/etc/neutron/dhcp_agent.ini"
OCF_RESKEY_user_default="quantum" OCF_RESKEY_user_default="neutron"
OCF_RESKEY_pid_default="$HA_RSCTMP/$OCF_RESOURCE_INSTANCE.pid" OCF_RESKEY_pid_default="$HA_RSCTMP/$OCF_RESOURCE_INSTANCE.pid"
OCF_RESKEY_os_auth_url_default="http://localhost:5000/v2.0" OCF_RESKEY_os_auth_url_default="http://localhost:5000/v2.0"
OCF_RESKEY_username_default="quantum" OCF_RESKEY_username_default="neutron"
OCF_RESKEY_password_default="quantum_pass" OCF_RESKEY_password_default="neutron_pass"
OCF_RESKEY_tenant_default="services" OCF_RESKEY_tenant_default="services"
: ${OCF_RESKEY_os_auth_url=${OCF_RESKEY_os_auth_url_default}} : ${OCF_RESKEY_os_auth_url=${OCF_RESKEY_os_auth_url_default}}
@ -62,7 +62,7 @@ usage() {
cat <<UEND cat <<UEND
usage: $0 (start|stop|validate-all|meta-data|status|monitor) usage: $0 (start|stop|validate-all|meta-data|status|monitor)
$0 manages an OpenStack DHCP Service (quantum-dhcp-agent) process as an HA resource $0 manages an OpenStack DHCP Service (neutron-dhcp-agent) process as an HA resource
The 'start' operation starts the networking service. The 'start' operation starts the networking service.
The 'stop' operation stops the networking service. The 'stop' operation stops the networking service.
@ -78,54 +78,54 @@ meta_data() {
cat <<END cat <<END
<?xml version="1.0"?> <?xml version="1.0"?>
<!DOCTYPE resource-agent SYSTEM "ra-api-1.dtd"> <!DOCTYPE resource-agent SYSTEM "ra-api-1.dtd">
<resource-agent name="quantum-dhcp-agent"> <resource-agent name="neutron-dhcp-agent">
<version>1.0</version> <version>1.0</version>
<longdesc lang="en"> <longdesc lang="en">
Resource agent for the OpenStack Quantum DHCP Service (quantum-dhcp-agent) Resource agent for the OpenStack Quantum DHCP Service (neutron-dhcp-agent)
May manage a quantum-dhcp-agent instance or a clone set that May manage a neutron-dhcp-agent instance or a clone set that
creates a distributed quantum-dhcp-agent cluster. creates a distributed neutron-dhcp-agent cluster.
</longdesc> </longdesc>
<shortdesc lang="en">Manages the OpenStack DHCP Service (quantum-dhcp-agent)</shortdesc> <shortdesc lang="en">Manages the OpenStack DHCP Service (neutron-dhcp-agent)</shortdesc>
<parameters> <parameters>
<parameter name="binary" unique="0" required="0"> <parameter name="binary" unique="0" required="0">
<longdesc lang="en"> <longdesc lang="en">
Location of the OpenStack DHCP Server server binary (quantum-dhcp-agent) Location of the OpenStack DHCP Server server binary (neutron-dhcp-agent)
</longdesc> </longdesc>
<shortdesc lang="en">OpenStack DHCP Server server binary (quantum-dhcp-agent)</shortdesc> <shortdesc lang="en">OpenStack DHCP Server server binary (neutron-dhcp-agent)</shortdesc>
<content type="string" default="${OCF_RESKEY_binary_default}" /> <content type="string" default="${OCF_RESKEY_binary_default}" />
</parameter> </parameter>
<parameter name="config" unique="0" required="0"> <parameter name="config" unique="0" required="0">
<longdesc lang="en"> <longdesc lang="en">
Location of the OpenStack Quantum Service (quantum-server) configuration file Location of the OpenStack Quantum Service (neutron-server) configuration file
</longdesc> </longdesc>
<shortdesc lang="en">OpenStack DHCP Server (quantum-server) config file</shortdesc> <shortdesc lang="en">OpenStack DHCP Server (neutron-server) config file</shortdesc>
<content type="string" default="${OCF_RESKEY_config_default}" /> <content type="string" default="${OCF_RESKEY_config_default}" />
</parameter> </parameter>
<parameter name="plugin_config" unique="0" required="0"> <parameter name="plugin_config" unique="0" required="0">
<longdesc lang="en"> <longdesc lang="en">
Location of the OpenStack DHCP Service (quantum-dhcp-agent) configuration file Location of the OpenStack DHCP Service (neutron-dhcp-agent) configuration file
</longdesc> </longdesc>
<shortdesc lang="en">OpenStack DHCP Server (quantum-dhcp-agent) config file</shortdesc> <shortdesc lang="en">OpenStack DHCP Server (neutron-dhcp-agent) config file</shortdesc>
<content type="string" default="${OCF_RESKEY_plugin_config_default}" /> <content type="string" default="${OCF_RESKEY_plugin_config_default}" />
</parameter> </parameter>
<parameter name="user" unique="0" required="0"> <parameter name="user" unique="0" required="0">
<longdesc lang="en"> <longdesc lang="en">
User running OpenStack DHCP Service (quantum-dhcp-agent) User running OpenStack DHCP Service (neutron-dhcp-agent)
</longdesc> </longdesc>
<shortdesc lang="en">OpenStack DHCP Service (quantum-dhcp-agent) user</shortdesc> <shortdesc lang="en">OpenStack DHCP Service (neutron-dhcp-agent) user</shortdesc>
<content type="string" default="${OCF_RESKEY_user_default}" /> <content type="string" default="${OCF_RESKEY_user_default}" />
</parameter> </parameter>
<parameter name="pid" unique="0" required="0"> <parameter name="pid" unique="0" required="0">
<longdesc lang="en"> <longdesc lang="en">
The pid file to use for this OpenStack DHCP Service (quantum-dhcp-agent) instance The pid file to use for this OpenStack DHCP Service (neutron-dhcp-agent) instance
</longdesc> </longdesc>
<shortdesc lang="en">OpenStack DHCP Service (quantum-dhcp-agent) pid file</shortdesc> <shortdesc lang="en">OpenStack DHCP Service (neutron-dhcp-agent) pid file</shortdesc>
<content type="string" default="${OCF_RESKEY_pid_default}" /> <content type="string" default="${OCF_RESKEY_pid_default}" />
</parameter> </parameter>
@ -165,9 +165,9 @@ Admin tenant name
<parameter name="additional_parameters" unique="0" required="0"> <parameter name="additional_parameters" unique="0" required="0">
<longdesc lang="en"> <longdesc lang="en">
Additional parameters to pass on to the OpenStack DHCP Service (quantum-dhcp-agent) Additional parameters to pass on to the OpenStack DHCP Service (neutron-dhcp-agent)
</longdesc> </longdesc>
<shortdesc lang="en">Additional parameters for quantum-dhcp-agent</shortdesc> <shortdesc lang="en">Additional parameters for neutron-dhcp-agent</shortdesc>
<content type="string" /> <content type="string" />
</parameter> </parameter>
@ -190,7 +190,7 @@ END
####################################################################### #######################################################################
# Functions invoked by resource manager actions # Functions invoked by resource manager actions
quantum_dhcp_agent_validate() { neutron_dhcp_agent_validate() {
local rc local rc
check_binary $OCF_RESKEY_binary check_binary $OCF_RESKEY_binary
@ -216,18 +216,18 @@ quantum_dhcp_agent_validate() {
true true
} }
quantum_dhcp_agent_status() { neutron_dhcp_agent_status() {
local pid local pid
local rc local rc
if [ ! -f $OCF_RESKEY_pid ]; then if [ ! -f $OCF_RESKEY_pid ]; then
ocf_log info "OpenStack OVS Server (quantum-dhcp-agent) seems not to exist" ocf_log info "OpenStack OVS Server (neutron-dhcp-agent) seems not to exist"
pid=`pgrep -f ${OCF_RESKEY_binary}` pid=`pgrep -f ${OCF_RESKEY_binary}`
if [ $? -eq 0 ] if [ $? -eq 0 ]
then then
ocf_log warn "OpenStack OVS Server (quantum-dhcp-agent) was run, but no pid file found." ocf_log warn "OpenStack OVS Server (neutron-dhcp-agent) was run, but no pid file found."
ocf_log warn "Will use $pid as PID of process (quantum-dhcp-agent)" ocf_log warn "Will use $pid as PID of process (neutron-dhcp-agent)"
ocf_log warn "Writing $pid into $OCF_RESKEY_pid" ocf_log warn "Writing $pid into $OCF_RESKEY_pid"
echo $pid > $OCF_RESKEY_pid echo $pid > $OCF_RESKEY_pid
else else
@ -242,7 +242,7 @@ quantum_dhcp_agent_status() {
if [ $rc -eq 0 ]; then if [ $rc -eq 0 ]; then
return $OCF_SUCCESS return $OCF_SUCCESS
else else
ocf_log info "Old PID file $OCF_RESKEY_pid found (with pid $pid), but quantum-dhcp-agent is not running" ocf_log info "Old PID file $OCF_RESKEY_pid found (with pid $pid), but neutron-dhcp-agent is not running"
return $OCF_NOT_RUNNING return $OCF_NOT_RUNNING
fi fi
@ -251,7 +251,7 @@ quantum_dhcp_agent_status() {
# if [ $rc -eq 0 ]; then # if [ $rc -eq 0 ]; then
# return $OCF_SUCCESS # return $OCF_SUCCESS
# else # else
# ocf_log info "quantum-dhcp-agent (with pid $pid) running, but quantum-server means that agent dead." # ocf_log info "neutron-dhcp-agent (with pid $pid) running, but neutron-server means that agent dead."
# return $OCF_ERR_GENERIC # return $OCF_ERR_GENERIC
# fi # fi
} }
@ -264,24 +264,24 @@ clean_up()
} }
quantum_dhcp_agent_monitor() { neutron_dhcp_agent_monitor() {
quantum_dhcp_agent_status neutron_dhcp_agent_status
rc=$? rc=$?
return $rc return $rc
} }
quantum_dhcp_agent_start() { neutron_dhcp_agent_start() {
local rc local rc
quantum_dhcp_agent_status neutron_dhcp_agent_status
rc=$? rc=$?
if [ $rc -eq $OCF_SUCCESS ]; then if [ $rc -eq $OCF_SUCCESS ]; then
ocf_log info "OpenStack DHCP Server (quantum-dhcp-agent) already running" ocf_log info "OpenStack DHCP Server (neutron-dhcp-agent) already running"
return $OCF_SUCCESS return $OCF_SUCCESS
fi fi
clean_up clean_up
# run the actual quantum-dhcp-agent daemon. Don't use ocf_run as we're sending the tool's output # run the actual neutron-dhcp-agent daemon. Don't use ocf_run as we're sending the tool's output
# straight to /dev/null anyway and using ocf_run would break stdout-redirection here. # straight to /dev/null anyway and using ocf_run would break stdout-redirection here.
su ${OCF_RESKEY_user} -s /bin/sh -c "${OCF_RESKEY_binary} --config-file=$OCF_RESKEY_config \ su ${OCF_RESKEY_user} -s /bin/sh -c "${OCF_RESKEY_binary} --config-file=$OCF_RESKEY_config \
--config-file=$OCF_RESKEY_plugin_config $OCF_RESKEY_additional_parameters"' >> \ --config-file=$OCF_RESKEY_plugin_config $OCF_RESKEY_additional_parameters"' >> \
@ -290,31 +290,31 @@ quantum_dhcp_agent_start() {
# Spin waiting for the server to come up. # Spin waiting for the server to come up.
# Let the CRM/LRM time us out if required # Let the CRM/LRM time us out if required
while true; do while true; do
quantum_dhcp_agent_monitor neutron_dhcp_agent_monitor
rc=$? rc=$?
[ $rc -eq $OCF_SUCCESS ] && break [ $rc -eq $OCF_SUCCESS ] && break
if [ $rc -ne $OCF_NOT_RUNNING ]; then if [ $rc -ne $OCF_NOT_RUNNING ]; then
ocf_log err "OpenStack DHCP Server (quantum-dhcp-agent) start failed" ocf_log err "OpenStack DHCP Server (neutron-dhcp-agent) start failed"
exit $OCF_ERR_GENERIC exit $OCF_ERR_GENERIC
fi fi
sleep 1 sleep 1
done done
sleep 13 ; q-agent-cleanup.py --agent=dhcp --reschedule --remove-dead 2>&1 >> /var/log/quantum/rescheduling.log sleep 13 ; q-agent-cleanup.py --agent=dhcp --reschedule --remove-dead 2>&1 >> /var/log/neutron/rescheduling.log
ocf_log info "OpenStack DHCP Server (quantum-dhcp-agent) started" ocf_log info "OpenStack DHCP Server (neutron-dhcp-agent) started"
return $OCF_SUCCESS return $OCF_SUCCESS
} }
quantum_dhcp_agent_stop() { neutron_dhcp_agent_stop() {
local rc local rc
local pid local pid
quantum_dhcp_agent_status neutron_dhcp_agent_status
rc=$? rc=$?
if [ $rc -eq $OCF_NOT_RUNNING ]; then if [ $rc -eq $OCF_NOT_RUNNING ]; then
clean_up clean_up
ocf_log info "OpenStack DHCP Server (quantum-dhcp-agent) already stopped" ocf_log info "OpenStack DHCP Server (neutron-dhcp-agent) already stopped"
return $OCF_SUCCESS return $OCF_SUCCESS
fi fi
@ -324,7 +324,7 @@ quantum_dhcp_agent_stop() {
ocf_run kill -s TERM $pid ocf_run kill -s TERM $pid
rc=$? rc=$?
if [ $rc -ne 0 ]; then if [ $rc -ne 0 ]; then
ocf_log err "OpenStack DHCP Server (quantum-dhcp-agent) couldn't be stopped" ocf_log err "OpenStack DHCP Server (neutron-dhcp-agent) couldn't be stopped"
exit $OCF_ERR_GENERIC exit $OCF_ERR_GENERIC
fi fi
@ -335,26 +335,26 @@ quantum_dhcp_agent_stop() {
fi fi
count=0 count=0
while [ $count -lt $shutdown_timeout ]; do while [ $count -lt $shutdown_timeout ]; do
quantum_dhcp_agent_status neutron_dhcp_agent_status
rc=$? rc=$?
if [ $rc -eq $OCF_NOT_RUNNING ]; then if [ $rc -eq $OCF_NOT_RUNNING ]; then
break break
fi fi
count=`expr $count + 1` count=`expr $count + 1`
sleep 1 sleep 1
ocf_log debug "OpenStack DHCP Server (quantum-dhcp-agent) still hasn't stopped yet. Waiting ..." ocf_log debug "OpenStack DHCP Server (neutron-dhcp-agent) still hasn't stopped yet. Waiting ..."
done done
quantum_dhcp_agent_status neutron_dhcp_agent_status
rc=$? rc=$?
if [ $rc -ne $OCF_NOT_RUNNING ]; then if [ $rc -ne $OCF_NOT_RUNNING ]; then
# SIGTERM didn't help either, try SIGKILL # SIGTERM didn't help either, try SIGKILL
ocf_log info "OpenStack DHCP Server (quantum-dhcp-agent) failed to stop after ${shutdown_timeout}s \ ocf_log info "OpenStack DHCP Server (neutron-dhcp-agent) failed to stop after ${shutdown_timeout}s \
using SIGTERM. Trying SIGKILL ..." using SIGTERM. Trying SIGKILL ..."
ocf_run kill -s KILL $pid ocf_run kill -s KILL $pid
fi fi
clean_up clean_up
ocf_log info "OpenStack DHCP Server (quantum-dhcp-agent) stopped" ocf_log info "OpenStack DHCP Server (neutron-dhcp-agent) stopped"
rm -f $OCF_RESKEY_pid rm -f $OCF_RESKEY_pid
@ -371,14 +371,14 @@ case "$1" in
esac esac
# Anything except meta-data and help must pass validation # Anything except meta-data and help must pass validation
quantum_dhcp_agent_validate || exit $? neutron_dhcp_agent_validate || exit $?
# What kind of method was invoked? # What kind of method was invoked?
case "$1" in case "$1" in
start) quantum_dhcp_agent_start;; start) neutron_dhcp_agent_start;;
stop) quantum_dhcp_agent_stop;; stop) neutron_dhcp_agent_stop;;
status) quantum_dhcp_agent_status;; status) neutron_dhcp_agent_status;;
monitor) quantum_dhcp_agent_monitor;; monitor) neutron_dhcp_agent_monitor;;
validate-all) ;; validate-all) ;;
*) usage *) usage
exit $OCF_ERR_UNIMPLEMENTED;; exit $OCF_ERR_UNIMPLEMENTED;;
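For context, this script is an OCF resource agent: Pacemaker drives it through the start/stop/status/monitor/validate-all actions dispatched in the case block above. A minimal crmsh sketch of wiring it into a cluster; the resource id and the ocf provider/agent names are assumptions, not taken from this commit:

    crm configure primitive p_neutron-dhcp-agent ocf:mirantis:neutron-agent-dhcp \
        op monitor interval=30s timeout=10s \
        op start timeout=60s \
        op stop timeout=60s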

View File

@ -1,9 +1,9 @@
#!/bin/bash #!/bin/bash
# #
# #
# OpenStack L3 Service (quantum-l3-agent) # OpenStack L3 Service (neutron-l3-agent)
# #
# Description: Manages an OpenStack L3 Service (quantum-l3-agent) process as an HA resource # Description: Manages an OpenStack L3 Service (neutron-l3-agent) process as an HA resource
# #
# Authors: Emilien Macchi # Authors: Emilien Macchi
# Mainly inspired by the Nova Network resource agent written by Emilien Macchi & Sebastien Han # Mainly inspired by the Nova Network resource agent written by Emilien Macchi & Sebastien Han
@ -20,7 +20,7 @@
# OCF_RESKEY_plugin_config # OCF_RESKEY_plugin_config
# OCF_RESKEY_user # OCF_RESKEY_user
# OCF_RESKEY_pid # OCF_RESKEY_pid
# OCF_RESKEY_quantum_server_port # OCF_RESKEY_neutron_server_port
# OCF_RESKEY_additional_parameters # OCF_RESKEY_additional_parameters
####################################################################### #######################################################################
# Initialization: # Initialization:
@ -34,14 +34,14 @@
PATH=/sbin:/usr/sbin:/bin:/usr/bin PATH=/sbin:/usr/sbin:/bin:/usr/bin
OCF_RESKEY_binary_default="quantum-l3-agent" OCF_RESKEY_binary_default="neutron-l3-agent"
OCF_RESKEY_config_default="/etc/quantum/quantum.conf" OCF_RESKEY_config_default="/etc/neutron/neutron.conf"
OCF_RESKEY_plugin_config_default="/etc/quantum/l3_agent.ini" OCF_RESKEY_plugin_config_default="/etc/neutron/l3_agent.ini"
OCF_RESKEY_user_default="quantum" OCF_RESKEY_user_default="neutron"
OCF_RESKEY_pid_default="$HA_RSCTMP/$OCF_RESOURCE_INSTANCE.pid" OCF_RESKEY_pid_default="$HA_RSCTMP/$OCF_RESOURCE_INSTANCE.pid"
OCF_RESKEY_os_auth_url_default="http://localhost:5000/v2.0" OCF_RESKEY_os_auth_url_default="http://localhost:5000/v2.0"
OCF_RESKEY_username_default="quantum" OCF_RESKEY_username_default="neutron"
OCF_RESKEY_password_default="quantum_pass" OCF_RESKEY_password_default="neutron_pass"
OCF_RESKEY_tenant_default="services" OCF_RESKEY_tenant_default="services"
OCF_RESKEY_external_bridge_default="br-ex" OCF_RESKEY_external_bridge_default="br-ex"
OCF_RESKEY_debug_default=false OCF_RESKEY_debug_default=false
@ -67,7 +67,7 @@ usage() {
cat <<UEND cat <<UEND
usage: $0 (start|stop|validate-all|meta-data|status|monitor) usage: $0 (start|stop|validate-all|meta-data|status|monitor)
$0 manages an OpenStack L3 Service (quantum-l3-agent) process as an HA resource $0 manages an OpenStack L3 Service (neutron-l3-agent) process as an HA resource
The 'start' operation starts the networking service. The 'start' operation starts the networking service.
The 'stop' operation stops the networking service. The 'stop' operation stops the networking service.
@ -83,63 +83,63 @@ meta_data() {
cat <<END cat <<END
<?xml version="1.0"?> <?xml version="1.0"?>
<!DOCTYPE resource-agent SYSTEM "ra-api-1.dtd"> <!DOCTYPE resource-agent SYSTEM "ra-api-1.dtd">
<resource-agent name="quantum-l3-agent"> <resource-agent name="neutron-l3-agent">
<version>1.0</version> <version>1.0</version>
<longdesc lang="en"> <longdesc lang="en">
Resource agent for the OpenStack Quantum L3 Service (quantum-l3-agent) Resource agent for the OpenStack Quantum L3 Service (neutron-l3-agent)
May manage a quantum-l3-agent instance or a clone set that May manage a neutron-l3-agent instance or a clone set that
creates a distributed quantum-l3-agent cluster. creates a distributed neutron-l3-agent cluster.
</longdesc> </longdesc>
<shortdesc lang="en">Manages the OpenStack L3 Service (quantum-l3-agent)</shortdesc> <shortdesc lang="en">Manages the OpenStack L3 Service (neutron-l3-agent)</shortdesc>
<parameters> <parameters>
<parameter name="binary" unique="0" required="0"> <parameter name="binary" unique="0" required="0">
<longdesc lang="en"> <longdesc lang="en">
Location of the OpenStack L3 Server server binary (quantum-l3-agent) Location of the OpenStack L3 Server server binary (neutron-l3-agent)
</longdesc> </longdesc>
<shortdesc lang="en">OpenStack L3 Server server binary (quantum-l3-agent)</shortdesc> <shortdesc lang="en">OpenStack L3 Server server binary (neutron-l3-agent)</shortdesc>
<content type="string" default="${OCF_RESKEY_binary_default}" /> <content type="string" default="${OCF_RESKEY_binary_default}" />
</parameter> </parameter>
<parameter name="config" unique="0" required="0"> <parameter name="config" unique="0" required="0">
<longdesc lang="en"> <longdesc lang="en">
Location of the OpenStack Quantum Service (quantum-server) configuration file Location of the OpenStack Quantum Service (neutron-server) configuration file
</longdesc> </longdesc>
<shortdesc lang="en">OpenStack L3 Server (quantum-server) config file</shortdesc> <shortdesc lang="en">OpenStack L3 Server (neutron-server) config file</shortdesc>
<content type="string" default="${OCF_RESKEY_config_default}" /> <content type="string" default="${OCF_RESKEY_config_default}" />
</parameter> </parameter>
<parameter name="plugin_config" unique="0" required="0"> <parameter name="plugin_config" unique="0" required="0">
<longdesc lang="en"> <longdesc lang="en">
Location of the OpenStack L3 Service (quantum-l3-agent) configuration file Location of the OpenStack L3 Service (neutron-l3-agent) configuration file
</longdesc> </longdesc>
<shortdesc lang="en">OpenStack L3 Server (quantum-l3-agent) config file</shortdesc> <shortdesc lang="en">OpenStack L3 Server (neutron-l3-agent) config file</shortdesc>
<content type="string" default="${OCF_RESKEY_plugin_config_default}" /> <content type="string" default="${OCF_RESKEY_plugin_config_default}" />
</parameter> </parameter>
<parameter name="user" unique="0" required="0"> <parameter name="user" unique="0" required="0">
<longdesc lang="en"> <longdesc lang="en">
User running OpenStack L3 Service (quantum-l3-agent) User running OpenStack L3 Service (neutron-l3-agent)
</longdesc> </longdesc>
<shortdesc lang="en">OpenStack L3 Service (quantum-l3-agent) user</shortdesc> <shortdesc lang="en">OpenStack L3 Service (neutron-l3-agent) user</shortdesc>
<content type="string" default="${OCF_RESKEY_user_default}" /> <content type="string" default="${OCF_RESKEY_user_default}" />
</parameter> </parameter>
<parameter name="pid" unique="0" required="0"> <parameter name="pid" unique="0" required="0">
<longdesc lang="en"> <longdesc lang="en">
The pid file to use for this OpenStack L3 Service (quantum-l3-agent) instance The pid file to use for this OpenStack L3 Service (neutron-l3-agent) instance
</longdesc> </longdesc>
<shortdesc lang="en">OpenStack L3 Service (quantum-l3-agent) pid file</shortdesc> <shortdesc lang="en">OpenStack L3 Service (neutron-l3-agent) pid file</shortdesc>
<content type="string" default="${OCF_RESKEY_pid_default}" /> <content type="string" default="${OCF_RESKEY_pid_default}" />
</parameter> </parameter>
<parameter name="quantum_server_port" unique="0" required="0"> <parameter name="neutron_server_port" unique="0" required="0">
<longdesc lang="en"> <longdesc lang="en">
The listening port number of the AMQP server. Mandatory to perform a monitor check The listening port number of the AMQP server. Mandatory to perform a monitor check
</longdesc> </longdesc>
<shortdesc lang="en">AMQP listening port</shortdesc> <shortdesc lang="en">AMQP listening port</shortdesc>
<content type="integer" default="${OCF_RESKEY_quantum_server_port_default}" /> <content type="integer" default="${OCF_RESKEY_neutron_server_port_default}" />
</parameter> </parameter>
@ -202,9 +202,9 @@ External bridge for l3-agent
<parameter name="additional_parameters" unique="0" required="0"> <parameter name="additional_parameters" unique="0" required="0">
<longdesc lang="en"> <longdesc lang="en">
Additional parameters to pass on to the OpenStack L3 Service (quantum-l3-agent) Additional parameters to pass on to the OpenStack L3 Service (neutron-l3-agent)
</longdesc> </longdesc>
<shortdesc lang="en">Additional parameters for quantum-l3-agent</shortdesc> <shortdesc lang="en">Additional parameters for neutron-l3-agent</shortdesc>
<content type="string" /> <content type="string" />
</parameter> </parameter>
@ -227,7 +227,7 @@ END
####################################################################### #######################################################################
# Functions invoked by resource manager actions # Functions invoked by resource manager actions
quantum_l3_agent_validate() { neutron_l3_agent_validate() {
local rc local rc
check_binary $OCF_RESKEY_binary check_binary $OCF_RESKEY_binary
@ -253,19 +253,19 @@ quantum_l3_agent_validate() {
true true
} }
quantum_l3_agent_status() { neutron_l3_agent_status() {
local pid local pid
local rc local rc
if [ ! -f $OCF_RESKEY_pid ]; then if [ ! -f $OCF_RESKEY_pid ]; then
ocf_log info "OpenStack OVS Server (quantum-l3-agent) seems not to exist" ocf_log info "OpenStack OVS Server (neutron-l3-agent) seems not to exist"
pid=`pgrep -f ${OCF_RESKEY_binary}` pid=`pgrep -f ${OCF_RESKEY_binary}`
if [ $? -eq 0 ] if [ $? -eq 0 ]
then then
ocf_log warn "OpenStack quantum-l3-agent was run, but no pid file found." ocf_log warn "OpenStack neutron-l3-agent was run, but no pid file found."
ocf_log warn "Will use $pid as PID of process (quantum-l3-agent)" ocf_log warn "Will use $pid as PID of process (neutron-l3-agent)"
ocf_log warn "Writing $pid into $OCF_RESKEY_pid" ocf_log warn "Writing $pid into $OCF_RESKEY_pid"
echo $pid > $OCF_RESKEY_pid echo $pid > $OCF_RESKEY_pid
else else
@ -280,7 +280,7 @@ quantum_l3_agent_status() {
if [ $rc -eq 0 ]; then if [ $rc -eq 0 ]; then
return $OCF_SUCCESS return $OCF_SUCCESS
else else
ocf_log info "Old PID file found, but OpenStack quantum-l3-agent is not running" ocf_log info "Old PID file found, but OpenStack neutron-l3-agent is not running"
return $OCF_NOT_RUNNING return $OCF_NOT_RUNNING
fi fi
} }
@ -293,49 +293,49 @@ clean_up()
} }
quantum_l3_agent_monitor() { neutron_l3_agent_monitor() {
quantum_l3_agent_status neutron_l3_agent_status
rc=$? rc=$?
return $rc return $rc
} }
quantum_l3_agent_start() { neutron_l3_agent_start() {
local rc local rc
quantum_l3_agent_status neutron_l3_agent_status
rc=$? rc=$?
if [ $rc -eq $OCF_SUCCESS ]; then if [ $rc -eq $OCF_SUCCESS ]; then
ocf_log info "OpenStack quantum-l3-agent already running" ocf_log info "OpenStack neutron-l3-agent already running"
return $OCF_SUCCESS return $OCF_SUCCESS
fi fi
L3_PID=`pgrep -u ${OCF_RESKEY_user} -f ${OCF_RESKEY_binary}` L3_PID=`pgrep -u ${OCF_RESKEY_user} -f ${OCF_RESKEY_binary}`
if [ "xx$L3_PID" != "xx" ]; then if [ "xx$L3_PID" != "xx" ]; then
ocf_log info "OpenStack quantum-l3-agent already running with PID=$L3_PID" ocf_log info "OpenStack neutron-l3-agent already running with PID=$L3_PID"
return $OCF_SUCCESS return $OCF_SUCCESS
fi fi
clean_up clean_up
# FIXME stderr should not be used unless quantum+agents init & OCF would redirect to stderr # FIXME stderr should not be used unless neutron+agents init & OCF would redirect to stderr
# if ocf_is_true ${OCF_RESKEY_syslog} ; then # if ocf_is_true ${OCF_RESKEY_syslog} ; then
# Disable logger because we use imfile for log files grabbing to rsyslog # Disable logger because we use imfile for log files grabbing to rsyslog
# L3_SYSLOG=" | logger -t quantum-quantum.agent.l3 " # L3_SYSLOG=" | logger -t neutron-neutron.agent.l3 "
# if ocf_is_true ${OCF_RESKEY_debug} ; then # if ocf_is_true ${OCF_RESKEY_debug} ; then
# L3_LOG=" | tee -ia /var/log/quantum/l3.log " # L3_LOG=" | tee -ia /var/log/neutron/l3.log "
# else # else
# L3_LOG=" " # L3_LOG=" "
# fi # fi
# else # else
# L3_SYSLOG="" # L3_SYSLOG=""
# if ocf_is_true ${OCF_RESKEY_debug} ; then # if ocf_is_true ${OCF_RESKEY_debug} ; then
# L3_LOG=" >> /var/log/quantum/l3.log " # L3_LOG=" >> /var/log/neutron/l3.log "
# else # else
# L3_LOG=" >> /dev/null " # L3_LOG=" >> /dev/null "
# fi # fi
# fi # fi
L3_SYSLOG="" L3_SYSLOG=""
L3_LOG=" > /dev/null " L3_LOG=" > /dev/null "
# run the actual quantum-l3-agent daemon. Don't use ocf_run as we're sending the tool's output # run the actual neutron-l3-agent daemon. Don't use ocf_run as we're sending the tool's output
# straight to /dev/null anyway and using ocf_run would break stdout-redirection here. # straight to /dev/null anyway and using ocf_run would break stdout-redirection here.
su ${OCF_RESKEY_user} -s /bin/sh -c "${OCF_RESKEY_binary} --config-file=$OCF_RESKEY_config \ su ${OCF_RESKEY_user} -s /bin/sh -c "${OCF_RESKEY_binary} --config-file=$OCF_RESKEY_config \
@ -347,31 +347,31 @@ quantum_l3_agent_start() {
# Spin waiting for the server to come up. # Spin waiting for the server to come up.
# Let the CRM/LRM time us out if required # Let the CRM/LRM time us out if required
while true; do while true; do
quantum_l3_agent_monitor neutron_l3_agent_monitor
rc=$? rc=$?
[ $rc -eq $OCF_SUCCESS ] && break [ $rc -eq $OCF_SUCCESS ] && break
if [ $rc -ne $OCF_NOT_RUNNING ]; then if [ $rc -ne $OCF_NOT_RUNNING ]; then
ocf_log err "OpenStack quantum-l3-agent start failed" ocf_log err "OpenStack neutron-l3-agent start failed"
exit $OCF_ERR_GENERIC exit $OCF_ERR_GENERIC
fi fi
sleep 1 sleep 1
done done
sleep 13 ; q-agent-cleanup.py --agent=l3 --reschedule --remove-dead 2>&1 >> /var/log/quantum/rescheduling.log sleep 13 ; q-agent-cleanup.py --agent=l3 --reschedule --remove-dead 2>&1 >> /var/log/neutron/rescheduling.log
ocf_log info "OpenStack L3 Server (quantum-l3-agent) started" ocf_log info "OpenStack L3 Server (neutron-l3-agent) started"
return $OCF_SUCCESS return $OCF_SUCCESS
} }
quantum_l3_agent_stop() { neutron_l3_agent_stop() {
local rc local rc
local pid local pid
quantum_l3_agent_status neutron_l3_agent_status
rc=$? rc=$?
if [ $rc -eq $OCF_NOT_RUNNING ]; then if [ $rc -eq $OCF_NOT_RUNNING ]; then
clean_up clean_up
ocf_log info "OpenStack L3 Server (quantum-l3-agent) already stopped" ocf_log info "OpenStack L3 Server (neutron-l3-agent) already stopped"
return $OCF_SUCCESS return $OCF_SUCCESS
fi fi
@ -380,7 +380,7 @@ quantum_l3_agent_stop() {
ocf_run kill -s TERM $pid ocf_run kill -s TERM $pid
rc=$? rc=$?
if [ $rc -ne 0 ]; then if [ $rc -ne 0 ]; then
ocf_log err "OpenStack L3 Server (quantum-l3-agent) couldn't be stopped" ocf_log err "OpenStack L3 Server (neutron-l3-agent) couldn't be stopped"
exit $OCF_ERR_GENERIC exit $OCF_ERR_GENERIC
fi fi
@ -391,26 +391,26 @@ quantum_l3_agent_stop() {
fi fi
count=0 count=0
while [ $count -lt $shutdown_timeout ]; do while [ $count -lt $shutdown_timeout ]; do
quantum_l3_agent_status neutron_l3_agent_status
rc=$? rc=$?
if [ $rc -eq $OCF_NOT_RUNNING ]; then if [ $rc -eq $OCF_NOT_RUNNING ]; then
break break
fi fi
count=`expr $count + 1` count=`expr $count + 1`
sleep 1 sleep 1
ocf_log debug "OpenStack L3 Server (quantum-l3-agent) still hasn't stopped yet. Waiting ..." ocf_log debug "OpenStack L3 Server (neutron-l3-agent) still hasn't stopped yet. Waiting ..."
done done
quantum_l3_agent_status neutron_l3_agent_status
rc=$? rc=$?
if [ $rc -ne $OCF_NOT_RUNNING ]; then if [ $rc -ne $OCF_NOT_RUNNING ]; then
# SIGTERM didn't help either, try SIGKILL # SIGTERM didn't help either, try SIGKILL
ocf_log info "OpenStack L3 Server (quantum-l3-agent) failed to stop after ${shutdown_timeout}s \ ocf_log info "OpenStack L3 Server (neutron-l3-agent) failed to stop after ${shutdown_timeout}s \
using SIGTERM. Trying SIGKILL ..." using SIGTERM. Trying SIGKILL ..."
ocf_run kill -s KILL $pid ocf_run kill -s KILL $pid
fi fi
ocf_log info "OpenStack quantum-l3-agent stopped" ocf_log info "OpenStack neutron-l3-agent stopped"
rm -f $OCF_RESKEY_pid rm -f $OCF_RESKEY_pid
clean_up clean_up
@ -428,14 +428,14 @@ case "$1" in
esac esac
# Anything except meta-data and help must pass validation # Anything except meta-data and help must pass validation
quantum_l3_agent_validate || exit $? neutron_l3_agent_validate || exit $?
# What kind of method was invoked? # What kind of method was invoked?
case "$1" in case "$1" in
start) quantum_l3_agent_start;; start) neutron_l3_agent_start;;
stop) quantum_l3_agent_stop;; stop) neutron_l3_agent_stop;;
status) quantum_l3_agent_status;; status) neutron_l3_agent_status;;
monitor) quantum_l3_agent_monitor;; monitor) neutron_l3_agent_monitor;;
validate-all) ;; validate-all) ;;
*) usage *) usage
exit $OCF_ERR_UNIMPLEMENTED;; exit $OCF_ERR_UNIMPLEMENTED;;
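One shell detail worth flagging in the DHCP and L3 start functions above: in "q-agent-cleanup.py --agent=l3 --reschedule --remove-dead 2>&1 >> /var/log/neutron/rescheduling.log", redirections are applied left to right, so stderr is duplicated to the console before stdout is appended to the log, and errors from the cleanup script never reach the file. If the intent is to capture both streams, the conventional ordering is:

    q-agent-cleanup.py --agent=l3 --reschedule --remove-dead >> /var/log/neutron/rescheduling.log 2>&1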

View File

@ -1,9 +1,9 @@
#!/bin/bash #!/bin/bash
# #
# #
# OpenStack OVS Service (quantum-metadata-agent) # OpenStack OVS Service (neutron-metadata-agent)
# #
# Description: Manages an OpenStack OVS Service (quantum-metadata-agent) process as an HA resource # Description: Manages an OpenStack OVS Service (neutron-metadata-agent) process as an HA resource
# #
# Authors: Emilien Macchi # Authors: Emilien Macchi
# Mainly inspired by the Nova Network resource agent written by Emilien Macchi & Sebastien Han # Mainly inspired by the Nova Network resource agent written by Emilien Macchi & Sebastien Han
@ -32,10 +32,10 @@
PATH=/sbin:/usr/sbin:/bin:/usr/bin PATH=/sbin:/usr/sbin:/bin:/usr/bin
OCF_RESKEY_binary_default="quantum-metadata-agent" OCF_RESKEY_binary_default="neutron-metadata-agent"
OCF_RESKEY_config_default="/etc/quantum/quantum.conf" OCF_RESKEY_config_default="/etc/neutron/neutron.conf"
OCF_RESKEY_agent_config_default="/etc/quantum/metadata_agent.ini" OCF_RESKEY_agent_config_default="/etc/neutron/metadata_agent.ini"
OCF_RESKEY_user_default="quantum" OCF_RESKEY_user_default="neutron"
OCF_RESKEY_pid_default="$HA_RSCTMP/$OCF_RESOURCE_INSTANCE.pid" OCF_RESKEY_pid_default="$HA_RSCTMP/$OCF_RESOURCE_INSTANCE.pid"
: ${OCF_RESKEY_binary=${OCF_RESKEY_binary_default}} : ${OCF_RESKEY_binary=${OCF_RESKEY_binary_default}}
@ -50,7 +50,7 @@ usage() {
cat <<UEND cat <<UEND
usage: $0 (start|stop|validate-all|meta-data|status|monitor) usage: $0 (start|stop|validate-all|meta-data|status|monitor)
$0 manages an OpenStack Quantum Metadata Agent (quantum-metadata-agent) process as an HA resource $0 manages an OpenStack Quantum Metadata Agent (neutron-metadata-agent) process as an HA resource
The 'start' operation starts the networking service. The 'start' operation starts the networking service.
The 'stop' operation stops the networking service. The 'stop' operation stops the networking service.
@ -66,15 +66,15 @@ meta_data() {
cat <<END cat <<END
<?xml version="1.0"?> <?xml version="1.0"?>
<!DOCTYPE resource-agent SYSTEM "ra-api-1.dtd"> <!DOCTYPE resource-agent SYSTEM "ra-api-1.dtd">
<resource-agent name="quantum-metadata-agent"> <resource-agent name="neutron-metadata-agent">
<version>1.0</version> <version>1.0</version>
<longdesc lang="en"> <longdesc lang="en">
Resource agent for the OpenStack Quantum Metadata Agent Resource agent for the OpenStack Quantum Metadata Agent
May manage a quantum-metadata-agent instance or a clone set that May manage a neutron-metadata-agent instance or a clone set that
creates a distributed quantum-metadata-agent cluster. creates a distributed neutron-metadata-agent cluster.
</longdesc> </longdesc>
<shortdesc lang="en">Manages the OpenStack OVS Service (quantum-metadata-agent)</shortdesc> <shortdesc lang="en">Manages the OpenStack OVS Service (neutron-metadata-agent)</shortdesc>
<parameters> <parameters>
<parameter name="binary" unique="0" required="0"> <parameter name="binary" unique="0" required="0">
@ -87,9 +87,9 @@ Location of the Quantum Metadata Agent binary
<parameter name="config" unique="0" required="0"> <parameter name="config" unique="0" required="0">
<longdesc lang="en"> <longdesc lang="en">
Location of the OpenStack Quantum Service (quantum-server) configuration file Location of the OpenStack Quantum Service (neutron-server) configuration file
</longdesc> </longdesc>
<shortdesc lang="en">OpenStack OVS Server (quantum-server) config file</shortdesc> <shortdesc lang="en">OpenStack OVS Server (neutron-server) config file</shortdesc>
<content type="string" default="${OCF_RESKEY_config_default}" /> <content type="string" default="${OCF_RESKEY_config_default}" />
</parameter> </parameter>
@ -103,17 +103,17 @@ Location of the OpenStack Quantum Metadata Agent configuration file
<parameter name="user" unique="0" required="0"> <parameter name="user" unique="0" required="0">
<longdesc lang="en"> <longdesc lang="en">
User running Quantum Metadata Agent service (quantum-metadata-agent) User running Quantum Metadata Agent service (neutron-metadata-agent)
</longdesc> </longdesc>
<shortdesc lang="en">OpenStack Quantum Metadata Agent service (quantum-metadata-agent) user</shortdesc> <shortdesc lang="en">OpenStack Quantum Metadata Agent service (neutron-metadata-agent) user</shortdesc>
<content type="string" default="${OCF_RESKEY_user_default}" /> <content type="string" default="${OCF_RESKEY_user_default}" />
</parameter> </parameter>
<parameter name="pid" unique="0" required="0"> <parameter name="pid" unique="0" required="0">
<longdesc lang="en"> <longdesc lang="en">
The pid file to use for this Quantum Metadata Agent service (quantum-metadata-agent) instance The pid file to use for this Quantum Metadata Agent service (neutron-metadata-agent) instance
</longdesc> </longdesc>
<shortdesc lang="en">OpenStack Quantum Metadata Agent service (quantum-metadata-agent) pid file</shortdesc> <shortdesc lang="en">OpenStack Quantum Metadata Agent service (neutron-metadata-agent) pid file</shortdesc>
<content type="string" default="${OCF_RESKEY_pid_default}" /> <content type="string" default="${OCF_RESKEY_pid_default}" />
</parameter> </parameter>
@ -134,7 +134,7 @@ END
####################################################################### #######################################################################
# Functions invoked by resource manager actions # Functions invoked by resource manager actions
quantum_metadata_agent_validate() { neutron_metadata_agent_validate() {
local rc local rc
check_binary $OCF_RESKEY_binary check_binary $OCF_RESKEY_binary
@ -160,19 +160,19 @@ quantum_metadata_agent_validate() {
true true
} }
quantum_metadata_agent_status() { neutron_metadata_agent_status() {
local pid local pid
local rc local rc
if [ ! -f $OCF_RESKEY_pid ]; then if [ ! -f $OCF_RESKEY_pid ]; then
ocf_log info "OpenStack Quantum Metadata Agent (quantum-metadata-agent) seems not to exist" ocf_log info "OpenStack Quantum Metadata Agent (neutron-metadata-agent) seems not to exist"
pid=`pgrep -f ${OCF_RESKEY_binary}` pid=`pgrep -f ${OCF_RESKEY_binary}`
if [ $? -eq 0 ] if [ $? -eq 0 ]
then then
ocf_log warn "OpenStack Quantum Metadata Agent (quantum-metadata-agent) was run, but no pid file found." ocf_log warn "OpenStack Quantum Metadata Agent (neutron-metadata-agent) was run, but no pid file found."
ocf_log warn "Will use $pid as PID of process (quantum-metadata-agent)" ocf_log warn "Will use $pid as PID of process (neutron-metadata-agent)"
ocf_log warn "Writing $pid into $OCF_RESKEY_pid" ocf_log warn "Writing $pid into $OCF_RESKEY_pid"
echo $pid > $OCF_RESKEY_pid echo $pid > $OCF_RESKEY_pid
else else
@ -187,33 +187,33 @@ quantum_metadata_agent_status() {
if [ $rc -eq 0 ]; then if [ $rc -eq 0 ]; then
return $OCF_SUCCESS return $OCF_SUCCESS
else else
ocf_log info "Old PID file $OCF_RESKEY_pid found (with pid $pid), but OpenStack Quantum Metadata Agent (quantum-metadata-agent) is not running" ocf_log info "Old PID file $OCF_RESKEY_pid found (with pid $pid), but OpenStack Quantum Metadata Agent (neutron-metadata-agent) is not running"
return $OCF_NOT_RUNNING return $OCF_NOT_RUNNING
fi fi
} }
quantum_metadata_agent_monitor() { neutron_metadata_agent_monitor() {
quantum_metadata_agent_status neutron_metadata_agent_status
rc=$? rc=$?
return $rc return $rc
} }
clean_up() { clean_up() {
ocf_log info "cleaning up quantum-metadata-agent. nothing to do." ocf_log info "cleaning up neutron-metadata-agent. nothing to do."
} }
quantum_metadata_agent_start() { neutron_metadata_agent_start() {
local rc local rc
quantum_metadata_agent_status neutron_metadata_agent_status
rc=$? rc=$?
if [ $rc -eq $OCF_SUCCESS ]; then if [ $rc -eq $OCF_SUCCESS ]; then
ocf_log info "OpenStack Quantum Metadata Agent (quantum-metadata-agent) already running" ocf_log info "OpenStack Quantum Metadata Agent (neutron-metadata-agent) already running"
return $OCF_SUCCESS return $OCF_SUCCESS
fi fi
clean_up clean_up
# run the actual quantum-metadata-agent daemon. Don't use ocf_run as we're sending the tool's output # run the actual neutron-metadata-agent daemon. Don't use ocf_run as we're sending the tool's output
# straight to /dev/null anyway and using ocf_run would break stdout-redirection here. # straight to /dev/null anyway and using ocf_run would break stdout-redirection here.
su ${OCF_RESKEY_user} -s /bin/sh -c "${OCF_RESKEY_binary} --config-file=$OCF_RESKEY_config \ su ${OCF_RESKEY_user} -s /bin/sh -c "${OCF_RESKEY_binary} --config-file=$OCF_RESKEY_config \
--config-file=$OCF_RESKEY_agent_config $OCF_RESKEY_additional_parameters"' >> \ --config-file=$OCF_RESKEY_agent_config $OCF_RESKEY_additional_parameters"' >> \
@ -222,29 +222,29 @@ quantum_metadata_agent_start() {
# Spin waiting for the server to come up. # Spin waiting for the server to come up.
# Let the CRM/LRM time us out if required # Let the CRM/LRM time us out if required
while true; do while true; do
quantum_metadata_agent_monitor neutron_metadata_agent_monitor
rc=$? rc=$?
[ $rc -eq $OCF_SUCCESS ] && break [ $rc -eq $OCF_SUCCESS ] && break
if [ $rc -ne $OCF_NOT_RUNNING ]; then if [ $rc -ne $OCF_NOT_RUNNING ]; then
ocf_log err "OpenStack Quantum Metadata Agent (quantum-metadata-agent) start failed" ocf_log err "OpenStack neutron Metadata Agent (neutron-metadata-agent) start failed"
exit $OCF_ERR_GENERIC exit $OCF_ERR_GENERIC
fi fi
sleep 1 sleep 1
done done
ocf_log info "OpenStack Quantum Metadata Agent (quantum-metadata-agent) started" ocf_log info "OpenStack Quantum Metadata Agent (neutron-metadata-agent) started"
return $OCF_SUCCESS return $OCF_SUCCESS
} }
quantum_metadata_agent_stop() { neutron_metadata_agent_stop() {
local rc local rc
local pid local pid
quantum_metadata_agent_status neutron_metadata_agent_status
rc=$? rc=$?
if [ $rc -eq $OCF_NOT_RUNNING ]; then if [ $rc -eq $OCF_NOT_RUNNING ]; then
clean_up clean_up
ocf_log info "OpenStack Quantum Metadata Agent (quantum-metadata-agent) already stopped" ocf_log info "OpenStack Quantum Metadata Agent (neutron-metadata-agent) already stopped"
return $OCF_SUCCESS return $OCF_SUCCESS
fi fi
@ -253,7 +253,7 @@ quantum_metadata_agent_stop() {
ocf_run kill -s TERM $pid ocf_run kill -s TERM $pid
rc=$? rc=$?
if [ $rc -ne 0 ]; then if [ $rc -ne 0 ]; then
ocf_log err "OpenStack Quantum Metadata Agent (quantum-metadata-agent) couldn't be stopped" ocf_log err "OpenStack Quantum Metadata Agent (neutron-metadata-agent) couldn't be stopped"
exit $OCF_ERR_GENERIC exit $OCF_ERR_GENERIC
fi fi
@ -264,28 +264,28 @@ quantum_metadata_agent_stop() {
fi fi
count=0 count=0
while [ $count -lt $shutdown_timeout ]; do while [ $count -lt $shutdown_timeout ]; do
quantum_metadata_agent_status neutron_metadata_agent_status
rc=$? rc=$?
if [ $rc -eq $OCF_NOT_RUNNING ]; then if [ $rc -eq $OCF_NOT_RUNNING ]; then
break break
fi fi
count=`expr $count + 1` count=`expr $count + 1`
sleep 1 sleep 1
ocf_log debug "OpenStack Quantum Metadata Agent (quantum-metadata-agent) still hasn't stopped yet. Waiting ..." ocf_log debug "OpenStack Quantum Metadata Agent (neutron-metadata-agent) still hasn't stopped yet. Waiting ..."
done done
quantum_metadata_agent_status neutron_metadata_agent_status
rc=$? rc=$?
if [ $rc -ne $OCF_NOT_RUNNING ]; then if [ $rc -ne $OCF_NOT_RUNNING ]; then
# SIGTERM didn't help either, try SIGKILL # SIGTERM didn't help either, try SIGKILL
ocf_log info "OpenStack Quantum Metadata Agent (quantum-metadata-agent) failed to stop after ${shutdown_timeout}s \ ocf_log info "OpenStack Quantum Metadata Agent (neutron-metadata-agent) failed to stop after ${shutdown_timeout}s \
using SIGTERM. Trying SIGKILL ..." using SIGTERM. Trying SIGKILL ..."
ocf_run kill -s KILL $pid ocf_run kill -s KILL $pid
fi fi
clean_up clean_up
ocf_log info "OpenStack Quantum Metadata Agent (quantum-metadata-agent) stopped" ocf_log info "OpenStack Quantum Metadata Agent (neutron-metadata-agent) stopped"
rm -f $OCF_RESKEY_pid rm -f $OCF_RESKEY_pid
@ -302,16 +302,16 @@ case "$1" in
esac esac
# Anything except meta-data and help must pass validation # Anything except meta-data and help must pass validation
quantum_metadata_agent_validate || exit $? neutron_metadata_agent_validate || exit $?
# What kind of method was invoked? # What kind of method was invoked?
case "$1" in case "$1" in
start) quantum_metadata_agent_start;; start) neutron_metadata_agent_start;;
stop) quantum_metadata_agent_stop;; stop) neutron_metadata_agent_stop;;
status) quantum_metadata_agent_status;; status) neutron_metadata_agent_status;;
monitor) quantum_metadata_agent_monitor;; monitor) neutron_metadata_agent_monitor;;
validate) quantum_metadata_agent_validate;; validate) neutron_metadata_agent_validate;;
validate-all) quantum_metadata_agent_validate;; validate-all) neutron_metadata_agent_validate;;
*) usage *) usage
exit $OCF_ERR_UNIMPLEMENTED;; exit $OCF_ERR_UNIMPLEMENTED;;
esac esac

View File

@ -1,9 +1,9 @@
#!/bin/bash #!/bin/bash
# #
# #
# OpenStack OVS Service (quantum-ovs-agent) # OpenStack OVS Service (neutron-ovs-agent)
# #
# Description: Manages an OpenStack OVS Service (quantum-ovs-agent) process as an HA resource # Description: Manages an OpenStack OVS Service (neutron-ovs-agent) process as an HA resource
# #
# Authors: Emilien Macchi # Authors: Emilien Macchi
# Mainly inspired by the Nova Network resource agent written by Emilien Macchi & Sebastien Han # Mainly inspired by the Nova Network resource agent written by Emilien Macchi & Sebastien Han
@ -32,10 +32,10 @@
# Fill in some defaults if no values are specified # Fill in some defaults if no values are specified
OCF_RESKEY_binary_default="quantum-openvswitch-agent" OCF_RESKEY_binary_default="neutron-openvswitch-agent"
OCF_RESKEY_config_default="/etc/quantum/quantum.conf" OCF_RESKEY_config_default="/etc/neutron/neutron.conf"
OCF_RESKEY_plugin_config_default="/etc/quantum/plugins/openvswitch/ovs_quantum_plugin.ini" OCF_RESKEY_plugin_config_default="/etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini"
OCF_RESKEY_user_default="quantum" OCF_RESKEY_user_default="neutron"
OCF_RESKEY_pid_default="$HA_RSCTMP/$OCF_RESOURCE_INSTANCE.pid" OCF_RESKEY_pid_default="$HA_RSCTMP/$OCF_RESOURCE_INSTANCE.pid"
OCF_RESKEY_amqp_server_port_default="5672" OCF_RESKEY_amqp_server_port_default="5672"
OCF_RESKEY_integration_bridge_default="br-int" OCF_RESKEY_integration_bridge_default="br-int"
@ -56,7 +56,7 @@ usage() {
cat <<UEND cat <<UEND
usage: $0 (start|stop|validate-all|meta-data|status|monitor) usage: $0 (start|stop|validate-all|meta-data|status|monitor)
$0 manages an OpenStack OVS Service (quantum-ovs-agent) process as an HA resource $0 manages an OpenStack OVS Service (neutron-ovs-agent) process as an HA resource
The 'start' operation starts the networking service. The 'start' operation starts the networking service.
The 'stop' operation stops the networking service. The 'stop' operation stops the networking service.
@ -72,79 +72,79 @@ meta_data() {
cat <<END cat <<END
<?xml version="1.0"?> <?xml version="1.0"?>
<!DOCTYPE resource-agent SYSTEM "ra-api-1.dtd"> <!DOCTYPE resource-agent SYSTEM "ra-api-1.dtd">
<resource-agent name="quantum-ovs-agent"> <resource-agent name="neutron-ovs-agent">
<version>1.0</version> <version>1.0</version>
<longdesc lang="en"> <longdesc lang="en">
Resource agent for the OpenStack Quantum OVS Service (quantum-ovs-agent) Resource agent for the OpenStack Quantum OVS Service (neutron-ovs-agent)
May manage a quantum-ovs-agent instance or a clone set that May manage a neutron-ovs-agent instance or a clone set that
creates a distributed quantum-ovs-agent cluster. creates a distributed neutron-ovs-agent cluster.
</longdesc> </longdesc>
<shortdesc lang="en">Manages the OpenStack OVS Service (quantum-ovs-agent)</shortdesc> <shortdesc lang="en">Manages the OpenStack OVS Service (neutron-ovs-agent)</shortdesc>
<parameters> <parameters>
<parameter name="binary" unique="0" required="0"> <parameter name="binary" unique="0" required="0">
<longdesc lang="en"> <longdesc lang="en">
Location of the OpenStack OVS Server server binary (quantum-ovs-agent) Location of the OpenStack OVS Server server binary (neutron-ovs-agent)
</longdesc> </longdesc>
<shortdesc lang="en">OpenStack OVS Server server binary (quantum-ovs-agent)</shortdesc> <shortdesc lang="en">OpenStack OVS Server server binary (neutron-ovs-agent)</shortdesc>
<content type="string" default="${OCF_RESKEY_binary_default}" /> <content type="string" default="${OCF_RESKEY_binary_default}" />
</parameter> </parameter>
<parameter name="config" unique="0" required="0"> <parameter name="config" unique="0" required="0">
<longdesc lang="en"> <longdesc lang="en">
Location of the OpenStack Quantum Service (quantum-server) configuration file Location of the OpenStack Quantum Service (neutron-server) configuration file
</longdesc> </longdesc>
<shortdesc lang="en">OpenStack OVS Server (quantum-server) config file</shortdesc> <shortdesc lang="en">OpenStack OVS Server (neutron-server) config file</shortdesc>
<content type="string" default="${OCF_RESKEY_config_default}" /> <content type="string" default="${OCF_RESKEY_config_default}" />
</parameter> </parameter>
<parameter name="plugin_config" unique="0" required="0"> <parameter name="plugin_config" unique="0" required="0">
<longdesc lang="en"> <longdesc lang="en">
Location of the OpenStack OVS Service (quantum-ovs-agent) configuration file Location of the OpenStack OVS Service (neutron-ovs-agent) configuration file
</longdesc> </longdesc>
<shortdesc lang="en">OpenStack OVS Server (quantum-ovs-agent) config file</shortdesc> <shortdesc lang="en">OpenStack OVS Server (neutron-ovs-agent) config file</shortdesc>
<content type="string" default="${OCF_RESKEY_plugin_config_default}" /> <content type="string" default="${OCF_RESKEY_plugin_config_default}" />
</parameter> </parameter>
<parameter name="user" unique="0" required="0"> <parameter name="user" unique="0" required="0">
<longdesc lang="en"> <longdesc lang="en">
User running OpenStack OVS Service (quantum-ovs-agent) User running OpenStack OVS Service (neutron-ovs-agent)
</longdesc> </longdesc>
<shortdesc lang="en">OpenStack OVS Service (quantum-ovs-agent) user</shortdesc> <shortdesc lang="en">OpenStack OVS Service (neutron-ovs-agent) user</shortdesc>
<content type="string" default="${OCF_RESKEY_user_default}" /> <content type="string" default="${OCF_RESKEY_user_default}" />
</parameter> </parameter>
<parameter name="pid" unique="0" required="0"> <parameter name="pid" unique="0" required="0">
<longdesc lang="en"> <longdesc lang="en">
The pid file to use for this OpenStack OVS Service (quantum-ovs-agent) instance The pid file to use for this OpenStack OVS Service (neutron-ovs-agent) instance
</longdesc> </longdesc>
<shortdesc lang="en">OpenStack OVS Service (quantum-ovs-agent) pid file</shortdesc> <shortdesc lang="en">OpenStack OVS Service (neutron-ovs-agent) pid file</shortdesc>
<content type="string" default="${OCF_RESKEY_pid_default}" /> <content type="string" default="${OCF_RESKEY_pid_default}" />
</parameter> </parameter>
<parameter name="additional_parameters" unique="0" required="0"> <parameter name="additional_parameters" unique="0" required="0">
<longdesc lang="en"> <longdesc lang="en">
Additional parameters to pass on to the OpenStack OVS Service (quantum-ovs-agent) Additional parameters to pass on to the OpenStack OVS Service (neutron-ovs-agent)
</longdesc> </longdesc>
<shortdesc lang="en">Additional parameters for quantum-ovs-agent</shortdesc> <shortdesc lang="en">Additional parameters for neutron-ovs-agent</shortdesc>
<content type="string" /> <content type="string" />
</parameter> </parameter>
<parameter name="integration_bridge" unique="0" required="0"> <parameter name="integration_bridge" unique="0" required="0">
<longdesc lang="en"> <longdesc lang="en">
OVS integration bridge name OpenStack OVS Service (quantum-ovs-agent) OVS integration bridge name OpenStack OVS Service (neutron-ovs-agent)
</longdesc> </longdesc>
<shortdesc lang="en">Integration bridge name for quantum-ovs-agent</shortdesc> <shortdesc lang="en">Integration bridge name for neutron-ovs-agent</shortdesc>
<content type="string" default="${OCF_RESKEY_integration_bridge_default}"/> <content type="string" default="${OCF_RESKEY_integration_bridge_default}"/>
</parameter> </parameter>
<parameter name="segmentation_bridge" unique="0" required="0"> <parameter name="segmentation_bridge" unique="0" required="0">
<longdesc lang="en"> <longdesc lang="en">
OVS integration bridge name OpenStack OVS Service (quantum-ovs-agent) OVS integration bridge name OpenStack OVS Service (neutron-ovs-agent)
</longdesc> </longdesc>
<shortdesc lang="en">Segmentation bridge name for quantum-ovs-agent</shortdesc> <shortdesc lang="en">Segmentation bridge name for neutron-ovs-agent</shortdesc>
<content type="string" default="${OCF_RESKEY_segmentation_bridge_default}"/> <content type="string" default="${OCF_RESKEY_segmentation_bridge_default}"/>
</parameter> </parameter>
@ -167,7 +167,7 @@ END
####################################################################### #######################################################################
# Functions invoked by resource manager actions # Functions invoked by resource manager actions
quantum_ovs_agent_validate() { neutron_ovs_agent_validate() {
local rc local rc
check_binary $OCF_RESKEY_binary check_binary $OCF_RESKEY_binary
@ -193,19 +193,19 @@ quantum_ovs_agent_validate() {
true true
} }
quantum_ovs_agent_status() { neutron_ovs_agent_status() {
local pid local pid
local rc local rc
if [ ! -f $OCF_RESKEY_pid ]; then if [ ! -f $OCF_RESKEY_pid ]; then
ocf_log info "OpenStack OVS Server (quantum-ovs-agent) seems not to exist" ocf_log info "OpenStack OVS Server (neutron-ovs-agent) seems not to exist"
pid=`pgrep -f ${OCF_RESKEY_binary}` pid=`pgrep -f ${OCF_RESKEY_binary}`
if [ $? -eq 0 ] if [ $? -eq 0 ]
then then
ocf_log warn "OpenStack OVS Server (quantum-ovs-agent) was run, but no pid file found." ocf_log warn "OpenStack OVS Server (neutron-ovs-agent) was run, but no pid file found."
ocf_log warn "Will use $pid as PID of process (quantum-ovs-agent)" ocf_log warn "Will use $pid as PID of process (neutron-ovs-agent)"
ocf_log warn "Writing $pid into $OCF_RESKEY_pid" ocf_log warn "Writing $pid into $OCF_RESKEY_pid"
echo $pid > $OCF_RESKEY_pid echo $pid > $OCF_RESKEY_pid
else else
@ -220,13 +220,13 @@ quantum_ovs_agent_status() {
if [ $rc -eq 0 ]; then if [ $rc -eq 0 ]; then
return $OCF_SUCCESS return $OCF_SUCCESS
else else
ocf_log info "Old PID file $OCF_RESKEY_pid found (with pid $pid), but OpenStack OVS Server (quantum-ovs-agent) is not running" ocf_log info "Old PID file $OCF_RESKEY_pid found (with pid $pid), but OpenStack OVS Server (neutron-ovs-agent) is not running"
return $OCF_NOT_RUNNING return $OCF_NOT_RUNNING
fi fi
} }
quantum_ovs_agent_monitor() { neutron_ovs_agent_monitor() {
quantum_ovs_agent_status neutron_ovs_agent_status
rc=$? rc=$?
return $rc return $rc
} }
@ -258,19 +258,19 @@ clean_up() {
} }
quantum_ovs_agent_start() { neutron_ovs_agent_start() {
local rc local rc
quantum_ovs_agent_status neutron_ovs_agent_status
rc=$? rc=$?
if [ $rc -eq $OCF_SUCCESS ]; then if [ $rc -eq $OCF_SUCCESS ]; then
ocf_log info "OpenStack OVS Server (quantum-ovs-agent) already running" ocf_log info "OpenStack OVS Server (neutron-ovs-agent) already running"
return $OCF_SUCCESS return $OCF_SUCCESS
fi fi
clean_up clean_up
# run the actual quantum-ovs-agent daemon. Don't use ocf_run as we're sending the tool's output # run the actual neutron-ovs-agent daemon. Don't use ocf_run as we're sending the tool's output
# straight to /dev/null anyway and using ocf_run would break stdout-redirection here. # straight to /dev/null anyway and using ocf_run would break stdout-redirection here.
su ${OCF_RESKEY_user} -s /bin/sh -c "${OCF_RESKEY_binary} --config-file=$OCF_RESKEY_config \ su ${OCF_RESKEY_user} -s /bin/sh -c "${OCF_RESKEY_binary} --config-file=$OCF_RESKEY_config \
--config-file=$OCF_RESKEY_plugin_config $OCF_RESKEY_additional_parameters"' >> \ --config-file=$OCF_RESKEY_plugin_config $OCF_RESKEY_additional_parameters"' >> \
@ -279,29 +279,29 @@ quantum_ovs_agent_start() {
# Spin waiting for the server to come up. # Spin waiting for the server to come up.
# Let the CRM/LRM time us out if required # Let the CRM/LRM time us out if required
while true; do while true; do
quantum_ovs_agent_monitor neutron_ovs_agent_monitor
rc=$? rc=$?
[ $rc -eq $OCF_SUCCESS ] && break [ $rc -eq $OCF_SUCCESS ] && break
if [ $rc -ne $OCF_NOT_RUNNING ]; then if [ $rc -ne $OCF_NOT_RUNNING ]; then
ocf_log err "OpenStack OVS Server (quantum-ovs-agent) start failed" ocf_log err "OpenStack OVS Server (neutron-ovs-agent) start failed"
exit $OCF_ERR_GENERIC exit $OCF_ERR_GENERIC
fi fi
sleep 1 sleep 1
done done
ocf_log info "OpenStack OVS Server (quantum-ovs-agent) started" ocf_log info "OpenStack OVS Server (neutron-ovs-agent) started"
return $OCF_SUCCESS return $OCF_SUCCESS
} }
quantum_ovs_agent_stop() { neutron_ovs_agent_stop() {
local rc local rc
local pid local pid
quantum_ovs_agent_status neutron_ovs_agent_status
rc=$? rc=$?
if [ $rc -eq $OCF_NOT_RUNNING ]; then if [ $rc -eq $OCF_NOT_RUNNING ]; then
clean_up clean_up
ocf_log info "OpenStack OVS Server (quantum-ovs-agent) already stopped" ocf_log info "OpenStack OVS Server (neutron-ovs-agent) already stopped"
return $OCF_SUCCESS return $OCF_SUCCESS
fi fi
@ -311,7 +311,7 @@ quantum_ovs_agent_stop() {
ocf_run kill -s TERM $pid ocf_run kill -s TERM $pid
rc=$? rc=$?
if [ $rc -ne 0 ]; then if [ $rc -ne 0 ]; then
ocf_log err "OpenStack OVS Server (quantum-ovs-agent) couldn't be stopped" ocf_log err "OpenStack OVS Server (neutron-ovs-agent) couldn't be stopped"
exit $OCF_ERR_GENERIC exit $OCF_ERR_GENERIC
fi fi
@ -322,28 +322,28 @@ quantum_ovs_agent_stop() {
fi fi
count=0 count=0
while [ $count -lt $shutdown_timeout ]; do while [ $count -lt $shutdown_timeout ]; do
quantum_ovs_agent_status neutron_ovs_agent_status
rc=$? rc=$?
if [ $rc -eq $OCF_NOT_RUNNING ]; then if [ $rc -eq $OCF_NOT_RUNNING ]; then
break break
fi fi
count=`expr $count + 1` count=`expr $count + 1`
sleep 1 sleep 1
ocf_log debug "OpenStack OVS Server (quantum-ovs-agent) still hasn't stopped yet. Waiting ..." ocf_log debug "OpenStack OVS Server (neutron-ovs-agent) still hasn't stopped yet. Waiting ..."
done done
quantum_ovs_agent_status neutron_ovs_agent_status
rc=$? rc=$?
if [ $rc -ne $OCF_NOT_RUNNING ]; then if [ $rc -ne $OCF_NOT_RUNNING ]; then
# SIGTERM didn't help either, try SIGKILL # SIGTERM didn't help either, try SIGKILL
ocf_log info "OpenStack OVS Server (quantum-ovs-agent) failed to stop after ${shutdown_timeout}s \ ocf_log info "OpenStack OVS Server (neutron-ovs-agent) failed to stop after ${shutdown_timeout}s \
using SIGTERM. Trying SIGKILL ..." using SIGTERM. Trying SIGKILL ..."
ocf_run kill -s KILL $pid ocf_run kill -s KILL $pid
fi fi
clean_up clean_up
ocf_log info "OpenStack OVS Server (quantum-ovs-agent) stopped" ocf_log info "OpenStack OVS Server (neutron-ovs-agent) stopped"
rm -f $OCF_RESKEY_pid rm -f $OCF_RESKEY_pid
@ -360,14 +360,14 @@ case "$1" in
esac esac
# Anything except meta-data and help must pass validation # Anything except meta-data and help must pass validation
quantum_ovs_agent_validate || exit $? neutron_ovs_agent_validate || exit $?
# What kind of method was invoked? # What kind of method was invoked?
case "$1" in case "$1" in
start) quantum_ovs_agent_start;; start) neutron_ovs_agent_start;;
stop) quantum_ovs_agent_stop;; stop) neutron_ovs_agent_stop;;
status) quantum_ovs_agent_status;; status) neutron_ovs_agent_status;;
monitor) quantum_ovs_agent_monitor;; monitor) neutron_ovs_agent_monitor;;
validate-all) ;; validate-all) ;;
*) usage *) usage
exit $OCF_ERR_UNIMPLEMENTED;; exit $OCF_ERR_UNIMPLEMENTED;;
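A minimal sketch, assuming the cs_resource type from the corosync/pacemaker modules used elsewhere in this tree: the OCF script above is typically registered with Pacemaker from Puppet in the same way the DHCP agent is wired up later in this change. The primitive name, OCF install path and provided_by value below are illustrative assumptions, not part of the patch.
file { 'neutron-ovs-agent-ocf':
  # assumed install location, mirroring the DHCP agent OCF file below
  path   => '/usr/lib/ocf/resource.d/mirantis/neutron-agent-ovs',
  mode   => 755,
  owner  => root,
  group  => root,
  source => 'puppet:///modules/neutron/ocf/neutron-agent-ovs',
} ->
cs_resource { 'p_neutron-openvswitch-agent':
  ensure          => present,
  primitive_class => 'ocf',
  provided_by     => 'mirantis',          # assumed OCF provider directory
  primitive_type  => 'neutron-agent-ovs', # assumed primitive type name
}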

View File

@ -9,7 +9,7 @@ import logging
import logging.handlers import logging.handlers
import subprocess import subprocess
import StringIO import StringIO
from quantumclient.quantum import client as q_client from neutronclient.neutron import client as q_client
from keystoneclient.v2_0 import client as ks_client from keystoneclient.v2_0 import client as ks_client
LOG_NAME='q-agent-cleanup' LOG_NAME='q-agent-cleanup'
@ -31,7 +31,7 @@ def get_authconfig(cfg_file):
return rv return rv
class QuantumCleaner(object): class NeutronCleaner(object):
PORT_NAME_PREFIXES_BY_DEV_OWNER = { PORT_NAME_PREFIXES_BY_DEV_OWNER = {
'network:dhcp': 'tap', 'network:dhcp': 'tap',
'network:router_gateway': 'qg-', 'network:router_gateway': 'qg-',
@ -58,9 +58,9 @@ class QuantumCleaner(object):
'l3': 'qrouter', 'l3': 'qrouter',
} }
AGENT_BINARY_NAME = { AGENT_BINARY_NAME = {
'dhcp': 'quantum-dhcp-agent', 'dhcp': 'neutron-dhcp-agent',
'l3': 'quantum-l3-agent', 'l3': 'neutron-l3-agent',
'ovs': 'quantum-openvswitch-agent' 'ovs': 'neutron-openvswitch-agent'
} }
CMD__list_ovs_port = ['ovs-vsctl', 'list-ports'] CMD__list_ovs_port = ['ovs-vsctl', 'list-ports']
@ -112,7 +112,7 @@ class QuantumCleaner(object):
token=self.token, token=self.token,
) )
def _quantum_API_call(self, method, *args): def _neutron_API_call(self, method, *args):
ret_count = self.options.get('retries') ret_count = self.options.get('retries')
while True: while True:
if ret_count <= 0 : if ret_count <= 0 :
@ -130,31 +130,31 @@ class QuantumCleaner(object):
self.log.info("Can't connect to {0}, wait for server ready...".format(self.keystone.service_catalog.url_for(service_type='network'))) self.log.info("Can't connect to {0}, wait for server ready...".format(self.keystone.service_catalog.url_for(service_type='network')))
time.sleep(self.options.sleep) time.sleep(self.options.sleep)
else: else:
self.log.error("Quantum error:\n{0}".format(e.message)) self.log.error("Neutron error:\n{0}".format(e.message))
raise e raise e
ret_count -= 1 ret_count -= 1
return rv return rv
def _get_ports(self): def _get_ports(self):
return self._quantum_API_call(self.client.list_ports)['ports'] return self._neutron_API_call(self.client.list_ports)['ports']
def _get_agents(self, use_cache=True): def _get_agents(self, use_cache=True):
return self._quantum_API_call(self.client.list_agents)['agents'] return self._neutron_API_call(self.client.list_agents)['agents']
def _list_networks_on_dhcp_agent(self, agent_id): def _list_networks_on_dhcp_agent(self, agent_id):
return self._quantum_API_call(self.client.list_networks_on_dhcp_agent, agent_id)['networks'] return self._neutron_API_call(self.client.list_networks_on_dhcp_agent, agent_id)['networks']
def _list_routers_on_l3_agent(self, agent_id): def _list_routers_on_l3_agent(self, agent_id):
return self._quantum_API_call(self.client.list_routers_on_l3_agent, agent_id)['routers'] return self._neutron_API_call(self.client.list_routers_on_l3_agent, agent_id)['routers']
def _add_network_to_dhcp_agent(self, agent_id, net_id): def _add_network_to_dhcp_agent(self, agent_id, net_id):
return self._quantum_API_call(self.client.add_network_to_dhcp_agent, agent_id, {"network_id": net_id}) return self._neutron_API_call(self.client.add_network_to_dhcp_agent, agent_id, {"network_id": net_id})
def _add_router_to_l3_agent(self, agent_id, router_id): def _add_router_to_l3_agent(self, agent_id, router_id):
return self._quantum_API_call(self.client.add_router_to_l3_agent, agent_id, {"router_id": router_id}) return self._neutron_API_call(self.client.add_router_to_l3_agent, agent_id, {"router_id": router_id})
def _remove_router_from_l3_agent(self, agent_id, router_id): def _remove_router_from_l3_agent(self, agent_id, router_id):
return self._quantum_API_call(self.client.remove_router_from_l3_agent, agent_id, router_id) return self._neutron_API_call(self.client.remove_router_from_l3_agent, agent_id, router_id)
def _get_ports_by_agent(self, agent, activeonly=False, localnode=False, port_id_part_len=PORT_ID_PART_LEN): def _get_ports_by_agent(self, agent, activeonly=False, localnode=False, port_id_part_len=PORT_ID_PART_LEN):
self.log.debug("__get_ports_by_agent: start, agent='{0}', activeonly='{1}'".format(agent, activeonly)) self.log.debug("__get_ports_by_agent: start, agent='{0}', activeonly='{1}'".format(agent, activeonly))
@ -365,7 +365,7 @@ class QuantumCleaner(object):
for agent in agents['dead']: for agent in agents['dead']:
self.log.info("remove dead DHCP agent: {0}".format(agent['id'])) self.log.info("remove dead DHCP agent: {0}".format(agent['id']))
if not self.options.get('noop'): if not self.options.get('noop'):
self._quantum_API_call(self.client.delete_agent, agent['id']) self._neutron_API_call(self.client.delete_agent, agent['id'])
self.log.debug("_reschedule_agent_dhcp: end.") self.log.debug("_reschedule_agent_dhcp: end.")
def _reschedule_agent_l3(self, agent_type): def _reschedule_agent_l3(self, agent_type):
@ -401,7 +401,7 @@ class QuantumCleaner(object):
for agent in agents['dead']: for agent in agents['dead']:
self.log.info("remove dead L3 agent: {0}".format(agent['id'])) self.log.info("remove dead L3 agent: {0}".format(agent['id']))
if not self.options.get('noop'): if not self.options.get('noop'):
self._quantum_API_call(self.client.delete_agent, agent['id']) self._neutron_API_call(self.client.delete_agent, agent['id'])
# move routers from dead to alive agent # move routers from dead to alive agent
for rou in filter(lambda rr: not(rr[0]['id'] in lucky_ids), dead_routers): for rou in filter(lambda rr: not(rr[0]['id'] in lucky_ids), dead_routers):
# self.log.info("unschedule router {rou} from L3 agent {agent}".format( # self.log.info("unschedule router {rou} from L3 agent {agent}".format(
@ -458,7 +458,7 @@ class QuantumCleaner(object):
if __name__ == '__main__': if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Quantum network node cleaning tool.') parser = argparse.ArgumentParser(description='Neutron network node cleaning tool.')
parser.add_argument("-c", "--auth-config", dest="authconf", default="/root/openrc", parser.add_argument("-c", "--auth-config", dest="authconf", default="/root/openrc",
help="Authenticating config FILE", metavar="FILE") help="Authenticating config FILE", metavar="FILE")
parser.add_argument("--retries", dest="retries", type=int, default=50, parser.add_argument("--retries", dest="retries", type=int, default=50,
@ -522,7 +522,7 @@ if __name__ == '__main__':
LOG.setLevel(_log_level) LOG.setLevel(_log_level)
LOG.info("Started: {0}".format(' '.join(sys.argv))) LOG.info("Started: {0}".format(' '.join(sys.argv)))
cleaner = QuantumCleaner(get_authconfig(args.authconf), options=vars(args), log=LOG) cleaner = NeutronCleaner(get_authconfig(args.authconf), options=vars(args), log=LOG)
rc = 0 rc = 0
if vars(args).get('test-hostnames'): if vars(args).get('test-hostnames'):
rc = cleaner.test_healthy(args.agent[0]) rc = cleaner.test_healthy(args.agent[0])

View File

@ -91,11 +91,11 @@ diff --git a/openstack/common/rpc/impl_kombu.py b/openstack/common/rpc/impl_komb
index fff1ed9..3469a9b 100644 index fff1ed9..3469a9b 100644
--- a/openstack/common/rpc/impl_kombu.py --- a/openstack/common/rpc/impl_kombu.py
+++ b/openstack/common/rpc/impl_kombu.py +++ b/openstack/common/rpc/impl_kombu.py
@@ -33,6 +33,7 @@ from quantum.openstack.common import cfg @@ -33,6 +33,7 @@ from neutron.openstack.common import cfg
from quantum.openstack.common.gettextutils import _ from neutron.openstack.common.gettextutils import _
from quantum.openstack.common.rpc import amqp as rpc_amqp from neutron.openstack.common.rpc import amqp as rpc_amqp
from quantum.openstack.common.rpc import common as rpc_common from neutron.openstack.common.rpc import common as rpc_common
+from quantum.openstack.common import network_utils +from neutron.openstack.common import network_utils
kombu_opts = [ kombu_opts = [
cfg.StrOpt('kombu_ssl_version', cfg.StrOpt('kombu_ssl_version',

View File

@ -2,10 +2,10 @@
# require 'yaml' # require 'yaml'
# require 'json' # require 'json'
class MrntQuantumNR class MrntNeutronNR
def initialize(scope, cfg) def initialize(scope, cfg)
@scope = scope @scope = scope
@quantum_config = cfg @neutron_config = cfg
end end
#class method #class method
@ -70,15 +70,15 @@ class MrntQuantumNR
end end
def create_resources() def create_resources()
res__quantum_net = 'quantum_net' res__neutron_net = 'neutron_net'
res__quantum_net_type = Puppet::Type.type(res__quantum_net.downcase.to_sym) res__neutron_net_type = Puppet::Type.type(res__neutron_net.downcase.to_sym)
res__quantum_subnet = 'quantum_subnet' res__neutron_subnet = 'neutron_subnet'
res__quantum_subnet_type = Puppet::Type.type(res__quantum_subnet.downcase.to_sym) res__neutron_subnet_type = Puppet::Type.type(res__neutron_subnet.downcase.to_sym)
res__quantum_router = 'quantum_router' res__neutron_router = 'neutron_router'
res__quantum_router_type = Puppet::Type.type(res__quantum_router.downcase.to_sym) res__neutron_router_type = Puppet::Type.type(res__neutron_router.downcase.to_sym)
previous = nil previous = nil
segment_id = @quantum_config[:L2][:enable_tunneling] ? @quantum_config[:L2][:tunnel_id_ranges].split(':')[0].to_i : 0 segment_id = @neutron_config[:L2][:enable_tunneling] ? @neutron_config[:L2][:tunnel_id_ranges].split(':')[0].to_i : 0
@quantum_config[:predefined_networks].each do |net, ncfg| @neutron_config[:predefined_networks].each do |net, ncfg|
Puppet::debug("-*- processing net '#{net}': #{ncfg.inspect}") Puppet::debug("-*- processing net '#{net}': #{ncfg.inspect}")
# config network resources parameters # config network resources parameters
network_config = get_default_network_config() network_config = get_default_network_config()
@ -110,7 +110,7 @@ class MrntQuantumNR
elsif network_config[:net][:network_type].downcase == 'vlan' && ncfg[:L2][:physnet] elsif network_config[:net][:network_type].downcase == 'vlan' && ncfg[:L2][:physnet]
# Calculate segment_id for VLAN mode from personal physnet settings # Calculate segment_id for VLAN mode from personal physnet settings
_physnet = ncfg[:L2][:physnet].to_sym _physnet = ncfg[:L2][:physnet].to_sym
_segment_id_range = @quantum_config[:L2][:phys_nets][_physnet][:vlan_range] || "4094:xxx" _segment_id_range = @neutron_config[:L2][:phys_nets][_physnet][:vlan_range] || "4094:xxx"
_segment_id = _segment_id_range.split(/[:\-]/)[0].to_i _segment_id = _segment_id_range.split(/[:\-]/)[0].to_i
network_config[:net][:segment_id] = _segment_id network_config[:net][:segment_id] = _segment_id
elsif network_config[:net][:network_type].downcase == 'vlan' elsif network_config[:net][:network_type].downcase == 'vlan'
@ -119,12 +119,12 @@ class MrntQuantumNR
#else # another network types -- do nothing... #else # another network types -- do nothing...
end end
Puppet::debug("-*- using segment_id='#{network_config[:net][:segment_id]}' for net '#{net}'") Puppet::debug("-*- using segment_id='#{network_config[:net][:segment_id]}' for net '#{net}'")
# create quantum_net resource # create neutron_net resource
p_res = Puppet::Parser::Resource.new( p_res = Puppet::Parser::Resource.new(
res__quantum_net, res__neutron_net,
network_config[:net][:name].to_s, network_config[:net][:name].to_s,
:scope => @scope, :scope => @scope,
:source => res__quantum_net_type :source => res__neutron_net_type
) )
p_res.set_parameter(:ensure, :present) p_res.set_parameter(:ensure, :present)
previous && p_res.set_parameter(:require, [previous]) previous && p_res.set_parameter(:require, [previous])
@ -134,12 +134,12 @@ class MrntQuantumNR
@scope.compiler.add_resource(@scope, p_res) @scope.compiler.add_resource(@scope, p_res)
previous = p_res.to_s previous = p_res.to_s
Puppet::debug("*** Resource '#{previous}' created succefful.") Puppet::debug("*** Resource '#{previous}' created succefful.")
# create quantum_subnet resource # create neutron_subnet resource
p_res = Puppet::Parser::Resource.new( p_res = Puppet::Parser::Resource.new(
res__quantum_subnet, res__neutron_subnet,
network_config[:subnet][:name].to_s, network_config[:subnet][:name].to_s,
:scope => @scope, :scope => @scope,
:source => res__quantum_subnet_type :source => res__neutron_subnet_type
) )
p_res.set_parameter(:ensure, :present) p_res.set_parameter(:ensure, :present)
p_res.set_parameter(:require, [previous]) p_res.set_parameter(:require, [previous])
@ -152,7 +152,7 @@ class MrntQuantumNR
end end
# create pre-defined routers # create pre-defined routers
if previous # if no networks -- we don't create any router if previous # if no networks -- we don't create any router
@quantum_config[:predefined_routers].each do |rou, rcfg| @neutron_config[:predefined_routers].each do |rou, rcfg|
next if rcfg[:virtual] next if rcfg[:virtual]
# config router # config router
router_config = get_default_router_config() router_config = get_default_router_config()
@ -163,10 +163,10 @@ class MrntQuantumNR
router_config[:int_subnets] = rcfg[:internal_networks].map{|x| "#{x}__subnet"} router_config[:int_subnets] = rcfg[:internal_networks].map{|x| "#{x}__subnet"}
# create resource # create resource
p_res = Puppet::Parser::Resource.new( p_res = Puppet::Parser::Resource.new(
res__quantum_router, res__neutron_router,
router_config[:name].to_s, router_config[:name].to_s,
:scope => @scope, :scope => @scope,
:source => res__quantum_router_type :source => res__neutron_router_type
) )
p_res.set_parameter(:ensure, :present) p_res.set_parameter(:ensure, :present)
p_res.set_parameter(:require, [previous]) p_res.set_parameter(:require, [previous])
@ -183,16 +183,16 @@ end
module Puppet::Parser::Functions module Puppet::Parser::Functions
newfunction(:create_predefined_networks_and_routers , :doc => <<-EOS newfunction(:create_predefined_networks_and_routers , :doc => <<-EOS
This function gets a Hash of Quantum configuration This function gets a Hash of Neutron configuration
and creates predefined networks and routers. and creates predefined networks and routers.
Example call: Example call:
$config = create_predefined_networks_and_routers($quantum_settings_hash) $config = create_predefined_networks_and_routers($neutron_settings_hash)
EOS EOS
) do |argv| ) do |argv|
#Puppet::Parser::Functions.autoloader.loadall #Puppet::Parser::Functions.autoloader.loadall
nr_conf = MrntQuantumNR.new(self, MrntQuantumNR.sanitize_hash(argv[0])) nr_conf = MrntNeutronNR.new(self, MrntNeutronNR.sanitize_hash(argv[0]))
nr_conf.create_resources() nr_conf.create_resources()
end end
end end
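A minimal usage sketch, assuming the settings hash is exposed under the names used in the docstrings (the section name and variable below are illustrative): the sanitize_neutron_config function defined further down prepares the hash, and this function then materializes the predefined networks and routers as catalog resources.
$neutron_settings = sanitize_neutron_config($::fuel_settings, 'neutron_settings')
create_predefined_networks_and_routers($neutron_settings)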

View File

@ -2,10 +2,10 @@
# require 'yaml' # require 'yaml'
# require 'json' # require 'json'
class MrntQuantumFA class MrntNeutronFA
def initialize(scope, cfg) def initialize(scope, cfg)
@scope = scope @scope = scope
@quantum_config = cfg @neutron_config = cfg
end end
#class method #class method
@ -33,7 +33,7 @@ class MrntQuantumFA
end end
def get_pool_size() def get_pool_size()
floating_range = @quantum_config[:predefined_networks][:net04_ext][:L3][:floating] floating_range = @neutron_config[:predefined_networks][:net04_ext][:L3][:floating]
Puppet::debug("Floating range is #{floating_range}") Puppet::debug("Floating range is #{floating_range}")
borders = floating_range.split(':').map{|x| x.split('.')[-1].to_i} borders = floating_range.split(':').map{|x| x.split('.')[-1].to_i}
rv = borders[1]-borders[0] rv = borders[1]-borders[0]
@ -50,16 +50,16 @@ end
module Puppet::Parser::Functions module Puppet::Parser::Functions
newfunction(:get_floatingip_pool_size_for_admin, :type => :rvalue, :doc => <<-EOS newfunction(:get_floatingip_pool_size_for_admin, :type => :rvalue, :doc => <<-EOS
This function gets a Hash of Quantum configuration This function gets a Hash of Neutron configuration
and calculates the autogenerated floating IP pool size for the admin tenant. and calculates the autogenerated floating IP pool size for the admin tenant.
Example call: Example call:
$pool_size = get_floatingip_pool_size_for_admin($quantum_settings_hash) $pool_size = get_floatingip_pool_size_for_admin($neutron_settings_hash)
EOS EOS
) do |argv| ) do |argv|
#Puppet::Parser::Functions.autoloader.loadall #Puppet::Parser::Functions.autoloader.loadall
nr_conf = MrntQuantumFA.new(self, MrntQuantumFA.sanitize_hash(argv[0])) nr_conf = MrntNeutronFA.new(self, MrntNeutronFA.sanitize_hash(argv[0]))
nr_conf.get_pool_size() nr_conf.get_pool_size()
end end
end end

View File

@ -2,7 +2,7 @@ require 'ipaddr'
require 'yaml' require 'yaml'
require 'json' require 'json'
class MrntQuantum class MrntNeutron
#class method #class method
def self.sanitize_array(aa) def self.sanitize_array(aa)
aa.reduce([]) do |rv, v| aa.reduce([]) do |rv, v|
@ -96,8 +96,8 @@ class MrntQuantum
return rv return rv
end end
def get_quantum_srv_api_url(srvsh) def get_neutron_srv_api_url(srvsh)
"#{srvsh[:api_protocol]}://#{get_quantum_srv_vip()}:#{srvsh[:bind_port]}" "#{srvsh[:api_protocol]}://#{get_neutron_srv_vip()}:#{srvsh[:bind_port]}"
end end
# classmethod # classmethod
@ -120,15 +120,15 @@ class MrntQuantum
l2[:phys_nets].sort().map{|n| [n[0],n[1][:vlan_range]]}.map{|n| n.delete_if{|x| x==nil||x==''}}.map{|n| n.join(':')}.join(',') l2[:phys_nets].sort().map{|n| [n[0],n[1][:vlan_range]]}.map{|n| n.delete_if{|x| x==nil||x==''}}.map{|n| n.join(':')}.join(',')
end end
def get_quantum_srv_vip() def get_neutron_srv_vip()
@fuel_config[:quantum_server_vip] || @fuel_config[:management_vip] @fuel_config[:neutron_server_vip] || @fuel_config[:management_vip]
end end
def get_quantum_srv_ip() def get_neutron_srv_ip()
@scope.function_get_network_role_property(['management', 'ipaddr']) @scope.function_get_network_role_property(['management', 'ipaddr'])
end end
def get_quantum_gre_ip() # IP, not VIP !!! def get_neutron_gre_ip() # IP, not VIP !!!
@scope.function_get_network_role_property(['mesh', 'ipaddr']) || @scope.function_get_network_role_property(['management', 'ipaddr']) @scope.function_get_network_role_property(['mesh', 'ipaddr']) || @scope.function_get_network_role_property(['management', 'ipaddr'])
end end
@ -200,7 +200,7 @@ class MrntQuantum
end end
def generate_default_quantum_config() def generate_default_neutron_config()
# fields defined as NIL are required # fields defined as NIL are required
rv = { rv = {
:amqp => { :amqp => {
@ -209,7 +209,7 @@ class MrntQuantum
:passwd => nil, :passwd => nil,
:hosts => get_amqp_vip(5672), :hosts => get_amqp_vip(5672),
:ha_mode => true, :ha_mode => true,
:control_exchange => "quantum", :control_exchange => "neutron",
:heartbeat => 60, :heartbeat => 60,
:protocol => "tcp", :protocol => "tcp",
:rabbit_virtual_host => "/", :rabbit_virtual_host => "/",
@ -219,9 +219,9 @@ class MrntQuantum
:provider => "mysql", :provider => "mysql",
:host => get_database_vip(), :host => get_database_vip(),
:port => 0, :port => 0,
:database => "quantum", :database => "neutron",
:username => "quantum", :username => "neutron",
:passwd => "quantum", :passwd => "neutron",
:reconnects => -1, :reconnects => -1,
:reconnect_interval => 2, :reconnect_interval => 2,
:charset => nil, :charset => nil,
@ -234,19 +234,19 @@ class MrntQuantum
:auth_protocol => "http", :auth_protocol => "http",
:auth_api_version => "v2.0", :auth_api_version => "v2.0",
:admin_tenant_name => "services", :admin_tenant_name => "services",
:admin_user => "quantum", :admin_user => "neutron",
:admin_password => "quantum_pass", :admin_password => "neutron_pass",
:admin_email => "quantum@localhost", :admin_email => "neutron@localhost",
:signing_dir => "/var/lib/quantum/keystone-signing", :signing_dir => "/var/lib/neutron/keystone-signing",
}, },
:server => { :server => {
:api_url => nil, # will be calculated later :api_url => nil, # will be calculated later
:api_protocol => "http", :api_protocol => "http",
:bind_host => get_quantum_srv_ip(), :bind_host => get_neutron_srv_ip(),
:bind_port => 9696, :bind_port => 9696,
:agent_down_time => 15, :agent_down_time => 15,
:allow_bulk => true, :allow_bulk => true,
:control_exchange=> 'quantum', :control_exchange=> 'neutron',
}, },
:metadata => { :metadata => {
:nova_metadata_ip => get_management_vip(), :nova_metadata_ip => get_management_vip(),
@ -279,7 +279,7 @@ class MrntQuantum
:tunnel_bridge => get_bridge_name('tunnel'), :tunnel_bridge => get_bridge_name('tunnel'),
:int_peer_patch_port => "patch-tun", :int_peer_patch_port => "patch-tun",
:tun_peer_patch_port => "patch-int", :tun_peer_patch_port => "patch-int",
:local_ip => get_quantum_gre_ip(), :local_ip => get_neutron_gre_ip(),
}, },
:L3 => { :L3 => {
:router_id => nil, :router_id => nil,
@ -301,7 +301,7 @@ class MrntQuantum
}, },
:predefined_routers => get_default_routers(), :predefined_routers => get_default_routers(),
:predefined_networks => get_default_networks(), :predefined_networks => get_default_networks(),
:root_helper => "sudo quantum-rootwrap /etc/quantum/rootwrap.conf", :root_helper => "sudo neutron-rootwrap /etc/neutron/rootwrap.conf",
:polling_interval => 2, :polling_interval => 2,
} }
rv[:database][:port] = case rv[:database][:provider].upcase.to_sym rv[:database][:port] = case rv[:database][:provider].upcase.to_sym
@ -317,28 +317,28 @@ class MrntQuantum
def initialize(scope, cfg, section_name) def initialize(scope, cfg, section_name)
@scope = scope @scope = scope
@fuel_config = cfg @fuel_config = cfg
@quantum_config_from_nailgun = cfg[section_name.to_sym()] @neutron_config_from_nailgun = cfg[section_name.to_sym()]
end end
def generate_config() def generate_config()
@quantum_config = _generate_config(generate_default_quantum_config(), @quantum_config_from_nailgun, []) @neutron_config = _generate_config(generate_default_neutron_config(), @neutron_config_from_nailgun, [])
@quantum_config[:database][:url] ||= MrntQuantum.get_database_url(@quantum_config[:database]) @neutron_config[:database][:url] ||= MrntNeutron.get_database_url(@neutron_config[:database])
@quantum_config[:keystone][:auth_url] ||= MrntQuantum.get_keystone_auth_url(@quantum_config[:keystone]) @neutron_config[:keystone][:auth_url] ||= MrntNeutron.get_keystone_auth_url(@neutron_config[:keystone])
@quantum_config[:server][:api_url] ||= get_quantum_srv_api_url(@quantum_config[:server]) @neutron_config[:server][:api_url] ||= get_neutron_srv_api_url(@neutron_config[:server])
@quantum_config[:L2][:network_vlan_ranges] = MrntQuantum.get_network_vlan_ranges(@quantum_config[:L2]) @neutron_config[:L2][:network_vlan_ranges] = MrntNeutron.get_network_vlan_ranges(@neutron_config[:L2])
@quantum_config[:L2][:bridge_mappings] = MrntQuantum.get_bridge_mappings(@quantum_config[:L2]) @neutron_config[:L2][:bridge_mappings] = MrntNeutron.get_bridge_mappings(@neutron_config[:L2])
@quantum_config[:L2][:phys_bridges] = MrntQuantum.get_phys_bridges(@quantum_config[:L2]) @neutron_config[:L2][:phys_bridges] = MrntNeutron.get_phys_bridges(@neutron_config[:L2])
@quantum_config[:amqp] ||= MrntQuantum.get_amqp_config(@quantum_config[:amqp]) @neutron_config[:amqp] ||= MrntNeutron.get_amqp_config(@neutron_config[:amqp])
if [:gre, :vxlan, :lisp].include? @quantum_config[:L2][:segmentation_type].downcase.to_sym if [:gre, :vxlan, :lisp].include? @neutron_config[:L2][:segmentation_type].downcase.to_sym
@quantum_config[:L2][:enable_tunneling] = true @neutron_config[:L2][:enable_tunneling] = true
else else
@quantum_config[:L2][:enable_tunneling] = false @neutron_config[:L2][:enable_tunneling] = false
@quantum_config[:L2][:tunnel_id_ranges] = nil @neutron_config[:L2][:tunnel_id_ranges] = nil
end end
if @quantum_config[:amqp][:passwd].nil? if @neutron_config[:amqp][:passwd].nil?
@quantum_config[:amqp][:passwd] = get_amqp_passwd() @neutron_config[:amqp][:passwd] = get_amqp_passwd()
end end
return @quantum_config return @neutron_config
end end
private private
@ -366,18 +366,18 @@ class MrntQuantum
end end
Puppet::Parser::Functions::newfunction(:sanitize_quantum_config, :type => :rvalue, :doc => <<-EOS Puppet::Parser::Functions::newfunction(:sanitize_neutron_config, :type => :rvalue, :doc => <<-EOS
This function gets a Hash of Quantum configuration This function gets a Hash of Neutron configuration
and sanitizes it. and sanitizes it.
Example call: Example call:
$config = sanitize_quantum_config($::fuel_settings, 'quantum_settings') $config = sanitize_neutron_config($::fuel_settings, 'neutron_settings')
EOS EOS
) do |argv| ) do |argv|
Puppet::Parser::Functions.autoloader.loadall Puppet::Parser::Functions.autoloader.loadall
given_config = MrntQuantum.sanitize_hash(argv[0]) given_config = MrntNeutron.sanitize_hash(argv[0])
q_conf = MrntQuantum.new(self, given_config, argv[1]) q_conf = MrntNeutron.new(self, given_config, argv[1])
rv = q_conf.generate_config() rv = q_conf.generate_config()
# Puppet does not allow hashes with symbol keys; normalize the keys # Puppet does not allow hashes with symbol keys; normalize the keys
JSON.load(rv.to_json) JSON.load(rv.to_json)

View File

@ -1,35 +1,35 @@
# Quantum common functions # Neutron common functions
# #
require 'puppet/util/inifile' require 'puppet/util/inifile'
require 'tempfile' require 'tempfile'
class Puppet::Provider::Quantum < Puppet::Provider class Puppet::Provider::Neutron < Puppet::Provider
def self.quantum_credentials def self.neutron_credentials
@quantum_credentials ||= get_quantum_credentials @neutron_credentials ||= get_neutron_credentials
end end
def self.get_quantum_credentials def self.get_neutron_credentials
if quantum_file and quantum_file['keystone_authtoken'] and if neutron_file and neutron_file['keystone_authtoken'] and
quantum_file['keystone_authtoken']['auth_url'] and neutron_file['keystone_authtoken']['auth_url'] and
quantum_file['keystone_authtoken']['admin_tenant_name'] and neutron_file['keystone_authtoken']['admin_tenant_name'] and
quantum_file['keystone_authtoken']['admin_user'] and neutron_file['keystone_authtoken']['admin_user'] and
quantum_file['keystone_authtoken']['admin_password'] neutron_file['keystone_authtoken']['admin_password']
q = {} q = {}
q['auth_url'] = quantum_file['keystone_authtoken']['auth_url'].strip q['auth_url'] = neutron_file['keystone_authtoken']['auth_url'].strip
q['admin_user'] = quantum_file['keystone_authtoken']['admin_user'].strip q['admin_user'] = neutron_file['keystone_authtoken']['admin_user'].strip
q['admin_password'] = quantum_file['keystone_authtoken']['admin_password'].strip q['admin_password'] = neutron_file['keystone_authtoken']['admin_password'].strip
q['admin_tenant_name'] = quantum_file['keystone_authtoken']['admin_tenant_name'].strip q['admin_tenant_name'] = neutron_file['keystone_authtoken']['admin_tenant_name'].strip
return q return q
else else
# raise(Puppet::Error, 'File: /etc/quantum/api-paste.ini does not contain all required sections.') # raise(Puppet::Error, 'File: /etc/neutron/api-paste.ini does not contain all required sections.')
raise(Puppet::Error, 'File: /etc/quantum/quantum.conf does not contain all required sections.') raise(Puppet::Error, 'File: /etc/neutron/neutron.conf does not contain all required sections.')
end end
end end
def quantum_credentials def neutron_credentials
self.class.quantum_credentials self.class.neutron_credentials
end end
def self.auth_endpoint def self.auth_endpoint
@ -37,29 +37,29 @@ class Puppet::Provider::Quantum < Puppet::Provider
end end
def self.get_auth_endpoint def self.get_auth_endpoint
quantum_credentials()['auth_url'] neutron_credentials()['auth_url']
end end
def self.quantum_file def self.neutron_file
return @quantum_file if @quantum_file return @neutron_file if @neutron_file
@quantum_file = Puppet::Util::IniConfig::File.new @neutron_file = Puppet::Util::IniConfig::File.new
@quantum_file.read('/etc/quantum/quantum.conf') @neutron_file.read('/etc/neutron/neutron.conf')
@quantum_file @neutron_file
end end
# def self.quantum_hash # def self.neutron_hash
# @quantum_hash ||= build_quantum_hash # @neutron_hash ||= build_neutron_hash
# end # end
# def quantum_hash # def neutron_hash
# self.class.quantum_hash # self.class.neutron_hash
# end # end
def self.auth_quantum(*args) def self.auth_neutron(*args)
#todo: Rewrite, using ruby-openstack #todo: Rewrite, using ruby-openstack
begin begin
q = quantum_credentials q = neutron_credentials
rescue Exception => e rescue Exception => e
raise(e) raise(e)
end end
@ -70,15 +70,15 @@ class Puppet::Provider::Quantum < Puppet::Provider
retries = 60 retries = 60
loop do loop do
begin begin
rv = quantum('--os-tenant-name', q['admin_tenant_name'], '--os-username', q['admin_user'], '--os-password', q['admin_password'], '--os-auth-url', auth_endpoint, args) rv = neutron('--os-tenant-name', q['admin_tenant_name'], '--os-username', q['admin_user'], '--os-password', q['admin_password'], '--os-auth-url', auth_endpoint, args)
break break
rescue Exception => e rescue Exception => e
if e.message =~ /(\(HTTP\s+400\))|(\[Errno 111\]\s+Connection\s+refused)|(503\s+Service\s+Unavailable)|(Max\s+retries\s+exceeded)/ if e.message =~ /(\(HTTP\s+400\))|(\[Errno 111\]\s+Connection\s+refused)|(503\s+Service\s+Unavailable)|(Max\s+retries\s+exceeded)/
notice("Can't connect to quantum backend. Waiting for retry...") notice("Can't connect to neutron backend. Waiting for retry...")
retries -= 1 retries -= 1
sleep 2 sleep 2
if retries <= 1 if retries <= 1
notice("Can't connect to quantum backend. No more retries, auth failed") notice("Can't connect to neutron backend. No more retries, auth failed")
raise(e) raise(e)
#break #break
end end
@ -91,12 +91,12 @@ class Puppet::Provider::Quantum < Puppet::Provider
return rv return rv
end end
def auth_quantum(*args) def auth_neutron(*args)
self.class.auth_quantum(args) self.class.auth_neutron(args)
end end
#todo: rewrite through API #todo: rewrite through API
def check_quantum_api_availability(timeout) def check_neutron_api_availability(timeout)
if timeout.to_i < 1 if timeout.to_i < 1
timeout = 45 # default timeout 45sec. timeout = 45 # default timeout 45sec.
end end
@ -104,7 +104,7 @@ class Puppet::Provider::Quantum < Puppet::Provider
rv = false rv = false
loop do loop do
begin begin
auth_quantum('net-list') auth_neutron('net-list')
rv = true rv = true
break break
rescue Puppet::ExecutionFailure => e rescue Puppet::ExecutionFailure => e
@ -113,7 +113,7 @@ class Puppet::Provider::Quantum < Puppet::Provider
break break
else else
wa = end_time - current_time wa = end_time - current_time
notice("Quantum API not avalaible. Wait up to #{wa} sec.") notice("Neutron API not avalaible. Wait up to #{wa} sec.")
end end
sleep(0.5) # do not remove! This pause is intentional. sleep(0.5) # do not remove! This pause is intentional.
end end
@ -123,24 +123,9 @@ class Puppet::Provider::Quantum < Puppet::Provider
#private #private
# def self.list_quantum_objects
# ids = []
# (auth_quantum('index').split("\n")[2..-1] || []).collect do |line|
# ids << line.split[0]
# end
# return ids
# end
# def self.get_quantum_attr(id, attr)
# (auth_quantum('show', id).split("\n") || []).collect do |line|
# if line =~ /^#{attr}:/
# return line.split(': ')[1..-1]
# end
# end
# end
def self.list_keystone_tenants def self.list_keystone_tenants
q = quantum_credentials q = neutron_credentials
tenants_id = {} tenants_id = {}
keystone( keystone(

View File

@ -1,6 +1,6 @@
Puppet::Type.type(:ini_setting)#.providers Puppet::Type.type(:ini_setting)#.providers
Puppet::Type.type(:quantum_api_config).provide( Puppet::Type.type(:neutron_api_config).provide(
:ini_setting, :ini_setting,
:parent => Puppet::Type.type(:ini_setting).provider(:ruby) :parent => Puppet::Type.type(:ini_setting).provider(:ruby)
) do ) do
@ -18,7 +18,7 @@ Puppet::Type.type(:quantum_api_config).provide(
end end
def file_path def file_path
'/etc/quantum/api-paste.ini' '/etc/neutron/api-paste.ini'
end end
end end

View File

@ -1,6 +1,6 @@
Puppet::Type.type(:ini_setting)#.providers Puppet::Type.type(:ini_setting)#.providers
Puppet::Type.type(:quantum_config).provide( Puppet::Type.type(:neutron_config).provide(
:ini_setting, :ini_setting,
:parent => Puppet::Type.type(:ini_setting).provider(:ruby) :parent => Puppet::Type.type(:ini_setting).provider(:ruby)
) do ) do
@ -18,7 +18,7 @@ Puppet::Type.type(:quantum_config).provide(
end end
def file_path def file_path
'/etc/quantum/quantum.conf' '/etc/neutron/neutron.conf'
end end
end end

View File

@ -1,6 +1,6 @@
Puppet::Type.type(:ini_setting)#.providers Puppet::Type.type(:ini_setting)#.providers
Puppet::Type.type(:quantum_dhcp_agent_config).provide( Puppet::Type.type(:neutron_dhcp_agent_config).provide(
:ini_setting, :ini_setting,
:parent => Puppet::Type.type(:ini_setting).provider(:ruby) :parent => Puppet::Type.type(:ini_setting).provider(:ruby)
) do ) do
@ -18,7 +18,7 @@ Puppet::Type.type(:quantum_dhcp_agent_config).provide(
end end
def file_path def file_path
'/etc/quantum/dhcp_agent.ini' '/etc/neutron/dhcp_agent.ini'
end end
end end

View File

@ -1,14 +1,14 @@
# Load the Quantum provider library to help # Load the Neutron provider library to help
require File.join(File.dirname(__FILE__), '..','..','..', 'puppet/provider/quantum') require File.join(File.dirname(__FILE__), '..','..','..', 'puppet/provider/neutron')
Puppet::Type.type(:quantum_floatingip_pool).provide( Puppet::Type.type(:neutron_floatingip_pool).provide(
:quantum, :neutron,
:parent => Puppet::Provider::Quantum :parent => Puppet::Provider::Neutron
) do ) do
desc "Manage floating-IP pool for given tenant" desc "Manage floating-IP pool for given tenant"
commands :quantum => 'quantum' commands :neutron => 'neutron'
commands :keystone => 'keystone' commands :keystone => 'keystone'
commands :sleep => 'sleep' commands :sleep => 'sleep'
@ -83,13 +83,13 @@ Puppet::Type.type(:quantum_floatingip_pool).provide(
retries = 30 retries = 30
loop do loop do
begin begin
auth_quantum('floatingip-create', '--tenant-id', tenant_id[@resource[:name]], @resource[:ext_net]) auth_neutron('floatingip-create', '--tenant-id', tenant_id[@resource[:name]], @resource[:ext_net])
break break
rescue Exception => e rescue Exception => e
notice("Can't connect to quantum backend. Waiting for retry...") notice("Can't connect to neutron backend. Waiting for retry...")
retries -= 1 retries -= 1
if retries <= 1 if retries <= 1
notice("Can't connect to quantum backend. No more retries.") notice("Can't connect to neutron backend. No more retries.")
raise(e) raise(e)
end end
sleep 2 sleep 2
@ -120,13 +120,13 @@ Puppet::Type.type(:quantum_floatingip_pool).provide(
retries = 30 retries = 30
loop do loop do
begin begin
auth_quantum('floatingip-delete', fip_id) auth_neutron('floatingip-delete', fip_id)
break break
rescue Exception => e rescue Exception => e
notice("Can't connect to quantum backend. Waiting for retry...") notice("Can't connect to neutron backend. Waiting for retry...")
retries -= 1 retries -= 1
if retries <= 1 if retries <= 1
notice("Can't connect to quantum backend. No more retries.") notice("Can't connect to neutron backend. No more retries.")
raise(e) raise(e)
end end
sleep 2 sleep 2
@ -169,13 +169,13 @@ Puppet::Type.type(:quantum_floatingip_pool).provide(
retries = 30 retries = 30
loop do loop do
begin begin
rv = auth_quantum('floatingip-list', args) rv = auth_neutron('floatingip-list', args)
break break
rescue Exception => e rescue Exception => e
notice("Can't connect to quantum backend. Waiting for retry...") notice("Can't connect to neutron backend. Waiting for retry...")
retries -= 1 retries -= 1
if retries <= 1 if retries <= 1
notice("Can't connect to quantum backend. No more retries.") notice("Can't connect to neutron backend. No more retries.")
raise(e) raise(e)
end end
sleep 2 sleep 2
@ -193,13 +193,13 @@ Puppet::Type.type(:quantum_floatingip_pool).provide(
retries = 30 retries = 30
loop do loop do
begin begin
rv = auth_quantum('floatingip-show', args) rv = auth_neutron('floatingip-show', args)
break break
rescue Exception => e rescue Exception => e
notice("Can't connect to quantum backend. Waiting for retry...") notice("Can't connect to neutron backend. Waiting for retry...")
retries -= 1 retries -= 1
if retries <= 1 if retries <= 1
notice("Can't connect to quantum backend. No more retries.") notice("Can't connect to neutron backend. No more retries.")
raise(e) raise(e)
end end
sleep 2 sleep 2

View File

@ -1,6 +1,6 @@
Puppet::Type.type(:ini_setting)#.providers Puppet::Type.type(:ini_setting)#.providers
Puppet::Type.type(:quantum_l3_agent_config).provide( Puppet::Type.type(:neutron_l3_agent_config).provide(
:ini_setting, :ini_setting,
:parent => Puppet::Type.type(:ini_setting).provider(:ruby) :parent => Puppet::Type.type(:ini_setting).provider(:ruby)
) do ) do
@ -18,7 +18,7 @@ Puppet::Type.type(:quantum_l3_agent_config).provide(
end end
def file_path def file_path
'/etc/quantum/l3_agent.ini' '/etc/neutron/l3_agent.ini'
end end
end end

View File

@ -1,4 +1,4 @@
Puppet::Type.type(:quantum_metadata_agent_config).provide(:ini_setting, Puppet::Type.type(:neutron_metadata_agent_config).provide(:ini_setting,
:parent => Puppet::Type.type(:ini_setting).provider(:ruby) :parent => Puppet::Type.type(:ini_setting).provider(:ruby)
) do ) do
@ -15,7 +15,7 @@ Puppet::Type.type(:quantum_metadata_agent_config).provide(:ini_setting,
end end
def file_path def file_path
'/etc/quantum/metadata_agent.ini' '/etc/neutron/metadata_agent.ini'
end end
end end

View File

@ -1,20 +1,20 @@
# Load the Quantum provider library to help # Load the Neutron provider library to help
require File.join(File.dirname(__FILE__), '..','..','..', 'puppet/provider/quantum') require File.join(File.dirname(__FILE__), '..','..','..', 'puppet/provider/neutron')
Puppet::Type.type(:quantum_net).provide( Puppet::Type.type(:neutron_net).provide(
:quantum, :neutron,
:parent => Puppet::Provider::Quantum :parent => Puppet::Provider::Neutron
) do ) do
desc "Manage quantum network" desc "Manage neutron network"
optional_commands :quantum => 'quantum' optional_commands :neutron => 'neutron'
optional_commands :keystone => 'keystone' optional_commands :keystone => 'keystone'
optional_commands :sleep => 'sleep' optional_commands :sleep => 'sleep'
# I need to setup caching and what-not to make this lookup performance not suck # I need to setup caching and what-not to make this lookup performance not suck
def self.instances def self.instances
network_list = auth_quantum("net-list") network_list = auth_neutron("net-list")
return [] if network_list.chomp.empty? return [] if network_list.chomp.empty?
network_list.split("\n")[3..-2].collect do |net| network_list.split("\n")[3..-2].collect do |net|
@ -48,8 +48,8 @@ Puppet::Type.type(:quantum_net).provide(
def create def create
# quantum net-create --tenant_id $tenant_id $tenant_network_name --provider:network_type vlan --provider:physical_network physnet2 --provider:segmentation_id 501) # neutron net-create --tenant_id $tenant_id $tenant_network_name --provider:network_type vlan --provider:physical_network physnet2 --provider:segmentation_id 501)
# quantum net-create $ext_net_name -- --router:external=True --tenant_id $tenant_id --provider:network_type flat) # neutron net-create $ext_net_name -- --router:external=True --tenant_id $tenant_id --provider:network_type flat)
optional_opts = [] optional_opts = []
{ {
:router_ext => '--router:external', :router_ext => '--router:external',
@ -65,9 +65,9 @@ Puppet::Type.type(:quantum_net).provide(
optional_opts.push("--shared") optional_opts.push("--shared")
end end
check_quantum_api_availability(120) check_neutron_api_availability(120)
auth_quantum('net-create', auth_neutron('net-create',
'--tenant_id', tenant_id[@resource[:tenant]], '--tenant_id', tenant_id[@resource[:tenant]],
@resource[:name], @resource[:name],
optional_opts optional_opts
@ -75,7 +75,7 @@ Puppet::Type.type(:quantum_net).provide(
end end
def destroy def destroy
auth_quantum("net-delete", @resource[:name]) auth_neutron("net-delete", @resource[:name])
end end
private private

View File

@ -0,0 +1,24 @@
Puppet::Type.type(:ini_setting)#.providers
Puppet::Type.type(:neutron_plugin_ovs).provide(
:ini_setting,
:parent => Puppet::Type.type(:ini_setting).provider(:ruby)
) do
def section
resource[:name].split('/', 2).first
end
def setting
resource[:name].split('/', 2).last
end
def separator
'='
end
def file_path
'/etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini'
end
end

View File

@ -1,20 +1,20 @@
# Load the Quantum provider library to help # Load the Neutron provider library to help
require File.join(File.dirname(__FILE__), '..','..','..', 'puppet/provider/quantum') require File.join(File.dirname(__FILE__), '..','..','..', 'puppet/provider/neutron')
Puppet::Type.type(:quantum_router).provide( Puppet::Type.type(:neutron_router).provide(
:quantum, :neutron,
:parent => Puppet::Provider::Quantum :parent => Puppet::Provider::Neutron
) do ) do
desc "Manage quantum router" desc "Manage neutron router"
optional_commands :quantum => 'quantum' optional_commands :neutron => 'neutron'
optional_commands :keystone => 'keystone' optional_commands :keystone => 'keystone'
optional_commands :sleep => 'sleep' optional_commands :sleep => 'sleep'
# I need to setup caching and what-not to make this lookup performance not suck # I need to setup caching and what-not to make this lookup performance not suck
def self.instances def self.instances
router_list = auth_quantum("router-list") router_list = auth_neutron("router-list")
return [] if router_list.chomp.empty? return [] if router_list.chomp.empty?
router_list.split("\n")[3..-2].collect do |net| router_list.split("\n")[3..-2].collect do |net|
@ -54,9 +54,9 @@ Puppet::Type.type(:quantum_router).provide(
admin_state.push('--admin-state-down') admin_state.push('--admin-state-down')
end end
check_quantum_api_availability(120) check_neutron_api_availability(120)
router_info = auth_quantum('router-create', router_info = auth_neutron('router-create',
'--tenant_id', tenant_id[@resource[:tenant]], '--tenant_id', tenant_id[@resource[:tenant]],
admin_state, admin_state,
@resource[:name] @resource[:name]
@ -66,7 +66,7 @@ Puppet::Type.type(:quantum_router).provide(
# add an internal networks interfaces to a router # add an internal networks interfaces to a router
@resource[:int_subnets].each do |subnet| @resource[:int_subnets].each do |subnet|
auth_quantum('router-interface-add', auth_neutron('router-interface-add',
@resource[:name], @resource[:name],
subnet subnet
) )
@ -74,20 +74,20 @@ Puppet::Type.type(:quantum_router).provide(
#Set an gateway interface to the specified external network #Set an gateway interface to the specified external network
if @resource[:ext_net] if @resource[:ext_net]
auth_quantum('router-gateway-set', auth_neutron('router-gateway-set',
@resource[:name], @resource[:name],
@resource[:ext_net] @resource[:ext_net]
) )
# update router_id option # update router_id option
# router_id = self.class.get_id(router_info) # router_id = self.class.get_id(router_info)
# ql3a_conf = Puppet::Type.type(:quantum_l3_agent_config).new(:name => "DEFAULT/router_id", :value => router_id) # ql3a_conf = Puppet::Type.type(:neutron_l3_agent_config).new(:name => "DEFAULT/router_id", :value => router_id)
# ql3a_conf.provider.create # ql3a_conf.provider.create
end end
end end
def destroy def destroy
auth_quantum("router-delete", @resource[:name]) auth_neutron("router-delete", @resource[:name])
end end
private private

View File

@ -1,20 +1,20 @@
# Load the Quantum provider library to help # Load the Neutron provider library to help
require File.join(File.dirname(__FILE__), '..','..','..', 'puppet/provider/quantum') require File.join(File.dirname(__FILE__), '..','..','..', 'puppet/provider/neutron')
Puppet::Type.type(:quantum_subnet).provide( Puppet::Type.type(:neutron_subnet).provide(
:quantum, :neutron,
:parent => Puppet::Provider::Quantum :parent => Puppet::Provider::Neutron
) do ) do
desc "Manage quantum subnet/networks" desc "Manage neutron subnet/networks"
optional_commands :quantum => 'quantum' optional_commands :neutron => 'neutron'
optional_commands :keystone => 'keystone' optional_commands :keystone => 'keystone'
optional_commands :sleep => 'sleep' optional_commands :sleep => 'sleep'
# I need to setup caching and what-not to make this lookup performance not suck # I need to setup caching and what-not to make this lookup performance not suck
def self.instances def self.instances
network_list = auth_quantum("subnet-list") network_list = auth_neutron("subnet-list")
return [] if network_list.chomp.empty? return [] if network_list.chomp.empty?
network_list.split("\n")[3..-2].collect do |net| network_list.split("\n")[3..-2].collect do |net|
@ -48,9 +48,9 @@ Puppet::Type.type(:quantum_subnet).provide(
def create def create
# tenant_subnet_id=$(get_id quantum subnet-create --tenant_id $tenant_id --ip_version 4 $tenant_net_id $fixed_range --gateway $network_gateway) # tenant_subnet_id=$(get_id neutron subnet-create --tenant_id $tenant_id --ip_version 4 $tenant_net_id $fixed_range --gateway $network_gateway)
# quantum subnet-create --tenant-id $tenant --name subnet01 net01 192.168.101.0/24 # neutron subnet-create --tenant-id $tenant --name subnet01 net01 192.168.101.0/24
# quantum subnet-create --tenant-id $tenant --name pub_subnet01 --gateway 10.0.1.254 public01 10.0.1.0/24 --enable_dhcp False # neutron subnet-create --tenant-id $tenant --name pub_subnet01 --gateway 10.0.1.254 public01 10.0.1.0/24 --enable_dhcp False
# --allocation-pool start=$pool_floating_start,end=$pool_floating_end # --allocation-pool start=$pool_floating_start,end=$pool_floating_end
# --dns_nameservers list=true 8.8.8.8 # --dns_nameservers list=true 8.8.8.8
@ -75,9 +75,9 @@ Puppet::Type.type(:quantum_subnet).provide(
end end
end end
check_quantum_api_availability(120) check_neutron_api_availability(120)
auth_quantum('subnet-create', auth_neutron('subnet-create',
'--tenant-id', tenant_id[@resource[:tenant]], '--tenant-id', tenant_id[@resource[:tenant]],
'--name', @resource[:name], '--name', @resource[:name],
ip_opts, ip_opts,
@ -88,7 +88,7 @@ Puppet::Type.type(:quantum_subnet).provide(
end end
def destroy def destroy
auth_quantum("subnet-delete", @resource[:name]) auth_neutron("subnet-delete", @resource[:name])
end end
private private

View File

@ -1,4 +1,4 @@
Puppet::Type.newtype(:quantum_api_config) do Puppet::Type.newtype(:neutron_api_config) do
ensurable ensurable

View File

@ -1,9 +1,9 @@
Puppet::Type.newtype(:quantum_config) do Puppet::Type.newtype(:neutron_config) do
ensurable ensurable
newparam(:name, :namevar => true) do newparam(:name, :namevar => true) do
desc 'Section/setting name to manage from quantum.conf' desc 'Section/setting name to manage from neutron.conf'
newvalues(/\S+\/\S+/) newvalues(/\S+\/\S+/)
end end

View File

@ -1,4 +1,4 @@
Puppet::Type.newtype(:quantum_dhcp_agent_config) do Puppet::Type.newtype(:neutron_dhcp_agent_config) do
ensurable ensurable

View File

@ -1,4 +1,4 @@
Puppet::Type.newtype(:quantum_floatingip_pool) do Puppet::Type.newtype(:neutron_floatingip_pool) do
@doc = "Manage creation/deletion of floating IP pool" @doc = "Manage creation/deletion of floating IP pool"
@ -28,7 +28,7 @@ Puppet::Type.newtype(:quantum_floatingip_pool) do
end end
autorequire(:package) do autorequire(:package) do
['python-quantumclient'] ['python-neutronclient']
end end
end end
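A hedged usage sketch for this type: the resource title is the tenant whose pool is managed (the provider above resolves it via tenant_id[resource[:name]]) and ext_net names the external network; both values here are illustrative.
neutron_floatingip_pool { 'admin':
  ensure  => present,
  ext_net => 'net04_ext',  # external network name, illustrative
}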

View File

@ -1,4 +1,4 @@
Puppet::Type.newtype(:quantum_l3_agent_config) do Puppet::Type.newtype(:neutron_l3_agent_config) do
ensurable ensurable

View File

@ -1,4 +1,4 @@
Puppet::Type.newtype(:quantum_metadata_agent_config) do Puppet::Type.newtype(:neutron_metadata_agent_config) do
ensurable ensurable
newparam(:name, :namevar => true) do newparam(:name, :namevar => true) do

View File

@ -1,6 +1,6 @@
Puppet::Type.newtype(:quantum_net) do Puppet::Type.newtype(:neutron_net) do
@doc = "Manage creation/deletion of quantum networks" @doc = "Manage creation/deletion of neutron networks"
ensurable ensurable
@ -37,12 +37,12 @@ Puppet::Type.newtype(:quantum_net) do
# raise(Puppet::Error, 'Label must be set') unless self[:label] # raise(Puppet::Error, 'Label must be set') unless self[:label]
# end # end
# Require the Quantum service to be running # Require the neutron service to be running
# autorequire(:service) do # autorequire(:service) do
# ['quantum-server'] # ['neutron-server']
# end # end
autorequire(:package) do autorequire(:package) do
['python-quantumclient'] ['python-neutronclient']
end end
end end

View File

@ -1,9 +1,9 @@
Puppet::Type.newtype(:quantum_plugin_ovs) do Puppet::Type.newtype(:neutron_plugin_ovs) do
ensurable ensurable
newparam(:name, :namevar => true) do newparam(:name, :namevar => true) do
desc 'Section/setting name to manage from ovs_quantum_plugin.ini' desc 'Section/setting name to manage from ovs_neutron_plugin.ini'
newvalues(/\S+\/\S+/) newvalues(/\S+\/\S+/)
end end

View File

@ -1,6 +1,6 @@
Puppet::Type.newtype(:quantum_router) do Puppet::Type.newtype(:neutron_router) do
@doc = "Manage creation/deletion of quantum routers" @doc = "Manage creation/deletion of neutron routers"
ensurable ensurable
@ -27,9 +27,8 @@ Puppet::Type.newtype(:quantum_router) do
end end
# Require the Quantum service to be running
autorequire(:package) do autorequire(:package) do
['python-quantumclient'] ['python-neutronclient']
end end
end end

View File

@ -1,6 +1,6 @@
Puppet::Type.newtype(:quantum_subnet) do Puppet::Type.newtype(:neutron_subnet) do
@doc = "Manage creation/deletion of quantum subnet/networks" @doc = "Manage creation/deletion of neutron subnet/networks"
ensurable ensurable
@ -58,13 +58,13 @@ Puppet::Type.newtype(:quantum_subnet) do
# raise(Puppet::Error, 'Label must be set') unless self[:label] # raise(Puppet::Error, 'Label must be set') unless self[:label]
# end # end
# Require the Quantum service to be running # Require the neutron service to be running
# autorequire(:service) do # autorequire(:service) do
# ['quantum-server'] # ['neutron-server']
# end # end
autorequire(:package) do autorequire(:package) do
['python-quantumclient'] ['python-neutronclient']
end end
end end
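Taken together, the net, subnet and router types above might be declared roughly as follows. This is a sketch only: the parameter names (tenant, int_subnets, ext_net) come from the providers shown earlier, the titles and values are illustrative, and subnet-specific options (network, CIDR, gateway) are omitted.
neutron_net { 'net04':
  ensure => present,
  tenant => 'admin',
}
neutron_subnet { 'net04__subnet':
  ensure  => present,
  tenant  => 'admin',
  # network/CIDR/gateway parameters omitted; see the type definition for the full set
  require => Neutron_net['net04'],
}
neutron_router { 'router04':
  ensure      => present,
  tenant      => 'admin',
  int_subnets => ['net04__subnet'],
  ext_net     => 'net04_ext',
  require     => Neutron_subnet['net04__subnet'],
}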

View File

@ -0,0 +1,226 @@
#
class neutron::agents::dhcp (
$neutron_config = {},
$verbose = 'False',
$debug = 'False',
$interface_driver = 'neutron.agent.linux.interface.OVSInterfaceDriver',
$dhcp_driver = 'neutron.agent.linux.dhcp.Dnsmasq',
$dhcp_agent_manager='neutron.agent.dhcp_agent.DhcpAgentWithStateReport',
$state_path = '/var/lib/neutron',
$service_provider = 'generic',
) {
include 'neutron::params'
if $::operatingsystem == 'Ubuntu' {
if $service_provider == 'pacemaker' {
file { "/etc/init/neutron-dhcp-agent.override":
replace => "no",
ensure => "present",
content => "manual",
mode => 644,
before => Package['neutron-dhcp-agent'],
}
}
}
if $::neutron::params::dhcp_agent_package {
Package['neutron'] -> Package['neutron-dhcp-agent']
$dhcp_agent_package = 'neutron-dhcp-agent'
package { 'neutron-dhcp-agent':
name => $::neutron::params::dhcp_agent_package
}
} else {
$dhcp_agent_package = $::neutron::params::package_name
}
include 'neutron::waist_setup'
anchor {'neutron-dhcp-agent': }
#Anchor['neutron-metadata-agent-done'] -> Anchor['neutron-dhcp-agent']
Service<| title=='neutron-server' |> -> Anchor['neutron-dhcp-agent']
case $dhcp_driver {
/\.Dnsmasq/ : {
package { $::neutron::params::dnsmasq_packages: ensure => present, }
Package[$::neutron::params::dnsmasq_packages] -> Package[$dhcp_agent_package]
$dhcp_server_packages = $::neutron::params::dnsmasq_packages
}
default : {
fail("${dhcp_driver} is not supported as of now")
}
}
Package[$dhcp_agent_package] -> neutron_dhcp_agent_config <| |>
Package[$dhcp_agent_package] -> neutron_config <| |>
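# Populate the DHCP agent configuration from the $neutron_config hash (keystone credentials and L3/DHCP options).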
neutron_dhcp_agent_config {
'DEFAULT/debug': value => $debug;
'DEFAULT/verbose': value => $verbose;
'DEFAULT/state_path': value => $state_path;
'DEFAULT/interface_driver': value => $interface_driver;
'DEFAULT/dhcp_driver': value => $dhcp_driver;
'DEFAULT/dhcp_agent_manager':value => $dhcp_agent_manager;
'DEFAULT/auth_url': value => $neutron_config['keystone']['auth_url'];
'DEFAULT/admin_user': value => $neutron_config['keystone']['admin_user'];
'DEFAULT/admin_password': value => $neutron_config['keystone']['admin_password'];
'DEFAULT/admin_tenant_name': value => $neutron_config['keystone']['admin_tenant_name'];
'DEFAULT/resync_interval': value => $neutron_config['L3']['resync_interval'];
'DEFAULT/use_namespaces': value => $neutron_config['L3']['use_namespaces'];
'DEFAULT/root_helper': value => $neutron_config['root_helper'];
'DEFAULT/signing_dir': value => $neutron_config['keystone']['signing_dir'];
'DEFAULT/enable_isolated_metadata': value => $neutron_config['L3']['dhcp_agent']['enable_isolated_metadata'];
'DEFAULT/enable_metadata_network': value => $neutron_config['L3']['dhcp_agent']['enable_metadata_network'];
}
Service <| title == 'neutron-server' |> -> Service['neutron-dhcp-service']
if $service_provider == 'pacemaker' {
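# HA mode: manage the DHCP agent as a Pacemaker OCF resource, colocated with the OVS and metadata agent clones, while the distro-provided service stays stopped and disabled.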
Service <| title == 'neutron-server' |> -> Cs_shadow['dhcp']
neutron_dhcp_agent_config <| |> -> Cs_shadow['dhcp']
# OCF script for pacemaker
# and its dependencies
file {'neutron-dhcp-agent-ocf':
path=>'/usr/lib/ocf/resource.d/mirantis/neutron-agent-dhcp',
mode => 755,
owner => root,
group => root,
source => "puppet:///modules/neutron/ocf/neutron-agent-dhcp",
}
Package['pacemaker'] -> File['neutron-dhcp-agent-ocf']
File['neutron-dhcp-agent-ocf'] -> Cs_resource["p_${::neutron::params::dhcp_agent_service}"]
File['q-agent-cleanup.py'] -> Cs_resource["p_${::neutron::params::dhcp_agent_service}"]
File<| title=='neutron-logging.conf' |> ->
cs_resource { "p_${::neutron::params::dhcp_agent_service}":
ensure => present,
cib => 'dhcp',
primitive_class => 'ocf',
provided_by => 'mirantis',
primitive_type => 'neutron-agent-dhcp',
#require => File['neutron-agent-dhcp'],
parameters => {
'os_auth_url' => $neutron_config['keystone']['auth_url'],
'tenant' => $neutron_config['keystone']['admin_tenant_name'],
'username' => $neutron_config['keystone']['admin_user'],
'password' => $neutron_config['keystone']['admin_password'],
}
,
operations => {
'monitor' => {
'interval' => '20',
'timeout' => '30'
}
,
'start' => {
'timeout' => '360'
}
,
'stop' => {
'timeout' => '360'
}
}
,
}
Cs_commit <| title == 'ovs' |> -> Cs_shadow <| title == 'dhcp' |>
Cs_commit <| title == 'neutron-metadata-agent' |> -> Cs_shadow <| title == 'dhcp' |>
::corosync::cleanup { "p_${::neutron::params::dhcp_agent_service}": }
Cs_commit['dhcp'] -> ::Corosync::Cleanup["p_${::neutron::params::dhcp_agent_service}"]
Cs_commit['dhcp'] ~> ::Corosync::Cleanup["p_${::neutron::params::dhcp_agent_service}"]
::Corosync::Cleanup["p_${::neutron::params::dhcp_agent_service}"] -> Service['neutron-dhcp-service']
Cs_resource["p_${::neutron::params::dhcp_agent_service}"] -> Cs_colocation['dhcp-with-ovs']
Cs_resource["p_${::neutron::params::dhcp_agent_service}"] -> Cs_order['dhcp-after-ovs']
Cs_resource["p_${::neutron::params::dhcp_agent_service}"] -> Cs_colocation['dhcp-with-metadata']
Cs_resource["p_${::neutron::params::dhcp_agent_service}"] -> Cs_order['dhcp-after-metadata']
cs_shadow { 'dhcp': cib => 'dhcp' }
cs_commit { 'dhcp': cib => 'dhcp' }
cs_colocation { 'dhcp-with-ovs':
ensure => present,
cib => 'dhcp',
primitives => [
"p_${::neutron::params::dhcp_agent_service}",
"clone_p_${::neutron::params::ovs_agent_service}"
],
score => 'INFINITY',
} ->
cs_order { 'dhcp-after-ovs':
ensure => present,
cib => 'dhcp',
first => "clone_p_${::neutron::params::ovs_agent_service}",
second => "p_${::neutron::params::dhcp_agent_service}",
score => 'INFINITY',
} -> Service['neutron-dhcp-service']
cs_colocation { 'dhcp-with-metadata':
ensure => present,
cib => 'dhcp',
primitives => [
"p_${::neutron::params::dhcp_agent_service}",
"clone_p_neutron-metadata-agent"
],
score => 'INFINITY',
} ->
cs_order { 'dhcp-after-metadata':
ensure => present,
cib => 'dhcp',
first => "clone_p_neutron-metadata-agent",
second => "p_${::neutron::params::dhcp_agent_service}",
score => 'INFINITY',
} -> Service['neutron-dhcp-service']
Service['neutron-dhcp-service_stopped'] -> Cs_resource["p_${::neutron::params::dhcp_agent_service}"]
service { 'neutron-dhcp-service_stopped':
name => "${::neutron::params::dhcp_agent_service}",
enable => false,
ensure => stopped,
hasstatus => true,
hasrestart => true,
provider => $::neutron::params::service_provider,
require => [Package[$dhcp_agent_package], Class['neutron']],
}
Neutron::Network::Provider_router<||> -> Service<| title=='neutron-dhcp-service' |>
service { 'neutron-dhcp-service':
name => "p_${::neutron::params::dhcp_agent_service}",
enable => true,
ensure => running,
hasstatus => true,
hasrestart => false,
provider => $service_provider,
require => [Package[$dhcp_agent_package], Class['neutron'], Service['neutron-ovs-agent']],
}
} else {
Neutron_config <| |> ~> Service['neutron-dhcp-service']
Neutron_dhcp_agent_config <| |> ~> Service['neutron-dhcp-service']
File<| title=='neutron-logging.conf' |> ->
service { 'neutron-dhcp-service':
name => $::neutron::params::dhcp_agent_service,
enable => true,
ensure => running,
hasstatus => true,
hasrestart => true,
provider => $::neutron::params::service_provider,
require => [Package[$dhcp_agent_package], Class['neutron'], Service['neutron-ovs-agent']],
}
}
Class[neutron::waistline] -> Service[neutron-dhcp-service]
Anchor['neutron-dhcp-agent'] ->
Neutron_dhcp_agent_config <| |> ->
Cs_resource<| title=="p_${::neutron::params::dhcp_agent_service}" |> ->
Service['neutron-dhcp-service'] ->
Anchor['neutron-dhcp-agent-done']
anchor {'neutron-dhcp-agent-done': }
}
# vim: set ts=2 sw=2 et :

View File

@ -0,0 +1,243 @@
#
class neutron::agents::l3 (
$neutron_config = {},
$verbose = 'False',
$debug = 'False',
$create_networks = true, # TODO: currently unused in this class
$interface_driver = 'neutron.agent.linux.interface.OVSInterfaceDriver',
$service_provider = 'generic'
) {
include 'neutron::params'
anchor {'neutron-l3': }
Service<| title=='neutron-server' |> -> Anchor['neutron-l3']
if $::operatingsystem == 'Ubuntu' {
if $service_provider == 'pacemaker' {
file { "/etc/init/neutron-l3-agent.override":
replace => "no",
ensure => "present",
content => "manual",
mode => 644,
before => Package['neutron-l3'],
}
}
}
if $::neutron::params::l3_agent_package {
$l3_agent_package = 'neutron-l3'
package { 'neutron-l3':
name => $::neutron::params::l3_agent_package,
ensure => present,
}
# do not move this outside of the if block
Package['neutron-l3'] -> Neutron_l3_agent_config <| |>
} else {
$l3_agent_package = $::neutron::params::package_name
}
include 'neutron::waist_setup'
Neutron_config <| |> -> Neutron_l3_agent_config <| |>
Neutron_l3_agent_config <| |> -> Service['neutron-l3']
# Quantum_l3_agent_config <| |> -> Quantum_router <| |>
# Quantum_l3_agent_config <| |> -> Quantum_net <| |>
# Quantum_l3_agent_config <| |> -> Quantum_subnet <| |>
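# Populate the L3 agent configuration from the $neutron_config hash.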
neutron_l3_agent_config {
'DEFAULT/debug': value => $debug;
'DEFAULT/verbose': value => $verbose;
'DEFAULT/root_helper': value => $neutron_config['root_helper'];
'DEFAULT/auth_url': value => $neutron_config['keystone']['auth_url'];
'DEFAULT/admin_user': value => $neutron_config['keystone']['admin_user'];
'DEFAULT/admin_password': value => $neutron_config['keystone']['admin_password'];
'DEFAULT/admin_tenant_name': value => $neutron_config['keystone']['admin_tenant_name'];
'DEFAULT/metadata_ip': value => $neutron_config['metadata']['metadata_ip'];
'DEFAULT/metadata_port': value => $neutron_config['metadata']['metadata_port'];
'DEFAULT/use_namespaces': value => $neutron_config['L3']['use_namespaces'];
'DEFAULT/send_arp_for_ha': value => $neutron_config['L3']['send_arp_for_ha'];
'DEFAULT/periodic_interval': value => $neutron_config['L3']['resync_interval'];
'DEFAULT/periodic_fuzzy_delay': value => $neutron_config['L3']['resync_fuzzy_delay'];
'DEFAULT/external_network_bridge': value => $neutron_config['L3']['public_bridge'];
}
neutron_l3_agent_config{'DEFAULT/router_id': ensure => absent }
Anchor['neutron-l3'] ->
Neutron_l3_agent_config <| |> ->
Exec<| title=='setup_router_id' |> ->
#Exec<| title=='update_default_route_metric' |> ->
Service<| title=='neutron-l3' |> ->
#Exec<| title=='settle-down-default-route' |> ->
Anchor['neutron-l3-done']
# rootwrap error with L3 agent
# https://bugs.launchpad.net/neutron/+bug/1069966
$iptables_manager = "/usr/lib/${::neutron::params::python_path}/neutron/agent/linux/iptables_manager.py"
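# Strip the hard-coded /sbin/ prefix from the affected line so the command is resolved via PATH (see the bug above).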
exec { 'patch-iptables-manager':
command => "sed -i '272 s|/sbin/||' ${iptables_manager}",
onlyif => "sed -n '272p' ${iptables_manager} | grep -q '/sbin/'",
path => ['/bin', '/sbin', '/usr/bin', '/usr/sbin'],
require => [Anchor['neutron-l3'], Package[$l3_agent_package]],
}
Service<| title == 'neutron-server' |> -> Service['neutron-l3']
if $service_provider == 'pacemaker' {
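# HA mode: run a single L3 agent as a Pacemaker OCF resource, colocated with the OVS and metadata agent clones and, where possible, scheduled away from the DHCP agent.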
Service<| title == 'neutron-server' |> -> Cs_shadow['l3']
Neutron_l3_agent_config <||> -> Cs_shadow['l3']
# OCF script for pacemaker
# and its dependencies
file {'neutron-l3-agent-ocf':
path=>'/usr/lib/ocf/resource.d/mirantis/neutron-agent-l3',
mode => 755,
owner => root,
group => root,
source => "puppet:///modules/neutron/ocf/neutron-agent-l3",
}
Package['pacemaker'] -> File['neutron-l3-agent-ocf']
File['neutron-l3-agent-ocf'] -> Cs_resource["p_${::neutron::params::l3_agent_service}"]
File['q-agent-cleanup.py'] -> Cs_resource["p_${::neutron::params::l3_agent_service}"]
cs_resource { "p_${::neutron::params::l3_agent_service}":
ensure => present,
cib => 'l3',
primitive_class => 'ocf',
provided_by => 'mirantis',
primitive_type => 'neutron-agent-l3',
parameters => {
'debug' => $debug,
'syslog' => $::use_syslog,
'os_auth_url' => $neutron_config['keystone']['auth_url'],
'tenant' => $neutron_config['keystone']['admin_tenant_name'],
'username' => $neutron_config['keystone']['admin_user'],
'password' => $neutron_config['keystone']['admin_password'],
},
operations => {
'monitor' => {
'interval' => '20',
'timeout' => '30'
}
,
'start' => {
'timeout' => '360'
}
,
'stop' => {
'timeout' => '360'
}
},
}
File<| title=='neutron-logging.conf' |> -> Cs_resource["p_${::neutron::params::l3_agent_service}"]
Exec<| title=='setup_router_id' |> -> Cs_resource["p_${::neutron::params::l3_agent_service}"]
cs_shadow { 'l3': cib => 'l3' }
cs_commit { 'l3': cib => 'l3' }
###
# Be careful with the ordering of Cs_shadow and Cs_commit resources:
# only one shadow CIB may be left uncommitted at any time.
Cs_commit <| title == 'dhcp' |> -> Cs_shadow <| title == 'l3' |>
Cs_commit <| title == 'ovs' |> -> Cs_shadow <| title == 'l3' |>
Cs_commit <| title == 'neutron-metadata-agent' |> -> Cs_shadow <| title == 'l3' |>
::corosync::cleanup{"p_${::neutron::params::l3_agent_service}": }
Cs_commit['l3'] -> ::Corosync::Cleanup["p_${::neutron::params::l3_agent_service}"]
Cs_commit['l3'] ~> ::Corosync::Cleanup["p_${::neutron::params::l3_agent_service}"]
::Corosync::Cleanup["p_${::neutron::params::l3_agent_service}"] -> Service['neutron-l3']
Cs_resource["p_${::neutron::params::l3_agent_service}"] -> Cs_colocation['l3-with-ovs']
Cs_resource["p_${::neutron::params::l3_agent_service}"] -> Cs_order['l3-after-ovs']
Cs_resource["p_${::neutron::params::l3_agent_service}"] -> Cs_colocation['l3-with-metadata']
Cs_resource["p_${::neutron::params::l3_agent_service}"] -> Cs_order['l3-after-metadata']
cs_colocation { 'l3-with-ovs':
ensure => present,
cib => 'l3',
primitives => ["p_${::neutron::params::l3_agent_service}", "clone_p_${::neutron::params::ovs_agent_service}"],
score => 'INFINITY',
} ->
cs_order { 'l3-after-ovs':
ensure => present,
cib => 'l3',
first => "clone_p_${::neutron::params::ovs_agent_service}",
second => "p_${::neutron::params::l3_agent_service}",
score => 'INFINITY',
} -> Service['neutron-l3']
cs_colocation { 'l3-with-metadata':
ensure => present,
cib => 'l3',
primitives => [
"p_${::neutron::params::l3_agent_service}",
"clone_p_neutron-metadata-agent"
],
score => 'INFINITY',
} ->
cs_order { 'l3-after-metadata':
ensure => present,
cib => "l3",
first => "clone_p_neutron-metadata-agent",
second => "p_${::neutron::params::l3_agent_service}",
score => 'INFINITY',
} -> Service['neutron-l3']
# start the DHCP and L3 agents on different controllers when possible
cs_colocation { 'dhcp-without-l3':
ensure => present,
cib => 'l3',
score => '-100',
primitives => [
"p_${::neutron::params::dhcp_agent_service}",
"p_${::neutron::params::l3_agent_service}"
],
}
# Ensure service is stopped and disabled by upstart/init/etc.
Anchor['neutron-l3'] ->
Service['neutron-l3-init_stopped'] ->
Cs_resource["p_${::neutron::params::l3_agent_service}"] ->
Service['neutron-l3'] ->
Anchor['neutron-l3-done']
service { 'neutron-l3-init_stopped':
name => "${::neutron::params::l3_agent_service}",
enable => false,
ensure => stopped,
hasstatus => true,
hasrestart => true,
provider => $::neutron::params::service_provider,
}
service { 'neutron-l3':
name => "p_${::neutron::params::l3_agent_service}",
enable => true,
ensure => running,
hasstatus => true,
hasrestart => false,
provider => "pacemaker",
}
} else {
Neutron_config <| |> ~> Service['neutron-l3']
Neutron_l3_agent_config <| |> ~> Service['neutron-l3']
File<| title=='neutron-logging.conf' |> ->
service { 'neutron-l3':
name => $::neutron::params::l3_agent_service,
enable => true,
ensure => running,
hasstatus => true,
hasrestart => true,
provider => $::neutron::params::service_provider,
}
}
anchor {'neutron-l3-cellar': }
Anchor['neutron-l3-cellar'] -> Anchor['neutron-l3-done']
anchor {'neutron-l3-done': }
Anchor['neutron-l3'] -> Anchor['neutron-l3-done']
}
# vim: set ts=2 sw=2 et :

View File

@ -1,76 +1,76 @@
class quantum::agents::metadata ( class neutron::agents::metadata (
$quantum_config = {}, $neutron_config = {},
$debug = false, $debug = false,
$verbose = false, $verbose = false,
$service_provider = 'generic' $service_provider = 'generic'
) { ) {
$cib_name = "quantum-metadata-agent" $cib_name = "neutron-metadata-agent"
$res_name = "p_$cib_name" $res_name = "p_$cib_name"
include 'quantum::params' include 'neutron::params'
anchor {'quantum-metadata-agent': } anchor {'neutron-metadata-agent': }
Service<| title=='quantum-server' |> -> Anchor['quantum-metadata-agent'] Service<| title=='neutron-server' |> -> Anchor['neutron-metadata-agent']
# add instructions to nova.conf # add instructions to nova.conf
nova_config { nova_config {
'DEFAULT/service_quantum_metadata_proxy': value => true; 'DEFAULT/service_neutron_metadata_proxy': value => true;
'DEFAULT/quantum_metadata_proxy_shared_secret': value => $quantum_config['metadata']['metadata_proxy_shared_secret']; 'DEFAULT/neutron_metadata_proxy_shared_secret': value => $neutron_config['metadata']['metadata_proxy_shared_secret'];
} -> Nova::Generic_service<| title=='api' |> } -> Nova::Generic_service<| title=='api' |>
quantum_metadata_agent_config { neutron_metadata_agent_config {
'DEFAULT/debug': value => $debug; 'DEFAULT/debug': value => $debug;
'DEFAULT/auth_region': value => $quantum_config['keystone']['auth_region']; 'DEFAULT/auth_region': value => $neutron_config['keystone']['auth_region'];
'DEFAULT/auth_url': value => $quantum_config['keystone']['auth_url']; 'DEFAULT/auth_url': value => $neutron_config['keystone']['auth_url'];
'DEFAULT/admin_user': value => $quantum_config['keystone']['admin_user']; 'DEFAULT/admin_user': value => $neutron_config['keystone']['admin_user'];
'DEFAULT/admin_password': value => $quantum_config['keystone']['admin_password']; 'DEFAULT/admin_password': value => $neutron_config['keystone']['admin_password'];
'DEFAULT/admin_tenant_name': value => $quantum_config['keystone']['admin_tenant_name']; 'DEFAULT/admin_tenant_name': value => $neutron_config['keystone']['admin_tenant_name'];
'DEFAULT/nova_metadata_ip': value => $quantum_config['metadata']['nova_metadata_ip']; 'DEFAULT/nova_metadata_ip': value => $neutron_config['metadata']['nova_metadata_ip'];
'DEFAULT/nova_metadata_port': value => $quantum_config['metadata']['nova_metadata_port']; 'DEFAULT/nova_metadata_port': value => $neutron_config['metadata']['nova_metadata_port'];
'DEFAULT/use_namespaces': value => $quantum_config['L3']['use_namespaces']; 'DEFAULT/use_namespaces': value => $neutron_config['L3']['use_namespaces'];
'DEFAULT/metadata_proxy_shared_secret': value => $quantum_config['metadata']['metadata_proxy_shared_secret']; 'DEFAULT/metadata_proxy_shared_secret': value => $neutron_config['metadata']['metadata_proxy_shared_secret'];
} }
if $::quantum::params::metadata_agent_package { if $::neutron::params::metadata_agent_package {
package { 'quantum-metadata-agent': package { 'neutron-metadata-agent':
name => $::quantum::params::metadata_agent_package, name => $::neutron::params::metadata_agent_package,
ensure => present, ensure => present,
} }
# do not move it to outside this IF # do not move it to outside this IF
Anchor['quantum-metadata-agent'] -> Anchor['neutron-metadata-agent'] ->
Package['quantum-metadata-agent'] -> Package['neutron-metadata-agent'] ->
Quantum_metadata_agent_config<||> Neutron_metadata_agent_config<||>
} }
if $service_provider == 'generic' { if $service_provider == 'generic' {
# non-HA architecture # non-HA architecture
service { 'quantum-metadata-agent': service { 'neutron-metadata-agent':
name => $::quantum::params::metadata_agent_service, name => $::neutron::params::metadata_agent_service,
enable => true, enable => true,
ensure => running, ensure => running,
} }
Anchor['quantum-metadata-agent'] -> Anchor['neutron-metadata-agent'] ->
Quantum_metadata_agent_config<||> -> Neutron_metadata_agent_config<||> ->
Service['quantum-metadata-agent'] -> Service['neutron-metadata-agent'] ->
Anchor['quantum-metadata-agent-done'] Anchor['neutron-metadata-agent-done']
} else { } else {
# OCF script for pacemaker # OCF script for pacemaker
# and its dependencies # and its dependencies
file {'quantum-metadata-agent-ocf': file {'neutron-metadata-agent-ocf':
path=>'/usr/lib/ocf/resource.d/mirantis/quantum-agent-metadata', path=>'/usr/lib/ocf/resource.d/mirantis/neutron-agent-metadata',
mode => 755, mode => 755,
owner => root, owner => root,
group => root, group => root,
source => "puppet:///modules/quantum/ocf/quantum-agent-metadata", source => "puppet:///modules/neutron/ocf/neutron-agent-metadata",
} }
Package['pacemaker'] -> File['quantum-metadata-agent-ocf'] Package['pacemaker'] -> File['neutron-metadata-agent-ocf']
service { 'quantum-metadata-agent__disabled': service { 'neutron-metadata-agent__disabled':
name => $::quantum::params::metadata_agent_service, name => $::neutron::params::metadata_agent_service,
enable => false, enable => false,
ensure => stopped, ensure => stopped,
} }
@ -81,13 +81,13 @@ class quantum::agents::metadata (
::corosync::cleanup { $res_name: } ::corosync::cleanup { $res_name: }
::Corosync::Cleanup["$res_name"] -> Service[$res_name] ::Corosync::Cleanup["$res_name"] -> Service[$res_name]
File<| title=='quantum-logging.conf' |> -> File<| title=='neutron-logging.conf' |> ->
cs_resource { "$res_name": cs_resource { "$res_name":
ensure => present, ensure => present,
cib => $cib_name, cib => $cib_name,
primitive_class => 'ocf', primitive_class => 'ocf',
provided_by => 'mirantis', provided_by => 'mirantis',
primitive_type => 'quantum-agent-metadata', primitive_type => 'neutron-agent-metadata',
parameters => { parameters => {
#'nic' => $vip[nic], #'nic' => $vip[nic],
#'ip' => $vip[ip], #'ip' => $vip[ip],
@ -127,15 +127,15 @@ class quantum::agents::metadata (
provider => "pacemaker" provider => "pacemaker"
} }
Anchor['quantum-metadata-agent'] -> Anchor['neutron-metadata-agent'] ->
Quantum_metadata_agent_config<||> -> Neutron_metadata_agent_config<||> ->
File['quantum-metadata-agent-ocf'] -> File['neutron-metadata-agent-ocf'] ->
Service['quantum-metadata-agent__disabled'] -> Service['neutron-metadata-agent__disabled'] ->
Cs_resource["$res_name"] -> Cs_resource["$res_name"] ->
Service["$res_name"] -> Service["$res_name"] ->
Anchor['quantum-metadata-agent-done'] Anchor['neutron-metadata-agent-done']
} }
anchor {'quantum-metadata-agent-done': } anchor {'neutron-metadata-agent-done': }
} }
# vim: set ts=2 sw=2 et : # vim: set ts=2 sw=2 et :

View File

@ -0,0 +1,220 @@
class neutron::agents::ovs (
$neutron_config = {},
$service_provider = 'generic'
#$bridge_uplinks = ['br-ex:eth2'],
#$bridge_mappings = ['physnet1:br-ex'],
#$integration_bridge = 'br-int',
#$enable_tunneling = true,
) {
include 'neutron::params'
include 'neutron::waist_setup'
if defined(Anchor['neutron-plugin-ovs-done']) {
# install neutron-ovs-agent at the same host where
# neutron-server + neutron-ovs-plugin
Anchor['neutron-plugin-ovs-done'] -> Anchor['neutron-ovs-agent']
}
if defined(Anchor['neutron-server-done']) {
Anchor['neutron-server-done'] -> Anchor['neutron-ovs-agent']
}
anchor {'neutron-ovs-agent': }
if $::operatingsystem == 'Ubuntu' {
if $service_provider == 'pacemaker' {
file { "/etc/init/neutron-plugin-openvswitch-agent.override":
replace => "no",
ensure => "present",
content => "manual",
mode => 644,
before => Package['neutron-plugin-ovs-agent'],
}
}
}
if $::neutron::params::ovs_agent_package {
Package['neutron'] -> Package['neutron-plugin-ovs-agent']
$ovs_agent_package = 'neutron-plugin-ovs-agent'
package { 'neutron-plugin-ovs-agent':
name => $::neutron::params::ovs_agent_package,
}
} else {
$ovs_agent_package = $::neutron::params::ovs_server_package
}
if !defined(Anchor['neutron-server-done']) {
# if the anchor is defined, this dependency has already been declared
Package[$ovs_agent_package] -> Neutron_plugin_ovs <| |>
}
l23network::l2::bridge { $neutron_config['L2']['integration_bridge']:
external_ids => "bridge-id=${neutron_config['L2']['integration_bridge']}",
ensure => present,
skip_existing => true,
}
if $neutron_config['L2']['enable_tunneling'] {
L23network::L2::Bridge<| |> ->
Anchor['neutron-ovs-agent-done']
l23network::l2::bridge { $neutron_config['L2']['tunnel_bridge']:
external_ids => "bridge-id=${neutron_config['L2']['tunnel_bridge']}",
ensure => present,
skip_existing => true,
} ->
Anchor['neutron-ovs-agent-done']
neutron_plugin_ovs { 'OVS/local_ip': value => $neutron_config['L2']['local_ip']; }
} else {
L23network::L2::Bridge[$neutron_config['L2']['integration_bridge']] ->
Anchor['neutron-ovs-agent-done']
neutron::agents::utils::bridges { $neutron_config['L2']['phys_bridges']: } ->
Anchor['neutron-ovs-agent-done']
}
#Quantum_config <| |> ~> Service['quantum-ovs-agent']
#Quantum_plugin_ovs <| |> ~> Service['quantum-ovs-agent']
#Service <| title == 'quantum-server' |> -> Service['quantum-ovs-agent']
if $service_provider == 'pacemaker' {
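# HA mode: run the OVS agent as a cloned Pacemaker OCF resource on every controller; the init-provided service is stopped and disabled first.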
Neutron_config <| |> -> Cs_shadow['ovs']
Neutron_plugin_ovs <| |> -> Cs_shadow['ovs']
L23network::L2::Bridge <| |> -> Cs_shadow['ovs']
cs_shadow { 'ovs': cib => 'ovs' }
cs_commit { 'ovs': cib => 'ovs' }
::corosync::cleanup { "p_${::neutron::params::ovs_agent_service}": }
Cs_commit['ovs'] -> ::Corosync::Cleanup["p_${::neutron::params::ovs_agent_service}"]
Cs_commit['ovs'] ~> ::Corosync::Cleanup["p_${::neutron::params::ovs_agent_service}"]
::Corosync::Cleanup["p_${::neutron::params::ovs_agent_service}"] -> Service['neutron-ovs-agent']
# OCF script for pacemaker
# and its dependencies
file {'neutron-ovs-agent-ocf':
path=>'/usr/lib/ocf/resource.d/mirantis/neutron-agent-ovs',
mode => 755,
owner => root,
group => root,
source => "puppet:///modules/neutron/ocf/neutron-agent-ovs",
}
File['neutron-ovs-agent-ocf'] -> Cs_resource["p_${::neutron::params::ovs_agent_service}"]
File<| title=='neutron-logging.conf' |> ->
cs_resource { "p_${::neutron::params::ovs_agent_service}":
ensure => present,
cib => 'ovs',
primitive_class => 'ocf',
provided_by => 'mirantis',
primitive_type => 'neutron-agent-ovs',
multistate_hash => {
'type' => 'clone',
},
ms_metadata => {
'interleave' => 'true',
},
parameters => {
},
operations => {
'monitor' => {
'interval' => '20',
'timeout' => '30'
},
'start' => {
'timeout' => '480'
},
'stop' => {
'timeout' => '480'
}
},
}
case $::osfamily {
/(?i)redhat/: {
$started_status = "is running"
}
/(?i)debian/: {
$started_status = "start/running"
}
default: { fail("The $::osfamily operating system is not supported.") }
}
service { 'neutron-ovs-agent_stopped':
name => $::neutron::params::ovs_agent_service,
enable => false,
ensure => stopped,
hasstatus => false,
hasrestart => false
}
if $::osfamily =~ /(?i)debian/ {
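# The init script does not always stop the agent cleanly, so fall back to killing the process directly.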
exec { 'neutron-ovs-agent_stopped':
#todo: rewrite as a script that either returns zero or waits until it can return zero
name => "bash -c \"service ${::neutron::params::ovs_agent_service} stop || ( kill `pgrep -f neutron-openvswitch-agent` || : )\"",
onlyif => "service ${::neutron::params::ovs_agent_service} status | grep \'${started_status}\'",
path => ['/usr/bin', '/usr/sbin', '/bin', '/sbin'],
returns => [0,""]
}
}
L23network::L2::Bridge<| |> ->
Package[$ovs_agent_package] ->
Service['neutron-ovs-agent_stopped'] ->
Exec<| title=='neutron-ovs-agent_stopped' |> ->
Cs_resource["p_${::neutron::params::ovs_agent_service}"] ->
Service['neutron-ovs-agent']
service { 'neutron-ovs-agent':
name => "p_${::neutron::params::ovs_agent_service}",
enable => true,
ensure => running,
hasstatus => true,
hasrestart => false,
provider => $service_provider,
}
} else {
# NON-HA mode
service { 'neutron-ovs-agent':
name => $::neutron::params::ovs_agent_service,
enable => true,
ensure => running,
hasstatus => true,
hasrestart => true,
provider => $::neutron::params::service_provider,
}
Neutron_config<||> ~> Service['neutron-ovs-agent']
Neutron_plugin_ovs<||> ~> Service['neutron-ovs-agent']
}
Neutron_config<||> -> Service['neutron-ovs-agent']
Neutron_plugin_ovs<||> -> Service['neutron-ovs-agent']
Class[neutron::waistline] -> Service['neutron-ovs-agent']
#todo: this service must be disabled if neutron-ovs-agent is managed by pacemaker
if $::osfamily == 'redhat' {
service { 'neutron-ovs-cleanup':
name => 'neutron-ovs-cleanup',
enable => true,
ensure => stopped, # !!! Warning !!!
hasstatus => false, # !!! 'stopped' is not a mistake
hasrestart => false, # !!! cleanup is a simple script that runs once at OS boot
}
Service['neutron-ovs-agent'] -> # this ordering is intentional:
Service['neutron-ovs-cleanup'] -> # the cleanup service runs after the agent.
Anchor['neutron-ovs-agent-done']
}
Anchor['neutron-ovs-agent'] ->
Service['neutron-ovs-agent'] ->
Anchor['neutron-ovs-agent-done']
anchor{'neutron-ovs-agent-done': }
Anchor['neutron-ovs-agent-done'] -> Anchor<| title=='neutron-l3' |>
Anchor['neutron-ovs-agent-done'] -> Anchor<| title=='neutron-dhcp-agent' |>
}
# vim: set ts=2 sw=2 et :

View File

@ -1,4 +1,4 @@
define quantum::agents::utils::bridges { define neutron::agents::utils::bridges {
$bridge = $name $bridge = $name
if !defined(L23network::L2::Bridge[$bridge]) { if !defined(L23network::L2::Bridge[$bridge]) {
l23network::l2::bridge {$bridge: l23network::l2::bridge {$bridge:

View File

@ -0,0 +1,11 @@
class neutron::client (
$package_ensure = present
) {
include 'neutron::params'
package { 'python-neutronclient':
name => $::neutron::params::client_package_name,
ensure => $package_ensure
}
}
# vim: set ts=2 sw=2 et :

View File

@ -1,18 +1,18 @@
# #
class quantum::db::mysql ( class neutron::db::mysql (
$password, $password,
$dbname = 'quantum', $dbname = 'neutron',
$user = 'quantum', $user = 'neutron',
$host = '127.0.0.1', $host = '127.0.0.1',
$allowed_hosts = undef, $allowed_hosts = undef,
$charset = 'latin1', $charset = 'latin1',
$cluster_id = 'localzone' $cluster_id = 'localzone'
) { ) {
Class['mysql::server'] -> Class['quantum::db::mysql'] Class['mysql::server'] -> Class['neutron::db::mysql']
if $::osfamily=="Debian"{ if $::osfamily=="Debian"{
Class['quantum::db::mysql']->Package['quantum-server'] Class['neutron::db::mysql']->Package['neutron-server']
} }
require 'mysql::python' require 'mysql::python'
@ -26,7 +26,7 @@ class quantum::db::mysql (
} }
if $allowed_hosts { if $allowed_hosts {
quantum::db::mysql::host_access { $allowed_hosts: neutron::db::mysql::host_access { $allowed_hosts:
user => $user, user => $user,
password => $password, password => $password,
database => $dbname, database => $dbname,

View File

@ -1,7 +1,7 @@
# #
# Used to grant access to the quantum mysql DB # Used to grant access to the neutron mysql DB
# #
define quantum::db::mysql::host_access ($user, $password, $database) { define neutron::db::mysql::host_access ($user, $password, $database) {
database_user { "${user}@${name}": database_user { "${user}@${name}":
password_hash => mysql_password($password), password_hash => mysql_password($password),
provider => 'mysql', provider => 'mysql',

View File

@ -3,36 +3,36 @@
# [syslog_log_facility] Facility for syslog, if used. Optional. # [syslog_log_facility] Facility for syslog, if used. Optional.
# [syslog_log_level] logging level for non verbose and non debug mode. Optional. # [syslog_log_level] logging level for non verbose and non debug mode. Optional.
# #
class quantum ( class neutron (
$quantum_config = {}, $neutron_config = {},
$enabled = true, $enabled = true,
$verbose = 'False', $verbose = 'False',
$debug = 'False', $debug = 'False',
$core_plugin = 'quantum.plugins.openvswitch.ovs_quantum_plugin.OVSQuantumPluginV2', $core_plugin = 'neutron.plugins.openvswitch.ovs_neutron_plugin.OVSNeutronPluginV2',
$auth_strategy = 'keystone', $auth_strategy = 'keystone',
$log_file = '/var/log/quantum/server.log', $log_file = '/var/log/neutron/server.log',
$log_dir = '/var/log/quantum', $log_dir = '/var/log/neutron',
$use_syslog = false, $use_syslog = false,
$syslog_log_facility = 'LOCAL4', $syslog_log_facility = 'LOCAL4',
$syslog_log_level = 'WARNING', $syslog_log_level = 'WARNING',
$server_ha_mode = false, $server_ha_mode = false,
) { ) {
include 'quantum::params' include 'neutron::params'
anchor {'quantum-init':} anchor {'neutron-init':}
if ! defined(File['/etc/quantum']) { if ! defined(File['/etc/neutron']) {
file {'/etc/quantum': file {'/etc/neutron':
ensure => directory, ensure => directory,
owner => 'root', owner => 'root',
group => 'root', group => 'root',
mode => 755, mode => 755,
#require => Package['quantum'] #require => Package['neutron']
} }
} }
package {'quantum': package {'neutron':
name => $::quantum::params::package_name, name => $::neutron::params::package_name,
ensure => present ensure => present
} }
@ -41,88 +41,88 @@ class quantum (
mode => 755, mode => 755,
owner => root, owner => root,
group => root, group => root,
source => "puppet:///modules/quantum/q-agent-cleanup.py", source => "puppet:///modules/neutron/q-agent-cleanup.py",
} }
file {'quantum-root': file {'neutron-root':
path => '/etc/sudoers.d/quantum-root', path => '/etc/sudoers.d/neutron-root',
mode => 600, mode => 600,
owner => root, owner => root,
group => root, group => root,
source => "puppet:///modules/quantum/quantum-root", source => "puppet:///modules/neutron/neutron-root",
before => Package['quantum'], before => Package['neutron'],
} }
file {'/var/cache/quantum': file {'/var/cache/neutron':
ensure => directory, ensure => directory,
path => '/var/cache/quantum', path => '/var/cache/neutron',
mode => 755, mode => 755,
owner => quantum, owner => neutron,
group => quantum, group => neutron,
} }
case $quantum_config['amqp']['provider'] { case $neutron_config['amqp']['provider'] {
'rabbitmq': { 'rabbitmq': {
quantum_config { neutron_config {
'DEFAULT/rpc_backend': value => 'quantum.openstack.common.rpc.impl_kombu'; 'DEFAULT/rpc_backend': value => 'neutron.openstack.common.rpc.impl_kombu';
'DEFAULT/rabbit_userid': value => $quantum_config['amqp']['username']; 'DEFAULT/rabbit_userid': value => $neutron_config['amqp']['username'];
'DEFAULT/rabbit_password': value => $quantum_config['amqp']['passwd']; 'DEFAULT/rabbit_password': value => $neutron_config['amqp']['passwd'];
'DEFAULT/rabbit_virtual_host': value => $quantum_config['amqp']['rabbit_virtual_host']; 'DEFAULT/rabbit_virtual_host': value => $neutron_config['amqp']['rabbit_virtual_host'];
} }
if $quantum_config['amqp']['ha_mode'] { if $neutron_config['amqp']['ha_mode'] {
quantum_config { neutron_config {
'DEFAULT/rabbit_ha_queues': value => 'True'; 'DEFAULT/rabbit_ha_queues': value => 'True';
'DEFAULT/rabbit_hosts': value => $quantum_config['amqp']['hosts']; 'DEFAULT/rabbit_hosts': value => $neutron_config['amqp']['hosts'];
'DEFAULT/rabbit_host': ensure => absent; 'DEFAULT/rabbit_host': ensure => absent;
'DEFAULT/rabbit_port': ensure => absent; 'DEFAULT/rabbit_port': ensure => absent;
} }
} else { } else {
quantum_config { neutron_config {
'DEFAULT/rabbit_ha_queues': value => 'False'; 'DEFAULT/rabbit_ha_queues': value => 'False';
'DEFAULT/rabbit_hosts': ensure => absent; 'DEFAULT/rabbit_hosts': ensure => absent;
'DEFAULT/rabbit_host': value => $quantum_config['amqp']['hosts']; 'DEFAULT/rabbit_host': value => $neutron_config['amqp']['hosts'];
'DEFAULT/rabbit_port': value => $quantum_config['amqp']['port']; 'DEFAULT/rabbit_port': value => $neutron_config['amqp']['port'];
} }
} }
} }
'qpid', 'qpid-rh': { 'qpid', 'qpid-rh': {
quantum_config { neutron_config {
'DEFAULT/rpc_backend': value => 'quantum.openstack.common.rpc.impl_qpid'; 'DEFAULT/rpc_backend': value => 'neutron.openstack.common.rpc.impl_qpid';
'DEFAULT/qpid_hosts': value => $quantum_config['amqp']['hosts']; 'DEFAULT/qpid_hosts': value => $neutron_config['amqp']['hosts'];
'DEFAULT/qpid_port': value => $quantum_config['amqp']['port']; 'DEFAULT/qpid_port': value => $neutron_config['amqp']['port'];
'DEFAULT/qpid_username': value => $quantum_config['amqp']['username']; 'DEFAULT/qpid_username': value => $neutron_config['amqp']['username'];
'DEFAULT/qpid_password': value => $quantum_config['amqp']['passwd']; 'DEFAULT/qpid_password': value => $neutron_config['amqp']['passwd'];
} }
} }
} }
if $server_ha_mode { if $server_ha_mode {
$server_bind_host = $quantum_config['server']['bind_host'] $server_bind_host = $neutron_config['server']['bind_host']
} else { } else {
$server_bind_host = '0.0.0.0' $server_bind_host = '0.0.0.0'
} }
quantum_config { neutron_config {
'DEFAULT/verbose': value => $verbose; 'DEFAULT/verbose': value => $verbose;
'DEFAULT/debug': value => $debug; 'DEFAULT/debug': value => $debug;
'DEFAULT/auth_strategy': value => $auth_strategy; 'DEFAULT/auth_strategy': value => $auth_strategy;
'DEFAULT/core_plugin': value => $core_plugin; 'DEFAULT/core_plugin': value => $core_plugin;
'DEFAULT/bind_host': value => $server_bind_host; 'DEFAULT/bind_host': value => $server_bind_host;
'DEFAULT/bind_port': value => $quantum_config['server']['bind_port']; 'DEFAULT/bind_port': value => $neutron_config['server']['bind_port'];
'DEFAULT/base_mac': value => $quantum_config['L2']['base_mac']; 'DEFAULT/base_mac': value => $neutron_config['L2']['base_mac'];
'DEFAULT/mac_generation_retries': value => $quantum_config['L2']['mac_generation_retries']; 'DEFAULT/mac_generation_retries': value => $neutron_config['L2']['mac_generation_retries'];
'DEFAULT/dhcp_lease_duration': value => $quantum_config['L3']['dhcp_agent']['lease_duration']; 'DEFAULT/dhcp_lease_duration': value => $neutron_config['L3']['dhcp_agent']['lease_duration'];
'DEFAULT/allow_bulk': value => $quantum_config['server']['allow_bulk']; 'DEFAULT/allow_bulk': value => $neutron_config['server']['allow_bulk'];
'DEFAULT/allow_overlapping_ips': value => $quantum_config['L3']['allow_overlapping_ips']; 'DEFAULT/allow_overlapping_ips': value => $neutron_config['L3']['allow_overlapping_ips'];
'DEFAULT/control_exchange': value => $quantum_config['server']['control_exchange']; 'DEFAULT/control_exchange': value => $neutron_config['server']['control_exchange'];
'DEFAULT/network_auto_schedule': value => $quantum_config['L3']['network_auto_schedule']; 'DEFAULT/network_auto_schedule': value => $neutron_config['L3']['network_auto_schedule'];
'DEFAULT/router_auto_schedule': value => $quantum_config['L3']['router_auto_schedule']; 'DEFAULT/router_auto_schedule': value => $neutron_config['L3']['router_auto_schedule'];
'DEFAULT/agent_down_time': value => $quantum_config['server']['agent_down_time']; 'DEFAULT/agent_down_time': value => $neutron_config['server']['agent_down_time'];
'keystone_authtoken/auth_host': value => $quantum_config['keystone']['auth_host']; 'keystone_authtoken/auth_host': value => $neutron_config['keystone']['auth_host'];
'keystone_authtoken/auth_port': value => $quantum_config['keystone']['auth_port']; 'keystone_authtoken/auth_port': value => $neutron_config['keystone']['auth_port'];
'keystone_authtoken/auth_url': value => $quantum_config['keystone']['auth_url']; 'keystone_authtoken/auth_url': value => $neutron_config['keystone']['auth_url'];
'keystone_authtoken/admin_tenant_name': value => $quantum_config['keystone']['admin_tenant_name']; 'keystone_authtoken/admin_tenant_name': value => $neutron_config['keystone']['admin_tenant_name'];
'keystone_authtoken/admin_user': value => $quantum_config['keystone']['admin_user']; 'keystone_authtoken/admin_user': value => $neutron_config['keystone']['admin_user'];
'keystone_authtoken/admin_password': value => $quantum_config['keystone']['admin_password']; 'keystone_authtoken/admin_password': value => $neutron_config['keystone']['admin_password'];
} }
# logging for agents grabbing from stderr. It's a workaround for a bug in quantum-logging # logging for agents grabbing from stderr. It's a workaround for a bug in quantum-logging
# server gives these parameters from the command line # server gives these parameters from the command line
@ -134,74 +134,74 @@ class quantum (
# quantum-ovs/metadata/l3/dhcp/-agents: # quantum-ovs/metadata/l3/dhcp/-agents:
# daemon --user quantum --pidfile $pidfile "$exec --config-file /etc/$proj/$proj.conf --config-file $config &>>/var/log/$proj/$plugin.log & echo \$! > $pidfile" # daemon --user quantum --pidfile $pidfile "$exec --config-file /etc/$proj/$proj.conf --config-file $config &>>/var/log/$proj/$plugin.log & echo \$! > $pidfile"
quantum_config { neutron_config {
'DEFAULT/log_file': ensure=> absent; 'DEFAULT/log_file': ensure=> absent;
'DEFAULT/logfile': ensure=> absent; 'DEFAULT/logfile': ensure=> absent;
} }
if $use_syslog and !$debug =~ /(?i)(true|yes)/ { if $use_syslog and !$debug =~ /(?i)(true|yes)/ {
quantum_config { neutron_config {
'DEFAULT/log_dir': ensure=> absent; 'DEFAULT/log_dir': ensure=> absent;
'DEFAULT/logdir': ensure=> absent; 'DEFAULT/logdir': ensure=> absent;
'DEFAULT/log_config': value => "/etc/quantum/logging.conf"; 'DEFAULT/log_config': value => "/etc/neutron/logging.conf";
'DEFAULT/use_stderr': ensure=> absent; 'DEFAULT/use_stderr': ensure=> absent;
'DEFAULT/use_syslog': value=> true; 'DEFAULT/use_syslog': value=> true;
'DEFAULT/syslog_log_facility': value=> $syslog_log_facility; 'DEFAULT/syslog_log_facility': value=> $syslog_log_facility;
} }
file { "quantum-logging.conf": file { "neutron-logging.conf":
content => template('quantum/logging.conf.erb'), content => template('neutron/logging.conf.erb'),
path => "/etc/quantum/logging.conf", path => "/etc/neutron/logging.conf",
owner => "root", owner => "root",
group => "quantum", group => "neutron",
mode => 640, mode => 640,
} }
} else { } else {
quantum_config { neutron_config {
# logging for agents grabbing from stderr. It's a workaround for a bug in quantum-logging # logging for agents grabbing from stderr. It's a workaround for a bug in neutron-logging
'DEFAULT/use_syslog': ensure=> absent; 'DEFAULT/use_syslog': ensure=> absent;
'DEFAULT/syslog_log_facility': ensure=> absent; 'DEFAULT/syslog_log_facility': ensure=> absent;
'DEFAULT/log_config': ensure=> absent; 'DEFAULT/log_config': ensure=> absent;
# FIXME stderr should not be used unless quantum+agents init & OCF scripts would be fixed to redirect its output to stderr! # FIXME stderr should not be used unless neutron+agents init & OCF scripts would be fixed to redirect its output to stderr!
#'DEFAULT/use_stderr': value => true; #'DEFAULT/use_stderr': value => true;
'DEFAULT/use_stderr': ensure=> absent; 'DEFAULT/use_stderr': ensure=> absent;
'DEFAULT/log_dir': value => $log_dir; 'DEFAULT/log_dir': value => $log_dir;
} }
file { "quantum-logging.conf": file { "neutron-logging.conf":
content => template('quantum/logging.conf-nosyslog.erb'), content => template('neutron/logging.conf-nosyslog.erb'),
path => "/etc/quantum/logging.conf", path => "/etc/neutron/logging.conf",
owner => "root", owner => "root",
group => "quantum", group => "neutron",
mode => 640, mode => 640,
} }
} }
# We must setup logging before start services under pacemaker # We must setup logging before start services under pacemaker
File['quantum-logging.conf'] -> Service<| title == "$::quantum::params::server_service" |> File['neutron-logging.conf'] -> Service<| title == "$::neutron::params::server_service" |>
File['quantum-logging.conf'] -> Anchor<| title == 'quantum-ovs-agent' |> File['neutron-logging.conf'] -> Anchor<| title == 'neutron-ovs-agent' |>
File['quantum-logging.conf'] -> Anchor<| title == 'quantum-l3' |> File['neutron-logging.conf'] -> Anchor<| title == 'neutron-l3' |>
File['quantum-logging.conf'] -> Anchor<| title == 'quantum-dhcp-agent' |> File['neutron-logging.conf'] -> Anchor<| title == 'neutron-dhcp-agent' |>
File <| title=='/etc/quantum' |> -> File <| title=='quantum-logging.conf' |> File <| title=='/etc/neutron' |> -> File <| title=='neutron-logging.conf' |>
if defined(Anchor['quantum-server-config-done']) { if defined(Anchor['neutron-server-config-done']) {
$endpoint_quantum_main_configuration = 'quantum-server-config-done' $endpoint_neutron_main_configuration = 'neutron-server-config-done'
} else { } else {
$endpoint_quantum_main_configuration = 'quantum-init-done' $endpoint_neutron_main_configuration = 'neutron-init-done'
} }
# FIXME Workaround for FUEL-842: remove explicit --log-config from init scripts because it breaks logging! # FIXME Workaround for FUEL-842: remove explicit --log-config from init scripts because it breaks logging!
# FIXME this hack should be deleted after FUEL-842 has been resolved # FIXME this hack should be deleted after FUEL-842 has been resolved
exec {'init-dirty-hack': exec {'init-dirty-hack':
command => "sed -i 's/\-\-log\-config=\$loggingconf//g' /etc/init.d/quantum-*", command => "sed -i 's/\-\-log\-config=\$loggingconf//g' /etc/init.d/neutron-*",
path => ["/sbin", "/bin", "/usr/sbin", "/usr/bin"], path => ["/sbin", "/bin", "/usr/sbin", "/usr/bin"],
} }
Anchor['quantum-init'] -> Anchor['neutron-init'] ->
Package['quantum'] -> Package['neutron'] ->
Exec['init-dirty-hack'] -> Exec['init-dirty-hack'] ->
File['/var/cache/quantum'] -> File['/var/cache/neutron'] ->
Quantum_config<||> -> Neutron_config<||> ->
Quantum_api_config<||> -> Neutron_api_config<||> ->
Anchor[$endpoint_quantum_main_configuration] Anchor[$endpoint_neutron_main_configuration]
anchor {'quantum-init-done':} anchor {'neutron-init-done':}
} }
# vim: set ts=2 sw=2 et : # vim: set ts=2 sw=2 et :

View File

@ -0,0 +1,39 @@
class neutron::keystone::auth (
$neutron_config = {},
$configure_endpoint = true,
$service_type = 'network',
$public_address = '127.0.0.1',
$admin_address = '127.0.0.1',
$internal_address = '127.0.0.1',
) {
keystone_user { $neutron_config['keystone']['admin_user']:
ensure => present,
password => $neutron_config['keystone']['admin_password'],
email => $neutron_config['keystone']['admin_email'],
tenant => $neutron_config['keystone']['admin_tenant_name'],
}
keystone_user_role { "${neutron_config['keystone']['admin_user']}@services":
ensure => present,
roles => 'admin',
}
Keystone_user_role["${neutron_config['keystone']['admin_user']}@services"] ~> Service <| name == 'neutron-server' |>
keystone_service { $neutron_config['keystone']['admin_user']:
ensure => present,
type => $service_type,
description => "Neutron Networking Service",
}
if $configure_endpoint {
# keystone_endpoint { "${region}/$neutron_config['keystone']['admin_user']":
keystone_endpoint { $neutron_config['keystone']['admin_user']:
region => $neutron_config['keystone']['auth_region'],
ensure => present,
public_url => "http://${public_address}:${neutron_config['server']['bind_port']}",
admin_url => "http://${admin_address}:${neutron_config['server']['bind_port']}",
internal_url => "http://${internal_address}:${neutron_config['server']['bind_port']}",
}
}
}

View File

@ -0,0 +1,17 @@
class neutron::network::predefined_netwoks (
$neutron_config = {},
) {
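# Build the networks, subnets and routers described in $neutron_config, then size the admin floating IP pool.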
create_predefined_networks_and_routers($neutron_config)
Keystone_user_role<| title=="$auth_user@$auth_tenant"|> -> Neutron_net<| |>
Service <| title == 'keystone' |> -> Neutron_net <| |>
Anchor['neutron-plugin-ovs-done'] -> Neutron_net <| |>
neutron_floatingip_pool{'admin':
pool_size => get_floatingip_pool_size_for_admin($neutron_config)
}
Neutron_net<||> -> Neutron_floatingip_pool<||>
Neutron_subnet<||> -> Neutron_floatingip_pool<||>
Neutron_router<||> -> Neutron_floatingip_pool<||>
}
# vim: set ts=2 sw=2 et :

View File

@ -0,0 +1,25 @@
#
# Use Case: Provider Router with Private Networks
#
define neutron::network::provider_router (
$neutron_config = {},
$router_subnets = undef,
$router_extnet = undef
) {
Neutron_subnet <| |> -> Neutron_router <| |>
Service <| title == 'keystone' |> -> Neutron_router <| |>
# create router
neutron_router { $title:
ensure => present,
neutron_config => $neutron_config,
int_subnets => $router_subnets,
ext_net => $router_extnet,
tenant => $neutron_config['keystone']['admin_tenant_name'],
auth_url => $neutron_config['keystone']['auth_url'],
auth_user => $neutron_config['keystone']['admin_user'],
auth_password => $neutron_config['keystone']['admin_password'],
auth_tenant => $neutron_config['keystone']['admin_tenant_name'],
}
}
# vim: set ts=2 sw=2 et :

View File

@ -1,7 +1,7 @@
# #
# Use Case: Provider Router with Private Networks # Use Case: Provider Router with Private Networks
# #
define quantum::network::setup ( define neutron::network::setup (
$tenant_name = 'admin', $tenant_name = 'admin',
$physnet = undef, $physnet = undef,
$network_type = 'gre', $network_type = 'gre',
@ -16,11 +16,11 @@ define quantum::network::setup (
$shared = 'False', $shared = 'False',
) { ) {
Quantum_net<||> -> Quantum_subnet<||> Neutron_net<||> -> Neutron_subnet<||>
Service <| title == 'keystone' |> -> Quantum_net <| |> Service <| title == 'keystone' |> -> Neutron_net <| |>
Service <| title == 'keystone' |> -> Quantum_subnet <| |> Service <| title == 'keystone' |> -> Neutron_subnet <| |>
# create network # create network
quantum_net { $title: neutron_net { $title:
ensure => present, ensure => present,
tenant => $tenant_name, tenant => $tenant_name,
physnet => $physnet, physnet => $physnet,
@ -38,7 +38,7 @@ define quantum::network::setup (
} }
# create subnet # create subnet
quantum_subnet { $subnet_name: neutron_subnet { $subnet_name:
ensure => present, ensure => present,
tenant => $tenant_name, tenant => $tenant_name,
cidr => $subnet_cidr, cidr => $subnet_cidr,

View File

@ -0,0 +1,83 @@
class neutron::params {
case $::osfamily {
'Debian', 'Ubuntu': {
$package_name = 'neutron-common'
$server_package = 'neutron-server'
$server_service = 'neutron-server'
$ovs_agent_package = 'neutron-plugin-openvswitch-agent'
$ovs_agent_service = 'neutron-plugin-openvswitch-agent'
$ovs_server_package = 'neutron-plugin-openvswitch'
$ovs_cleanup_service = false
$dhcp_agent_package = 'neutron-dhcp-agent'
$dhcp_agent_service = 'neutron-dhcp-agent'
$dnsmasq_packages = ['dnsmasq-base', 'dnsmasq-utils']
$isc_dhcp_packages = ['isc-dhcp-server']
$l3_agent_package = 'neutron-l3-agent'
$l3_agent_service = 'neutron-l3-agent'
$linuxbridge_agent_package = 'neutron-plugin-linuxbridge-agent'
$linuxbridge_agent_service = 'neutron-plugin-linuxbridge-agent'
$linuxbridge_server_package = 'neutron-plugin-linuxbridge'
$linuxbridge_config_file = '/etc/neutron/plugins/linuxbridge/linuxbridge_conf.ini'
$metadata_agent_package = 'neutron-metadata-agent'
$metadata_agent_service = 'neutron-metadata-agent'
$cliff_package = 'python-cliff'
$kernel_headers = "linux-headers-${::kernelrelease}"
$python_path = 'python2.7/dist-packages'
$cidr_package = 'ipcalc'
$vlan_package = 'vlan'
case $::operatingsystem {
'Debian': {
$service_provider = undef
}
default: {
$service_provider = 'upstart'
}
}
}
'RedHat': {
$package_name = 'openstack-neutron'
$server_package = false
$server_service = 'neutron-server'
$ovs_agent_package = false
$ovs_agent_service = 'neutron-openvswitch-agent'
$ovs_server_package = 'openstack-neutron-openvswitch'
$dhcp_agent_package = false
$dhcp_agent_service = 'neutron-dhcp-agent'
$dnsmasq_packages = ['dnsmasq', 'dnsmasq-utils']
$isc_dhcp_packages = ['dhcp']
$l3_agent_package = false
$l3_agent_service = 'neutron-l3-agent'
$cliff_package = 'python-cliff'
$kernel_headers = "linux-headers-${::kernelrelease}"
$python_path = 'python2.6/site-packages'
$cidr_package = "whatmask"
$vlan_package = 'vconfig'
$service_provider = undef
$linuxbridge_agent_package = 'openstack-neutron-linuxbridge'
$linuxbridge_agent_service = 'neutron-linuxbridge-agent'
$linuxbridge_server_package = 'openstack-neutron-linuxbridge'
$linuxbridge_config_file = '/etc/neutron/plugins/linuxbridge/linuxbridge_conf.ini'
$metadata_agent_service = 'neutron-metadata-agent'
}
}
}

View File

@ -0,0 +1,102 @@
class neutron::plugins::ovs (
$neutron_config = {},
) {
# todo: Remove plugin section, add plugin to server class
include 'neutron::params'
include 'l23network::params'
Anchor<| title=='neutron-server-config-done' |> ->
Anchor['neutron-plugin-ovs']
Anchor['neutron-plugin-ovs-done'] ->
Anchor<| title=='neutron-server-done' |>
anchor {'neutron-plugin-ovs':}
Neutron_plugin_ovs<||> ~> Service<| title == 'neutron-server' |>
# not needed: the agent starts after the server
# Quantum_plugin_ovs<||> ~> Service<| title == 'neutron-ovs-agent' |>
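# Select the Python database bindings for the configured backend.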
case $neutron_config['database']['provider'] {
/(?i)mysql/: {
require 'mysql::python'
}
/(?i)postgresql/: {
$backend_package = 'python-psycopg2'
}
/(?i)sqlite/: {
$backend_package = 'python-pysqlite2'
}
default: {
fail('Unsupported backend configured')
}
}
if ! defined(File['/etc/neutron']) {
file {'/etc/neutron':
ensure => directory,
owner => 'root',
group => 'root',
mode => '0755',
}
}
package { 'neutron-plugin-ovs':
name => $::neutron::params::ovs_server_package,
} ->
File['/etc/neutron'] ->
file {'/etc/neutron/plugins':
ensure => directory,
mode => '0755',
} ->
file {'/etc/neutron/plugins/openvswitch':
ensure => directory,
mode => '0755',
} ->
file { '/etc/neutron/plugin.ini':
ensure => link,
target => '/etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini',
}
neutron_plugin_ovs {
'DATABASE/sql_connection': value => $neutron_config['database']['url'];
'DATABASE/sql_max_retries': value => $neutron_config['database']['reconnects'];
'DATABASE/reconnect_interval': value => $neutron_config['database']['reconnect_interval'];
} ->
neutron_plugin_ovs {
'OVS/integration_bridge': value => $neutron_config['L2']['integration_bridge'];
'OVS/tenant_network_type': value => $neutron_config['L2']['segmentation_type'];
'OVS/enable_tunneling': value => $neutron_config['L2']['enable_tunneling'];
'AGENT/polling_interval': value => $neutron_config['polling_interval'];
'AGENT/root_helper': value => $neutron_config['root_helper'];
}
if $neutron_config['L2']['enable_tunneling'] {
neutron_plugin_ovs {
'OVS/tunnel_bridge': value => $neutron_config['L2']['tunnel_bridge'];
'OVS/tunnel_id_ranges': value => $neutron_config['L2']['tunnel_id_ranges'];
'OVS/network_vlan_ranges': value => join(keys($neutron_config['L2']['phys_nets']), ','); # do not believe the OpenStack documentation here!!!
'OVS/bridge_mappings': value => $neutron_config['L2']['bridge_mappings'];
#todo: remove ext_net from the mappings. This affects Neutron.
}
} else {
neutron_plugin_ovs {
'OVS/network_vlan_ranges': value => $neutron_config['L2']['network_vlan_ranges'];
'OVS/bridge_mappings': value => $neutron_config['L2']['bridge_mappings'];
'OVS/tunnel_bridge': ensure => absent;
'OVS/tunnel_id_ranges': ensure => absent;
}
}
File['/etc/neutron/plugin.ini'] ->
Neutron_plugin_ovs<||> ->
Anchor<| title=='neutron-server-config-done' |>
File['/etc/neutron/plugin.ini'] ->
Anchor['neutron-plugin-ovs-done']
Anchor['neutron-plugin-ovs'] -> Anchor['neutron-plugin-ovs-done']
anchor {'neutron-plugin-ovs-done':}
}
# vim: set ts=2 sw=2 et :

View File

@ -0,0 +1,94 @@
#
class neutron::server (
$neutron_config = {},
$primary_controller = false,
$package_ensure = present,
$service_provider = 'generic',
) {
include 'neutron::params'
require 'keystone::python'
Anchor['neutron-init-done'] ->
Anchor['neutron-server']
anchor {'neutron-server':}
if $::operatingsystem == 'Ubuntu' {
if $service_provider == 'pacemaker' {
file { "/etc/init/neutron-metadata-agent.override":
replace => "no",
ensure => "present",
content => "manual",
mode => 644,
before => Package['neutron-server'],
}
}
}
if $::neutron::params::server_package {
$server_package = 'neutron-server'
package {$server_package:
name => $::neutron::params::server_package,
ensure => $package_ensure
}
} else {
$server_package = 'neutron'
}
Package[$server_package] -> Neutron_config<||>
Package[$server_package] -> Neutron_api_config<||>
if defined(Anchor['neutron-plugin-ovs']) {
Package["$server_package"] -> Anchor['neutron-plugin-ovs']
}
Neutron_config<||> ~> Service['neutron-server']
Neutron_api_config<||> ~> Service['neutron-server']
neutron_api_config {
'filter:authtoken/auth_url': value => $neutron_config['keystone']['auth_url'];
'filter:authtoken/auth_host': value => $neutron_config['keystone']['auth_host'];
'filter:authtoken/auth_port': value => $neutron_config['keystone']['auth_port'];
'filter:authtoken/admin_tenant_name': value => $neutron_config['keystone']['admin_tenant_name'];
'filter:authtoken/admin_user': value => $neutron_config['keystone']['admin_user'];
'filter:authtoken/admin_password': value => $neutron_config['keystone']['admin_password'];
}
File<| title=='neutron-logging.conf' |> ->
service {'neutron-server':
name => $::neutron::params::server_service,
ensure => running,
enable => true,
hasstatus => true,
hasrestart => true,
provider => $::neutron::params::service_provider,
}
Anchor['neutron-server'] ->
Neutron_config<||> ->
Neutron_api_config<||> ->
Anchor['neutron-server-config-done'] ->
Service['neutron-server'] ->
Anchor['neutron-server-done']
# if defined(Anchor['neutron-plugin-ovs-done']) {
# Anchor['neutron-server-config-done'] ->
# Anchor['neutron-plugin-ovs-done'] ->
# Anchor['neutron-server-done']
# }
anchor {'neutron-server-config-done':}
if $primary_controller {
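# Predefined networks are created only on the primary controller, once neutron-server is running.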
Anchor['neutron-server-config-done'] ->
class { 'neutron::network::predefined_netwoks':
neutron_config => $neutron_config,
} -> Anchor['neutron-server-done']
Service['neutron-server'] -> Class['neutron::network::predefined_netwoks']
}
anchor {'neutron-server-done':}
Anchor['neutron-server'] -> Anchor['neutron-server-done']
}
# vim: set ts=2 sw=2 et :

View File

@ -0,0 +1,34 @@
class neutron::waist_setup {
# pseudo class used as a divider between the upper and lower parts of the dependency graph
include 'neutron::waistline'
if ! defined(Package[python-amqp]) {
package { 'python-amqp':
ensure => present,
}
}
if ! defined(Package[python-keystoneclient]) {
package { 'python-keystoneclient':
ensure => present,
}
}
Package[python-amqp] -> Class[neutron::waistline]
Package[python-keystoneclient] -> Class[neutron::waistline]
Nova_config<||> -> Class[neutron::waistline]
if defined(Service[keystone]) {
Service[keystone] -> Class[neutron::waistline]
}
if defined(Service[haproxy]) {
Service[haproxy] -> Class[neutron::waistline]
Haproxy_service<||> -> Class[neutron::waistline]
}
if defined(Class[neutron]) {
Class[neutron] -> Class[neutron::waistline]
}
if defined(Service[mysql-galera]) {
Service[mysql-galera] -> Class[neutron::waistline]
}
}

View File

@ -1,3 +1,3 @@
class quantum::waistline { class neutron::waistline {
# pseudo class for divide up and down # pseudo class for divide up and down
} }

View File

@ -2,7 +2,7 @@ require 'spec_helper'
require 'json' require 'json'
require 'yaml' require 'yaml'
class QuantumNRConfig class NeutronNRConfig
def initialize(init_v) def initialize(init_v)
@def_v = {} @def_v = {}
@def_v.replace(init_v) @def_v.replace(init_v)
@ -12,7 +12,7 @@ class QuantumNRConfig
'username' => "nova", 'username' => "nova",
'passwd' => "nova", 'passwd' => "nova",
'hosts' => "#{@def_v[:management_vip]}:5672", 'hosts' => "#{@def_v[:management_vip]}:5672",
'control_exchange' => "quantum", 'control_exchange' => "neutron",
'heartbeat' => 60, 'heartbeat' => 60,
'protocol' => "tcp", 'protocol' => "tcp",
'rabbit_virtual_host' => "/", 'rabbit_virtual_host' => "/",
@ -22,9 +22,9 @@ class QuantumNRConfig
'provider' => "mysql", 'provider' => "mysql",
'host' => "#{@def_v[:management_vip]}", 'host' => "#{@def_v[:management_vip]}",
'port' => 3306, 'port' => 3306,
'database' => "quantum", 'database' => "neutron",
'username' => "quantum", 'username' => "neutron",
'passwd' => "quantum", 'passwd' => "neutron",
'reconnects' => -1, 'reconnects' => -1,
'reconnect_interval' => 2, 'reconnect_interval' => 2,
'url' => nil, 'url' => nil,
@ -37,10 +37,10 @@ class QuantumNRConfig
'auth_protocol' => "http", 'auth_protocol' => "http",
'auth_api_version' => "v2.0", 'auth_api_version' => "v2.0",
'admin_tenant_name' => "services", 'admin_tenant_name' => "services",
'admin_user' => "quantum", 'admin_user' => "neutron",
'admin_password' => "quantum_pass", 'admin_password' => "neutron_pass",
'admin_email' => "quantum@localhost", 'admin_email' => "neutron@localhost",
'signing_dir' => "/var/lib/quantum/keystone-signing", 'signing_dir' => "/var/lib/neutron/keystone-signing",
}, },
'server' => { 'server' => {
'api_url' => "http://#{@def_v[:management_vip]}:9696", 'api_url' => "http://#{@def_v[:management_vip]}:9696",
@ -49,7 +49,7 @@ class QuantumNRConfig
'bind_port' => 9696, 'bind_port' => 9696,
'agent_down_time' => 15, 'agent_down_time' => 15,
'allow_bulk' => true, 'allow_bulk' => true,
'control_exchange'=> 'quantum', 'control_exchange'=> 'neutron',
}, },
'metadata' => { 'metadata' => {
'nova_metadata_ip' => "#{@def_v[:management_vip]}", 'nova_metadata_ip' => "#{@def_v[:management_vip]}",
@ -141,7 +141,7 @@ class QuantumNRConfig
}, },
}, },
'polling_interval' => 2, 'polling_interval' => 2,
'root_helper' => "sudo quantum-rootwrap /etc/quantum/rootwrap.conf" 'root_helper' => "sudo neutron-rootwrap /etc/neutron/rootwrap.conf"
} }
@def_config['keystone']['auth_url'] = "http://#{@def_v[:management_vip]}:35357/v2.0" @def_config['keystone']['auth_url'] = "http://#{@def_v[:management_vip]}:35357/v2.0"
init_v.each() do |k,v| init_v.each() do |k,v|
@ -174,13 +174,13 @@ describe 'create_predefined_networks_and_routers' , :type => :puppet_function do
# @topscope = @scope.compiler.topscope # @topscope = @scope.compiler.topscope
# @scope.parent = @topscope # @scope.parent = @topscope
# Puppet::Parser::Functions.function(:create_resources) # Puppet::Parser::Functions.function(:create_resources)
@qnr_config = QuantumNRConfig.new({ @qnr_config = NeutronNRConfig.new({
:management_vip => '192.168.0.254', :management_vip => '192.168.0.254',
:management_ip => '192.168.0.11' :management_ip => '192.168.0.11'
}) })
# Puppet::Parser::Scope.any_instance.stubs(:function_get_network_role_property).with('management', 'ipaddr').returns(@q_config.get_def(:management_ip)) # Puppet::Parser::Scope.any_instance.stubs(:function_get_network_role_property).with('management', 'ipaddr').returns(@q_config.get_def(:management_ip))
@cfg = @qnr_config.get_def_config() @cfg = @qnr_config.get_def_config()
cfg_q = @cfg['quantum_settings'] cfg_q = @cfg['neutron_settings']
# @res_cfg = Marshal.load(Marshal.dump(cfg_q)) # @res_cfg = Marshal.load(Marshal.dump(cfg_q))
end end
@ -190,11 +190,11 @@ describe 'create_predefined_networks_and_routers' , :type => :puppet_function do
# it 'should calculate auth url if auth properties not given' do # it 'should calculate auth url if auth properties not given' do
# @cfg['quantum_settings']['keystone'] = {} # @cfg['neutron_settings']['keystone'] = {}
# subject.call([@cfg, 'quantum_settings'])['keystone']['auth_url'].should == "http://192.168.0.254:35357/v2.0" # subject.call([@cfg, 'neutron_settings'])['keystone']['auth_url'].should == "http://192.168.0.254:35357/v2.0"
# end # end
# it 'should calculate auth url if some auth properties given' do # it 'should calculate auth url if some auth properties given' do
# @cfg['quantum_settings']['keystone'] = { # @cfg['neutron_settings']['keystone'] = {
# 'auth_host' => "1.2.3.4", # 'auth_host' => "1.2.3.4",
# 'auth_port' => 666, # 'auth_port' => 666,
# 'auth_region' => 'RegionOne', # 'auth_region' => 'RegionOne',
@ -203,18 +203,18 @@ describe 'create_predefined_networks_and_routers' , :type => :puppet_function do
# 'admin_tenant_name' => "xxXXxx", # 'admin_tenant_name' => "xxXXxx",
# 'admin_user' => "user_q", # 'admin_user' => "user_q",
# 'admin_password' => "pass_q", # 'admin_password' => "pass_q",
# 'admin_email' => "test.quantum@localhost", # 'admin_email' => "test.neutron@localhost",
# } # }
# subject.call([@cfg, 'quantum_settings'])['keystone']['auth_url'].should == "https://1.2.3.4:666/v10.0" # subject.call([@cfg, 'neutron_settings'])['keystone']['auth_url'].should == "https://1.2.3.4:666/v10.0"
# end # end
# it 'enable_tunneling must be True if segmentation_type is GRE' do # it 'enable_tunneling must be True if segmentation_type is GRE' do
# @cfg['quantum_settings']['L2']['segmentation_type'] = 'gre' # @cfg['neutron_settings']['L2']['segmentation_type'] = 'gre'
# subject.call([@cfg, 'quantum_settings'])['L2']['enable_tunneling'].should == true # subject.call([@cfg, 'neutron_settings'])['L2']['enable_tunneling'].should == true
# end # end
# it 'enable_tunneling must be False if segmentation_type is VLAN' do # it 'enable_tunneling must be False if segmentation_type is VLAN' do
# @cfg['quantum_settings']['L2']['segmentation_type'] = 'vlan' # @cfg['neutron_settings']['L2']['segmentation_type'] = 'vlan'
# subject.call([@cfg, 'quantum_settings'])['L2']['enable_tunneling'].should == false # subject.call([@cfg, 'neutron_settings'])['L2']['enable_tunneling'].should == false
# end # end
end end

View File

@ -2,7 +2,7 @@ require 'spec_helper'
require 'json' require 'json'
require 'yaml' require 'yaml'
class QuantumNRConfig class NeutronNRConfig
def initialize(init_v) def initialize(init_v)
@def_v = {} @def_v = {}
@def_v.replace(init_v) @def_v.replace(init_v)
@ -59,7 +59,7 @@ describe 'create_floating_ips_for_admin' , :type => :puppet_function do
let(:scope) { PuppetlabsSpec::PuppetInternals.scope } let(:scope) { PuppetlabsSpec::PuppetInternals.scope }
before :each do before :each do
@qnr_config = QuantumNRConfig.new({ @qnr_config = NeutronNRConfig.new({
:management_vip => '192.168.0.254', :management_vip => '192.168.0.254',
:management_ip => '192.168.0.11' :management_ip => '192.168.0.11'
}) })
@ -72,7 +72,7 @@ describe 'create_floating_ips_for_admin' , :type => :puppet_function do
end end
it 'Must return 10' do it 'Must return 10' do
subject.call([@cfg, 'quantum_settings']).should == 10 subject.call([@cfg, 'neutron_settings']).should == 10
# [ # [
# '10.100.100.244', # '10.100.100.244',
# '10.100.100.245', # '10.100.100.245',
@ -90,12 +90,12 @@ describe 'create_floating_ips_for_admin' , :type => :puppet_function do
it 'Must return zero' do it 'Must return zero' do
@cfg['predefined_networks']['net04_ext']['L3']['floating'] = "10.100.100.250:10.100.100.254" @cfg['predefined_networks']['net04_ext']['L3']['floating'] = "10.100.100.250:10.100.100.254"
subject.call([@cfg, 'quantum_settings']).should == 0 #[] subject.call([@cfg, 'neutron_settings']).should == 0 #[]
end end
it 'Must return array of 3 ip address' do it 'Must return array of 3 ip address' do
@cfg['predefined_networks']['net04_ext']['L3']['floating'] = "10.100.100.247:10.100.100.254" @cfg['predefined_networks']['net04_ext']['L3']['floating'] = "10.100.100.247:10.100.100.254"
subject.call([@cfg, 'quantum_settings']).should == 3 #["10.100.100.252", "10.100.100.253", "10.100.100.254"] subject.call([@cfg, 'neutron_settings']).should == 3 #["10.100.100.252", "10.100.100.253", "10.100.100.254"]
end end
end end

View File

@ -4,7 +4,7 @@ require 'yaml'
#require 'puppet/parser/functions/lib/sanitize_bool_in_hash.rb' #require 'puppet/parser/functions/lib/sanitize_bool_in_hash.rb'
class QuantumConfig class NeutronConfig
def initialize(init_v) def initialize(init_v)
@def_v = {} @def_v = {}
@def_v.replace(init_v) @def_v.replace(init_v)
@ -39,13 +39,13 @@ class QuantumConfig
'rabbit' => { 'rabbit' => {
'password' => 'nova' 'password' => 'nova'
}, },
'quantum_settings' => { 'neutron_settings' => {
'amqp' => { 'amqp' => {
'provider' => "rabbitmq", 'provider' => "rabbitmq",
'username' => "nova", 'username' => "nova",
'passwd' => "nova", 'passwd' => "nova",
'hosts' => "#{@def_v[:management_vip]}:5672", 'hosts' => "#{@def_v[:management_vip]}:5672",
'control_exchange' => "quantum", 'control_exchange' => "neutron",
'heartbeat' => 60, 'heartbeat' => 60,
'protocol' => "tcp", 'protocol' => "tcp",
'rabbit_virtual_host' => "/", 'rabbit_virtual_host' => "/",
@ -55,9 +55,9 @@ class QuantumConfig
'provider' => "mysql", 'provider' => "mysql",
'host' => "#{@def_v[:management_vip]}", 'host' => "#{@def_v[:management_vip]}",
'port' => 3306, 'port' => 3306,
'database' => "quantum", 'database' => "neutron",
'username' => "quantum", 'username' => "neutron",
'passwd' => "quantum", 'passwd' => "neutron",
'reconnects' => -1, 'reconnects' => -1,
'reconnect_interval' => 2, 'reconnect_interval' => 2,
'url' => nil, 'url' => nil,
@ -70,10 +70,10 @@ class QuantumConfig
'auth_protocol' => "http", 'auth_protocol' => "http",
'auth_api_version' => "v2.0", 'auth_api_version' => "v2.0",
'admin_tenant_name' => "services", 'admin_tenant_name' => "services",
'admin_user' => "quantum", 'admin_user' => "neutron",
'admin_password' => "quantum_pass", 'admin_password' => "neutron_pass",
'admin_email' => "quantum@localhost", 'admin_email' => "neutron@localhost",
'signing_dir' => "/var/lib/quantum/keystone-signing", 'signing_dir' => "/var/lib/neutron/keystone-signing",
}, },
'server' => { 'server' => {
'api_url' => "http://#{@def_v[:management_vip]}:9696", 'api_url' => "http://#{@def_v[:management_vip]}:9696",
@ -82,7 +82,7 @@ class QuantumConfig
'bind_port' => 9696, 'bind_port' => 9696,
'agent_down_time' => 15, 'agent_down_time' => 15,
'allow_bulk' => true, 'allow_bulk' => true,
'control_exchange'=> 'quantum', 'control_exchange'=> 'neutron',
}, },
'metadata' => { 'metadata' => {
'nova_metadata_ip' => "#{@def_v[:management_vip]}", 'nova_metadata_ip' => "#{@def_v[:management_vip]}",
@ -177,10 +177,10 @@ class QuantumConfig
}, },
}, },
'polling_interval' => 2, 'polling_interval' => 2,
'root_helper' => "sudo quantum-rootwrap /etc/quantum/rootwrap.conf", 'root_helper' => "sudo neutron-rootwrap /etc/neutron/rootwrap.conf",
}, },
} }
@def_config['quantum_settings']['keystone']['auth_url'] = "http://#{@def_v[:management_vip]}:35357/v2.0" @def_config['neutron_settings']['keystone']['auth_url'] = "http://#{@def_v[:management_vip]}:35357/v2.0"
init_v.each() do |k,v| init_v.each() do |k,v|
@def_config[k.to_s()] = v @def_config[k.to_s()] = v
end end
@ -201,7 +201,7 @@ class QuantumConfig
end end
describe 'sanitize_quantum_config' , :type => :puppet_function do describe 'sanitize_neutron_config' , :type => :puppet_function do
let(:scope) { PuppetlabsSpec::PuppetInternals.scope } let(:scope) { PuppetlabsSpec::PuppetInternals.scope }
before :each do before :each do
@ -211,102 +211,102 @@ describe 'sanitize_quantum_config' , :type => :puppet_function do
# @topscope = @scope.compiler.topscope # @topscope = @scope.compiler.topscope
# @scope.parent = @topscope # @scope.parent = @topscope
# Puppet::Parser::Functions.function(:create_resources) # Puppet::Parser::Functions.function(:create_resources)
@q_config = QuantumConfig.new({ @q_config = NeutronConfig.new({
:management_vip => '192.168.0.254', :management_vip => '192.168.0.254',
:management_ip => '192.168.0.11' :management_ip => '192.168.0.11'
}) })
Puppet::Parser::Scope.any_instance.stubs(:function_get_network_role_property).with(['management', 'ipaddr']).returns(@q_config.get_def(:management_ip)) Puppet::Parser::Scope.any_instance.stubs(:function_get_network_role_property).with(['management', 'ipaddr']).returns(@q_config.get_def(:management_ip))
Puppet::Parser::Scope.any_instance.stubs(:function_get_network_role_property).with(['mesh', 'ipaddr']).returns(@q_config.get_def(:management_ip)) Puppet::Parser::Scope.any_instance.stubs(:function_get_network_role_property).with(['mesh', 'ipaddr']).returns(@q_config.get_def(:management_ip))
@cfg = @q_config.get_def_config() @cfg = @q_config.get_def_config()
cfg_q = @cfg['quantum_settings'] cfg_q = @cfg['neutron_settings']
@res_cfg = Marshal.load(Marshal.dump(cfg_q)) @res_cfg = Marshal.load(Marshal.dump(cfg_q))
@res_cfg['L2']['enable_tunneling'] = true @res_cfg['L2']['enable_tunneling'] = true
end end
it 'should exist' do it 'should exist' do
Puppet::Parser::Functions.function('sanitize_quantum_config').should == 'function_sanitize_quantum_config' Puppet::Parser::Functions.function('sanitize_neutron_config').should == 'function_sanitize_neutron_config'
end end
# it 'should return default config if incoming hash is empty' do # it 'should return default config if incoming hash is empty' do
# @res_cfg['database']['url'] = 'mysql://quantum:quantum@192.168.0.254:3306/quantum' # @res_cfg['database']['url'] = 'mysql://neutron:neutron@192.168.0.254:3306/neutron'
# should run.with_params({},'quantum_settings').and_return(@res_cfg) # should run.with_params({},'neutron_settings').and_return(@res_cfg)
# end # end
it 'should return default config if default config given as incoming' do it 'should return default config if default config given as incoming' do
@res_cfg['database']['url'] = 'mysql://quantum:quantum@192.168.0.254:3306/quantum' @res_cfg['database']['url'] = 'mysql://neutron:neutron@192.168.0.254:3306/neutron'
should run.with_params(@cfg,'quantum_settings').and_return(@res_cfg) should run.with_params(@cfg,'neutron_settings').and_return(@res_cfg)
end end
it 'should substitute default values if missing required field in config (amqp)' do it 'should substitute default values if missing required field in config (amqp)' do
cfg = Marshal.load(Marshal.dump(@cfg)) cfg = Marshal.load(Marshal.dump(@cfg))
cfg['quantum_settings']['L3'].delete('dhcp_agent') cfg['neutron_settings']['L3'].delete('dhcp_agent')
res_cfg = Marshal.load(Marshal.dump(@res_cfg)) res_cfg = Marshal.load(Marshal.dump(@res_cfg))
res_cfg['database']['url'] = 'mysql://quantum:quantum@192.168.0.254:3306/quantum' res_cfg['database']['url'] = 'mysql://neutron:neutron@192.168.0.254:3306/neutron'
subject.call([@cfg, 'quantum_settings'])['amqp'].should == res_cfg['amqp'] subject.call([@cfg, 'neutron_settings'])['amqp'].should == res_cfg['amqp']
end end
it 'should substitute default values if missing required field in config (database)' do it 'should substitute default values if missing required field in config (database)' do
cfg = Marshal.load(Marshal.dump(@cfg)) cfg = Marshal.load(Marshal.dump(@cfg))
cfg['quantum_settings']['L3'].delete('dhcp_agent') cfg['neutron_settings']['L3'].delete('dhcp_agent')
res_cfg = Marshal.load(Marshal.dump(@res_cfg)) res_cfg = Marshal.load(Marshal.dump(@res_cfg))
res_cfg['database']['url'] = 'mysql://quantum:quantum@192.168.0.254:3306/quantum' res_cfg['database']['url'] = 'mysql://neutron:neutron@192.168.0.254:3306/neutron'
subject.call([@cfg, 'quantum_settings'])['database'].should == res_cfg['database'] subject.call([@cfg, 'neutron_settings'])['database'].should == res_cfg['database']
end end
it 'should substitute default values if missing required field in config (server)' do it 'should substitute default values if missing required field in config (server)' do
cfg = Marshal.load(Marshal.dump(@cfg)) cfg = Marshal.load(Marshal.dump(@cfg))
cfg['quantum_settings']['L3'].delete('dhcp_agent') cfg['neutron_settings']['L3'].delete('dhcp_agent')
res_cfg = Marshal.load(Marshal.dump(@res_cfg)) res_cfg = Marshal.load(Marshal.dump(@res_cfg))
res_cfg['database']['url'] = 'mysql://quantum:quantum@192.168.0.254:3306/quantum' res_cfg['database']['url'] = 'mysql://neutron:neutron@192.168.0.254:3306/neutron'
subject.call([@cfg, 'quantum_settings'])['server'].should == res_cfg['server'] subject.call([@cfg, 'neutron_settings'])['server'].should == res_cfg['server']
end end
it 'should substitute default values if missing required field in config (keystone)' do it 'should substitute default values if missing required field in config (keystone)' do
cfg = Marshal.load(Marshal.dump(@cfg)) cfg = Marshal.load(Marshal.dump(@cfg))
cfg['quantum_settings']['L3'].delete('dhcp_agent') cfg['neutron_settings']['L3'].delete('dhcp_agent')
res_cfg = Marshal.load(Marshal.dump(@res_cfg)) res_cfg = Marshal.load(Marshal.dump(@res_cfg))
res_cfg['database']['url'] = 'mysql://quantum:quantum@192.168.0.254:3306/quantum' res_cfg['database']['url'] = 'mysql://neutron:neutron@192.168.0.254:3306/neutron'
subject.call([@cfg, 'quantum_settings'])['keystone'].should == res_cfg['keystone'] subject.call([@cfg, 'neutron_settings'])['keystone'].should == res_cfg['keystone']
end end
it 'should substitute default values if missing required field in config (L2)' do it 'should substitute default values if missing required field in config (L2)' do
cfg = Marshal.load(Marshal.dump(@cfg)) cfg = Marshal.load(Marshal.dump(@cfg))
cfg['quantum_settings']['L3'].delete('dhcp_agent') cfg['neutron_settings']['L3'].delete('dhcp_agent')
res_cfg = Marshal.load(Marshal.dump(@res_cfg)) res_cfg = Marshal.load(Marshal.dump(@res_cfg))
res_cfg['database']['url'] = 'mysql://quantum:quantum@192.168.0.254:3306/quantum' res_cfg['database']['url'] = 'mysql://neutron:neutron@192.168.0.254:3306/neutron'
subject.call([@cfg, 'quantum_settings'])['L2'].should == res_cfg['L2'] subject.call([@cfg, 'neutron_settings'])['L2'].should == res_cfg['L2']
end end
it 'should substitute default values if missing required field in config (L3)' do it 'should substitute default values if missing required field in config (L3)' do
cfg = Marshal.load(Marshal.dump(@cfg)) cfg = Marshal.load(Marshal.dump(@cfg))
cfg['quantum_settings']['L3'].delete('dhcp_agent') cfg['neutron_settings']['L3'].delete('dhcp_agent')
res_cfg = Marshal.load(Marshal.dump(@res_cfg)) res_cfg = Marshal.load(Marshal.dump(@res_cfg))
res_cfg['database']['url'] = 'mysql://quantum:quantum@192.168.0.254:3306/quantum' res_cfg['database']['url'] = 'mysql://neutron:neutron@192.168.0.254:3306/neutron'
subject.call([@cfg, 'quantum_settings'])['L3'].should == res_cfg['L3'] subject.call([@cfg, 'neutron_settings'])['L3'].should == res_cfg['L3']
end end
it 'should substitute default values if missing required field in config (predefined_networks)' do it 'should substitute default values if missing required field in config (predefined_networks)' do
cfg = Marshal.load(Marshal.dump(@cfg)) cfg = Marshal.load(Marshal.dump(@cfg))
cfg['quantum_settings']['L3'].delete('dhcp_agent') cfg['neutron_settings']['L3'].delete('dhcp_agent')
res_cfg = Marshal.load(Marshal.dump(@res_cfg)) res_cfg = Marshal.load(Marshal.dump(@res_cfg))
res_cfg['database']['url'] = 'mysql://quantum:quantum@192.168.0.254:3306/quantum' res_cfg['database']['url'] = 'mysql://neutron:neutron@192.168.0.254:3306/neutron'
subject.call([@cfg, 'quantum_settings'])['predefined_networks'].should == res_cfg['predefined_networks'] subject.call([@cfg, 'neutron_settings'])['predefined_networks'].should == res_cfg['predefined_networks']
end end
it 'should substitute default values if missing required field in config (predefined_routers)' do it 'should substitute default values if missing required field in config (predefined_routers)' do
cfg = Marshal.load(Marshal.dump(@cfg)) cfg = Marshal.load(Marshal.dump(@cfg))
cfg['quantum_settings']['L3'].delete('dhcp_agent') cfg['neutron_settings']['L3'].delete('dhcp_agent')
res_cfg = Marshal.load(Marshal.dump(@res_cfg)) res_cfg = Marshal.load(Marshal.dump(@res_cfg))
res_cfg['database']['url'] = 'mysql://quantum:quantum@192.168.0.254:3306/quantum' res_cfg['database']['url'] = 'mysql://neutron:neutron@192.168.0.254:3306/neutron'
subject.call([@cfg, 'quantum_settings'])['predefined_routers'].should == res_cfg['predefined_routers'] subject.call([@cfg, 'neutron_settings'])['predefined_routers'].should == res_cfg['predefined_routers']
end end
it 'should calculate database url if database properties not given' do it 'should calculate database url if database properties not given' do
@cfg['quantum_settings']['database'] = {} @cfg['neutron_settings']['database'] = {}
subject.call([@cfg, 'quantum_settings'])['database']['url'].should == "mysql://quantum:quantum@192.168.0.254:3306/quantum" subject.call([@cfg, 'neutron_settings'])['database']['url'].should == "mysql://neutron:neutron@192.168.0.254:3306/neutron"
end end
it 'should calculate database url if some database properties given' do it 'should calculate database url if some database properties given' do
@cfg['quantum_settings']['database'] = { @cfg['neutron_settings']['database'] = {
'provider' => 'mysql', 'provider' => 'mysql',
'database' => 'qq_database', 'database' => 'qq_database',
'username' => 'qq_username', 'username' => 'qq_username',
@ -314,35 +314,35 @@ describe 'sanitize_quantum_config' , :type => :puppet_function do
'host' => '5.4.3.2', 'host' => '5.4.3.2',
'port' => 666, 'port' => 666,
} }
subject.call([@cfg, 'quantum_settings'])['database']['url'].should == "mysql://qq_username:qq_password@5.4.3.2:666/qq_database" subject.call([@cfg, 'neutron_settings'])['database']['url'].should == "mysql://qq_username:qq_password@5.4.3.2:666/qq_database"
end end
it 'should can substitute values in deep level' do it 'should can substitute values in deep level' do
@cfg['quantum_settings']['amqp']['provider'] = "XXXXXXXXXXxxxx" @cfg['neutron_settings']['amqp']['provider'] = "XXXXXXXXXXxxxx"
@cfg['quantum_settings']['L2']['base_mac'] = "aa:aa:aa:00:00:00" @cfg['neutron_settings']['L2']['base_mac'] = "aa:aa:aa:00:00:00"
@cfg['quantum_settings']['L2']['integration_bridge'] = "xx-xxx" @cfg['neutron_settings']['L2']['integration_bridge'] = "xx-xxx"
@cfg['quantum_settings']['L2']['local_ip'] = "9.9.9.9" @cfg['neutron_settings']['L2']['local_ip'] = "9.9.9.9"
@cfg['quantum_settings']['predefined_networks']['net04_ext']['L3']['nameservers'] = ["127.0.0.1"] @cfg['neutron_settings']['predefined_networks']['net04_ext']['L3']['nameservers'] = ["127.0.0.1"]
res_cfg = Marshal.load(Marshal.dump(@cfg['quantum_settings'])) res_cfg = Marshal.load(Marshal.dump(@cfg['neutron_settings']))
res_cfg['database']['url'] = 'mysql://quantum:quantum@192.168.0.254:3306/quantum' res_cfg['database']['url'] = 'mysql://neutron:neutron@192.168.0.254:3306/neutron'
res_cfg['L2']['enable_tunneling'] = true res_cfg['L2']['enable_tunneling'] = true
#should run.with_params(@cfg,'quantum_settings').and_return(res_cfg) #should run.with_params(@cfg,'neutron_settings').and_return(res_cfg)
subject.call([@cfg, 'quantum_settings']).should == res_cfg subject.call([@cfg, 'neutron_settings']).should == res_cfg
end end
it 'should calculate hostname if amqp host not given' do it 'should calculate hostname if amqp host not given' do
@cfg['quantum_settings']['amqp'] = { @cfg['neutron_settings']['amqp'] = {
'provider' => "rabbitmq", 'provider' => "rabbitmq",
} }
subject.call([@cfg, 'quantum_settings'])['amqp'].should == @res_cfg['amqp'] subject.call([@cfg, 'neutron_settings'])['amqp'].should == @res_cfg['amqp']
end end
it 'should calculate auth url if auth properties not given' do it 'should calculate auth url if auth properties not given' do
@cfg['quantum_settings']['keystone'] = {} @cfg['neutron_settings']['keystone'] = {}
subject.call([@cfg, 'quantum_settings'])['keystone']['auth_url'].should == "http://192.168.0.254:35357/v2.0" subject.call([@cfg, 'neutron_settings'])['keystone']['auth_url'].should == "http://192.168.0.254:35357/v2.0"
end end
it 'should calculate auth url if some auth properties given' do it 'should calculate auth url if some auth properties given' do
@cfg['quantum_settings']['keystone'] = { @cfg['neutron_settings']['keystone'] = {
'auth_host' => "1.2.3.4", 'auth_host' => "1.2.3.4",
'auth_port' => 666, 'auth_port' => 666,
'auth_region' => 'RegionOne', 'auth_region' => 'RegionOne',
@ -351,28 +351,28 @@ describe 'sanitize_quantum_config' , :type => :puppet_function do
'admin_tenant_name' => "xxXXxx", 'admin_tenant_name' => "xxXXxx",
'admin_user' => "user_q", 'admin_user' => "user_q",
'admin_password' => "pass_q", 'admin_password' => "pass_q",
'admin_email' => "test.quantum@localhost", 'admin_email' => "test.neutron@localhost",
} }
subject.call([@cfg, 'quantum_settings'])['keystone']['auth_url'].should == "https://1.2.3.4:666/v10.0" subject.call([@cfg, 'neutron_settings'])['keystone']['auth_url'].should == "https://1.2.3.4:666/v10.0"
end end
it 'enable_tunneling must be True if segmentation_type is GRE' do it 'enable_tunneling must be True if segmentation_type is GRE' do
@cfg['quantum_settings']['L2']['segmentation_type'] = 'gre' @cfg['neutron_settings']['L2']['segmentation_type'] = 'gre'
subject.call([@cfg, 'quantum_settings'])['L2']['enable_tunneling'].should == true subject.call([@cfg, 'neutron_settings'])['L2']['enable_tunneling'].should == true
end end
it 'enable_tunneling must be False if segmentation_type is VLAN' do it 'enable_tunneling must be False if segmentation_type is VLAN' do
@cfg['quantum_settings']['L2']['segmentation_type'] = 'vlan' @cfg['neutron_settings']['L2']['segmentation_type'] = 'vlan'
subject.call([@cfg, 'quantum_settings'])['L2']['enable_tunneling'].should == false subject.call([@cfg, 'neutron_settings'])['L2']['enable_tunneling'].should == false
end end
end end
require "#{File.expand_path(File.dirname(__FILE__))}/../../lib/puppet/parser/functions/sanitize_quantum_config.rb" require "#{File.expand_path(File.dirname(__FILE__))}/../../lib/puppet/parser/functions/sanitize_neutron_config.rb"
describe MrntQuantum do describe MrntNeutron do
describe '.get_keystone_auth_url' do describe '.get_keystone_auth_url' do
it 'should return right auth url' do it 'should return right auth url' do
MrntQuantum.get_keystone_auth_url({ MrntNeutron.get_keystone_auth_url({
:auth_protocol => 'http', :auth_protocol => 'http',
:auth_host => 'localhost', :auth_host => 'localhost',
:auth_port => '5000', :auth_port => '5000',
@ -383,7 +383,7 @@ describe MrntQuantum do
describe '.get_amqp_config' do describe '.get_amqp_config' do
it 'should return hash with amqp hosts declaration as string for HA mode' do it 'should return hash with amqp hosts declaration as string for HA mode' do
MrntQuantum.get_amqp_config({ MrntNeutron.get_amqp_config({
:provider => 'rabbitmq', :provider => 'rabbitmq',
:hosts => "1.2.3.4:567 , 2.3.4.5:678, 3.4.5.6,4.5.6.7:890", :hosts => "1.2.3.4:567 , 2.3.4.5:678, 3.4.5.6,4.5.6.7:890",
:port => 555, :port => 555,
@ -398,7 +398,7 @@ describe MrntQuantum do
end end
describe '.get_amqp_config' do describe '.get_amqp_config' do
it 'should return hash with amqp hosts declaration as array of string for HA mode' do it 'should return hash with amqp hosts declaration as array of string for HA mode' do
MrntQuantum.get_amqp_config({ MrntNeutron.get_amqp_config({
:provider => 'rabbitmq', :provider => 'rabbitmq',
:hosts => ['1.2.3.4:567', '2.3.4.5:678', '3.4.5.6', '4.5.6.7:890'], :hosts => ['1.2.3.4:567', '2.3.4.5:678', '3.4.5.6', '4.5.6.7:890'],
:port => 555, :port => 555,
@ -413,7 +413,7 @@ describe MrntQuantum do
end end
describe '.get_amqp_config' do describe '.get_amqp_config' do
it 'should return hash with amqp hosts declaration as array of string without ports for HA mode' do it 'should return hash with amqp hosts declaration as array of string without ports for HA mode' do
MrntQuantum.get_amqp_config({ MrntNeutron.get_amqp_config({
:provider => 'rabbitmq', :provider => 'rabbitmq',
:hosts => ['1.2.3.4', '2.3.4.5', '3.4.5.6', '4.5.6.7'], :hosts => ['1.2.3.4', '2.3.4.5', '3.4.5.6', '4.5.6.7'],
:port => 555, :port => 555,
@ -428,7 +428,7 @@ describe MrntQuantum do
end end
describe '.get_amqp_config' do describe '.get_amqp_config' do
it 'should return hash with amqp host declaration as string without port for solo mode' do it 'should return hash with amqp host declaration as string without port for solo mode' do
MrntQuantum.get_amqp_config({ MrntNeutron.get_amqp_config({
:provider => 'rabbitmq', :provider => 'rabbitmq',
:hosts => '1.2.3.4:567', :hosts => '1.2.3.4:567',
:port => 555, :port => 555,
@ -443,7 +443,7 @@ describe MrntQuantum do
end end
describe '.get_amqp_config' do describe '.get_amqp_config' do
it 'should return hash with amqp host declaration as string without port for solo mode' do it 'should return hash with amqp host declaration as string without port for solo mode' do
MrntQuantum.get_amqp_config({ MrntNeutron.get_amqp_config({
:provider => 'rabbitmq', :provider => 'rabbitmq',
:hosts => '1.2.3.4', :hosts => '1.2.3.4',
:port => 555, :port => 555,
@ -459,7 +459,7 @@ describe MrntQuantum do
describe '.get_database_url' do describe '.get_database_url' do
it 'should return database url with charset' do it 'should return database url with charset' do
MrntQuantum.get_database_url({ MrntNeutron.get_database_url({
:provider => "mysql", :provider => "mysql",
:host => "1.2.3.4", :host => "1.2.3.4",
:port => 3306, :port => 3306,
@ -472,7 +472,7 @@ describe MrntQuantum do
end end
describe '.get_database_url' do describe '.get_database_url' do
it 'should return database url without charset' do it 'should return database url without charset' do
MrntQuantum.get_database_url({ MrntNeutron.get_database_url({
:provider => "mysql", :provider => "mysql",
:host => "1.2.3.4", :host => "1.2.3.4",
:port => 3306, :port => 3306,
@ -484,7 +484,7 @@ describe MrntQuantum do
end end
describe '.get_database_url' do describe '.get_database_url' do
it 'should return sqlite url' do it 'should return sqlite url' do
MrntQuantum.get_database_url({ MrntNeutron.get_database_url({
:provider => "sqlite", :provider => "sqlite",
:database => "/var/lib/aaa/bbb/ddd.sql", :database => "/var/lib/aaa/bbb/ddd.sql",
}).should == "sqlite:///var/lib/aaa/bbb/ddd.sql" }).should == "sqlite:///var/lib/aaa/bbb/ddd.sql"
@ -492,7 +492,7 @@ describe MrntQuantum do
end end
describe '.get_database_url' do describe '.get_database_url' do
it 'should return sqlite url, with absolute path' do it 'should return sqlite url, with absolute path' do
MrntQuantum.get_database_url({ MrntNeutron.get_database_url({
:provider => "sqlite", :provider => "sqlite",
:database => "var/lib/aaa/bbb/ddd.sql", :database => "var/lib/aaa/bbb/ddd.sql",
}).should == "sqlite:///var/lib/aaa/bbb/ddd.sql" }).should == "sqlite:///var/lib/aaa/bbb/ddd.sql"
@ -501,7 +501,7 @@ describe MrntQuantum do
describe '.get_bridge_mappings' do describe '.get_bridge_mappings' do
it 'should return string with mapping bridges to OS internal physnets' do it 'should return string with mapping bridges to OS internal physnets' do
MrntQuantum.get_bridge_mappings({ MrntNeutron.get_bridge_mappings({
:phys_nets => { :phys_nets => {
:physnet1 => { :physnet1 => {
:bridge => "br-ex", :bridge => "br-ex",
@ -522,7 +522,7 @@ describe MrntQuantum do
describe '.get_network_vlan_ranges' do describe '.get_network_vlan_ranges' do
it 'should return string with mapping vlan-IDs OS internal physnets' do it 'should return string with mapping vlan-IDs OS internal physnets' do
MrntQuantum.get_network_vlan_ranges({ MrntNeutron.get_network_vlan_ranges({
:phys_nets => { :phys_nets => {
:physnet1 => { :physnet1 => {
:bridge => "br-ex", :bridge => "br-ex",
@ -543,7 +543,7 @@ describe MrntQuantum do
describe '.get_phys_bridges' do describe '.get_phys_bridges' do
it 'should return array of using phys_bridges' do it 'should return array of using phys_bridges' do
MrntQuantum.get_phys_bridges({ MrntNeutron.get_phys_bridges({
:phys_nets => { :phys_nets => {
:physnet1 => { :physnet1 => {
:bridge => "br-ex", :bridge => "br-ex",

View File

@ -1,13 +1,13 @@
require 'spec_helper' require 'spec_helper'
describe Puppet::Type.type(:quantum_floatingip_pool).provider(:quantum) do describe Puppet::Type.type(:neutron_floatingip_pool).provider(:neutron) do
let(:resource) { Puppet::Type.type(:quantum_floatingip_pool).new(:name => 'admin', :provider => :quantum) } let(:resource) { Puppet::Type.type(:neutron_floatingip_pool).new(:name => 'admin', :provider => :neutron) }
let(:provider) { resource.provider } let(:provider) { resource.provider }
describe "#instances" do describe "#instances" do
before(:each) do before(:each) do
provider.class.stubs(:quantum).with( provider.class.stubs(:neutron).with(
'--os-tenant-name', 'admin', '--os-username', 'admin', '--os-password', 'admin', '--os-auth-url', 'http://10.20.1.2:5000/v2.0/', '--os-tenant-name', 'admin', '--os-username', 'admin', '--os-password', 'admin', '--os-auth-url', 'http://10.20.1.2:5000/v2.0/',
['floatingip-list', ['--format=csv', '--field=id', '--field=floating_ip_address']]).returns(''' ['floatingip-list', ['--format=csv', '--field=id', '--field=floating_ip_address']]).returns('''
"id","floating_ip_address" "id","floating_ip_address"
@ -16,7 +16,7 @@ describe Puppet::Type.type(:quantum_floatingip_pool).provider(:quantum) do
"aaebec15-b59b-4e03-9b74-e17b49ffa528","10.20.3.133" "aaebec15-b59b-4e03-9b74-e17b49ffa528","10.20.3.133"
"bce4e408-03e3-421a-80c7-a5c96a835c4e","10.20.3.136" "bce4e408-03e3-421a-80c7-a5c96a835c4e","10.20.3.136"
''') ''')
provider.class.stubs(:quantum).with( provider.class.stubs(:neutron).with(
'--os-tenant-name', 'admin', '--os-username', 'admin', '--os-password', 'admin', '--os-auth-url', 'http://10.20.1.2:5000/v2.0/', '--os-tenant-name', 'admin', '--os-username', 'admin', '--os-password', 'admin', '--os-auth-url', 'http://10.20.1.2:5000/v2.0/',
['floatingip-show', ['--format', 'shell', '17029d36-72c3-4ab4-9da2-cdecc689842f']] ['floatingip-show', ['--format', 'shell', '17029d36-72c3-4ab4-9da2-cdecc689842f']]
).returns(''' ).returns('''
@ -28,7 +28,7 @@ port_id=""
router_id="" router_id=""
tenant_id="70e116e152c34eac8966f3eaa7080e89" tenant_id="70e116e152c34eac8966f3eaa7080e89"
''') ''')
provider.class.stubs(:quantum).with( provider.class.stubs(:neutron).with(
'--os-tenant-name', 'admin', '--os-username', 'admin', '--os-password', 'admin', '--os-auth-url', 'http://10.20.1.2:5000/v2.0/', '--os-tenant-name', 'admin', '--os-username', 'admin', '--os-password', 'admin', '--os-auth-url', 'http://10.20.1.2:5000/v2.0/',
['floatingip-show', ['--format', 'shell', '324355f8-0992-4950-8d16-dea6d670b0fe']] ['floatingip-show', ['--format', 'shell', '324355f8-0992-4950-8d16-dea6d670b0fe']]
).returns(''' ).returns('''
@ -40,7 +40,7 @@ port_id=""
router_id="" router_id=""
tenant_id="315f150b76874b2bb07b9f03530fafc4" tenant_id="315f150b76874b2bb07b9f03530fafc4"
''') ''')
provider.class.stubs(:quantum).with( provider.class.stubs(:neutron).with(
'--os-tenant-name', 'admin', '--os-username', 'admin', '--os-password', 'admin', '--os-auth-url', 'http://10.20.1.2:5000/v2.0/', '--os-tenant-name', 'admin', '--os-username', 'admin', '--os-password', 'admin', '--os-auth-url', 'http://10.20.1.2:5000/v2.0/',
['floatingip-show', ['--format', 'shell', 'aaebec15-b59b-4e03-9b74-e17b49ffa528']] ['floatingip-show', ['--format', 'shell', 'aaebec15-b59b-4e03-9b74-e17b49ffa528']]
).returns(''' ).returns('''
@ -52,7 +52,7 @@ port_id=""
router_id="" router_id=""
tenant_id="315f150b76874b2bb07b9f03530fafc4" tenant_id="315f150b76874b2bb07b9f03530fafc4"
''') ''')
provider.class.stubs(:quantum).with( provider.class.stubs(:neutron).with(
'--os-tenant-name', 'admin', '--os-username', 'admin', '--os-password', 'admin', '--os-auth-url', 'http://10.20.1.2:5000/v2.0/', '--os-tenant-name', 'admin', '--os-username', 'admin', '--os-password', 'admin', '--os-auth-url', 'http://10.20.1.2:5000/v2.0/',
['floatingip-show', ['--format', 'shell', 'bce4e408-03e3-421a-80c7-a5c96a835c4e']] ['floatingip-show', ['--format', 'shell', 'bce4e408-03e3-421a-80c7-a5c96a835c4e']]
).returns(''' ).returns('''
@ -88,7 +88,7 @@ tenant_id="315f150b76874b2bb07b9f03530fafc4"
# '315f150b76874b2bb07b9f03530fafc4' => 'ttt' # '315f150b76874b2bb07b9f03530fafc4' => 'ttt'
# }) # })
provider.class.stubs(:quantum_credentials).returns({ provider.class.stubs(:neutron_credentials).returns({
'auth_url' => "http://10.20.1.2:5000/v2.0/", 'auth_url' => "http://10.20.1.2:5000/v2.0/",
'admin_user' => "admin", 'admin_user' => "admin",
'admin_password' => "admin", 'admin_password' => "admin",

View File

@ -22,10 +22,10 @@ handlers = production,devel,stderr
propagate = 1 propagate = 1
[formatter_debug] [formatter_debug]
format = quantum-%(name)s %(levelname)s: %(module)s %(funcName)s %(message)s format = neutron-%(name)s %(levelname)s: %(module)s %(funcName)s %(message)s
[formatter_normal] [formatter_normal]
format = quantum-%(name)s %(levelname)s: %(message)s format = neutron-%(name)s %(levelname)s: %(message)s
[formatter_default] [formatter_default]
format=%(asctime)s %(levelname)s: %(module)s %(name)s:%(lineno)d %(funcName)s %(message)s format=%(asctime)s %(levelname)s: %(module)s %(name)s:%(lineno)d %(funcName)s %(message)s
@ -78,43 +78,43 @@ args = (sys.stdout,)
[logger_l3agent] [logger_l3agent]
handlers = l3agent handlers = l3agent
level=NOTSET level=NOTSET
qualname = quantum.agent.l3_agent qualname = neutron.agent.l3_agent
[handler_l3agent] [handler_l3agent]
class = logging.FileHandler class = logging.FileHandler
args = ('/var/log/quantum/l3.log',) args = ('/var/log/neutron/l3.log',)
formatter = default formatter = default
[logger_dhcpagent] [logger_dhcpagent]
handlers = dhcpagent handlers = dhcpagent
level=NOTSET level=NOTSET
qualname = quantum.agent.dhcp_agent qualname = neutron.agent.dhcp_agent
[handler_dhcpagent] [handler_dhcpagent]
class = logging.FileHandler class = logging.FileHandler
args = ('/var/log/quantum/dhcp.log',) args = ('/var/log/neutron/dhcp.log',)
formatter = default formatter = default
[logger_ovsagent] [logger_ovsagent]
handlers = ovsagent handlers = ovsagent
level=NOTSET level=NOTSET
qualname = quantum.plugins.openvswitch.agent.ovs_quantum_agent qualname = neutron.plugins.openvswitch.agent.ovs_neutron_agent
[handler_ovsagent] [handler_ovsagent]
class = logging.FileHandler class = logging.FileHandler
args = ('/var/log/quantum/ovs.log',) args = ('/var/log/neutron/ovs.log',)
formatter = default formatter = default
[logger_metadata] [logger_metadata]
handlers = metadata handlers = metadata
level=NOTSET level=NOTSET
qualname = quantum.agent.metadata qualname = neutron.agent.metadata
[handler_metadata] [handler_metadata]
class = logging.FileHandler class = logging.FileHandler
args = ('/var/log/quantum/metadata.log',) args = ('/var/log/neutron/metadata.log',)
formatter = default formatter = default
<% end -%> <% end -%>

View File

@ -9,7 +9,7 @@
<project>lvm</project> <project>lvm</project>
<project>memcached</project> <project>memcached</project>
<project>mysql</project> <project>mysql</project>
<project>quantum</project> <project>neutron</project>
<project>rabbitmq</project> <project>rabbitmq</project>
<project>stdlib</project> <project>stdlib</project>
<project>sysctl</project> <project>sysctl</project>

View File

@ -6,7 +6,7 @@ class nova::metadata_api (
$admin_auth_url = 'http://127.0.0.1:35357/v2.0', $admin_auth_url = 'http://127.0.0.1:35357/v2.0',
$admin_tenant_name = 'services', $admin_tenant_name = 'services',
$admin_user = 'nova', $admin_user = 'nova',
$auth_password = 'quantum_pass', $auth_password = 'neutron_pass',
$service_endpoint = '127.0.0.1', $service_endpoint = '127.0.0.1',
$listen_ip = '0.0.0.0', $listen_ip = '0.0.0.0',
$controller_nodes = ['127.0.0.1'], $controller_nodes = ['127.0.0.1'],
@ -17,8 +17,7 @@ class nova::metadata_api (
$rabbit_ha_virtual_ip= false, $rabbit_ha_virtual_ip= false,
$qpid_user = 'nova', $qpid_user = 'nova',
$qpid_password = 'qpid_pw', $qpid_password = 'qpid_pw',
$qpid_node = false, $qpid_node = false
$quantum_netnode_on_cnt= false,
) { ) {
include nova::params include nova::params
@ -63,25 +62,25 @@ class nova::metadata_api (
$memcached_servers = join(regsubst($controller_nodes, '$', ':11211'), ',') $memcached_servers = join(regsubst($controller_nodes, '$', ':11211'), ',')
nova_config {'DEFAULT/quantum_connection_host': value => $service_endpoint } nova_config {'DEFAULT/neutron_connection_host': value => $service_endpoint }
if !defined(Nova_config['DEFAULT/sql_connection']) { if !defined(Nova_config['DEFAULT/sql_connection']) {
nova_config {'DEFAULT/sql_connection': value => "mysql://nova:nova@${service_endpoint}/nova";} nova_config {'DEFAULT/sql_connection': value => "mysql://nova:nova@${service_endpoint}/nova";}
} }
#if ! $quantum_netnode_on_cnt { #if ! $quantum_netnode_on_cnt {
nova_config { nova_config {
'DEFAULT/quantum_auth_strategy': value => $auth_strategy; 'DEFAULT/neutron_auth_strategy': value => $auth_strategy;
'DEFAULT/quantum_admin_auth_url': value => $admin_auth_url; 'DEFAULT/neutron_admin_auth_url': value => $admin_auth_url;
'DEFAULT/quantum_admin_password': value => $auth_password; 'DEFAULT/neutron_admin_password': value => $auth_password;
'DEFAULT/quantum_admin_username': value => 'quantum'; 'DEFAULT/neutron_admin_username': value => 'neutron';
'DEFAULT/quantum_admin_tenant_name': value => $admin_tenant_name; 'DEFAULT/neutron_admin_tenant_name': value => $admin_tenant_name;
'DEFAULT/quantum_url': value => "http://${service_endpoint}:9696" ; 'DEFAULT/neutron_url': value => "http://${service_endpoint}:9696" ;
'DEFAULT/metadata_listen': value => $listen_ip; 'DEFAULT/metadata_listen': value => $listen_ip;
'DEFAULT/auth_strategy': value => $auth_strategy; 'DEFAULT/auth_strategy': value => $auth_strategy;
'DEFAULT/memcached_servers': value => $memcached_servers; 'DEFAULT/memcached_servers': value => $memcached_servers;
'DEFAULT/network_api_class': value => 'nova.network.quantumv2.api.API'; 'DEFAULT/network_api_class': value => 'nova.network.neutronv2.api.API'; # neutronv2 !!! not a neutron.v2
'DEFAULT/rootwrap_config': value => '/etc/nova/rootwrap.conf'; 'DEFAULT/rootwrap_config': value => '/etc/nova/rootwrap.conf';
'DEFAULT/rabbit_ha_queues': value => 'True'; 'DEFAULT/rabbit_ha_queues': value => 'True'; # todo: check HA or not, 'False' for non-HA
} }
#} #}
} }

View File

@ -110,13 +110,13 @@ class nova::network(
} }
# I don't think this is applicable to Folsom... # I don't think this is applicable to Folsom...
# If it is, the details will need changed. -jt # If it is, the details will need changed. -jt
'nova.network.quantum.manager.QuantumManager': { 'nova.network.neutron.manager.NeutronManager': {
$parameters = { fixed_range => $fixed_range, $parameters = { fixed_range => $fixed_range,
public_interface => $public_interface, public_interface => $public_interface,
} }
$resource_parameters = merge($_config_overrides, $parameters) $resource_parameters = merge($_config_overrides, $parameters)
$quantum_resource = { 'nova::network::quantum' => $resource_parameters } $neutron_resource = { 'nova::network::neutron' => $resource_parameters }
create_resources('class', $quantum_resource) create_resources('class', $neutron_resource)
} }
default: { default: {
fail("Unsupported network manager: ${nova::network_manager} The supported network managers are nova.network.manager.FlatManager, nova.network.FlatDHCPManager and nova.network.manager.VlanManager") fail("Unsupported network manager: ${nova::network_manager} The supported network managers are nova.network.manager.FlatManager, nova.network.FlatDHCPManager and nova.network.manager.VlanManager")

View File

@ -0,0 +1,27 @@
#
# == parameters
# * neutron_config: Neutron config hash.
# * neutron_auth_strategy: auth strategy used by Neutron.
class nova::network::neutron (
$neutron_config = {},
$neutron_connection_host,
$neutron_auth_strategy = 'keystone',
) {
if $neutron_connection_host != 'localhost' {
nova_config { 'DEFAULT/neutron_connection_host': value => $neutron_connection_host }
}
nova_config {
'DEFAULT/network_api_class': value => 'nova.network.neutronv2.api.API'; # neutronv2 !!! not a neutron.v2
'DEFAULT/neutron_auth_strategy': value => $neutron_auth_strategy;
'DEFAULT/neutron_url': value => $neutron_config['server']['api_url'];
'DEFAULT/neutron_admin_tenant_name': value => $neutron_config['keystone']['admin_tenant_name'];
'DEFAULT/neutron_admin_username': value => $neutron_config['keystone']['admin_user'];
'DEFAULT/neutron_admin_password': value => $neutron_config['keystone']['admin_password'];
'DEFAULT/neutron_admin_auth_url': value => $neutron_config['keystone']['auth_url'];
}
}
# vim: set ts=2 sw=2 et :
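# Illustrative usage sketch (not part of this change): pointing nova at Neutron
# with a hash shaped like the spec fixtures in this commit. The addresses and
# credentials below are assumptions for illustration only.
class { 'nova::network::neutron':
  neutron_connection_host => '192.168.0.254',
  neutron_config          => {
    'server'   => { 'api_url' => 'http://192.168.0.254:9696' },
    'keystone' => {
      'auth_url'          => 'http://192.168.0.254:35357/v2.0',
      'admin_tenant_name' => 'services',
      'admin_user'        => 'neutron',
      'admin_password'    => 'neutron_pass',
    },
  },
}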

View File

@ -1,27 +0,0 @@
#
# == parameters
# * quantum_config: Quantum config hash.
# * quantum_auth_strategy: auth strategy used by quantum.
class nova::network::quantum (
$quantum_config = {},
$quantum_connection_host,
$quantum_auth_strategy = 'keystone',
) {
if $quantum_connection_host != 'localhost' {
nova_config { 'DEFAULT/quantum_connection_host': value => $quantum_connection_host }
}
nova_config {
'DEFAULT/network_api_class': value => 'nova.network.quantumv2.api.API'; # quantumv2 !!! not a quantum.v2
'DEFAULT/quantum_auth_strategy': value => $quantum_auth_strategy;
'DEFAULT/quantum_url': value => $quantum_config['server']['api_url'];
'DEFAULT/quantum_admin_tenant_name': value => $quantum_config['keystone']['admin_tenant_name'];
'DEFAULT/quantum_admin_username': value => $quantum_config['keystone']['admin_user'];
'DEFAULT/quantum_admin_password': value => $quantum_config['keystone']['admin_password'];
'DEFAULT/quantum_admin_auth_url': value => $quantum_config['keystone']['auth_url'];
}
}
# vim: set ts=2 sw=2 et :

View File

@ -20,7 +20,7 @@
<project>nova</project> <project>nova</project>
<project>ntp</project> <project>ntp</project>
<project>operatingsystem</project> <project>operatingsystem</project>
<project>quantum</project> <project>neutron</project>
<project>rsyslog</project> <project>rsyslog</project>
<project>stdlib</project> <project>stdlib</project>
<project>swift</project> <project>swift</project>

View File

@ -1,851 +0,0 @@
#
# Parameter values in this file should be changed, taking into consideration your
# networking setup and desired OpenStack settings.
#
# Please consult with the latest Fuel User Guide before making edits.
#
### GENERAL CONFIG ###
# This section sets main parameters such as hostnames and IP addresses of different nodes
# This is the name of the public interface. The public network provides address space for Floating IPs, as well as public IP accessibility to the API endpoints.
$public_interface = 'eth1'
$public_br = 'br-ex'
# This is the name of the internal interface. It will be attached to the management network, where data exchange between components of the OpenStack cluster will happen.
$internal_interface = 'eth0'
$internal_br = 'br-mgmt'
# This is the name of the private interface. All traffic within OpenStack tenants' networks will go through this interface.
$private_interface = 'eth2'
# Public and Internal VIPs. These virtual addresses are required by HA topology and will be managed by keepalived.
$internal_virtual_ip = '10.0.0.253'
# Change this IP to an IP routable from your 'public' network,
# e.g. the Internet or your office LAN, in which your public
# interface resides
$public_virtual_ip = '10.0.204.253'
$nodes_harr = [
{
'name' => 'master',
'role' => 'master',
'internal_address' => '10.0.0.101',
'public_address' => '10.0.204.101',
'mountpoints'=> "1 1\n2 1",
'storage_local_net_ip' => '10.0.0.101',
},
{
'name' => 'fuel-cobbler',
'role' => 'cobbler',
'internal_address' => '10.0.0.102',
'public_address' => '10.0.204.102',
'mountpoints'=> "1 1\n2 1",
'storage_local_net_ip' => '10.0.0.102',
},
{
'name' => 'fuel-controller-01',
'role' => 'primary-controller',
'internal_address' => '10.0.0.103',
'public_address' => '10.0.204.103',
'swift_zone' => 1,
'mountpoints'=> "1 1\n2 1",
'storage_local_net_ip' => '10.0.0.103',
},
{
'name' => 'fuel-controller-02',
'role' => 'controller',
'internal_address' => '10.0.0.104',
'public_address' => '10.0.204.104',
'swift_zone' => 2,
'mountpoints'=> "1 2\n 2 1",
'storage_local_net_ip' => '10.0.0.110',
},
{
'name' => 'fuel-controller-03',
'role' => 'controller',
'internal_address' => '10.0.0.105',
'public_address' => '10.0.204.105',
'swift_zone' => 3,
'mountpoints'=> "1 2\n 2 1",
'storage_local_net_ip' => '10.0.0.110',
},
{
'name' => 'fuel-compute-01',
'role' => 'compute',
'internal_address' => '10.0.0.106',
'public_address' => '10.0.204.106',
},
{
'name' => 'fuel-compute-02',
'role' => 'compute',
'internal_address' => '10.0.0.107',
'public_address' => '10.0.204.107',
},
]
$nodes = $nodes_harr
$default_gateway = '10.0.204.1'
# Specify nameservers here.
# These should point to the cobbler node IP, or to specially prepared nameservers if you know what you are doing.
$dns_nameservers = ['10.0.204.1','8.8.8.8']
# Specify netmasks for internal and external networks.
$internal_netmask = '255.255.255.0'
$public_netmask = '255.255.255.0'
$node = filter_nodes($nodes,'name',$::hostname)
if empty($node) {
fail("Node $::hostname is not defined in the hash structure")
}
$internal_address = $node[0]['internal_address']
$public_address = $node[0]['public_address']
$controllers = merge_arrays(filter_nodes($nodes,'role','primary-controller'), filter_nodes($nodes,'role','controller'))
$controller_internal_addresses = nodes_to_hash($controllers,'name','internal_address')
$controller_public_addresses = nodes_to_hash($controllers,'name','public_address')
$controller_hostnames = sort(keys($controller_internal_addresses))
#Set this to anything other than pacemaker if you do not want Quantum HA
#Also, if you do not want Quantum HA, you MUST enable $quantum_network_node
#on the ONLY controller
$ha_provider = 'pacemaker'
$use_unicast_corosync = false
# Set nagios master fqdn
$nagios_master = 'nagios-server.localdomain'
## proj_name name of environment nagios configuration
$proj_name = 'test'
#Specify if your installation contains multiple Nova controllers. Defaults to true as it is the most common scenario.
$multi_host = true
# Specify different DB credentials for various services
# HA DB provided through pacemaker_mysql or galera
$mysql_root_password = 'nova'
$admin_email = 'openstack@openstack.org'
$admin_password = 'nova'
$custom_mysql_setup_class = 'pacemaker_mysql'
validate_re($mysql_custom_setup_class,'galera|pacemaker_mysql')
$keystone_db_password = 'nova'
$keystone_admin_token = 'nova'
$glance_db_password = 'nova'
$glance_user_password = 'nova'
$nova_db_password = 'nova'
$nova_user_password = 'nova'
#AMQP backend rabbitmq or qpid
$queue_provider = 'qpid'
validate_re($queue_provider, 'rabbitmq|qpid')
$rabbit_password = 'nova'
$rabbit_user = 'nova'
$swift_user_password = 'swift_pass'
$swift_shared_secret = 'changeme'
$quantum_user_password = 'quantum_pass'
$quantum_db_password = 'quantum_pass'
$quantum_db_user = 'quantum'
$quantum_db_dbname = 'quantum'
# End DB credentials section
### GENERAL CONFIG END ###
### NETWORK/QUANTUM ###
# Specify network/quantum specific settings
# Should we use quantum or nova-network(deprecated).
# Consult OpenStack documentation for differences between them.
$quantum = true
$quantum_netnode_on_cnt = true
# Specify network creation criteria:
# Should puppet automatically create networks?
$create_networks = true
# Fixed IP addresses are typically used for communication between VM instances.
$fixed_range = '10.0.198.128/27'
# Floating IP addresses are used for communication of VM instances with the outside world (e.g. Internet).
$floating_range = '10.0.204.128/28'
# These parameters are passed to the previously specified network manager, e.g. nova-manage network create.
# Not used in Quantum.
# Consult openstack docs for corresponding network manager.
# https://fuel-dev.mirantis.com/docs/0.2/pages/0050-installation-instructions.html#network-setup
$num_networks = 1
$network_size = 31
$vlan_start = 300
# Quantum
# Segmentation type for isolating traffic between tenants
# Consult Openstack Quantum docs
$tenant_network_type = 'gre'
# Which IP address will be used for creating GRE tunnels.
$quantum_gre_bind_addr = $internal_address
# If $external_ipinfo option is not defined, the addresses will be allocated automatically from $floating_range:
# the first address will be defined as an external default router,
# the second address will be attached to an uplink bridge interface,
# the remaining addresses will be utilized for the floating IP address pool.
$external_ipinfo = {}
## $external_ipinfo = {
## 'public_net_router' => '10.0.74.129',
## 'ext_bridge' => '10.0.74.130',
## 'pool_start' => '10.0.74.131',
## 'pool_end' => '10.0.74.142',
## }
# Quantum segmentation range.
# For VLAN networks: valid VLAN VIDs can be 1 through 4094.
# For GRE networks: Valid tunnel IDs can be any 32-bit unsigned integer.
$segment_range = '900:999'
# Set up OpenStack network manager. It is used ONLY in nova-network.
# Consult Openstack nova-network docs for possible values.
$network_manager = 'nova.network.manager.FlatDHCPManager'
# Assign floating IPs to VMs on startup automatically?
$auto_assign_floating_ip = false
# Database connection for Quantum configuration (quantum.conf)
$quantum_sql_connection = "mysql://${quantum_db_user}:${quantum_db_password}@${$internal_virtual_ip}/${quantum_db_dbname}"
if $quantum {
$public_int = $public_br
$internal_int = $internal_br
} else {
$public_int = $public_interface
$internal_int = $internal_interface
}
#Network configuration
stage {'netconfig':
before => Stage['main'],
}
class {'l23network': stage=> 'netconfig'}
class node_netconfig (
$mgmt_ipaddr,
$mgmt_netmask = '255.255.255.0',
$public_ipaddr = undef,
$public_netmask= '255.255.255.0',
$save_default_gateway=false,
$quantum = $quantum,
) {
if $quantum {
l23network::l3::create_br_iface {'mgmt':
interface => $internal_interface, # !!! NO $internal_int /sv !!!
bridge => $internal_br,
ipaddr => $mgmt_ipaddr,
netmask => $mgmt_netmask,
dns_nameservers => $dns_nameservers,
save_default_gateway => $save_default_gateway,
} ->
l23network::l3::create_br_iface {'ex':
interface => $public_interface, # !! NO $public_int /sv !!!
bridge => $public_br,
ipaddr => $public_ipaddr,
netmask => $public_netmask,
gateway => $default_gateway,
}
} else {
# nova-network mode
l23network::l3::ifconfig {$public_int:
ipaddr => $public_ipaddr,
netmask => $public_netmask,
gateway => $default_gateway,
}
l23network::l3::ifconfig {$internal_int:
ipaddr => $mgmt_ipaddr,
netmask => $mgmt_netmask,
dns_nameservers => $dns_nameservers,
}
}
l23network::l3::ifconfig {$private_interface: ipaddr=>'none' }
}
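# Illustrative only: a per-node declaration of the node_netconfig class above,
# using variables already computed earlier in this file and the 'netconfig'
# stage declared above; parameter values are examples, not mandated settings.
class { 'node_netconfig':
  stage          => 'netconfig',
  mgmt_ipaddr    => $internal_address,
  mgmt_netmask   => $internal_netmask,
  public_ipaddr  => $public_address,
  public_netmask => $public_netmask,
}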
### NETWORK/QUANTUM END ###
# This parameter specifies the identifier of the current cluster. This is needed in case of a multiple-environment
# installation. Each cluster requires a unique integer value.
# Valid identifier range is 1 to 254
$deployment_id = '79'
# Below you can enable or disable various services based on the chosen deployment topology:
### CINDER/VOLUME ###
# Should we use cinder or nova-volume(obsolete)
# Consult openstack docs for differences between them
$cinder = true
# Choose which nodes to install cinder onto
# 'compute' -> compute nodes will run cinder
# 'controller' -> controller nodes will run cinder
# 'storage' -> storage nodes will run cinder
# 'fuel-controller-XX' -> specify particular host(s) by hostname
# 'XXX.XXX.XXX.XXX' -> specify particular host(s) by IP address
# 'all' -> compute, controller, and storage nodes will run cinder (excluding swift and proxy nodes)
$cinder_nodes = ['controller']
#Set it to true if you want cinder-volume to be installed on the host
#Otherwise only the api and scheduler services will be installed
$manage_volumes = true
# Setup network interface, which Cinder uses to export iSCSI targets.
$cinder_iscsi_bind_addr = $internal_address
# Below you can add physical volumes to cinder. Please replace values with the actual names of devices.
# This parameter defines which partitions to aggregate into cinder-volumes or nova-volumes LVM VG
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# USE EXTREME CAUTION WITH THIS SETTING! IF THIS PARAMETER IS DEFINED,
# IT WILL AGGREGATE THE VOLUMES INTO AN LVM VOLUME GROUP
# AND ALL THE DATA THAT RESIDES ON THESE VOLUMES WILL BE LOST!
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# Leave this parameter empty if you want to create [cinder|nova]-volumes VG by yourself
$nv_physical_volume = ['/dev/sdz', '/dev/sdy', '/dev/sdx']
#Evaluate cinder node selection
if ($cinder) {
if (member($cinder_nodes,'all')) {
$is_cinder_node = true
} elsif (member($cinder_nodes,$::hostname)) {
$is_cinder_node = true
} elsif (member($cinder_nodes,$internal_address)) {
$is_cinder_node = true
} elsif ($node[0]['role'] =~ /controller/ ) {
$is_cinder_node = member($cinder_nodes,'controller')
} else {
$is_cinder_node = member($cinder_nodes,$node[0]['role'])
}
} else {
$is_cinder_node = false
}
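# Illustrative walk-through (hypothetical): on a node whose role matches /controller/,
# with the default $cinder_nodes above, the branch taken is equivalent to:
## $is_cinder_node = member(['controller'], 'controller')  # => true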
### CINDER/VOLUME END ###
### GLANCE and SWIFT ###
# Which backend to use for glance
# Supported backends are "swift" and "file"
$glance_backend = 'swift'
# Use loopback device for swift:
# set 'loopback' or false
# This parameter controls where swift partitions are located:
# on physical partitions or inside loopback devices.
$swift_loopback = 'loopback'
# Which IP address to bind swift components to: e.g., which IP swift-proxy should listen on
$swift_local_net_ip = $internal_address
# IP of the controller node used during swift installation
# and put into the swift configs
$controller_node_public = $internal_virtual_ip
# Hash of proxies hostname|fqdn => ip mappings.
# This is used by controller_ha.pp manifests for haproxy setup
# of swift_proxy backends
$swift_proxies = $controller_internal_addresses
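# nodes_to_hash() produces a name => internal_address mapping, so $swift_proxies
# ends up shaped roughly like the following (hypothetical entries):
## { 'fuel-controller-01' => '10.0.0.103', 'fuel-controller-02' => '10.0.0.104' }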
# Set the hostname of the swift master.
# It tells which swift proxy node builds the
# *ring.gz files. The other swift proxies/storages
# will rsync them.
if $node[0]['role'] == 'primary-controller' {
$primary_proxy = true
} else {
$primary_proxy = false
}
if $node[0]['role'] == 'primary-controller' {
$primary_controller = true
} else {
$primary_controller = false
}
$master_swift_proxy_nodes = filter_nodes($nodes,'role','primary-controller')
$master_swift_proxy_ip = $master_swift_proxy_nodes[0]['internal_address']
### Glance and swift END ###
# This parameter specifies the verbosity level of log messages
# in openstack components config.
# Debug sets the DEBUG level and ignores the verbose setting, if any.
# Verbose sets INFO level messages.
# If neither debug nor verbose is set, the default WARNING level is used.
# Note: if syslog is on, this default level may be configured (for syslog) with the syslog_log_level option.
$verbose = true
$debug = false
### Syslog ###
# Enable error messages reporting to rsyslog. Rsyslog must be installed in this case.
$use_syslog = true
# Default log level to be used if neither verbose nor debug is set
$syslog_log_level = 'ERROR'
# Syslog facilities for main openstack services, choose any, may overlap if needed
# local0 is reserved for HA provisioning and orchestration services,
# local1 is reserved for openstack-dashboard
$syslog_log_facility_glance = 'LOCAL2'
$syslog_log_facility_cinder = 'LOCAL3'
$syslog_log_facility_quantum = 'LOCAL4'
$syslog_log_facility_nova = 'LOCAL6'
$syslog_log_facility_keystone = 'LOCAL7'
if $use_syslog {
class { "::openstack::logging":
stage => 'first',
role => 'client',
# use date-rfc3339 timestamps
show_timezone => true,
# log both locally (including auth) and remotely
log_remote => true,
log_local => true,
log_auth_local => true,
# keep four weekly log rotations, force rotate if the 300M size is exceeded
rotation => 'weekly',
keep => '4',
# should be > 30M
limitsize => '300M',
# remote servers to send logs to
rservers => [{'remote_type'=>'udp', 'server'=>'master', 'port'=>'514'},],
# should be true if the client is running on a virtual node
virtual => true,
# facilities
syslog_log_facility_glance => $syslog_log_facility_glance,
syslog_log_facility_cinder => $syslog_log_facility_cinder,
syslog_log_facility_quantum => $syslog_log_facility_quantum,
syslog_log_facility_nova => $syslog_log_facility_nova,
syslog_log_facility_keystone => $syslog_log_facility_keystone,
# Rabbit doesn't support syslog directly; this should be >= syslog_log_level,
# otherwise none of rabbit's messages will go to syslog
rabbit_log_level => $syslog_log_level,
debug => $debug,
}
}
# Example for server role class definition for remote logging node:
# class {::openstack::logging:
# role => 'server',
# log_remote => false,
# log_local => true,
# log_auth_local => true,
# rotation => 'daily',
# keep => '7',
# limitsize => '100M',
# port => '514',
# proto => 'udp',
# #high precision timestamps
# show_timezone => true,
# #should be true if the server is running on a virtual node
# #virtual => false,
# }
### Syslog END ###
case $::osfamily {
"Debian": {
$rabbitmq_version_string = '2.8.7-1'
}
"RedHat": {
$rabbitmq_version_string = '2.8.7-2.el6'
}
}
#
# OpenStack packages and customized component versions to be installed.
# Use 'latest' to get the most recent ones or specify exact version if you need to install custom version.
$openstack_version = {
'keystone' => 'latest',
'glance' => 'latest',
'horizon' => 'latest',
'nova' => 'latest',
'novncproxy' => 'latest',
'cinder' => 'latest',
'rabbitmq_version' => $rabbitmq_version_string,
}
# Which package repo mirror to use. Currently "default".
# "custom" is used by Mirantis for testing purposes.
# Local puppet-managed repo option planned for future releases.
# If you want to set up a local repository, you will need to manually adjust mirantis_repos.pp,
# though it is NOT recommended.
$mirror_type = 'default'
$enable_test_repo = false
$repo_proxy = undef
#Rate Limits for cinder and Nova
#Cinder and Nova can rate-limit your requests to API services.
#These limits can be reduced for your installation or usage scenario.
#Change the following variables if you want. They are measured in requests per minute.
$nova_rate_limits = {
'POST' => 1000,
'POST_SERVERS' => 1000,
'PUT' => 1000, 'GET' => 1000,
'DELETE' => 1000
}
$cinder_rate_limits = {
'POST' => 1000,
'POST_SERVERS' => 1000,
'PUT' => 1000, 'GET' => 1000,
'DELETE' => 1000
}
#Specify desired NTP servers here.
#If you leave it undef, pool.ntp.org
#will be used
$ntp_servers = ['pool.ntp.org']
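# Several servers may be listed if preferred, e.g. (hypothetical hostnames):
## $ntp_servers = ['0.pool.ntp.org', '1.pool.ntp.org', '2.pool.ntp.org']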
class {'openstack::clocksync': ntp_servers=>$ntp_servers}
#Exec clocksync from openstack::clocksync before services
#connecting to the AMQP server are started.
Exec<| title == 'clocksync' |>->Nova::Generic_service<| |>
Exec<| title == 'clocksync' |>->Service<| title == 'quantum-l3' |>
Exec<| title == 'clocksync' |>->Service<| title == 'quantum-dhcp-service' |>
Exec<| title == 'clocksync' |>->Service<| title == 'quantum-ovs-plugin-service' |>
Exec<| title == 'clocksync' |>->Service<| title == 'cinder-volume' |>
Exec<| title == 'clocksync' |>->Service<| title == 'cinder-api' |>
Exec<| title == 'clocksync' |>->Service<| title == 'cinder-scheduler' |>
Exec<| title == 'clocksync' |>->Exec<| title == 'keystone-manage db_sync' |>
Exec<| title == 'clocksync' |>->Exec<| title == 'glance-manage db_sync' |>
Exec<| title == 'clocksync' |>->Exec<| title == 'nova-manage db sync' |>
Exec<| title == 'clocksync' |>->Exec<| title == 'initial-db-sync' |>
Exec<| title == 'clocksync' |>->Exec<| title == 'post-nova_config' |>
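# The same collector-based ordering can be extended to any other time-sensitive
# service in the same way, e.g. (hypothetical resource title):
## Exec<| title == 'clocksync' |>->Service<| title == 'quantum-metadata-agent' |>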
Exec { logoutput => true }
### END OF PUBLIC CONFIGURATION PART ###
# Normally, you do not need to change anything below this line
# Globally apply an environment-based tag to all resources on each node.
tag("${::deployment_id}::${::environment}")
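# For example, with $deployment_id = '79' and a hypothetical environment named
# 'production', the call above expands to:
## tag("79::production")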
stage { 'openstack-custom-repo': before => Stage['netconfig'] }
class { 'openstack::mirantis_repos':
stage => 'openstack-custom-repo',
type=>$mirror_type,
enable_test_repo=>$enable_test_repo,
repo_proxy=>$repo_proxy,
}
if !defined(Class['selinux']) and ($::osfamily == 'RedHat') {
class { 'selinux':
mode=>"disabled",
stage=>"openstack-custom-repo"
}
}
if $::operatingsystem == 'Ubuntu' {
class { 'openstack::apparmor::disable': stage => 'openstack-custom-repo' }
}
sysctl::value { 'net.ipv4.conf.all.rp_filter': value => '0' }
# Dashboard(horizon) https/ssl mode
# false: normal mode with no encryption
# 'default': uses keys supplied with the ssl module package
# 'exist': assumes that the keys (domain name based certificate) are provisioned in advance
# 'custom': require fileserver static mount point [ssl_certs] and hostname based certificate existence
$horizon_use_ssl = false
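# E.g. to use certificates provisioned in advance (hypothetical setting):
## $horizon_use_ssl = 'exist'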
class compact_controller (
$quantum_network_node = $quantum_netnode_on_cnt
) {
class { 'openstack::controller_ha':
controller_public_addresses => $controller_public_addresses,
controller_internal_addresses => $controller_internal_addresses,
internal_address => $internal_address,
public_interface => $public_int,
internal_interface => $internal_int,
private_interface => $private_interface,
internal_virtual_ip => $internal_virtual_ip,
public_virtual_ip => $public_virtual_ip,
primary_controller => $primary_controller,
floating_range => $floating_range,
fixed_range => $fixed_range,
multi_host => $multi_host,
network_manager => $network_manager,
num_networks => $num_networks,
network_size => $network_size,
network_config => { 'vlan_start' => $vlan_start },
verbose => $verbose,
debug => $debug,
auto_assign_floating_ip => $auto_assign_floating_ip,
mysql_root_password => $mysql_root_password,
admin_email => $admin_email,
admin_password => $admin_password,
keystone_db_password => $keystone_db_password,
keystone_admin_token => $keystone_admin_token,
glance_db_password => $glance_db_password,
glance_user_password => $glance_user_password,
nova_db_password => $nova_db_password,
nova_user_password => $nova_user_password,
queue_provider => $queue_provider,
rabbit_password => $rabbit_password,
rabbit_user => $rabbit_user,
rabbit_nodes => $controller_hostnames,
qpid_password => $rabbit_password,
qpid_user => $rabbit_user,
qpid_nodes => [$internal_virtual_ip],
memcached_servers => $controller_hostnames,
export_resources => false,
glance_backend => $glance_backend,
swift_proxies => $swift_proxies,
quantum => $quantum,
quantum_user_password => $quantum_user_password,
quantum_db_password => $quantum_db_password,
quantum_db_user => $quantum_db_user,
quantum_db_dbname => $quantum_db_dbname,
quantum_network_node => $quantum_network_node,
quantum_netnode_on_cnt => $quantum_netnode_on_cnt,
quantum_gre_bind_addr => $quantum_gre_bind_addr,
quantum_external_ipinfo => $external_ipinfo,
tenant_network_type => $tenant_network_type,
segment_range => $segment_range,
cinder => $is_cinder_node,
cinder_iscsi_bind_addr => $cinder_iscsi_bind_addr,
manage_volumes => $manage_volumes,
galera_nodes => $controller_hostnames,
custom_mysql_setup_class => $custom_mysql_setup_class,
nv_physical_volume => $nv_physical_volume,
use_syslog => $use_syslog,
syslog_log_level => $syslog_log_level,
syslog_log_facility_glance => $syslog_log_facility_glance,
syslog_log_facility_cinder => $syslog_log_facility_cinder,
syslog_log_facility_quantum => $syslog_log_facility_quantum,
syslog_log_facility_nova => $syslog_log_facility_nova,
syslog_log_facility_keystone => $syslog_log_facility_keystone,
nova_rate_limits => $nova_rate_limits,
cinder_rate_limits => $cinder_rate_limits,
horizon_use_ssl => $horizon_use_ssl,
use_unicast_corosync => $use_unicast_corosync,
ha_provider => $ha_provider
}
class { 'swift::keystone::auth':
password => $swift_user_password,
public_address => $public_virtual_ip,
internal_address => $internal_virtual_ip,
admin_address => $internal_virtual_ip,
}
}
# Definition of OpenStack controllers.
node /fuel-controller-[\d+]/ {
include stdlib
class { 'operatingsystem::checksupported':
stage => 'setup'
}
class {'::node_netconfig':
mgmt_ipaddr => $::internal_address,
mgmt_netmask => $::internal_netmask,
public_ipaddr => $::public_address,
public_netmask => $::public_netmask,
stage => 'netconfig',
}
class {'nagios':
proj_name => $proj_name,
services => [
'host-alive','nova-novncproxy','keystone', 'nova-scheduler',
'nova-consoleauth', 'nova-cert', 'haproxy', 'nova-api', 'glance-api',
'glance-registry','horizon', 'rabbitmq', 'mysql', 'swift-proxy',
'swift-account', 'swift-container', 'swift-object',
],
whitelist => ['127.0.0.1', $nagios_master],
hostgroup => 'controller',
}
class { compact_controller: }
$swift_zone = $node[0]['swift_zone']
class { 'openstack::swift::storage_node':
storage_type => $swift_loopback,
swift_zone => $swift_zone,
swift_local_net_ip => $swift_local_net_ip,
master_swift_proxy_ip => $master_swift_proxy_ip,
sync_rings => ! $primary_proxy,
cinder => $is_cinder_node,
cinder_iscsi_bind_addr => $cinder_iscsi_bind_addr,
manage_volumes => $manage_volumes,
nv_physical_volume => $nv_physical_volume,
db_host => $internal_virtual_ip,
service_endpoint => $internal_virtual_ip,
cinder_rate_limits => $cinder_rate_limits,
debug => $debug,
verbose => $verbose,
syslog_log_level => $syslog_log_level,
syslog_log_facility_cinder => $syslog_log_facility_cinder,
}
if $primary_proxy {
ring_devices {'all':
storages => $controllers
}
}
class { 'openstack::swift::proxy':
swift_user_password => $swift_user_password,
swift_proxies => $swift_proxies,
primary_proxy => $primary_proxy,
controller_node_address => $internal_virtual_ip,
swift_local_net_ip => $swift_local_net_ip,
master_swift_proxy_ip => $master_swift_proxy_ip,
debug => $debug,
verbose => $verbose,
syslog_log_level => $syslog_log_level,
}
Class ['openstack::swift::proxy'] -> Class['openstack::swift::storage_node']
}
# Definition of OpenStack compute nodes.
node /fuel-compute-[\d+]/ {
## Uncomment the lines below if you want
## to configure the network of these nodes
## with puppet.
# class {'::node_netconfig':
# mgmt_ipaddr => $::internal_address,
# mgmt_netmask => $::internal_netmask,
# public_ipaddr => $::public_address,
# public_netmask => $::public_netmask,
# stage => 'netconfig',
# }
include stdlib
class { 'operatingsystem::checksupported':
stage => 'setup'
}
class {'nagios':
proj_name => $proj_name,
services => [
'host-alive', 'nova-compute','nova-network','libvirt'
],
whitelist => ['127.0.0.1', $nagios_master],
hostgroup => 'compute',
}
class { 'openstack::compute':
public_interface => $public_int,
private_interface => $private_interface,
internal_address => $internal_address,
libvirt_type => 'kvm',
fixed_range => $fixed_range,
network_manager => $network_manager,
network_config => { 'vlan_start' => $vlan_start },
multi_host => $multi_host,
auto_assign_floating_ip => $auto_assign_floating_ip,
sql_connection => "mysql://nova:${nova_db_password}@${internal_virtual_ip}/nova",
queue_provider => $queue_provider,
rabbit_nodes => $controller_hostnames,
rabbit_password => $rabbit_password,
rabbit_user => $rabbit_user,
rabbit_ha_virtual_ip => $internal_virtual_ip,
qpid_password => $rabbit_password,
qpid_user => $rabbit_user,
qpid_nodes => [$internal_virtual_ip],
glance_api_servers => "${internal_virtual_ip}:9292",
vncproxy_host => $public_virtual_ip,
verbose => $verbose,
debug => $debug,
vnc_enabled => true,
nova_user_password => $nova_user_password,
cache_server_ip => $controller_hostnames,
service_endpoint => $internal_virtual_ip,
quantum => $quantum,
quantum_sql_connection => $quantum_sql_connection,
quantum_user_password => $quantum_user_password,
quantum_host => $internal_virtual_ip,
tenant_network_type => $tenant_network_type,
segment_range => $segment_range,
cinder => $cinder,
manage_volumes => $is_cinder_node ? { true => $manage_volumes, false => false},
cinder_iscsi_bind_addr => $cinder_iscsi_bind_addr,
nv_physical_volume => $nv_physical_volume,
db_host => $internal_virtual_ip,
use_syslog => $use_syslog,
syslog_log_level => $syslog_log_level,
syslog_log_facility => $syslog_log_facility_nova,
syslog_log_facility_quantum => $syslog_log_facility_quantum,
syslog_log_facility_cinder => $syslog_log_facility_cinder,
nova_rate_limits => $nova_rate_limits,
cinder_rate_limits => $cinder_rate_limits
}
}
# Definition of OpenStack Quantum node.
node /fuel-quantum/ {
include stdlib
class { 'operatingsystem::checksupported':
stage => 'setup'
}
class {'::node_netconfig':
mgmt_ipaddr => $::internal_address,
mgmt_netmask => $::internal_netmask,
public_ipaddr => 'none',
save_default_gateway => true,
stage => 'netconfig',
}
if ! $quantum_netnode_on_cnt {
class { 'openstack::quantum_router':
db_host => $internal_virtual_ip,
service_endpoint => $internal_virtual_ip,
auth_host => $internal_virtual_ip,
nova_api_vip => $internal_virtual_ip,
internal_address => $internal_address,
public_interface => $public_int,
private_interface => $private_interface,
floating_range => $floating_range,
fixed_range => $fixed_range,
create_networks => $create_networks,
verbose => $verbose,
debug => $debug,
queue_provider => $queue_provider,
rabbit_password => $rabbit_password,
rabbit_user => $rabbit_user,
rabbit_nodes => $controller_hostnames,
rabbit_ha_virtual_ip => $internal_virtual_ip,
qpid_password => $rabbit_password,
qpid_user => $rabbit_user,
qpid_nodes => [$internal_virtual_ip],
quantum => $quantum,
quantum_user_password => $quantum_user_password,
quantum_db_password => $quantum_db_password,
quantum_db_user => $quantum_db_user,
quantum_db_dbname => $quantum_db_dbname,
quantum_netnode_on_cnt=> false,
quantum_network_node => true,
tenant_network_type => $tenant_network_type,
segment_range => $segment_range,
external_ipinfo => $external_ipinfo,
api_bind_address => $internal_address,
use_syslog => $use_syslog,
syslog_log_level => $syslog_log_level,
syslog_log_facility_quantum => $syslog_log_facility_quantum,
}
class { 'openstack::auth_file':
admin_password => $admin_password,
keystone_admin_token => $keystone_admin_token,
controller_node => $internal_virtual_ip,
before => Class['openstack::quantum_router'],
}
}
}

View File

@@ -1,929 +0,0 @@
#
# Parameter values in this file should be changed, taking into consideration your
# networking setup and desired OpenStack settings.
#
# Please consult with the latest Fuel User Guide before making edits.
#
### GENERAL CONFIG ###
# This section sets main parameters such as hostnames and IP addresses of different nodes
# This is the name of the public interface. The public network provides address space for Floating IPs, as well as public IP accessibility to the API endpoints.
$public_interface = 'eth1'
$public_br = 'br-ex'
# This is the name of the internal interface. It will be attached to the management network, where data exchange between components of the OpenStack cluster will happen.
$internal_interface = 'eth0'
$internal_br = 'br-mgmt'
# This is the name of the private interface. All traffic within OpenStack tenants' networks will go through this interface.
$private_interface = 'eth2'
# Public and Internal VIPs. These virtual addresses are required by HA topology and will be managed by keepalived.
$internal_virtual_ip = '10.0.0.253'
# Change this IP to IP routable from your 'public' network,
# e. g. Internet or your office LAN, in which your public
# interface resides
$public_virtual_ip = '10.0.204.253'
case $::operatingsystem {
'redhat' : {
$queue_provider = 'qpid'
$custom_mysql_setup_class = 'pacemaker_mysql'
}
default: {
$queue_provider='rabbitmq'
$custom_mysql_setup_class='galera'
}
}
$nodes_harr = [
{
'name' => 'master',
'role' => 'master',
'internal_address' => '10.0.0.101',
'public_address' => '10.0.204.101',
'mountpoints'=> "1 1\n2 1",
'storage_local_net_ip' => '10.0.0.101',
},
{
'name' => 'fuel-cobbler',
'role' => 'cobbler',
'internal_address' => '10.0.0.102',
'public_address' => '10.0.204.102',
'mountpoints'=> "1 1\n2 1",
'storage_local_net_ip' => '10.0.0.102',
},
{
'name' => 'fuel-controller-01',
'role' => 'primary-controller',
'internal_address' => '10.0.0.103',
'public_address' => '10.0.204.103',
'swift_zone' => 1,
'mountpoints'=> "1 1\n2 1",
'storage_local_net_ip' => '10.0.0.103',
},
{
'name' => 'fuel-controller-02',
'role' => 'controller',
'internal_address' => '10.0.0.104',
'public_address' => '10.0.204.104',
'swift_zone' => 2,
'mountpoints'=> "1 2\n 2 1",
'storage_local_net_ip' => '10.0.0.110',
},
{
'name' => 'fuel-controller-03',
'role' => 'controller',
'internal_address' => '10.0.0.105',
'public_address' => '10.0.204.105',
'swift_zone' => 3,
'mountpoints'=> "1 2\n 2 1",
'storage_local_net_ip' => '10.0.0.110',
},
{
'name' => 'fuel-compute-01',
'role' => 'compute',
'internal_address' => '10.0.0.106',
'public_address' => '10.0.204.106',
},
{
'name' => 'fuel-compute-02',
'role' => 'compute',
'internal_address' => '10.0.0.107',
'public_address' => '10.0.204.107',
},
]
$nodes = $nodes_harr
$default_gateway = '10.0.204.1'
# Specify nameservers here.
# Should point to the cobbler node IP, or to specially prepared nameservers if you know what you are doing.
$dns_nameservers = ['10.0.204.1','8.8.8.8']
# Specify netmasks for internal and external networks.
$internal_netmask = '255.255.255.0'
$public_netmask = '255.255.255.0'
$node = filter_nodes($nodes,'name',$::hostname)
if empty($node) {
fail("Node $::hostname is not defined in the hash structure")
}
$internal_address = $node[0]['internal_address']
$public_address = $node[0]['public_address']
$controllers = merge_arrays(filter_nodes($nodes,'role','primary-controller'), filter_nodes($nodes,'role','controller'))
$controller_internal_addresses = nodes_to_hash($controllers,'name','internal_address')
$controller_public_addresses = nodes_to_hash($controllers,'name','public_address')
$controller_hostnames = sort(keys($controller_internal_addresses))
$controller_internal_ipaddresses = sort(values($controller_internal_addresses))
#Set this to anything other than pacemaker if you do not want Quantum HA
#Also, if you do not want Quantum HA, you MUST enable $quantum_network_node
#on the ONLY controller
$ha_provider = 'pacemaker'
$use_unicast_corosync = true
$nagios = false
# Set nagios master fqdn
$nagios_master = 'nagios-server.localdomain'
## proj_name is the name of the environment used for the nagios configuration
$proj_name = 'test'
#Specify if your installation contains multiple Nova controllers. Defaults to true as it is the most common scenario.
$multi_host = true
# Specify different DB credentials for various services
# HA DB provided through pacemaker_mysql or galera
$mysql_root_password = 'nova'
$admin_email = 'openstack@openstack.org'
$admin_password = 'nova'
validate_re($custom_mysql_setup_class,'galera|pacemaker_mysql')
$keystone_db_password = 'nova'
$keystone_admin_token = 'nova'
$glance_db_password = 'nova'
$glance_user_password = 'nova'
$nova_db_password = 'nova'
$nova_user_password = 'nova'
#AMQP backend rabbitmq or qpid
$queue_provider = 'qpid'
validate_re($queue_provider, 'rabbitmq|qpid')
$rabbit_password = 'nova'
$rabbit_user = 'nova'
$swift_user_password = 'swift_pass'
$swift_shared_secret = 'changeme'
$quantum_user_password = 'quantum_pass'
$quantum_db_password = 'quantum_pass'
$quantum_db_user = 'quantum'
$quantum_db_dbname = 'quantum'
# End DB credentials section
### GENERAL CONFIG END ###
### NETWORK/QUANTUM ###
# Specify network/quantum specific settings
# Should we use quantum or nova-network(deprecated).
# Consult OpenStack documentation for differences between them.
$quantum = true
$quantum_netnode_on_cnt = true
$quantum_use_namespaces = true
# a string "password" value that should be configured to authenticate requests for metadata
# from quantum-metadata-proxy to nova-api
$quantum_metadata_proxy_shared_secret = "connecting_nova-api_and_quantum-metadata-agent"
# Specify network creation criteria:
# Should puppet automatically create networks?
$create_networks = true
# Fixed IP addresses are typically used for communication between VM instances.
$fixed_range = '10.0.198.128/27'
# Floating IP addresses are used for communication of VM instances with the outside world (e.g. Internet).
$floating_range = '10.0.204.128/28'
# These parameters are passed to the previously specified network manager, e.g. nova-manage network create.
# Not used in Quantum.
# Consult openstack docs for corresponding network manager.
# https://fuel-dev.mirantis.com/docs/0.2/pages/0050-installation-instructions.html#network-setup
$num_networks = 1
$network_size = 31
$vlan_start = 300
# Quantum
# Segmentation type for isolating traffic between tenants
# Consult Openstack Quantum docs
$tenant_network_type = 'gre'
# Which IP address will be used for creating GRE tunnels.
$quantum_gre_bind_addr = $internal_address
# If $external_ipinfo option is not defined, the addresses will be allocated automatically from $floating_range:
# the first address will be defined as an external default router,
# the second address will be attached to an uplink bridge interface,
# the remaining addresses will be utilized for the floating IP address pool.
$external_ipinfo = {}
## $external_ipinfo = {
## 'public_net_router' => '10.0.74.129',
## 'ext_bridge' => '10.0.74.130',
## 'pool_start' => '10.0.74.131',
## 'pool_end' => '10.0.74.142',
## }
# Quantum segmentation range.
# For VLAN networks: valid VLAN VIDs can be 1 through 4094.
# For GRE networks: Valid tunnel IDs can be any 32-bit unsigned integer.
$segment_range = '900:999'
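# For VLAN segmentation this would instead be a VID range within 1-4094,
# e.g. (hypothetical):
## $segment_range = '300:500'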
# Set up OpenStack network manager. It is used ONLY in nova-network.
# Consult Openstack nova-network docs for possible values.
$network_manager = 'nova.network.manager.FlatDHCPManager'
# Assign floating IPs to VMs on startup automatically?
$auto_assign_floating_ip = false
# Database connection for Quantum configuration (quantum.conf)
#todo: check passing following line to quantum::*
$quantum_sql_connection = "mysql://${quantum_db_user}:${quantum_db_password}@${$internal_virtual_ip}/${quantum_db_dbname}"
if $quantum {
$public_int = $public_br
$internal_int = $internal_br
} else {
$public_int = $public_interface
$internal_int = $internal_interface
}
$vips = { # Do not convert to an ARRAY; it can't work in 2.7
public_old => {
nic => $public_int,
ip => $public_virtual_ip,
},
management_old => {
nic => $internal_int,
ip => $internal_virtual_ip,
},
}
#Stages configuration
stage {'first': } ->
stage {'openstack-custom-repo': } ->
stage {'netconfig': } ->
stage {'corosync_setup': } ->
stage {'cluster_head': } ->
stage {'openstack-firewall': } -> Stage['main']
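# Any class can then be pinned to one of these stages via the stage metaparameter,
# e.g. (hypothetical class name):
## class { 'some::early::setup': stage => 'first' }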
#Network configuration
class {'l23network': use_ovs=>$quantum, stage=> 'netconfig'}
class node_netconfig (
$mgmt_ipaddr,
$mgmt_netmask = '255.255.255.0',
$public_ipaddr = undef,
$public_netmask= '255.255.255.0',
$save_default_gateway=false,
$quantum = $quantum,
) {
if $quantum {
l23network::l3::create_br_iface {'mgmt':
interface => $internal_interface, # !!! NO $internal_int /sv !!!
bridge => $internal_br,
ipaddr => $mgmt_ipaddr,
netmask => $mgmt_netmask,
dns_nameservers => $dns_nameservers,
save_default_gateway => $save_default_gateway,
} ->
l23network::l3::create_br_iface {'ex':
interface => $public_interface, # !! NO $public_int /sv !!!
bridge => $public_br,
ipaddr => $public_ipaddr,
netmask => $public_netmask,
gateway => $default_gateway,
}
} else {
# nova-network mode
l23network::l3::ifconfig {$public_int:
ipaddr => $public_ipaddr,
netmask => $public_netmask,
gateway => $default_gateway,
}
l23network::l3::ifconfig {$internal_int:
ipaddr => $mgmt_ipaddr,
netmask => $mgmt_netmask,
dns_nameservers => $dns_nameservers,
}
}
l23network::l3::ifconfig {$private_interface: ipaddr=>'none' }
}
### NETWORK/QUANTUM END ###
# This parameter specifies the identifier of the current cluster. It is needed in the case of a
# multiple-environment installation. Each cluster requires a unique integer value.
# Valid identifier range is 1 to 254
$deployment_id = '79'
# Below you can enable or disable various services based on the chosen deployment topology:
### CINDER/VOLUME ###
# Should we use cinder or nova-volume(obsolete)
# Consult openstack docs for differences between them
$cinder = true
# Choose which nodes to install cinder onto
# 'compute' -> compute nodes will run cinder
# 'controller' -> controller nodes will run cinder
# 'storage' -> storage nodes will run cinder
# 'fuel-controller-XX' -> specify particular host(s) by hostname
# 'XXX.XXX.XXX.XXX' -> specify particular host(s) by IP address
# 'all' -> compute, controller, and storage nodes will run cinder (excluding swift and proxy nodes)
$cinder_nodes = ['controller']
#Set it to true if you want cinder-volume to be installed on the host.
#Otherwise only the api and scheduler services will be installed.
$manage_volumes = true
# Set up the network address which Cinder uses to export iSCSI targets.
$cinder_iscsi_bind_addr = $internal_address
# Below you can add physical volumes to cinder. Please replace values with the actual names of devices.
# This parameter defines which partitions to aggregate into cinder-volumes or nova-volumes LVM VG
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# USE EXTREME CAUTION WITH THIS SETTING! IF THIS PARAMETER IS DEFINED,
# IT WILL AGGREGATE THE VOLUMES INTO AN LVM VOLUME GROUP
# AND ALL THE DATA THAT RESIDES ON THESE VOLUMES WILL BE LOST!
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# Leave this parameter empty if you want to create [cinder|nova]-volumes VG by yourself
$nv_physical_volume = ['/dev/sdz', '/dev/sdy', '/dev/sdx']
#Evaluate cinder node selection
if ($cinder) {
if (member($cinder_nodes,'all')) {
$is_cinder_node = true
} elsif (member($cinder_nodes,$::hostname)) {
$is_cinder_node = true
} elsif (member($cinder_nodes,$internal_address)) {
$is_cinder_node = true
} elsif ($node[0]['role'] =~ /controller/ ) {
$is_cinder_node = member($cinder_nodes,'controller')
} else {
$is_cinder_node = member($cinder_nodes,$node[0]['role'])
}
} else {
$is_cinder_node = false
}
### CINDER/VOLUME END ###
### GLANCE and SWIFT ###
# Which backend to use for glance
# Supported backends are "swift" and "file"
$glance_backend = 'swift'
# Use loopback device for swift:
# set 'loopback' or false
# This parameter controls where swift partitions are located:
# on physical partitions or inside loopback devices.
$swift_loopback = 'loopback'
# Which IP address to bind swift components to: e.g., which IP swift-proxy should listen on
$swift_local_net_ip = $internal_address
# IP of the controller node used during swift installation
# and put into the swift configs
$controller_node_public = $internal_virtual_ip
# Hash of proxies hostname|fqdn => ip mappings.
# This is used by controller_ha.pp manifests for haproxy setup
# of swift_proxy backends
$swift_proxies = $controller_internal_addresses
# Set the hostname of the swift master.
# It tells which swift proxy node builds the
# *ring.gz files. The other swift proxies/storages
# will rsync them.
if $node[0]['role'] == 'primary-controller' {
$primary_proxy = true
} else {
$primary_proxy = false
}
if $node[0]['role'] == 'primary-controller' {
$primary_controller = true
} else {
$primary_controller = false
}
$master_swift_proxy_nodes = filter_nodes($nodes,'role','primary-controller')
$master_swift_proxy_ip = $master_swift_proxy_nodes[0]['internal_address']
### Glance and swift END ###
# This parameter specifies the verbosity level of log messages
# in openstack components config.
# Debug sets the DEBUG level and ignores the verbose setting, if any.
# Verbose sets INFO level messages.
# If neither debug nor verbose is set, the default WARNING level is used.
# Note: if syslog is on, this default level may be configured (for syslog) with the syslog_log_level option.
$verbose = true
$debug = false
### Syslog ###
# Enable error messages reporting to rsyslog. Rsyslog must be installed in this case.
$use_syslog = true
# Default log level to be used if neither verbose nor debug is set
$syslog_log_level = 'ERROR'
# Syslog facilities for main openstack services, choose any, may overlap if needed
# local0 is reserved for HA provisioning and orchestration services,
# local1 is reserved for openstack-dashboard
$syslog_log_facility_glance = 'LOCAL2'
$syslog_log_facility_cinder = 'LOCAL3'
$syslog_log_facility_quantum = 'LOCAL4'
$syslog_log_facility_nova = 'LOCAL6'
$syslog_log_facility_keystone = 'LOCAL7'
if $use_syslog {
class { "::openstack::logging":
stage => 'first',
role => 'client',
# use date-rfc3339 timestamps
show_timezone => true,
# log both locally (including auth) and remotely
log_remote => true,
log_local => true,
log_auth_local => true,
# keep four weekly log rotations, force rotate if the 300M size is exceeded
rotation => 'weekly',
keep => '4',
# should be > 30M
limitsize => '300M',
# remote servers to send logs to
rservers => [{'remote_type'=>'udp', 'server'=>'master', 'port'=>'514'},],
# should be true if the client is running on a virtual node
virtual => true,
# facilities
syslog_log_facility_glance => $syslog_log_facility_glance,
syslog_log_facility_cinder => $syslog_log_facility_cinder,
syslog_log_facility_quantum => $syslog_log_facility_quantum,
syslog_log_facility_nova => $syslog_log_facility_nova,
syslog_log_facility_keystone => $syslog_log_facility_keystone,
# Rabbit doesn't support syslog directly; this should be >= syslog_log_level,
# otherwise none of rabbit's messages will go to syslog
rabbit_log_level => $syslog_log_level,
debug => $debug,
}
}
# Example for server role class definition for remote logging node:
# class {::openstack::logging:
# role => 'server',
# log_remote => false,
# log_local => true,
# log_auth_local => true,
# rotation => 'daily',
# keep => '7',
# limitsize => '100M',
# port => '514',
# proto => 'udp',
# #high precision timestamps
# show_timezone => true,
# #should be true if the server is running on a virtual node
# #virtual => false,
# }
### Syslog END ###
case $::osfamily {
"Debian": {
$rabbitmq_version_string = '2.8.7-1'
}
"RedHat": {
$rabbitmq_version_string = '2.8.7-2.el6'
}
}
#
# OpenStack packages and customized component versions to be installed.
# Use 'latest' to get the most recent ones or specify exact version if you need to install custom version.
$openstack_version = {
'keystone' => 'latest',
'glance' => 'latest',
'horizon' => 'latest',
'nova' => 'latest',
'novncproxy' => 'latest',
'cinder' => 'latest',
'rabbitmq_version' => $rabbitmq_version_string,
}
# Which package repo mirror to use. Currently "default".
# "custom" is used by Mirantis for testing purposes.
# Local puppet-managed repo option planned for future releases.
# If you want to set up a local repository, you will need to manually adjust mirantis_repos.pp,
# though it is NOT recommended.
$mirror_type = 'default'
$enable_test_repo = false
$repo_proxy = undef
#Rate Limits for cinder and Nova
#Cinder and Nova can rate-limit your requests to API services.
#These limits can be reduced for your installation or usage scenario.
#Change the following variables if you want. They are measured in requests per minute.
$nova_rate_limits = {
'POST' => 1000,
'POST_SERVERS' => 1000,
'PUT' => 1000, 'GET' => 1000,
'DELETE' => 1000
}
$cinder_rate_limits = {
'POST' => 1000,
'POST_SERVERS' => 1000,
'PUT' => 1000, 'GET' => 1000,
'DELETE' => 1000
}
Exec { logoutput => true }
#Specify desired NTP servers here.
#If you leave it undef, pool.ntp.org
#will be used
$ntp_servers = ['pool.ntp.org']
class {'openstack::clocksync': ntp_servers=>$ntp_servers}
#Exec clocksync from openstack::clocksync before services
#connecting to the AMQP server are started.
Exec<| title == 'clocksync' |>->Nova::Generic_service<| |>
Exec<| title == 'clocksync' |>->Service<| title == 'quantum-l3' |>
Exec<| title == 'clocksync' |>->Service<| title == 'quantum-dhcp-service' |>
Exec<| title == 'clocksync' |>->Service<| title == 'quantum-ovs-plugin-service' |>
Exec<| title == 'clocksync' |>->Service<| title == 'cinder-volume' |>
Exec<| title == 'clocksync' |>->Service<| title == 'cinder-api' |>
Exec<| title == 'clocksync' |>->Service<| title == 'cinder-scheduler' |>
Exec<| title == 'clocksync' |>->Exec<| title == 'keystone-manage db_sync' |>
Exec<| title == 'clocksync' |>->Exec<| title == 'glance-manage db_sync' |>
Exec<| title == 'clocksync' |>->Exec<| title == 'nova-manage db sync' |>
Exec<| title == 'clocksync' |>->Exec<| title == 'initial-db-sync' |>
Exec<| title == 'clocksync' |>->Exec<| title == 'post-nova_config' |>
### END OF PUBLIC CONFIGURATION PART ###
# Normally, you do not need to change anything below this line
# Globally apply an environment-based tag to all resources on each node.
tag("${::deployment_id}::${::environment}")
class { 'openstack::mirantis_repos':
stage => 'openstack-custom-repo',
type=>$mirror_type,
enable_test_repo=>$enable_test_repo,
repo_proxy=>$repo_proxy,
}
class { '::openstack::firewall':
stage => 'openstack-firewall'
}
if !defined(Class['selinux']) and ($::osfamily == 'RedHat') {
class { 'selinux':
mode=>"disabled",
stage=>"openstack-custom-repo"
}
}
if $::operatingsystem == 'Ubuntu' {
class { 'openstack::apparmor::disable': stage => 'openstack-custom-repo' }
}
sysctl::value { 'net.ipv4.conf.all.rp_filter': value => '0' }
# Dashboard(horizon) https/ssl mode
# false: normal mode with no encryption
# 'default': uses keys supplied with the ssl module package
# 'exist': assumes that the keys (domain name based certificate) are provisioned in advance
# 'custom': require fileserver static mount point [ssl_certs] and hostname based certificate existence
$horizon_use_ssl = false
# Class for calling corosync::virtual_ip in the specific stage
$vip_keys = keys($vips)
class virtual_ips () {
cluster::virtual_ips { $vip_keys:
vips => $vips,
}
}
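# With the $vips hash defined above this is roughly equivalent to declaring
# (hypothetical expansion; keys() ordering is not guaranteed):
## cluster::virtual_ips { ['public_old', 'management_old']: vips => $vips }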
class compact_controller (
$quantum_network_node = $quantum_netnode_on_cnt
) {
class { 'openstack::controller_ha':
controller_public_addresses => $controller_public_addresses,
controller_internal_addresses => $controller_internal_addresses,
internal_address => $internal_address,
public_interface => $public_int,
internal_interface => $internal_int,
private_interface => $private_interface,
internal_virtual_ip => $internal_virtual_ip,
public_virtual_ip => $public_virtual_ip,
primary_controller => $primary_controller,
floating_range => $floating_range,
fixed_range => $fixed_range,
multi_host => $multi_host,
network_manager => $network_manager,
num_networks => $num_networks,
network_size => $network_size,
network_config => { 'vlan_start' => $vlan_start },
verbose => $verbose,
debug => $debug,
auto_assign_floating_ip => $auto_assign_floating_ip,
mysql_root_password => $mysql_root_password,
admin_email => $admin_email,
admin_password => $admin_password,
keystone_db_password => $keystone_db_password,
keystone_admin_token => $keystone_admin_token,
glance_db_password => $glance_db_password,
glance_user_password => $glance_user_password,
nova_db_password => $nova_db_password,
nova_user_password => $nova_user_password,
queue_provider => $queue_provider,
rabbit_password => $rabbit_password,
rabbit_user => $rabbit_user,
rabbit_nodes => $controller_hostnames,
qpid_password => $rabbit_password,
qpid_user => $rabbit_user,
qpid_nodes => [$internal_virtual_ip],
memcached_servers => $controller_hostnames,
export_resources => false,
glance_backend => $glance_backend,
swift_proxies => $swift_proxies,
quantum => $quantum,
quantum_user_password => $quantum_user_password,
quantum_db_password => $quantum_db_password,
quantum_db_user => $quantum_db_user,
quantum_db_dbname => $quantum_db_dbname,
quantum_network_node => $quantum_network_node,
quantum_netnode_on_cnt => $quantum_netnode_on_cnt,
quantum_gre_bind_addr => $quantum_gre_bind_addr,
quantum_external_ipinfo => $external_ipinfo,
tenant_network_type => $tenant_network_type,
segment_range => $segment_range,
cinder => $cinder,
cinder_iscsi_bind_addr => $cinder_iscsi_bind_addr,
manage_volumes => $cinder ? { false => $manage_volumes, default =>$is_cinder_node },
galera_nodes => $controller_hostnames,
custom_mysql_setup_class => $custom_mysql_setup_class,
nv_physical_volume => $nv_physical_volume,
use_syslog => $use_syslog,
syslog_log_level => $syslog_log_level,
syslog_log_facility_glance => $syslog_log_facility_glance,
syslog_log_facility_cinder => $syslog_log_facility_cinder,
syslog_log_facility_quantum => $syslog_log_facility_quantum,
syslog_log_facility_nova => $syslog_log_facility_nova,
syslog_log_facility_keystone => $syslog_log_facility_keystone,
nova_rate_limits => $nova_rate_limits,
cinder_rate_limits => $cinder_rate_limits,
horizon_use_ssl => $horizon_use_ssl,
use_unicast_corosync => $use_unicast_corosync,
ha_provider => $ha_provider
}
class { 'swift::keystone::auth':
password => $swift_user_password,
public_address => $public_virtual_ip,
internal_address => $internal_virtual_ip,
admin_address => $internal_virtual_ip,
}
}
# Definition of OpenStack controller nodes.
node /fuel-controller-[\d+]/ {
include stdlib
class { 'operatingsystem::checksupported':
stage => 'first'
}
class {'::node_netconfig':
mgmt_ipaddr => $::internal_address,
mgmt_netmask => $::internal_netmask,
public_ipaddr => $::public_address,
public_netmask => $::public_netmask,
stage => 'netconfig',
}
if $nagios {
class {'nagios':
proj_name => $proj_name,
services => [
'host-alive','nova-novncproxy','keystone', 'nova-scheduler',
'nova-consoleauth', 'nova-cert', 'haproxy', 'nova-api', 'glance-api',
'glance-registry','horizon', 'rabbitmq', 'mysql', 'swift-proxy',
'swift-account', 'swift-container', 'swift-object',
],
whitelist => ['127.0.0.1', $nagios_master],
hostgroup => 'controller',
}
}
###
# cluster init
class { '::cluster': stage => 'corosync_setup' } ->
class { 'virtual_ips':
stage => 'corosync_setup'
}
include ::haproxy::params
class { 'cluster::haproxy':
global_options => merge($::haproxy::params::global_options, {'log' => "/dev/log local0"}),
defaults_options => merge($::haproxy::params::defaults_options, {'mode' => 'http'}),
stage => 'cluster_head',
}
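# merge() from puppetlabs-stdlib overlays the right-hand hash onto the left one,
# so the rightmost value wins for duplicate keys, e.g. (hypothetical values):
## merge({'log' => 'global'}, {'log' => '/dev/log local0'})  # => {'log' => '/dev/log local0'}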
#
###
class { compact_controller: }
$swift_zone = $node[0]['swift_zone']
class { 'openstack::swift::storage_node':
storage_type => $swift_loopback,
swift_zone => $swift_zone,
swift_local_net_ip => $swift_local_net_ip,
master_swift_proxy_ip => $master_swift_proxy_ip,
sync_rings => ! $primary_proxy,
#disable cinder on the storage node in order to avoid
#duplicate class calls with different parameters
cinder => false,
cinder_iscsi_bind_addr => $cinder_iscsi_bind_addr,
manage_volumes => false,
nv_physical_volume => $nv_physical_volume,
db_host => $internal_virtual_ip,
service_endpoint => $internal_virtual_ip,
cinder_rate_limits => $cinder_rate_limits,
queue_provider => $queue_provider,
rabbit_nodes => $controller_hostnames,
rabbit_password => $rabbit_password,
rabbit_user => $rabbit_user,
rabbit_ha_virtual_ip => $internal_virtual_ip,
debug => $debug,
verbose => $verbose,
syslog_log_level => $syslog_log_level,
syslog_log_facility_cinder => $syslog_log_facility_cinder,
qpid_nodes => [$internal_virtual_ip],
qpid_password => $rabbit_password,
qpid_user => $rabbit_user,
}
if $primary_proxy {
ring_devices {'all':
storages => $controllers
}
}
class { 'openstack::swift::proxy':
swift_user_password => $swift_user_password,
swift_proxies => $swift_proxies,
primary_proxy => $primary_proxy,
controller_node_address => $internal_virtual_ip,
swift_local_net_ip => $swift_local_net_ip,
master_swift_proxy_ip => $master_swift_proxy_ip,
debug => $debug,
verbose => $verbose,
syslog_log_level => $syslog_log_level,
}
Class ['openstack::swift::proxy'] -> Class['openstack::swift::storage_node']
}
# Definition of OpenStack compute nodes.
node /fuel-compute-[\d+]/ {
## Uncomment the lines below if you want
## to configure the network of these nodes
## with puppet.
class {'::node_netconfig':
mgmt_ipaddr => $::internal_address,
mgmt_netmask => $::internal_netmask,
public_ipaddr => $::public_address,
public_netmask => $::public_netmask,
stage => 'netconfig',
}
include stdlib
class { 'operatingsystem::checksupported':
stage => 'first'
}
if $nagios {
class {'nagios':
proj_name => $proj_name,
services => [
'host-alive', 'nova-compute','nova-network','libvirt'
],
whitelist => ['127.0.0.1', $nagios_master],
hostgroup => 'compute',
}
}
class { 'openstack::compute':
public_interface => $public_int,
private_interface => $private_interface,
internal_address => $internal_address,
libvirt_type => 'kvm',
fixed_range => $fixed_range,
network_manager => $network_manager,
network_config => { 'vlan_start' => $vlan_start },
multi_host => $multi_host,
auto_assign_floating_ip => $auto_assign_floating_ip,
sql_connection => "mysql://nova:${nova_db_password}@${internal_virtual_ip}/nova",
queue_provider => $queue_provider,
rabbit_nodes => $controller_hostnames,
rabbit_password => $rabbit_password,
rabbit_user => $rabbit_user,
rabbit_ha_virtual_ip => $internal_virtual_ip,
qpid_nodes => [$internal_virtual_ip],
qpid_password => $rabbit_password,
qpid_user => $rabbit_user,
glance_api_servers => "${internal_virtual_ip}:9292",
vncproxy_host => $public_virtual_ip,
verbose => $verbose,
debug => $debug,
vnc_enabled => true,
nova_user_password => $nova_user_password,
cache_server_ip => $controller_hostnames,
service_endpoint => $internal_virtual_ip,
quantum => $quantum,
quantum_sql_connection => $quantum_sql_connection,
quantum_user_password => $quantum_user_password,
quantum_host => $internal_virtual_ip,
tenant_network_type => $tenant_network_type,
segment_range => $segment_range,
cinder => $cinder,
cinder_iscsi_bind_addr => $cinder_iscsi_bind_addr,
manage_volumes => $cinder ? { false => $manage_volumes, default =>$is_cinder_node },
nv_physical_volume => $nv_physical_volume,
db_host => $internal_virtual_ip,
cinder_rate_limits => $cinder_rate_limits,
use_syslog => $use_syslog,
syslog_log_level => $syslog_log_level,
syslog_log_facility => $syslog_log_facility_nova,
syslog_log_facility_quantum => $syslog_log_facility_quantum,
syslog_log_facility_cinder => $syslog_log_facility_cinder,
nova_rate_limits => $nova_rate_limits,
}
}
# Definition of OpenStack Quantum node.
node /fuel-quantum/ {
include stdlib
class { 'operatingsystem::checksupported':
stage => 'first'
}
class {'::node_netconfig':
mgmt_ipaddr => $::internal_address,
mgmt_netmask => $::internal_netmask,
public_ipaddr => 'none',
save_default_gateway => true,
stage => 'netconfig',
}
if ! $quantum_netnode_on_cnt {
class { 'openstack::quantum_router':
db_host => $internal_virtual_ip,
service_endpoint => $internal_virtual_ip,
auth_host => $internal_virtual_ip,
nova_api_vip => $internal_virtual_ip,
internal_address => $internal_address,
public_interface => $public_int,
private_interface => $private_interface,
floating_range => $floating_range,
fixed_range => $fixed_range,
create_networks => $create_networks,
verbose => $verbose,
debug => $debug,
queue_provider => $queue_provider,
rabbit_password => $rabbit_password,
rabbit_user => $rabbit_user,
rabbit_nodes => $controller_hostnames,
rabbit_ha_virtual_ip => $internal_virtual_ip,
qpid_nodes => [$internal_virtual_ip],
qpid_password => $rabbit_password,
qpid_user => $rabbit_user,
quantum => $quantum,
quantum_user_password => $quantum_user_password,
quantum_db_password => $quantum_db_password,
quantum_db_user => $quantum_db_user,
quantum_db_dbname => $quantum_db_dbname,
quantum_netnode_on_cnt=> false,
quantum_network_node => true,
tenant_network_type => $tenant_network_type,
segment_range => $segment_range,
external_ipinfo => $external_ipinfo,
api_bind_address => $internal_address,
use_syslog => $use_syslog,
syslog_log_level => $syslog_log_level,
syslog_log_facility_quantum => $syslog_log_facility_quantum,
}
class { 'openstack::auth_file':
admin_password => $admin_password,
keystone_admin_token => $keystone_admin_token,
controller_node => $internal_virtual_ip,
before => Class['openstack::quantum_router'],
}
}
}

File diff suppressed because it is too large

View File

@@ -1,833 +0,0 @@
#
# Parameter values in this file should be changed, taking into consideration your
# networking setup and desired OpenStack settings.
#
# Please consult with the latest Fuel User Guide before making edits.
#
### GENERAL CONFIG ###
# This section sets main parameters such as hostnames and IP addresses of different nodes
# This is the name of the public interface. The public network provides address space for Floating IPs, as well as public IP accessibility to the API endpoints.
$public_interface = 'eth1'
$public_br = 'br-ex'
# This is the name of the internal interface. It will be attached to the management network, where data exchange between components of the OpenStack cluster will happen.
$internal_interface = 'eth0'
$internal_br = 'br-mgmt'
# This is the name of the private interface. All traffic within OpenStack tenants' networks will go through this interface.
$private_interface = 'eth2'
# Public and Internal VIPs. These virtual addresses are required by HA topology and will be managed by keepalived.
$internal_virtual_ip = '10.0.0.253'
# Change this IP to IP routable from your 'public' network,
# e. g. Internet or your office LAN, in which your public
# interface resides
$public_virtual_ip = '10.0.204.253'
case $::operatingsystem {
'redhat' : {
$queue_provider = 'qpid'
$custom_mysql_setup_class = 'pacemaker_mysql'
}
default: {
$queue_provider='rabbitmq'
$custom_mysql_setup_class='galera'
}
}
$nodes_harr = [
{
'name' => 'master',
'role' => 'master',
'internal_address' => '10.0.0.101',
'public_address' => '10.0.204.101',
},
{
'name' => 'fuel-cobbler',
'role' => 'cobbler',
'internal_address' => '10.0.0.102',
'public_address' => '10.0.204.102',
},
{
'name' => 'fuel-controller-01',
'role' => 'primary-controller',
'internal_address' => '10.0.0.103',
'public_address' => '10.0.204.103',
},
{
'name' => 'fuel-controller-02',
'role' => 'controller',
'internal_address' => '10.0.0.104',
'public_address' => '10.0.204.104',
},
{
'name' => 'fuel-controller-03',
'role' => 'controller',
'internal_address' => '10.0.0.105',
'public_address' => '10.0.204.105',
},
{
'name' => 'fuel-compute-01',
'role' => 'compute',
'internal_address' => '10.0.0.106',
'public_address' => '10.0.204.106',
},
{
'name' => 'fuel-compute-02',
'role' => 'compute',
'internal_address' => '10.0.0.107',
'public_address' => '10.0.204.107',
},
{
'name' => 'fuel-compute-03',
'role' => 'compute',
'internal_address' => '10.0.0.108',
'public_address' => '10.0.204.108',
},]
$nodes = $nodes_harr
$default_gateway = '10.0.204.1'
# Specify nameservers here.
# Should point to the cobbler node IP, or to specially prepared nameservers if you know what you are doing.
$dns_nameservers = ['10.0.204.1','8.8.8.8']
# Specify netmasks for internal and external networks.
$internal_netmask = '255.255.255.0'
$public_netmask = '255.255.255.0'
$node = filter_nodes($nodes,'name',$::hostname)
if empty($node) {
fail("Node $::hostname is not defined in the hash structure")
}
$internal_address = $node[0]['internal_address']
$public_address = $node[0]['public_address']
$controllers = merge_arrays(filter_nodes($nodes,'role','primary-controller'), filter_nodes($nodes,'role','controller'))
$controller_internal_addresses = nodes_to_hash($controllers,'name','internal_address')
$controller_public_addresses = nodes_to_hash($controllers,'name','public_address')
$controller_hostnames = sort(keys($controller_internal_addresses))
$controller_internal_ipaddresses = sort(values($controller_internal_addresses))
#Set this to anything other than pacemaker if you do not want Quantum HA
#Also, if you do not want Quantum HA, you MUST enable $quantum_network_node
#on the ONLY controller
$ha_provider = 'pacemaker'
$use_unicast_corosync = true
$nagios = false
# Set nagios master fqdn
$nagios_master = 'nagios-server.localdomain'
## proj_name is the name of the environment used for the nagios configuration
$proj_name = 'test'
#Specify if your installation contains multiple Nova controllers. Defaults to true as it is the most common scenario.
$multi_host = true
# Specify different DB credentials for various services
# HA DB provided through pacemaker_mysql or galera
$mysql_root_password = 'nova'
$admin_email = 'openstack@openstack.org'
$admin_password = 'nova'
validate_re($custom_mysql_setup_class,'galera|pacemaker_mysql')
$keystone_db_password = 'nova'
$keystone_admin_token = 'nova'
$glance_db_password = 'nova'
$glance_user_password = 'nova'
$nova_db_password = 'nova'
$nova_user_password = 'nova'
#AMQP backend rabbitmq or qpid
validate_re($queue_provider, 'rabbitmq|qpid')
$rabbit_password = 'nova'
$rabbit_user = 'nova'
$quantum_user_password = 'quantum_pass'
$quantum_db_password = 'quantum_pass'
$quantum_db_user = 'quantum'
$quantum_db_dbname = 'quantum'
# End DB credentials section
### GENERAL CONFIG END ###
### NETWORK/QUANTUM ###
# Specify network/quantum specific settings
# Should we use quantum or nova-network(deprecated).
# Consult OpenStack documentation for differences between them.
$quantum = true
$quantum_netnode_on_cnt = true
$quantum_use_namespaces = true
# a string "password" value that should be configured to authenticate requests for metadata
# from quantum-metadata-proxy to nova-api
$quantum_metadata_proxy_shared_secret = "connecting_nova-api_and_quantum-metadata-agent"
# Specify network creation criteria:
# Should puppet automatically create networks?
$create_networks = true
# Fixed IP addresses are typically used for communication between VM instances.
$fixed_range = '10.0.198.128/27'
# Floating IP addresses are used for communication of VM instances with the outside world (e.g. Internet).
$floating_range = '10.0.204.128/28'
# These parameters are passed to the previously specified network manager, e.g. nova-manage network create.
# Not used in Quantum.
# Consult openstack docs for corresponding network manager.
# https://fuel-dev.mirantis.com/docs/0.2/pages/0050-installation-instructions.html#network-setup
$num_networks = 1
$network_size = 31
$vlan_start = 300
# Quantum
# Segmentation type for isolating traffic between tenants
# Consult Openstack Quantum docs
$tenant_network_type = 'gre'
# Which IP address will be used for creating GRE tunnels.
$quantum_gre_bind_addr = $internal_address
# If $external_ipinfo option is not defined, the addresses will be allocated automatically from $floating_range:
# the first address will be defined as an external default router,
# the second address will be attached to an uplink bridge interface,
# the remaining addresses will be utilized for the floating IP address pool.
$external_ipinfo = {}
## $external_ipinfo = {
## 'public_net_router' => '10.0.74.129',
## 'ext_bridge' => '10.0.74.130',
## 'pool_start' => '10.0.74.131',
## 'pool_end' => '10.0.74.142',
## }
# Quantum segmentation range.
# For VLAN networks: valid VLAN VIDs can be 1 through 4094.
# For GRE networks: Valid tunnel IDs can be any 32-bit unsigned integer.
$segment_range = '900:999'
# Set up OpenStack network manager. It is used ONLY in nova-network.
# Consult Openstack nova-network docs for possible values.
$network_manager = 'nova.network.manager.FlatDHCPManager'
# Assign floating IPs to VMs on startup automatically?
$auto_assign_floating_ip = false
# Database connection for Quantum configuration (quantum.conf)
#todo: check passing following line to quantum::*
$quantum_sql_connection = "mysql://${quantum_db_user}:${quantum_db_password}@${$internal_virtual_ip}/${quantum_db_dbname}"
if $quantum {
$public_int = $public_br
$internal_int = $internal_br
} else {
$public_int = $public_interface
$internal_int = $internal_interface
}
$vips = { # Do not convert to an ARRAY; it can't work in 2.7
public_old => {
nic => $public_int,
ip => $public_virtual_ip,
},
management_old => {
nic => $internal_int,
ip => $internal_virtual_ip,
},
}
#Stages configuration
stage {'first': } ->
stage {'openstack-custom-repo': } ->
stage {'netconfig': } ->
stage {'corosync_setup': } ->
stage {'cluster_head': } ->
stage {'openstack-firewall': } -> Stage['main']
#Network configuration
class {'l23network': use_ovs=>$quantum, stage=> 'netconfig'}
class node_netconfig (
$mgmt_ipaddr,
$mgmt_netmask = '255.255.255.0',
$public_ipaddr = undef,
$public_netmask= '255.255.255.0',
$save_default_gateway=false,
$quantum = $quantum,
) {
if $quantum {
l23network::l3::create_br_iface {'mgmt':
interface => $internal_interface, # !!! NO $internal_int /sv !!!
bridge => $internal_br,
ipaddr => $mgmt_ipaddr,
netmask => $mgmt_netmask,
dns_nameservers => $dns_nameservers,
save_default_gateway => $save_default_gateway,
} ->
l23network::l3::create_br_iface {'ex':
interface => $public_interface, # !! NO $public_int /sv !!!
bridge => $public_br,
ipaddr => $public_ipaddr,
netmask => $public_netmask,
gateway => $default_gateway,
}
} else {
# nova-network mode
l23network::l3::ifconfig {$public_int:
ipaddr => $public_ipaddr,
netmask => $public_netmask,
gateway => $default_gateway,
}
l23network::l3::ifconfig {$internal_int:
ipaddr => $mgmt_ipaddr,
netmask => $mgmt_netmask,
dns_nameservers => $dns_nameservers,
}
}
l23network::l3::ifconfig {$private_interface: ipaddr=>'none' }
}
### NETWORK/QUANTUM END ###
# This parameter specifies the identifier of the current cluster. It is needed in the case of a
# multiple-environment installation. Each cluster requires a unique integer value.
# Valid identifier range is 1 to 254
$deployment_id = '89'
# Below you can enable or disable various services based on the chosen deployment topology:
### CINDER/VOLUME ###
# Should we use cinder or nova-volume(obsolete)
# Consult openstack docs for differences between them
$cinder = true
# Choose which nodes to install cinder onto
# 'compute' -> compute nodes will run cinder
# 'controller' -> controller nodes will run cinder
# 'storage' -> storage nodes will run cinder
# 'fuel-controller-XX' -> specify particular host(s) by hostname
# 'XXX.XXX.XXX.XXX' -> specify particular host(s) by IP address
# 'all' -> compute, controller, and storage nodes will run cinder (excluding swift and proxy nodes)
$cinder_nodes = ['controller']
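# For example, cinder can also be pinned to particular hosts by hostname or IP
# (the hostname and IP below are illustrative only):
# $cinder_nodes = ['fuel-controller-01', '10.0.0.106']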
#Set it to true if you want cinder-volume to be installed on the host
#Otherwise only the api and scheduler services will be installed
$manage_volumes = true
# Set up the network address which Cinder uses to export iSCSI targets.
$cinder_iscsi_bind_addr = $internal_address
# Below you can add physical volumes to cinder. Please replace values with the actual names of devices.
# This parameter defines which partitions to aggregate into cinder-volumes or nova-volumes LVM VG
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# USE EXTREME CAUTION WITH THIS SETTING! IF THIS PARAMETER IS DEFINED,
# IT WILL AGGREGATE THE VOLUMES INTO AN LVM VOLUME GROUP
# AND ALL THE DATA THAT RESIDES ON THESE VOLUMES WILL BE LOST!
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# Leave this parameter empty if you want to create [cinder|nova]-volumes VG by yourself
$nv_physical_volume = ['/dev/sdz', '/dev/sdy', '/dev/sdx']
#Evaluate cinder node selection
if ($cinder) {
if (member($cinder_nodes,'all')) {
$is_cinder_node = true
} elsif (member($cinder_nodes,$::hostname)) {
$is_cinder_node = true
} elsif (member($cinder_nodes,$internal_address)) {
$is_cinder_node = true
} elsif ($node[0]['role'] =~ /controller/ ) {
$is_cinder_node = member($cinder_nodes,'controller')
} else {
$is_cinder_node = member($cinder_nodes,$node[0]['role'])
}
} else {
$is_cinder_node = false
}
### CINDER/VOLUME END ###
### GLANCE and SWIFT ###
# Which backend to use for glance
# Supported backends are "swift" and "file"
$glance_backend = 'file'
# Use loopback device for swift:
# set 'loopback' or false
# This parameter controls where swift partitions are located:
# on physical partitions or inside loopback devices.
$swift_loopback = false
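# For example, to keep swift partitions inside loopback files instead of physical partitions:
# $swift_loopback = 'loopback'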
### Glance and swift END ###
if $node[0]['role'] == 'primary-controller' {
$primary_controller = true
} else {
$primary_controller = false
}
# This parameter specifies the verbosity level of log messages
# in openstack components config.
# Debug sets the DEBUG level and ignores the verbose setting, if any.
# Verbose sets the INFO level for messages.
# If neither debug nor verbose is set, the default WARNING level is used.
# Note: if syslog is on, this default level may be configured (for syslog) with the syslog_log_level option.
$verbose = true
$debug = false
### Syslog ###
# Enable error message reporting to rsyslog. Rsyslog must be installed in this case.
$use_syslog = true
# Default log level used if neither verbose nor debug is set
$syslog_log_level = 'ERROR'
# Syslog facilities for main openstack services, choose any, may overlap if needed
# local0 is reserved for HA provisioning and orchestration services,
# local1 is reserved for openstack-dashboard
$syslog_log_facility_glance = 'LOCAL2'
$syslog_log_facility_cinder = 'LOCAL3'
$syslog_log_facility_quantum = 'LOCAL4'
$syslog_log_facility_nova = 'LOCAL6'
$syslog_log_facility_keystone = 'LOCAL7'
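# For example, facilities may overlap if you want several services in one syslog stream
# (the overlap below is illustrative only):
# $syslog_log_facility_cinder  = 'LOCAL3'
# $syslog_log_facility_quantum = 'LOCAL3'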
if $use_syslog {
class { "::openstack::logging":
stage => 'first',
role => 'client',
# use date-rfc3339 timestamps
show_timezone => true,
# log both locally (including auth) and to the remote server
log_remote => true,
log_local => true,
log_auth_local => true,
# keep four weekly log rotations, force rotation if the 300M size limit is exceeded
rotation => 'weekly',
keep => '4',
# should be > 30M
limitsize => '300M',
# remote servers to send logs to
rservers => [{'remote_type'=>'udp', 'server'=>'master', 'port'=>'514'},],
# should be true if the client is running on a virtual node
virtual => true,
# facilities
syslog_log_facility_glance => $syslog_log_facility_glance,
syslog_log_facility_cinder => $syslog_log_facility_cinder,
syslog_log_facility_quantum => $syslog_log_facility_quantum,
syslog_log_facility_nova => $syslog_log_facility_nova,
syslog_log_facility_keystone => $syslog_log_facility_keystone,
# Rabbit doesn't support syslog directly; this should be >= syslog_log_level,
# otherwise none of rabbit's messages will go to syslog
rabbit_log_level => $syslog_log_level,
debug => $debug,
}
}
# Example for server role class definition for remote logging node:
# class {::openstack::logging:
# role => 'server',
# log_remote => false,
# log_local => true,
# log_auth_local => true,
# rotation => 'daily',
# keep => '7',
# limitsize => '100M',
# port => '514',
# proto => 'udp',
# #high precision timestamps
# show_timezone => true,
# #should be true if the server is running on a virtual node
# #virtual => false,
# }
### Syslog END ###
case $::osfamily {
"Debian": {
$rabbitmq_version_string = '2.8.7-1'
}
"RedHat": {
$rabbitmq_version_string = '2.8.7-2.el6'
}
}
#
# OpenStack packages and customized component versions to be installed.
# Use 'latest' to get the most recent ones or specify an exact version if you need to install a custom version.
$openstack_version = {
'keystone' => 'latest',
'glance' => 'latest',
'horizon' => 'latest',
'nova' => 'latest',
'novncproxy' => 'latest',
'cinder' => 'latest',
'rabbitmq_version' => $rabbitmq_version_string,
}
# Which package repo mirror to use. Currently "default".
# "custom" is used by Mirantis for testing purposes.
# Local puppet-managed repo option planned for future releases.
# If you want to set up a local repository, you will need to manually adjust mirantis_repos.pp,
# though it is NOT recommended.
$mirror_type = 'default'
$enable_test_repo = false
$repo_proxy = undef
#Rate Limits for cinder and Nova
#Cinder and Nova can rate-limit your requests to API services.
#These limits can be reduced for your installation or usage scenario.
#Change the following variables if you want. They are measured in requests per minute.
$nova_rate_limits = {
'POST' => 1000,
'POST_SERVERS' => 1000,
'PUT' => 1000, 'GET' => 1000,
'DELETE' => 1000
}
$cinder_rate_limits = {
'POST' => 1000,
'POST_SERVERS' => 1000,
'PUT' => 1000, 'GET' => 1000,
'DELETE' => 1000
}
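# For example, to throttle write requests while leaving reads generous
# (the values below are illustrative only):
# $nova_rate_limits = {
#   'POST'         => 10,
#   'POST_SERVERS' => 10,
#   'PUT'          => 10, 'GET' => 1000,
#   'DELETE'       => 10
# }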
Exec { logoutput => true }
#Specify desired NTP servers here.
#If you leave it undef, pool.ntp.org
#will be used
$ntp_servers = ['pool.ntp.org']
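# For example, several NTP servers may be listed (the addresses below are illustrative only):
# $ntp_servers = ['0.pool.ntp.org', '1.pool.ntp.org', '10.0.204.1']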
class {'openstack::clocksync': ntp_servers=>$ntp_servers}
#Run the clocksync exec from openstack::clocksync before services
#connecting to the AMQP server are started.
Exec<| title == 'clocksync' |>->Nova::Generic_service<| |>
Exec<| title == 'clocksync' |>->Service<| title == 'quantum-l3' |>
Exec<| title == 'clocksync' |>->Service<| title == 'quantum-dhcp-service' |>
Exec<| title == 'clocksync' |>->Service<| title == 'quantum-ovs-plugin-service' |>
Exec<| title == 'clocksync' |>->Service<| title == 'cinder-volume' |>
Exec<| title == 'clocksync' |>->Service<| title == 'cinder-api' |>
Exec<| title == 'clocksync' |>->Service<| title == 'cinder-scheduler' |>
Exec<| title == 'clocksync' |>->Exec<| title == 'keystone-manage db_sync' |>
Exec<| title == 'clocksync' |>->Exec<| title == 'glance-manage db_sync' |>
Exec<| title == 'clocksync' |>->Exec<| title == 'nova-manage db sync' |>
Exec<| title == 'clocksync' |>->Exec<| title == 'initial-db-sync' |>
Exec<| title == 'clocksync' |>->Exec<| title == 'post-nova_config' |>
### END OF PUBLIC CONFIGURATION PART ###
# Normally, you do not need to change anything after this line
# Globally apply an environment-based tag to all resources on each node.
tag("${::deployment_id}::${::environment}")
class { 'openstack::mirantis_repos':
stage => 'openstack-custom-repo',
type=>$mirror_type,
enable_test_repo=>$enable_test_repo,
repo_proxy=>$repo_proxy,
}
class { '::openstack::firewall':
stage => 'openstack-firewall'
}
if !defined(Class['selinux']) and ($::osfamily == 'RedHat') {
class { 'selinux':
mode=>"disabled",
stage=>"openstack-custom-repo"
}
}
if $::operatingsystem == 'Ubuntu' {
class { 'openstack::apparmor::disable': stage => 'openstack-custom-repo' }
}
sysctl::value { 'net.ipv4.conf.all.rp_filter': value => '0' }
# Dashboard(horizon) https/ssl mode
# false: normal mode with no encryption
# 'default': uses keys supplied with the ssl module package
# 'exist': assumes that the keys (domain name based certificate) are provisioned in advance
# 'custom': require fileserver static mount point [ssl_certs] and hostname based certificate existence
$horizon_use_ssl = false
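# For example, to use a certificate that was provisioned in advance for this hostname:
# $horizon_use_ssl = 'exist'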
# Class for calling corosync::virtual_ip in the specific stage
$vip_keys = keys($vips)
class virtual_ips () {
cluster::virtual_ips { $vip_keys:
vips => $vips,
}
}
class compact_controller (
$quantum_network_node = $quantum_netnode_on_cnt
) {
class { 'openstack::controller_ha':
controller_public_addresses => $controller_public_addresses,
controller_internal_addresses => $controller_internal_addresses,
internal_address => $internal_address,
public_interface => $public_int,
internal_interface => $internal_int,
private_interface => $private_interface,
internal_virtual_ip => $internal_virtual_ip,
public_virtual_ip => $public_virtual_ip,
primary_controller => $primary_controller,
floating_range => $floating_range,
fixed_range => $fixed_range,
multi_host => $multi_host,
network_manager => $network_manager,
num_networks => $num_networks,
network_size => $network_size,
network_config => { 'vlan_start' => $vlan_start },
verbose => $verbose,
debug => $debug,
auto_assign_floating_ip => $auto_assign_floating_ip,
mysql_root_password => $mysql_root_password,
admin_email => $admin_email,
admin_password => $admin_password,
keystone_db_password => $keystone_db_password,
keystone_admin_token => $keystone_admin_token,
glance_db_password => $glance_db_password,
glance_user_password => $glance_user_password,
nova_db_password => $nova_db_password,
nova_user_password => $nova_user_password,
queue_provider => $queue_provider,
rabbit_password => $rabbit_password,
rabbit_user => $rabbit_user,
rabbit_nodes => $controller_hostnames,
qpid_password => $rabbit_password,
qpid_user => $rabbit_user,
qpid_nodes => [$internal_virtual_ip],
memcached_servers => $controller_hostnames,
export_resources => false,
glance_backend => $glance_backend,
quantum => $quantum,
quantum_user_password => $quantum_user_password,
quantum_db_password => $quantum_db_password,
quantum_db_user => $quantum_db_user,
quantum_db_dbname => $quantum_db_dbname,
quantum_network_node => $quantum_network_node,
quantum_netnode_on_cnt => $quantum_netnode_on_cnt,
quantum_gre_bind_addr => $quantum_gre_bind_addr,
quantum_external_ipinfo => $external_ipinfo,
tenant_network_type => $tenant_network_type,
segment_range => $segment_range,
cinder => $cinder,
cinder_iscsi_bind_addr => $cinder_iscsi_bind_addr,
manage_volumes => $cinder ? { false => $manage_volumes, default =>$is_cinder_node },
galera_nodes => $controller_hostnames,
custom_mysql_setup_class => $custom_mysql_setup_class,
nv_physical_volume => $nv_physical_volume,
use_syslog => $use_syslog,
syslog_log_level => $syslog_log_level,
syslog_log_facility_glance => $syslog_log_facility_glance,
syslog_log_facility_cinder => $syslog_log_facility_cinder,
syslog_log_facility_quantum => $syslog_log_facility_quantum,
syslog_log_facility_nova => $syslog_log_facility_nova,
syslog_log_facility_keystone => $syslog_log_facility_keystone,
nova_rate_limits => $nova_rate_limits,
horizon_use_ssl => $horizon_use_ssl,
use_unicast_corosync => $use_unicast_corosync,
cinder_rate_limits => $cinder_rate_limits,
ha_provider => $ha_provider
}
}
# Definition of OpenStack controller nodes.
node /fuel-controller-[\d+]/ {
include stdlib
class { 'operatingsystem::checksupported':
stage => 'first'
}
class {'::node_netconfig':
mgmt_ipaddr => $::internal_address,
mgmt_netmask => $::internal_netmask,
public_ipaddr => $::public_address,
public_netmask => $::public_netmask,
stage => 'netconfig',
}
if $nagios {
class {'nagios':
proj_name => $proj_name,
services => [
'host-alive','nova-novncproxy','keystone', 'nova-scheduler',
'nova-consoleauth', 'nova-cert', 'haproxy', 'nova-api', 'glance-api',
'glance-registry','horizon', 'rabbitmq', 'mysql'
],
whitelist => ['127.0.0.1', $nagios_master],
hostgroup => 'controller',
}
}
###
# cluster init
class { '::cluster': stage => 'corosync_setup' } ->
class { 'virtual_ips':
stage => 'corosync_setup'
}
include ::haproxy::params
class { 'cluster::haproxy':
global_options => merge($::haproxy::params::global_options, {'log' => "/dev/log local0"}),
defaults_options => merge($::haproxy::params::defaults_options, {'mode' => 'http'}),
stage => 'cluster_head',
}
#
###
class { compact_controller: }
}
# Definition of OpenStack compute nodes.
node /fuel-compute-[\d+]/ {
## Uncomment the lines below if you want
## to configure the network of these nodes
## with puppet.
class {'::node_netconfig':
mgmt_ipaddr => $::internal_address,
mgmt_netmask => $::internal_netmask,
public_ipaddr => $::public_address,
public_netmask => $::public_netmask,
stage => 'netconfig',
}
include stdlib
class { 'operatingsystem::checksupported':
stage => 'first'
}
if $nagios {
class {'nagios':
proj_name => $proj_name,
services => [
'host-alive', 'nova-compute','nova-network','libvirt'
],
whitelist => ['127.0.0.1', $nagios_master],
hostgroup => 'compute',
}
}
class { 'openstack::compute':
public_interface => $public_int,
private_interface => $private_interface,
internal_address => $internal_address,
libvirt_type => 'kvm',
fixed_range => $fixed_range,
network_manager => $network_manager,
network_config => { 'vlan_start' => $vlan_start },
multi_host => $multi_host,
auto_assign_floating_ip => $auto_assign_floating_ip,
sql_connection => "mysql://nova:${nova_db_password}@${internal_virtual_ip}/nova",
queue_provider => $queue_provider,
rabbit_nodes => $controller_hostnames,
rabbit_password => $rabbit_password,
rabbit_user => $rabbit_user,
rabbit_ha_virtual_ip => $internal_virtual_ip,
qpid_password => $rabbit_password,
qpid_user => $rabbit_user,
qpid_nodes => [$internal_virtual_ip],
glance_api_servers => "${internal_virtual_ip}:9292",
vncproxy_host => $public_virtual_ip,
verbose => $verbose,
debug => $debug,
vnc_enabled => true,
nova_user_password => $nova_user_password,
cache_server_ip => $controller_hostnames,
service_endpoint => $internal_virtual_ip,
quantum => $quantum,
quantum_sql_connection => $quantum_sql_connection,
quantum_user_password => $quantum_user_password,
quantum_host => $internal_virtual_ip,
tenant_network_type => $tenant_network_type,
segment_range => $segment_range,
cinder => $cinder,
cinder_iscsi_bind_addr => $cinder_iscsi_bind_addr,
manage_volumes => $cinder ? { false => $manage_volumes, default =>$is_cinder_node },
nv_physical_volume => $nv_physical_volume,
db_host => $internal_virtual_ip,
cinder_rate_limits => $cinder_rate_limits,
use_syslog => $use_syslog,
syslog_log_level => $syslog_log_level,
syslog_log_facility_quantum => $syslog_log_facility_quantum,
syslog_log_facility_cinder => $syslog_log_facility_cinder,
nova_rate_limits => $nova_rate_limits,
}
}
# Definition of OpenStack Quantum node.
node /fuel-quantum/ {
include stdlib
class { 'operatingsystem::checksupported':
stage => 'first'
}
class {'::node_netconfig':
mgmt_ipaddr => $::internal_address,
mgmt_netmask => $::internal_netmask,
public_ipaddr => 'none',
save_default_gateway => true,
stage => 'netconfig',
}
if ! $quantum_netnode_on_cnt {
class { 'openstack::quantum_router':
db_host => $internal_virtual_ip,
service_endpoint => $internal_virtual_ip,
auth_host => $internal_virtual_ip,
nova_api_vip => $internal_virtual_ip,
internal_address => $internal_address,
public_interface => $public_int,
private_interface => $private_interface,
floating_range => $floating_range,
fixed_range => $fixed_range,
create_networks => $create_networks,
verbose => $verbose,
debug => $debug,
queue_provider => $queue_provider,
rabbit_password => $rabbit_password,
rabbit_user => $rabbit_user,
rabbit_nodes => $controller_hostnames,
rabbit_ha_virtual_ip => $internal_virtual_ip,
qpid_password => $rabbit_password,
qpid_user => $rabbit_user,
qpid_nodes => [$internal_virtual_ip],
quantum => $quantum,
quantum_user_password => $quantum_user_password,
quantum_db_password => $quantum_db_password,
quantum_db_user => $quantum_db_user,
quantum_db_dbname => $quantum_db_dbname,
quantum_netnode_on_cnt=> false,
quantum_network_node => true,
tenant_network_type => $tenant_network_type,
segment_range => $segment_range,
external_ipinfo => $external_ipinfo,
api_bind_address => $internal_address,
use_syslog => $use_syslog,
syslog_log_level => $syslog_log_level,
syslog_log_facility_quantum => $syslog_log_facility_quantum,
}
class { 'openstack::auth_file':
admin_password => $admin_password,
keystone_admin_token => $keystone_admin_token,
controller_node => $internal_virtual_ip,
before => Class['openstack::quantum_router'],
}
}
}

View File

@@ -1,714 +0,0 @@
#
# Parameter values in this file should be changed, taking into consideration your
# networking setup and desired OpenStack settings.
#
# Please consult with the latest Fuel User Guide before making edits.
#
# Run stages for puppet
stage {'first': } ->
stage {'openstack-custom-repo': } ->
stage {'netconfig': } ->
stage {'openstack-firewall': } -> Stage['main']
### GENERAL CONFIG ###
# This section sets main parameters such as hostnames and IP addresses of different nodes
# This is the name of the public interface. The public network provides address space for Floating IPs, as well as public IP accessibility to the API endpoints.
$public_interface = 'eth1'
$public_br = 'br-ex'
# This is the name of the internal interface. It will be attached to the management network, where data exchange between components of the OpenStack cluster will happen.
$internal_interface = 'eth0'
$internal_br = 'br-mgmt'
# This is the name of the private interface. All traffic within OpenStack tenants' networks will go through this interface.
$private_interface = 'eth2'
case $::operatingsystem {
'redhat' : {
$queue_provider = 'qpid'
$custom_mysql_setup_class = 'pacemaker_mysql'
}
default: {
$queue_provider='rabbitmq'
$custom_mysql_setup_class='galera'
}
}
$nodes_harr = [
{
'name' => 'fuel-cobbler',
'role' => 'cobbler',
'internal_address' => '10.0.0.102',
'public_address' => '10.0.204.102',
},
{
'name' => 'fuel-controller-01',
'role' => 'controller',
'internal_address' => '10.0.0.103',
'public_address' => '10.0.204.103',
},
{
'name' => 'fuel-compute-01',
'role' => 'compute',
'internal_address' => '10.0.0.106',
'public_address' => '10.0.204.106',
},
{
'name' => 'fuel-compute-02',
'role' => 'compute',
'internal_address' => '10.0.0.107',
'public_address' => '10.0.204.107',
},
{
'name' => 'fuel-compute-03',
'role' => 'compute',
'internal_address' => '10.0.0.108',
'public_address' => '10.0.204.108',
},
]
$nodes = $nodes_harr
$default_gateway = '10.0.204.1'
# Specify nameservers here.
# These should point to the cobbler node IP, or to specially prepared nameservers if you know what you are doing.
$dns_nameservers = ['10.0.204.1','8.8.8.8']
# Specify netmasks for internal and external networks.
$internal_netmask = '255.255.255.0'
$public_netmask = '255.255.255.0'
$node = filter_nodes($nodes,'name',$::hostname)
$internal_address = $node[0]['internal_address']
$public_address = $node[0]['public_address']
$controllers = merge_arrays(filter_nodes($nodes,'role','primary-controller'), filter_nodes($nodes,'role','controller'))
$controller_internal_address = $controllers[0]['internal_address']
$controller_public_address = $controllers[0]['public_address']
#Set this to anything other than pacemaker if you do not want Quantum HA
#Also, if you do not want Quantum HA, you MUST enable $quantum_network_node
#on the ONLY controller
$ha_provider = 'generic'
#$use_unicast_corosync = false
# Set nagios master fqdn
$nagios_master = 'nagios-server.localdomain'
## proj_name name of environment nagios configuration
$proj_name = 'test'
#Specify if your installation contains multiple Nova controllers. Defaults to true as it is the most common scenario.
$multi_host = true
# Specify different DB credentials for various services
$mysql_root_password = 'nova'
$admin_email = 'openstack@openstack.org'
$admin_password = 'nova'
$keystone_db_password = 'nova'
$keystone_admin_token = 'nova'
$glance_db_password = 'nova'
$glance_user_password = 'nova'
$nova_db_password = 'nova'
$nova_user_password = 'nova'
#AMQP backend rabbitmq or qpid
$queue_provider = 'qpid'
validate_re($queue_provider, 'rabbitmq|qpid')
$rabbit_password = 'nova'
$rabbit_user = 'nova'
$quantum_user_password = 'quantum_pass'
$quantum_db_password = 'quantum_pass'
$quantum_db_user = 'quantum'
$quantum_db_dbname = 'quantum'
# End DB credentials section
### GENERAL CONFIG END ###
### NETWORK/QUANTUM ###
# Specify network/quantum specific settings
# Should we use quantum or nova-network(deprecated).
# Consult OpenStack documentation for differences between them.
$quantum = true
$quantum_netnode_on_cnt = true
$quantum_use_namespaces = true
# a string "password" value that should be configured to authenticate requests for metadata
# from quantum-metadata-proxy to nova-api
$quantum_metadata_proxy_shared_secret = "connecting_nova-api_and_quantum-metadata-agent"
# Specify network creation criteria:
# Should puppet automatically create networks?
$create_networks = true
# Fixed IP addresses are typically used for communication between VM instances.
$fixed_range = '10.0.198.128/27'
# Floating IP addresses are used for communication of VM instances with the outside world (e.g. Internet).
$floating_range = '10.0.204.128/28'
# These parameters are passed to the previously specified network manager, e.g. nova-manage network create.
# Not used in Quantum.
# Consult openstack docs for corresponding network manager.
# https://fuel-dev.mirantis.com/docs/0.2/pages/0050-installation-instructions.html#network-setup
$num_networks = 1
$network_size = 31
$vlan_start = 300
# Quantum
# Segmentation type for isolating traffic between tenants
# Consult Openstack Quantum docs
$tenant_network_type = 'gre'
# Which IP address will be used for creating GRE tunnels.
$quantum_gre_bind_addr = $internal_address
# If $external_ipinfo option is not defined, the addresses will be allocated automatically from $floating_range:
# the first address will be defined as an external default router,
# the second address will be attached to an uplink bridge interface,
# the remaining addresses will be utilized for the floating IP address pool.
$external_ipinfo = {}
## $external_ipinfo = {
## 'public_net_router' => '10.0.74.129',
## 'ext_bridge' => '10.0.74.130',
## 'pool_start' => '10.0.74.131',
## 'pool_end' => '10.0.74.142',
## }
# Quantum segmentation range.
# For VLAN networks: valid VLAN VIDs can be 1 through 4094.
# For GRE networks: Valid tunnel IDs can be any 32-bit unsigned integer.
$segment_range = '900:999'
# Set up OpenStack network manager. It is used ONLY in nova-network.
# Consult Openstack nova-network docs for possible values.
$network_manager = 'nova.network.manager.FlatDHCPManager'
# Assign floating IPs to VMs on startup automatically?
$auto_assign_floating_ip = false
# Database connections
$sql_connection = "mysql://nova:${nova_db_password}@${controller_internal_address}/nova"
$quantum_sql_connection = "mysql://${quantum_db_user}:${quantum_db_password}@${controller_internal_address}/${quantum_db_dbname}"
if $quantum {
$public_int = $public_br
$internal_int = $internal_br
} else {
$public_int = $public_interface
$internal_int = $internal_interface
}
#Network configuration
class {'l23network': use_ovs=>$quantum, stage=> 'netconfig'}
class node_netconfig (
$mgmt_ipaddr,
$mgmt_netmask = '255.255.255.0',
$public_ipaddr = undef,
$public_netmask= '255.255.255.0',
$save_default_gateway=false,
$quantum = $quantum,
) {
if $quantum {
l23network::l3::create_br_iface {'mgmt':
interface => $internal_interface, # !!! NO $internal_int /sv !!!
bridge => $internal_br,
ipaddr => $mgmt_ipaddr,
netmask => $mgmt_netmask,
dns_nameservers => $dns_nameservers,
save_default_gateway => $save_default_gateway,
} ->
l23network::l3::create_br_iface {'ex':
interface => $public_interface, # !! NO $public_int /sv !!!
bridge => $public_br,
ipaddr => $public_ipaddr,
netmask => $public_netmask,
gateway => $default_gateway,
}
} else {
# nova-network mode
l23network::l3::ifconfig {$public_int:
ipaddr => $public_ipaddr,
netmask => $public_netmask,
gateway => $default_gateway,
}
l23network::l3::ifconfig {$internal_int:
ipaddr => $mgmt_ipaddr,
netmask => $mgmt_netmask,
dns_nameservers => $dns_nameservers,
}
}
l23network::l3::ifconfig {$private_interface: ipaddr=>'none' }
}
### NETWORK/QUANTUM END ###
# This parameter specifies the identifier of the current cluster. This is needed in case of a multiple-environment
# installation. Each cluster requires a unique integer value.
# Valid identifier range is 1 to 254
$deployment_id = '69'
# Below you can enable or disable various services based on the chosen deployment topology:
### CINDER/VOLUME ###
# Should we use cinder or nova-volume(obsolete)
# Consult openstack docs for differences between them
$cinder = true
# Choose which nodes to install cinder onto
# 'compute' -> compute nodes will run cinder
# 'controller' -> controller nodes will run cinder
# 'storage' -> storage nodes will run cinder
# 'fuel-controller-XX' -> specify particular host(s) by hostname
# 'XXX.XXX.XXX.XXX' -> specify particular host(s) by IP address
# 'all' -> compute, controller, and storage nodes will run cinder (excluding swift and proxy nodes)
$cinder_nodes = ['controller']
#Set it to true if you want cinder-volume to be installed on the host
#Otherwise only the api and scheduler services will be installed
$manage_volumes = true
# Set up the network address which Cinder uses to export iSCSI targets.
$cinder_iscsi_bind_addr = $internal_address
# Below you can add physical volumes to cinder. Please replace values with the actual names of devices.
# This parameter defines which partitions to aggregate into cinder-volumes or nova-volumes LVM VG
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# USE EXTREME CAUTION WITH THIS SETTING! IF THIS PARAMETER IS DEFINED,
# IT WILL AGGREGATE THE VOLUMES INTO AN LVM VOLUME GROUP
# AND ALL THE DATA THAT RESIDES ON THESE VOLUMES WILL BE LOST!
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# Leave this parameter empty if you want to create [cinder|nova]-volumes VG by yourself
$nv_physical_volume = ['/dev/sdz', '/dev/sdy', '/dev/sdx']
#Evaluate cinder node selection
if ($cinder) {
if (member($cinder_nodes,'all')) {
$is_cinder_node = true
} elsif (member($cinder_nodes,$::hostname)) {
$is_cinder_node = true
} elsif (member($cinder_nodes,$internal_address)) {
$is_cinder_node = true
} elsif ($node[0]['role'] =~ /controller/ ) {
$is_cinder_node = member($cinder_nodes,'controller')
} else {
$is_cinder_node = member($cinder_nodes,$node[0]['role'])
}
} else {
$is_cinder_node = false
}
### CINDER/VOLUME END ###
### GLANCE and SWIFT ###
# Which backend to use for glance
# Supported backends are "swift" and "file"
$glance_backend = 'file'
# Use loopback device for swift:
# set 'loopback' or false
# This parameter controls where swift partitions are located:
# on physical partitions or inside loopback devices.
$swift_loopback = false
### Glance and swift END ###
# This parameter specifies the verbosity level of log messages
# in openstack components config.
# Debug sets the DEBUG level and ignores the verbose setting, if any.
# Verbose sets the INFO level for messages.
# If neither debug nor verbose is set, the default WARNING level is used.
# Note: if syslog is on, this default level may be configured (for syslog) with the syslog_log_level option.
$verbose = true
$debug = false
### Syslog ###
# Enable error message reporting to rsyslog. Rsyslog must be installed in this case,
# and configured to start at the very beginning of the puppet agent run.
$use_syslog = true
# Default log level used if neither verbose nor debug is set
$syslog_log_level = 'ERROR'
# Syslog facilities for main openstack services, choose any, may overlap if needed.
# local0 is reserved for HA provisioning and orchestration services (not applicable here),
# local1 is reserved for openstack-dashboard
$syslog_log_facility_glance = 'LOCAL2'
$syslog_log_facility_cinder = 'LOCAL3'
$syslog_log_facility_quantum = 'LOCAL4'
$syslog_log_facility_nova = 'LOCAL6'
$syslog_log_facility_keystone = 'LOCAL7'
if $use_syslog {
class { "::openstack::logging":
stage => 'first',
role => 'client',
# use date-rfc3339 timestamps
show_timezone => true,
# log both locally (including auth) and to the remote server
log_remote => true,
log_local => true,
log_auth_local => true,
# keep four weekly log rotations, force rotation if the 300M size limit is exceeded
rotation => 'weekly',
keep => '4',
# should be > 30M
limitsize => '300M',
# remote servers to send logs to
rservers => [{'remote_type'=>'udp', 'server'=>'master', 'port'=>'514'},],
# should be true if the client is running on a virtual node
virtual => true,
# facilities
syslog_log_facility_glance => $syslog_log_facility_glance,
syslog_log_facility_cinder => $syslog_log_facility_cinder,
syslog_log_facility_quantum => $syslog_log_facility_quantum,
syslog_log_facility_nova => $syslog_log_facility_nova,
syslog_log_facility_keystone => $syslog_log_facility_keystone,
# Rabbit doesn't support syslog directly; this should be >= syslog_log_level,
# otherwise none of rabbit's messages will go to syslog
rabbit_log_level => $syslog_log_level,
debug => $debug,
}
}
# Example for server role class definition for remote logging node:
# class {::openstack::logging:
# role => 'server',
# log_remote => false,
# log_local => true,
# log_auth_local => true,
# rotation => 'daily',
# keep => '7',
# limitsize => '100M',
# port => '514',
# proto => 'udp',
# #high precision timestamps
# show_timezone => true,
# #should be true if the server is running on a virtual node
# #virtual => false,
# }
### Syslog END ###
case $::osfamily {
"Debian": {
$rabbitmq_version_string = '2.8.7-1'
}
"RedHat": {
$rabbitmq_version_string = '2.8.7-2.el6'
}
}
# OpenStack packages to be installed
$openstack_version = {
'keystone' => 'latest',
'glance' => 'latest',
'horizon' => 'latest',
'nova' => 'latest',
'novncproxy' => 'latest',
'cinder' => 'latest',
'rabbitmq_version' => $rabbitmq_version_string,
}
# Which package repo mirror to use. Currently "default".
# "custom" is used by Mirantis for testing purposes.
# Local puppet-managed repo option planned for future releases.
# If you want to set up a local repository, you will need to manually adjust mirantis_repos.pp,
# though it is NOT recommended.
$mirror_type = 'default'
$enable_test_repo = false
$repo_proxy = undef
$use_upstream_mysql = true
#Rate Limits for cinder and Nova
#Cinder and Nova can rate-limit your requests to API services.
#These limits can be reduced for your installation or usage scenario.
#Change the following variables if you want. They are measured in requests per minute.
$nova_rate_limits = {
'POST' => 1000,
'POST_SERVERS' => 1000,
'PUT' => 1000, 'GET' => 1000,
'DELETE' => 1000
}
$cinder_rate_limits = {
'POST' => 1000,
'POST_SERVERS' => 1000,
'PUT' => 1000, 'GET' => 1000,
'DELETE' => 1000
}
Exec { logoutput => true }
#Specify desired NTP servers here.
#If you leave it undef, pool.ntp.org
#will be used
$ntp_servers = ['pool.ntp.org']
class {'openstack::clocksync': ntp_servers=>$ntp_servers}
#Run the clocksync exec from openstack::clocksync before services
#connecting to the AMQP server are started.
Exec<| title == 'clocksync' |>->Nova::Generic_service<| |>
Exec<| title == 'clocksync' |>->Service<| title == 'quantum-l3' |>
Exec<| title == 'clocksync' |>->Service<| title == 'quantum-dhcp-service' |>
Exec<| title == 'clocksync' |>->Service<| title == 'quantum-ovs-plugin-service' |>
Exec<| title == 'clocksync' |>->Service<| title == 'cinder-volume' |>
Exec<| title == 'clocksync' |>->Service<| title == 'cinder-api' |>
Exec<| title == 'clocksync' |>->Service<| title == 'cinder-scheduler' |>
Exec<| title == 'clocksync' |>->Exec<| title == 'keystone-manage db_sync' |>
Exec<| title == 'clocksync' |>->Exec<| title == 'keystone-manage pki_setup' |>
Exec<| title == 'clocksync' |>->Exec<| title == 'glance-manage db_sync' |>
Exec<| title == 'clocksync' |>->Exec<| title == 'nova-manage db sync' |>
Exec<| title == 'clocksync' |>->Exec<| title == 'initial-db-sync' |>
Exec<| title == 'clocksync' |>->Exec<| title == 'post-nova_config' |>
### END OF PUBLIC CONFIGURATION PART ###
# Normally, you do not need to change anything after this line
# Globally apply an environment-based tag to all resources on each node.
tag("${::deployment_id}::${::environment}")
class { 'openstack::mirantis_repos':
stage => 'openstack-custom-repo',
type=>$mirror_type,
enable_test_repo=>$enable_test_repo,
repo_proxy=>$repo_proxy,
use_upstream_mysql=>$use_upstream_mysql
}
class { '::openstack::firewall':
stage => 'openstack-firewall'
}
if !defined(Class['selinux']) and ($::osfamily == 'RedHat') {
class { 'selinux':
mode=>"disabled",
stage=>"openstack-custom-repo"
}
}
if $::operatingsystem == 'Ubuntu' {
class { 'openstack::apparmor::disable': stage => 'openstack-custom-repo' }
}
sysctl::value { 'net.ipv4.conf.all.rp_filter': value => '0' }
# Dashboard(horizon) https/ssl mode
# false: normal mode with no encryption
# 'default': uses keys supplied with the ssl module package
# 'exist': assumes that the keys (domain name based certificate) are provisioned in advance
# 'custom': require fileserver static mount point [ssl_certs] and hostname based certificate existence
$horizon_use_ssl = false
class simple_controller (
$quantum_network_node = true
) {
class {'::node_netconfig':
mgmt_ipaddr => $::internal_address,
mgmt_netmask => $::internal_netmask,
public_ipaddr => $::public_address,
public_netmask => $::public_netmask,
stage => 'netconfig',
}
class { 'openstack::controller':
admin_address => $controller_internal_address,
service_endpoint => $controller_internal_address,
public_address => $controller_public_address,
public_interface => $public_int,
private_interface => $private_interface,
internal_address => $controller_internal_address,
floating_range => $floating_range,
fixed_range => $fixed_range,
multi_host => $multi_host,
network_manager => $network_manager,
num_networks => $num_networks,
network_size => $network_size,
network_config => { 'vlan_start' => $vlan_start },
verbose => $verbose,
debug => $debug,
auto_assign_floating_ip => $auto_assign_floating_ip,
mysql_root_password => $mysql_root_password,
admin_email => $admin_email,
admin_password => $admin_password,
keystone_db_password => $keystone_db_password,
keystone_admin_token => $keystone_admin_token,
glance_db_password => $glance_db_password,
glance_user_password => $glance_user_password,
nova_db_password => $nova_db_password,
nova_user_password => $nova_user_password,
queue_provider => $queue_provider,
rabbit_password => $rabbit_password,
rabbit_user => $rabbit_user,
qpid_password => $rabbit_password,
qpid_user => $rabbit_user,
export_resources => false,
quantum => $quantum,
quantum_user_password => $quantum_user_password,
quantum_db_password => $quantum_db_password,
quantum_db_user => $quantum_db_user,
quantum_db_dbname => $quantum_db_dbname,
quantum_network_node => $quantum_network_node,
quantum_netnode_on_cnt => $quantum_netnode_on_cnt,
quantum_gre_bind_addr => $quantum_gre_bind_addr,
quantum_external_ipinfo => $external_ipinfo,
tenant_network_type => $tenant_network_type,
segment_range => $segment_range,
cinder => $cinder,
cinder_iscsi_bind_addr => $cinder_iscsi_bind_addr,
manage_volumes => $cinder ? { false => $manage_volumes, default =>$is_cinder_node },
nv_physical_volume => $nv_physical_volume,
use_syslog => $use_syslog,
syslog_log_level => $syslog_log_level,
syslog_log_facility_glance => $syslog_log_facility_glance,
syslog_log_facility_cinder => $syslog_log_facility_cinder,
syslog_log_facility_quantum => $syslog_log_facility_quantum,
syslog_log_facility_nova => $syslog_log_facility_nova,
syslog_log_facility_keystone => $syslog_log_facility_keystone,
nova_rate_limits => $nova_rate_limits,
cinder_rate_limits => $cinder_rate_limits,
horizon_use_ssl => $horizon_use_ssl,
}
if $::quantum and $quantum_network_node {
class { '::openstack::quantum_router':
db_host => $controller_internal_address,
service_endpoint => $controller_internal_address,
auth_host => $controller_internal_address,
nova_api_vip => $controller_internal_address,
internal_address => $internal_address,
public_interface => $public_int,
private_interface => $private_interface,
floating_range => $floating_range,
fixed_range => $fixed_range,
create_networks => $create_networks,
verbose => $verbose,
debug => $debug,
queue_provider => $queue_provider,
rabbit_password => $rabbit_password,
rabbit_user => $rabbit_user,
rabbit_ha_virtual_ip => $controller_internal_address,
rabbit_nodes => [$controller_internal_address],
qpid_password => $rabbit_password,
qpid_user => $rabbit_user,
qpid_nodes => [$controller_internal_address],
quantum => $quantum,
quantum_user_password => $quantum_user_password,
quantum_db_password => $quantum_db_password,
quantum_db_user => $quantum_db_user,
quantum_db_dbname => $quantum_db_dbname,
quantum_gre_bind_addr => $quantum_gre_bind_addr,
quantum_network_node => $quantum_network_node,
quantum_netnode_on_cnt=> $quantum_netnode_on_cnt,
tenant_network_type => $tenant_network_type,
segment_range => $segment_range,
external_ipinfo => $external_ipinfo,
api_bind_address => $internal_address,
use_syslog => $use_syslog,
syslog_log_level => $syslog_log_level,
syslog_log_facility => $syslog_log_facility_quantum,
}
}
class { 'openstack::auth_file':
admin_password => $admin_password,
keystone_admin_token => $keystone_admin_token,
controller_node => $controller_internal_address,
}
}
# Definition of OpenStack controller node.
node /fuel-controller-[\d+]/ {
include stdlib
class { 'operatingsystem::checksupported':
stage => 'first'
}
class {'nagios':
proj_name => $proj_name,
services => [
'host-alive','nova-novncproxy','keystone', 'nova-scheduler',
'nova-consoleauth', 'nova-cert', 'nova-api', 'glance-api',
'glance-registry','horizon', 'rabbitmq', 'mysql',
],
whitelist => ['127.0.0.1', $nagios_master],
hostgroup => 'controller',
}
class { 'simple_controller': }
}
# Definition of OpenStack compute nodes.
node /fuel-compute-[\d+]/ {
include stdlib
class { 'operatingsystem::checksupported':
stage => 'first'
}
class {'::node_netconfig':
mgmt_ipaddr => $::internal_address,
mgmt_netmask => $::internal_netmask,
public_ipaddr => $::public_address,
public_netmask => $::public_netmask,
stage => 'netconfig',
}
class {'nagios':
proj_name => $proj_name,
services => [
'host-alive', 'nova-compute','nova-network','libvirt'
],
whitelist => ['127.0.0.1', $nagios_master],
hostgroup => 'compute',
}
class { 'openstack::compute':
public_interface => $public_int,
private_interface => $private_interface,
internal_address => $internal_address,
libvirt_type => 'kvm',
fixed_range => $fixed_range,
network_manager => $network_manager,
network_config => { 'vlan_start' => $vlan_start },
multi_host => $multi_host,
auto_assign_floating_ip => $auto_assign_floating_ip,
sql_connection => $sql_connection,
nova_user_password => $nova_user_password,
queue_provider => $queue_provider,
rabbit_nodes => [$controller_internal_address],
rabbit_password => $rabbit_password,
rabbit_user => $rabbit_user,
qpid_nodes => [$controller_internal_address],
qpid_password => $rabbit_password,
qpid_user => $rabbit_user,
glance_api_servers => "${controller_internal_address}:9292",
vncproxy_host => $controller_public_address,
vnc_enabled => true,
quantum => $quantum,
quantum_sql_connection => $quantum_sql_connection,
quantum_user_password => $quantum_user_password,
quantum_host => $controller_internal_address,
tenant_network_type => $tenant_network_type,
service_endpoint => $controller_internal_address,
db_host => $controller_internal_address,
verbose => $verbose,
debug => $debug,
segment_range => $segment_range,
cinder => $cinder,
manage_volumes => $cinder ? { false => $manage_volumes, default =>$is_cinder_node },
nv_physical_volume => $nv_physical_volume,
cinder_iscsi_bind_addr => $cinder_iscsi_bind_addr,
use_syslog => $use_syslog,
syslog_log_level => $syslog_log_level,
syslog_log_facility => $syslog_log_facility_nova,
syslog_log_facility_quantum => $syslog_log_facility_quantum,
syslog_log_facility_cinder => $syslog_log_facility_cinder,
nova_rate_limits => $nova_rate_limits,
cinder_rate_limits => $cinder_rate_limits
}
}

View File

@@ -1,558 +0,0 @@
#
# Parameter values in this file should be changed, taking into consideration your
# networking setup and desired OpenStack settings.
#
# Please consult with the latest Fuel User Guide before making edits.
#
# Run stages for puppet
stage {'first': } ->
stage {'openstack-custom-repo': } ->
stage {'netconfig': } ->
stage {'openstack-firewall': } -> Stage['main']
### GENERAL CONFIG ###
# This section sets main parameters such as hostnames and IP addresses of different nodes
# deploy a script that can be used to test nova
class { 'openstack::test_file': }
# This is the name of the public interface. The public network provides address space for Floating IPs, as well as public IP accessibility to the API endpoints.
$public_interface = 'eth1'
# This is the name of the internal interface. It will be attached to the management network, where data exchange between components of the OpenStack cluster will happen.
$internal_interface = 'eth0'
# This is the name of the private interface. All traffic within OpenStack tenants' networks will go through this interface.
$private_interface = 'eth2'
case $::operatingsystem {
'redhat' : {
$queue_provider = 'qpid'
$custom_mysql_setup_class = 'pacemaker_mysql'
}
default: {
$queue_provider='rabbitmq'
$custom_mysql_setup_class='galera'
}
}
$nodes_harr = [
{
'name' => 'fuel-cobbler',
'role' => 'cobbler',
'internal_address' => '10.0.0.102',
'public_address' => '10.0.204.102',
},
{
'name' => 'fuel-controller-01',
'role' => 'controller',
'internal_address' => '10.0.0.103',
'public_address' => '10.0.204.103',
},
{
'name' => 'fuel-controller-01',
'role' => 'compute',
'internal_address' => '10.0.0.103',
'public_address' => '10.0.204.103',
},
]
$nodes = $nodes_harr
$default_gateway = '10.0.204.1'
# Specify nameservers here.
# These should point to the cobbler node IP, or to specially prepared nameservers if you know what you are doing.
$dns_nameservers = ['10.0.204.1','8.8.8.8']
# Specify netmasks for internal and external networks.
$internal_netmask = '255.255.255.0'
$public_netmask = '255.255.255.0'
$node = filter_nodes($nodes,'name',$::hostname)
$internal_address = $node[0]['internal_address']
$public_address = $node[0]['public_address']
$controllers = merge_arrays(filter_nodes($nodes,'role','primary-controller'), filter_nodes($nodes,'role','controller'))
$controller_internal_address = $controllers[0]['internal_address']
$controller_public_address = $controllers[0]['public_address']
$ha_provider = 'generic'
# Set nagios master fqdn
$nagios_master = 'nagios-server.localdomain'
## proj_name name of environment nagios configuration
$proj_name = 'test'
#Specify if your installation contains multiple Nova controllers. Defaults to true as it is the most common scenario.
$multi_host = false
# Specify different DB credentials for various services
$mysql_root_password = 'nova'
$admin_email = 'openstack@openstack.org'
$admin_password = 'nova'
$keystone_db_password = 'nova'
$keystone_admin_token = 'nova'
$glance_db_password = 'nova'
$glance_user_password = 'nova'
$nova_db_password = 'nova'
$nova_user_password = 'nova'
$rabbit_password = 'nova'
$rabbit_user = 'nova'
# End DB credentials section
### GENERAL CONFIG END ###
### NETWORK/QUANTUM ###
# Specify network/quantum specific settings
# Should we use quantum or nova-network(deprecated).
# Consult OpenStack documentation for differences between them.
$quantum = false
$quantum_netnode_on_cnt = true
# Specify network creation criteria:
# Should puppet automatically create networks?
$create_networks = true
# Fixed IP addresses are typically used for communication between VM instances.
$fixed_range = '10.0.198.128/27'
# Floating IP addresses are used for communication of VM instances with the outside world (e.g. Internet).
$floating_range = '10.0.204.128/28'
# These parameters are passed to the previously specified network manager, e.g. nova-manage network create.
# Not used in Quantum.
# Consult openstack docs for corresponding network manager.
# https://fuel-dev.mirantis.com/docs/0.2/pages/0050-installation-instructions.html#network-setup
$num_networks = 1
$network_size = 31
$vlan_start = 300
# Quantum
# Segmentation type for isolating traffic between tenants
# Consult Openstack Quantum docs
$tenant_network_type = 'gre'
# Which IP address will be used for creating GRE tunnels.
$quantum_gre_bind_addr = $internal_address
# If $external_ipinfo option is not defined, the addresses will be allocated automatically from $floating_range:
# the first address will be defined as an external default router,
# the second address will be attached to an uplink bridge interface,
# the remaining addresses will be utilized for the floating IP address pool.
$external_ipinfo = {}
## $external_ipinfo = {
## 'public_net_router' => '10.0.74.129',
## 'ext_bridge' => '10.0.74.130',
## 'pool_start' => '10.0.74.131',
## 'pool_end' => '10.0.74.142',
## }
# Quantum segmentation range.
# For VLAN networks: valid VLAN VIDs can be 1 through 4094.
# For GRE networks: Valid tunnel IDs can be any 32-bit unsigned integer.
$segment_range = '900:999'
# Set up OpenStack network manager. It is used ONLY in nova-network.
# Consult Openstack nova-network docs for possible values.
$network_manager = 'nova.network.manager.FlatDHCPManager'
# Assign floating IPs to VMs on startup automatically?
$auto_assign_floating_ip = false
# Database connections
$sql_connection = "mysql://nova:${nova_db_password}@${controller_internal_address}/nova"
$public_int = $public_interface
$internal_int = $internal_interface
#Network configuration
class {'l23network': use_ovs=>$quantum, stage=> 'netconfig'}
class node_netconfig (
$mgmt_ipaddr,
$mgmt_netmask = '255.255.255.0',
$public_ipaddr = undef,
$public_netmask= '255.255.255.0',
$save_default_gateway=false,
$quantum = $quantum,
) {
if $quantum {
l23network::l3::create_br_iface {'mgmt':
interface => $internal_interface, # !!! NO $internal_int /sv !!!
bridge => $internal_br,
ipaddr => $mgmt_ipaddr,
netmask => $mgmt_netmask,
dns_nameservers => $dns_nameservers,
save_default_gateway => $save_default_gateway,
} ->
l23network::l3::create_br_iface {'ex':
interface => $public_interface, # !! NO $public_int /sv !!!
bridge => $public_br,
ipaddr => $public_ipaddr,
netmask => $public_netmask,
gateway => $default_gateway,
}
} else {
# nova-network mode
l23network::l3::ifconfig {$public_int:
ipaddr => $public_ipaddr,
netmask => $public_netmask,
gateway => $default_gateway,
}
l23network::l3::ifconfig {$internal_int:
ipaddr => $mgmt_ipaddr,
netmask => $mgmt_netmask,
dns_nameservers => $dns_nameservers,
}
}
l23network::l3::ifconfig {$private_interface: ipaddr=>'none' }
class { cobbler::checksum_bootpc: }
}
### NETWORK/QUANTUM END ###
# This parameter specifies the identifier of the current cluster. This is needed in case of a multiple-environment
# installation. Each cluster requires a unique integer value.
# Valid identifier range is 1 to 254
$deployment_id = '69'
# Below you can enable or disable various services based on the chosen deployment topology:
### CINDER/VOLUME ###
# Should we use cinder or nova-volume(obsolete)
# Consult openstack docs for differences between them
$cinder = true
# Choose which nodes to install cinder onto
# 'compute' -> compute nodes will run cinder
# 'controller' -> controller nodes will run cinder
# 'storage' -> storage nodes will run cinder
# 'fuel-controller-XX' -> specify particular host(s) by hostname
# 'XXX.XXX.XXX.XXX' -> specify particular host(s) by IP address
# 'all' -> compute, controller, and storage nodes will run cinder (excluding swift and proxy nodes)
$cinder_nodes = ['controller']
#Set it to true if you want cinder-volume to be installed on the host
#Otherwise only the api and scheduler services will be installed
$manage_volumes = true
# Set up the network address which Cinder uses to export iSCSI targets.
$cinder_iscsi_bind_addr = $internal_address
# Below you can add physical volumes to cinder. Please replace values with the actual names of devices.
# This parameter defines which partitions to aggregate into cinder-volumes or nova-volumes LVM VG
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# USE EXTREME CAUTION WITH THIS SETTING! IF THIS PARAMETER IS DEFINED,
# IT WILL AGGREGATE THE VOLUMES INTO AN LVM VOLUME GROUP
# AND ALL THE DATA THAT RESIDES ON THESE VOLUMES WILL BE LOST!
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# Leave this parameter empty if you want to create [cinder|nova]-volumes VG by yourself
$nv_physical_volume = ['/dev/sdz', '/dev/sdy', '/dev/sdx']
#Evaluate cinder node selection
if ($cinder) {
if (member($cinder_nodes,'all')) {
$is_cinder_node = true
} elsif (member($cinder_nodes,$::hostname)) {
$is_cinder_node = true
} elsif (member($cinder_nodes,$internal_address)) {
$is_cinder_node = true
} elsif ($node[0]['role'] =~ /controller/ ) {
$is_cinder_node = member($cinder_nodes,'controller')
} else {
$is_cinder_node = member($cinder_nodes,$node[0]['role'])
}
} else {
$is_cinder_node = false
}
### CINDER/VOLUME END ###
### GLANCE and SWIFT ###
# Which backend to use for glance
# Supported backends are "swift" and "file"
$glance_backend = 'file'
# Use loopback device for swift:
# set 'loopback' or false
# This parameter controls where swift partitions are located:
# on physical partitions or inside loopback devices.
$swift_loopback = false
### Glance and swift END ###
# This parameter specifies the verbosity level of log messages
# in openstack components config.
# Debug sets the DEBUG level and ignores the verbose setting, if any.
# Verbose sets the INFO level for messages.
# If neither debug nor verbose is set, the default WARNING level is used.
# Note: if syslog is on, this default level may be configured (for syslog) with the syslog_log_level option.
$verbose = true
$debug = false
### Syslog ###
# Enable error message reporting to rsyslog. Rsyslog must be installed in this case,
# and configured to start at the very beginning of the puppet agent run.
$use_syslog = true
# Default log level used if neither verbose nor debug is set
$syslog_log_level = 'ERROR'
# Syslog facilities for main openstack services, choose any, may overlap if needed
# local0 is reserved for HA provisioning and orchestration services (not applicable here),
# local1 is reserved for openstack-dashboard
$syslog_log_facility_glance = 'LOCAL2'
$syslog_log_facility_cinder = 'LOCAL3'
$syslog_log_facility_quantum = 'LOCAL4'
$syslog_log_facility_nova = 'LOCAL6'
$syslog_log_facility_keystone = 'LOCAL7'
if $use_syslog {
class { "::openstack::logging":
stage => 'first',
role => 'client',
# use date-rfc3339 timestamps
show_timezone => true,
# log both locally (including auth) and to the remote server
log_remote => true,
log_local => true,
log_auth_local => true,
# keep four weekly log rotations, force rotation if the 300M size limit is exceeded
rotation => 'weekly',
keep => '4',
# should be > 30M
limitsize => '300M',
# remote servers to send logs to
rservers => [{'remote_type'=>'udp', 'server'=>'master', 'port'=>'514'},],
# should be true if the client is running on a virtual node
virtual => true,
# facilities
syslog_log_facility_glance => $syslog_log_facility_glance,
syslog_log_facility_cinder => $syslog_log_facility_cinder,
syslog_log_facility_quantum => $syslog_log_facility_quantum,
syslog_log_facility_nova => $syslog_log_facility_nova,
syslog_log_facility_keystone => $syslog_log_facility_keystone,
# Rabbit doesn't support syslog directly; this should be >= syslog_log_level,
# otherwise none of rabbit's messages will go to syslog
rabbit_log_level => $syslog_log_level,
debug => $debug,
}
}
# Example for server role class definition for remote logging node:
# class {::openstack::logging:
# role => 'server',
# log_remote => false,
# log_local => true,
# log_auth_local => true,
# rotation => 'daily',
# keep => '7',
# limitsize => '100M',
# port => '514',
# proto => 'udp',
# #high precision timestamps
# show_timezone => true,
# #should be true if the server is running on a virtual node
# #virtual => false,
# }
### Syslog END ###
case $::osfamily {
"Debian": {
$rabbitmq_version_string = '2.8.7-1'
}
"RedHat": {
$rabbitmq_version_string = '2.8.7-2.el6'
}
}
# OpenStack packages to be installed
$openstack_version = {
'keystone' => 'latest',
'glance' => 'latest',
'horizon' => 'latest',
'nova' => 'latest',
'novncproxy' => 'latest',
'cinder' => 'latest',
'rabbitmq_version' => $rabbitmq_version_string,
}
# Which package repo mirror to use. Currently "default".
# "custom" is used by Mirantis for testing purposes.
# Local puppet-managed repo option planned for future releases.
# If you want to set up a local repository, you will need to manually adjust mirantis_repos.pp,
# though it is NOT recommended.
$mirror_type = 'default'
$enable_test_repo = false
$repo_proxy = undef
$use_upstream_mysql = true
#Rate Limits for cinder and Nova
#Cinder and Nova can rate-limit your requests to API services.
#These limits can be reduced for your installation or usage scenario.
#Change the following variables if you want. They are measured in requests per minute.
$nova_rate_limits = {
'POST' => 1000,
'POST_SERVERS' => 1000,
'PUT' => 1000, 'GET' => 1000,
'DELETE' => 1000
}
$cinder_rate_limits = {
'POST' => 1000,
'POST_SERVERS' => 1000,
'PUT' => 1000, 'GET' => 1000,
'DELETE' => 1000
}
Exec { logoutput => true }
#Specify desired NTP servers here.
#If you leave it undef, pool.ntp.org
#will be used
$ntp_servers = ['pool.ntp.org']
class {'openstack::clocksync': ntp_servers=>$ntp_servers}
#Run the clocksync exec from openstack::clocksync before services
#connecting to the AMQP server are started.
Exec<| title == 'clocksync' |>->Nova::Generic_service<| |>
Exec<| title == 'clocksync' |>->Service<| title == 'cinder-volume' |>
Exec<| title == 'clocksync' |>->Service<| title == 'cinder-api' |>
Exec<| title == 'clocksync' |>->Service<| title == 'cinder-scheduler' |>
Exec<| title == 'clocksync' |>->Exec<| title == 'keystone-manage db_sync' |>
Exec<| title == 'clocksync' |>->Exec<| title == 'keystone-manage pki_setup' |>
Exec<| title == 'clocksync' |>->Exec<| title == 'glance-manage db_sync' |>
Exec<| title == 'clocksync' |>->Exec<| title == 'nova-manage db sync' |>
Exec<| title == 'clocksync' |>->Exec<| title == 'initial-db-sync' |>
Exec<| title == 'clocksync' |>->Exec<| title == 'post-nova_config' |>
### END OF PUBLIC CONFIGURATION PART ###
# Normally, you do not need to change anything after this line
# Globally apply an environment-based tag to all resources on each node.
tag("${::deployment_id}::${::environment}")
class { 'openstack::mirantis_repos':
stage => 'openstack-custom-repo',
type=>$mirror_type,
enable_test_repo=>$enable_test_repo,
repo_proxy=>$repo_proxy,
use_upstream_mysql=>$use_upstream_mysql
}
class { '::openstack::firewall':
stage => 'openstack-firewall'
}
if !defined(Class['selinux']) and ($::osfamily == 'RedHat') {
class { 'selinux':
mode=>"disabled",
stage=>"openstack-custom-repo"
}
}
if $::operatingsystem == 'Ubuntu' {
class { 'openstack::apparmor::disable': stage => 'openstack-custom-repo' }
}
sysctl::value { 'net.ipv4.conf.all.rp_filter': value => '0' }
# Dashboard(horizon) https/ssl mode
# false: normal mode with no encryption
# 'default': uses keys supplied with the ssl module package
# 'exist': assumes that the keys (domain name based certificate) are provisioned in advance
# 'custom': require fileserver static mount point [ssl_certs] and hostname based certificate existence
$horizon_use_ssl = false
$horizon_secret_key = 'dummy_secret_key'
# Every node should be deployed as an all-in-one openstack installation.
node default {
include stdlib
class { 'operatingsystem::checksupported':
stage => 'setup'
}
class {'::node_netconfig':
mgmt_ipaddr => $::internal_address,
mgmt_netmask => $::internal_netmask,
public_ipaddr => $::public_address,
public_netmask => $::public_netmask,
stage => 'netconfig',
}
class {'nagios':
proj_name => $proj_name,
services => [
'host-alive','nova-novncproxy','keystone', 'nova-scheduler',
'nova-consoleauth', 'nova-cert', 'nova-api', 'glance-api',
'glance-registry','horizon', 'rabbitmq', 'mysql',
],
whitelist => ['127.0.0.1', $nagios_master],
hostgroup => 'controller',
}
class { 'openstack::all':
admin_address => $controller_internal_address,
service_endpoint => $controller_internal_address,
public_address => $controller_public_address,
public_interface => $public_int,
private_interface => $private_interface,
internal_address => $controller_internal_address,
floating_range => $floating_range,
fixed_range => $fixed_range,
network_manager => $network_manager,
num_networks => $num_networks,
network_size => $network_size,
network_config => { 'vlan_start' => $vlan_start },
verbose => $verbose,
debug => $debug,
auto_assign_floating_ip => $auto_assign_floating_ip,
mysql_root_password => $mysql_root_password,
admin_email => $admin_email,
admin_password => $admin_password,
keystone_db_password => $keystone_db_password,
keystone_admin_token => $keystone_admin_token,
glance_db_password => $glance_db_password,
glance_user_password => $glance_user_password,
nova_db_password => $nova_db_password,
nova_user_password => $nova_user_password,
secret_key => $horizon_secret_key,
rabbit_password => $rabbit_password,
rabbit_user => $rabbit_user,
purge_nova_config => false,
cinder => $cinder,
cinder_iscsi_bind_addr => $cinder_iscsi_bind_addr,
manage_volumes => $cinder ? { false => $manage_volumes, default =>$is_cinder_node },
nv_physical_volume => $nv_physical_volume,
use_syslog => $use_syslog,
syslog_log_level => $syslog_log_level,
syslog_log_facility_glance => $syslog_log_facility_glance,
syslog_log_facility_cinder => $syslog_log_facility_cinder,
syslog_log_facility_quantum => $syslog_log_facility_quantum,
syslog_log_facility_nova => $syslog_log_facility_nova,
syslog_log_facility_keystone => $syslog_log_facility_keystone,
nova_rate_limits => $nova_rate_limits,
cinder_rate_limits => $cinder_rate_limits,
quantum => $quantum,
swift => $swift,
glance_backend => $glance_backend,
}
class { 'openstack::auth_file':
admin_password => $admin_password,
keystone_admin_token => $keystone_admin_token,
controller_node => $controller_internal_address,
}
}

View File

@ -3,7 +3,7 @@ import re
import time import time
import sys import sys
import optparse import optparse
from quantumclient.quantum import client as q_client from neutronclient.neutron import client as q_client
from keystoneclient.v2_0 import client as ks_client from keystoneclient.v2_0 import client as ks_client
API_VER = '2.0' API_VER = '2.0'
@ -22,7 +22,7 @@ def get_authconfig(cfg_file):
return rv return rv
class QuantumXxx(object): class NeutronXxx(object):
def __init__(self, openrc, retries=20, sleep=2): def __init__(self, openrc, retries=20, sleep=2):
self.auth_config = openrc self.auth_config = openrc
self.connect_retries = retries self.connect_retries = retries
@ -64,7 +64,7 @@ class QuantumXxx(object):
ret_count = self.connect_retries ret_count = self.connect_retries
while True: while True:
if ret_count <= 0 : if ret_count <= 0 :
print(">>> Quantum error: no more retries for connect to keystone server.") print(">>> Neutron error: no more retries for connect to keystone server.")
sys.exit(1) sys.exit(1)
try: try:
rv = self.client.list_ports()['ports'] rv = self.client.list_ports()['ports']
@ -78,7 +78,7 @@ class QuantumXxx(object):
print(">>> Can't connect to {0}, wait for server ready...".format(self.keystone.service_catalog.url_for(service_type='network'))) print(">>> Can't connect to {0}, wait for server ready...".format(self.keystone.service_catalog.url_for(service_type='network')))
time.sleep(self.sleep) time.sleep(self.sleep)
else: else:
print(">>> Quantum error:\n{0}".format(e.message)) print(">>> Neutron error:\n{0}".format(e.message))
raise e raise e
ret_count -= 1 ret_count -= 1
return rv return rv
@ -126,7 +126,7 @@ if __name__ == '__main__':
if len(args) != 1: if len(args) != 1:
parser.error("incorrect number of arguments") parser.error("incorrect number of arguments")
# #
Qu = QuantumXxx(get_authconfig(options.authconf), retries=options.retries) Qu = NeutronXxx(get_authconfig(options.authconf), retries=options.retries)
for i in Qu.get_ifnames_for(args[0].strip(" \"\'"), activeonly=options.activeonly): for i in Qu.get_ifnames_for(args[0].strip(" \"\'"), activeonly=options.activeonly):
print(i) print(i)
### ###

View File

@ -313,18 +313,8 @@ class openstack::compute (
} }
} else { } else {
# if ! $quantum_sql_connection { class { '::neutron':
# fail('quantum sql connection must be specified when quantum is installed on compute instances') neutron_config => $quantum_config,
# }
# if ! $quantum_host {
# fail('quantum host must be specified when quantum is installed on compute instances')
# }
# if ! $quantum_user_password {
# fail('quantum user password must be set when quantum is configured')
# }
class { '::quantum':
quantum_config => $quantum_config,
verbose => $verbose, verbose => $verbose,
debug => $debug, debug => $debug,
use_syslog => $use_syslog, use_syslog => $use_syslog,
@ -333,12 +323,12 @@ class openstack::compute (
} }
#todo: Quantum plugin and database connection are not needed on compute nodes. #todo: Quantum plugin and database connection are not needed on compute nodes.
class { 'quantum::plugins::ovs': class { 'neutron::plugins::ovs':
quantum_config => $quantum_config neutron_config => $quantum_config
} }
class { 'quantum::agents::ovs': class { 'neutron::agents::ovs':
quantum_config => $quantum_config, neutron_config => $quantum_config,
# bridge_uplinks => ["br-prv:${private_interface}"], # bridge_uplinks => ["br-prv:${private_interface}"],
# bridge_mappings => ['physnet2:br-prv'], # bridge_mappings => ['physnet2:br-prv'],
# enable_tunneling => $enable_tunneling, # enable_tunneling => $enable_tunneling,
@ -353,13 +343,13 @@ class openstack::compute (
source => 'puppet:///modules/nova/libvirt_qemu.conf', source => 'puppet:///modules/nova/libvirt_qemu.conf',
} }
class { 'nova::compute::quantum': } class { 'nova::compute::neutron': }
# does this have to be installed on the compute node? # does this have to be installed on the compute node?
# NOTE # NOTE
class { 'nova::network::quantum': class { 'nova::network::neutron':
quantum_config => $quantum_config, neutron_config => $quantum_config,
quantum_connection_host => $service_endpoint neutron_connection_host => $service_endpoint
} }
nova_config { nova_config {
@ -368,5 +358,4 @@ class openstack::compute (
} }
} }
} }
# vim: set ts=2 sw=2 et : # vim: set ts=2 sw=2 et :

View File

@ -332,7 +332,7 @@ class openstack::controller_ha (
nameservers => $nameservers, nameservers => $nameservers,
} }
if $quantum and $quantum_network_node { if $quantum and $quantum_network_node {
class { '::openstack::quantum_router': class { '::openstack::neutron_router':
#service_endpoint => $internal_virtual_ip, #service_endpoint => $internal_virtual_ip,
#auth_host => $internal_virtual_ip, #auth_host => $internal_virtual_ip,
#nova_api_vip => $internal_virtual_ip, #nova_api_vip => $internal_virtual_ip,
@ -343,10 +343,10 @@ class openstack::controller_ha (
#create_networks => $create_networks, #create_networks => $create_networks,
verbose => $verbose, verbose => $verbose,
debug => $debug, debug => $debug,
quantum => $quantum, neutron => $quantum,
quantum_config => $quantum_config, neutron_config => $quantum_config,
quantum_network_node => $quantum_network_node, neutron_network_node => $quantum_network_node,
#quantum_netnode_on_cnt=> $quantum_netnode_on_cnt, #neutron_netnode_on_cnt=> $quantum_netnode_on_cnt,
service_provider => $ha_provider, service_provider => $ha_provider,
use_syslog => $use_syslog, use_syslog => $use_syslog,
syslog_log_level => $syslog_log_level, syslog_log_level => $syslog_log_level,

View File

@ -1,5 +1,5 @@
# todo: move this file and ocf scripts to cluster module # todo: move this file and ocf scripts to cluster module
# todo: refactor quantum-* ocf scripts # todo: refactor neutron-* ocf scripts
class openstack::corosync ( class openstack::corosync (
$bind_address = '127.0.0.1', $bind_address = '127.0.0.1',
$multicast_address = '239.1.1.2', $multicast_address = '239.1.1.2',
@ -37,7 +37,7 @@ file {'filter_quantum_ports.py':
group => root, group => root,
source => "puppet:///modules/openstack/filter_quantum_ports.py", source => "puppet:///modules/openstack/filter_quantum_ports.py",
} }
File['filter_quantum_ports.py'] -> File<| title == 'quantum-ovs-agent' |> File['filter_quantum_ports.py'] -> File<| title == 'neutron-ovs-agent' |>
file {'mysql-wss': file {'mysql-wss':
path=>'/usr/lib/ocf/resource.d/mirantis/mysql', path=>'/usr/lib/ocf/resource.d/mirantis/mysql',
@ -48,12 +48,12 @@ file {'mysql-wss':
source => "puppet:///modules/openstack/mysql-wss", source => "puppet:///modules/openstack/mysql-wss",
} -> Corosync::Service['pacemaker'] } -> Corosync::Service['pacemaker']
file {'quantum-ovs-agent': file {'neutron-ovs-agent':
path=>'/usr/lib/ocf/resource.d/pacemaker/quantum-agent-ovs', path=>'/usr/lib/ocf/resource.d/pacemaker/neutron-agent-ovs',
mode => 755, mode => 755,
owner => root, owner => root,
group => root, group => root,
source => "puppet:///modules/openstack/quantum-agent-ovs", source => "puppet:///modules/openstack/neutron-agent-ovs",
} -> Corosync::Service['pacemaker'] } -> Corosync::Service['pacemaker']
Anchor['corosync'] -> Anchor['corosync'] ->

View File

@ -27,7 +27,7 @@ class openstack::firewall (
$memcached_port = 11211, $memcached_port = 11211,
$rsync_port = 873, $rsync_port = 873,
$iscsi_port = 3260, $iscsi_port = 3260,
$quantum_api_port = 9696, $neutron_api_port = 9696,
$dns_server_port = 53, $dns_server_port = 53,
$dhcp_server_port = 67, $dhcp_server_port = 67,
$ntp_server_port = 123, $ntp_server_port = 123,
@ -146,8 +146,8 @@ class openstack::firewall (
action => 'accept', action => 'accept',
} }
firewall {'110 quantum ': firewall {'110 neutron ':
port => $quantum_api_port, port => $neutron_api_port,
proto => 'tcp', proto => 'tcp',
action => 'accept', action => 'accept',
} }

View File

@ -1,17 +1,17 @@
#This class installs quantum WITHOUT the quantum API server, which is installed on controller nodes #This class installs neutron WITHOUT the neutron API server, which is installed on controller nodes
# [use_syslog] Whether or not the service should log to syslog. Optional. # [use_syslog] Whether or not the service should log to syslog. Optional.
# [syslog_log_facility] Facility for syslog, if used. Optional. Note: a duplicate conf option # [syslog_log_facility] Facility for syslog, if used. Optional. Note: a duplicate conf option
# would not have been used; the more powerful rsyslog features are managed via the conf template instead # would not have been used; the more powerful rsyslog features are managed via the conf template instead
# [syslog_log_level] logging level for non-verbose and non-debug mode. Optional. # [syslog_log_level] logging level for non-verbose and non-debug mode. Optional.
class openstack::quantum_router ( class openstack::neutron_router (
$verbose = 'False', $verbose = 'False',
$debug = 'False', $debug = 'False',
$enabled = true, $enabled = true,
$quantum = true, $neutron = true,
$quantum_config = {}, $neutron_config = {},
$quantum_network_node = false, $neutron_network_node = false,
$quantum_server = true, $neutron_server = true,
$use_syslog = false, $use_syslog = false,
$syslog_log_facility = 'LOCAL4', $syslog_log_facility = 'LOCAL4',
$syslog_log_level = 'WARNING', $syslog_log_level = 'WARNING',
@ -22,8 +22,8 @@ class openstack::quantum_router (
# $private_interface = "br-mgmt", # $private_interface = "br-mgmt",
# $create_networks = true, # $create_networks = true,
) { ) {
class { '::quantum': class { '::neutron':
quantum_config => $quantum_config, neutron_config => $neutron_config,
verbose => $verbose, verbose => $verbose,
debug => $debug, debug => $debug,
use_syslog => $use_syslog, use_syslog => $use_syslog,
@ -31,43 +31,36 @@ class openstack::quantum_router (
syslog_log_level => $syslog_log_level, syslog_log_level => $syslog_log_level,
server_ha_mode => $ha_mode, server_ha_mode => $ha_mode,
} }
#todo: add quantum::server here (into IF) #todo: add neutron::server here (into IF)
class { 'quantum::plugins::ovs': class { '::neutron::plugins::ovs':
quantum_config => $quantum_config, neutron_config => $neutron_config,
#bridge_mappings => ["physnet1:br-ex","physnet2:br-prv"], #bridge_mappings => ["physnet1:br-ex","physnet2:br-prv"],
} }
if $quantum_network_node { if $neutron_network_node {
class { 'quantum::agents::ovs': class { '::neutron::agents::ovs':
#bridge_uplinks => ["br-prv:${private_interface}"],
#bridge_mappings => ['physnet2:br-prv'],
#verbose => $verbose,
#debug => $debug,
service_provider => $service_provider, service_provider => $service_provider,
quantum_config => $quantum_config, } neutron_config => $neutron_config, }
# Quantum metadata agent starts only under pacemaker # neutron metadata agent starts only under pacemaker
# and co-located with l3-agent # and co-located with l3-agent
class {'quantum::agents::metadata': class {'::neutron::agents::metadata':
verbose => $verbose, verbose => $verbose,
debug => $debug, debug => $debug,
service_provider => $service_provider, service_provider => $service_provider,
quantum_config => $quantum_config, neutron_config => $neutron_config,
#metadata_ip => $nova_api_vip,
} }
class { 'quantum::agents::dhcp': class { '::neutron::agents::dhcp':
quantum_config => $quantum_config, neutron_config => $neutron_config,
verbose => $verbose, verbose => $verbose,
debug => $debug, debug => $debug,
service_provider => $service_provider, service_provider => $service_provider,
} }
class { 'quantum::agents::l3': class { '::neutron::agents::l3':
#enabled => $quantum_l3_enable, neutron_config => $neutron_config,
quantum_config => $quantum_config, verbose => $verbose,
verbose => $verbose, debug => $debug,
debug => $debug, service_provider => $service_provider,
service_provider => $service_provider,
#create_networks => $create_networks,
#segment_range => $segment_range,
} }
} }
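# A minimal declaration of the renamed wrapper class might look like the sketch
# below; the configuration hash (built elsewhere, e.g. by sanitize_quantum_config)
# and the provider value are placeholders, not taken from a real deployment:
class { '::openstack::neutron_router':
  neutron              => true,
  neutron_config       => $neutron_config,
  neutron_network_node => true,
  verbose              => 'True',
  debug                => 'False',
  use_syslog           => true,
  syslog_log_facility  => 'LOCAL4',
  syslog_log_level     => 'WARNING',
  service_provider     => 'pacemaker',
}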

View File

@ -274,14 +274,15 @@ class openstack::nova::controller (
} }
} else { } else {
# Set up Quantum # Set up Quantum
#todo: move to ::openstack::controller and ::openstack::neutron_router
class { 'quantum::server': #todo: from HERE to <<<
quantum_config => $quantum_config, class { '::neutron::server':
neutron_config => $quantum_config,
primary_controller => $primary_controller primary_controller => $primary_controller
} }
if $quantum and !$quantum_network_node { if $quantum and !$quantum_network_node {
class { '::quantum': class { '::neutron':
quantum_config => $quantum_config, neutron_config => $quantum_config,
verbose => $verbose, verbose => $verbose,
debug => $debug, debug => $debug,
use_syslog => $use_syslog, use_syslog => $use_syslog,
@ -290,14 +291,15 @@ class openstack::nova::controller (
server_ha_mode => $ha_mode, server_ha_mode => $ha_mode,
} }
} }
class { 'nova::network::quantum': #todo: <<<
quantum_config => $quantum_config, class { '::nova::network::neutron':
quantum_connection_host => $service_endpoint neutron_config => $quantum_config,
neutron_connection_host => $service_endpoint
} }
} }
# Configure nova-api # Configure nova-api
class { 'nova::api': class { '::nova::api':
enabled => $enabled, enabled => $enabled,
admin_password => $nova_user_password, admin_password => $nova_user_password,
auth_host => $keystone_host, auth_host => $keystone_host,
@ -318,7 +320,7 @@ class openstack::nova::controller (
# ensure_package => $ensure_package, # ensure_package => $ensure_package,
#} #}
class {'nova::conductor': class {'::nova::conductor':
enabled => $enabled, enabled => $enabled,
ensure_package => $ensure_package, ensure_package => $ensure_package,
} }
@ -337,7 +339,7 @@ class openstack::nova::controller (
ensure_package => $ensure_package ensure_package => $ensure_package
} }
class { 'nova::consoleauth': class { '::nova::consoleauth':
enabled => $enabled, enabled => $enabled,
ensure_package => $ensure_package, ensure_package => $ensure_package,
} }

View File

@ -1,6 +1,6 @@
"/var/log/*-all.log" "/var/log/corosync.log" "/var/log/remote/*/*log" "/var/log/*-all.log" "/var/log/corosync.log" "/var/log/remote/*/*log"
"/var/log/kern.log" "/var/log/debug" "/var/log/syslog" "/var/log/kern.log" "/var/log/debug" "/var/log/syslog"
"/var/log/dashboard.log" "/var/log/ha.log" "/var/log/quantum/*.log" "/var/log/dashboard.log" "/var/log/ha.log" "/var/log/neutron/*.log"
"/var/log/nova/*.log" "/var/log/keystone/*.log" "/var/log/glance/*.log" "/var/log/nova/*.log" "/var/log/keystone/*.log" "/var/log/glance/*.log"
"/var/log/cinder/*.log" "/var/log/cinder/*.log"
# This file is used for hourly log rotations, use (min)size options here # This file is used for hourly log rotations, use (min)size options here

View File

@ -4,8 +4,6 @@ class osnailyfacter::cluster_simple {
$novanetwork_params = {} $novanetwork_params = {}
$quantum_config = sanitize_quantum_config($::fuel_settings, 'quantum_settings') $quantum_config = sanitize_quantum_config($::fuel_settings, 'quantum_settings')
} else { } else {
$quantum_hash = {}
$quantum_params = {}
$quantum_config = {} $quantum_config = {}
$novanetwork_params = $::fuel_settings['novanetwork_parameters'] $novanetwork_params = $::fuel_settings['novanetwork_parameters']
$network_config = { $network_config = {

View File

@ -1,33 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<projectDescription>
<name>quantum</name>
<comment></comment>
<projects>
<project>corosync</project>
<project>haproxy</project>
<project>keystone</project>
<project>l23network</project>
<project>nova</project>
<project>openstack</project>
<project>selinux</project>
<project>stdlib</project>
<project>sysctl</project>
<project>vswitch</project>
</projects>
<buildSpec>
<buildCommand>
<name>org.cloudsmith.geppetto.pp.dsl.ui.modulefileBuilder</name>
<arguments>
</arguments>
</buildCommand>
<buildCommand>
<name>org.eclipse.xtext.ui.shared.xtextBuilder</name>
<arguments>
</arguments>
</buildCommand>
</buildSpec>
<natures>
<nature>org.cloudsmith.geppetto.pp.dsl.ui.puppetNature</nature>
<nature>org.eclipse.xtext.ui.shared.xtextNature</nature>
</natures>
</projectDescription>

View File

@ -1,4 +0,0 @@
Puppet module for OpenStack Quantum
===================================
(Work under development)

View File

@ -1,24 +0,0 @@
Puppet::Type.type(:ini_setting)#.providers
Puppet::Type.type(:quantum_plugin_ovs).provide(
:ini_setting,
:parent => Puppet::Type.type(:ini_setting).provider(:ruby)
) do
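# resource titles follow the 'SECTION/setting' convention; the helpers below
# split the title to locate the section and key inside the OVS plugin ini file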
def section
resource[:name].split('/', 2).first
end
def setting
resource[:name].split('/', 2).last
end
def separator
'='
end
def file_path
'/etc/quantum/plugins/openvswitch/ovs_quantum_plugin.ini'
end
end

View File

@ -1,226 +0,0 @@
#
class quantum::agents::dhcp (
$quantum_config = {},
$verbose = 'False',
$debug = 'False',
$interface_driver = 'quantum.agent.linux.interface.OVSInterfaceDriver',
$dhcp_driver = 'quantum.agent.linux.dhcp.Dnsmasq',
$dhcp_agent_manager='quantum.agent.dhcp_agent.DhcpAgentWithStateReport',
$state_path = '/var/lib/quantum',
$service_provider = 'generic',
) {
include 'quantum::params'
if $::operatingsystem == 'Ubuntu' {
if $service_provider == 'pacemaker' {
file { "/etc/init/quantum-dhcp-agent.override":
replace => "no",
ensure => "present",
content => "manual",
mode => 644,
before => Package['quantum-dhcp-agent'],
}
}
}
if $::quantum::params::dhcp_agent_package {
Package['quantum'] -> Package['quantum-dhcp-agent']
$dhcp_agent_package = 'quantum-dhcp-agent'
package { 'quantum-dhcp-agent':
name => $::quantum::params::dhcp_agent_package
}
} else {
$dhcp_agent_package = $::quantum::params::package_name
}
include 'quantum::waist_setup'
anchor {'quantum-dhcp-agent': }
#Anchor['quantum-metadata-agent-done'] -> Anchor['quantum-dhcp-agent']
Service<| title=='quantum-server' |> -> Anchor['quantum-dhcp-agent']
case $dhcp_driver {
/\.Dnsmasq/ : {
package { $::quantum::params::dnsmasq_packages: ensure => present, }
Package[$::quantum::params::dnsmasq_packages] -> Package[$dhcp_agent_package]
$dhcp_server_packages = $::quantum::params::dnsmasq_packages
}
default : {
fail("${dhcp_driver} is not supported as of now")
}
}
Package[$dhcp_agent_package] -> Quantum_dhcp_agent_config <| |>
Package[$dhcp_agent_package] -> Quantum_config <| |>
quantum_dhcp_agent_config {
'DEFAULT/debug': value => $debug;
'DEFAULT/verbose': value => $verbose;
'DEFAULT/state_path': value => $state_path;
'DEFAULT/interface_driver': value => $interface_driver;
'DEFAULT/dhcp_driver': value => $dhcp_driver;
'DEFAULT/dhcp_agent_manager':value => $dhcp_agent_manager;
'DEFAULT/auth_url': value => $quantum_config['keystone']['auth_url'];
'DEFAULT/admin_user': value => $quantum_config['keystone']['admin_user'];
'DEFAULT/admin_password': value => $quantum_config['keystone']['admin_password'];
'DEFAULT/admin_tenant_name': value => $quantum_config['keystone']['admin_tenant_name'];
'DEFAULT/resync_interval': value => $quantum_config['L3']['resync_interval'];
'DEFAULT/use_namespaces': value => $quantum_config['L3']['use_namespaces'];
'DEFAULT/root_helper': value => $quantum_config['root_helper'];
'DEFAULT/signing_dir': value => $quantum_config['keystone']['signing_dir'];
'DEFAULT/enable_isolated_metadata': value => $quantum_config['L3']['dhcp_agent']['enable_isolated_metadata'];
'DEFAULT/enable_metadata_network': value => $quantum_config['L3']['dhcp_agent']['enable_metadata_network'];
}
Service <| title == 'quantum-server' |> -> Service['quantum-dhcp-service']
if $service_provider == 'pacemaker' {
Service <| title == 'quantum-server' |> -> Cs_shadow['dhcp']
Quantum_dhcp_agent_config <| |> -> Cs_shadow['dhcp']
# OCF script for pacemaker
# and its dependencies
file {'quantum-dhcp-agent-ocf':
path=>'/usr/lib/ocf/resource.d/mirantis/quantum-agent-dhcp',
mode => 755,
owner => root,
group => root,
source => "puppet:///modules/quantum/ocf/quantum-agent-dhcp",
}
Package['pacemaker'] -> File['quantum-dhcp-agent-ocf']
File['quantum-dhcp-agent-ocf'] -> Cs_resource["p_${::quantum::params::dhcp_agent_service}"]
File['q-agent-cleanup.py'] -> Cs_resource["p_${::quantum::params::dhcp_agent_service}"]
File<| title=='quantum-logging.conf' |> ->
cs_resource { "p_${::quantum::params::dhcp_agent_service}":
ensure => present,
cib => 'dhcp',
primitive_class => 'ocf',
provided_by => 'mirantis',
primitive_type => 'quantum-agent-dhcp',
#require => File['quantum-agent-dhcp'],
parameters => {
'os_auth_url' => $quantum_config['keystone']['auth_url'],
'tenant' => $quantum_config['keystone']['admin_tenant_name'],
'username' => $quantum_config['keystone']['admin_user'],
'password' => $quantum_config['keystone']['admin_password'],
}
,
operations => {
'monitor' => {
'interval' => '20',
'timeout' => '30'
}
,
'start' => {
'timeout' => '360'
}
,
'stop' => {
'timeout' => '360'
}
}
,
}
Cs_commit <| title == 'ovs' |> -> Cs_shadow <| title == 'dhcp' |>
Cs_commit <| title == 'quantum-metadata-agent' |> -> Cs_shadow <| title == 'dhcp' |>
::corosync::cleanup { "p_${::quantum::params::dhcp_agent_service}": }
Cs_commit['dhcp'] -> ::Corosync::Cleanup["p_${::quantum::params::dhcp_agent_service}"]
Cs_commit['dhcp'] ~> ::Corosync::Cleanup["p_${::quantum::params::dhcp_agent_service}"]
::Corosync::Cleanup["p_${::quantum::params::dhcp_agent_service}"] -> Service['quantum-dhcp-service']
Cs_resource["p_${::quantum::params::dhcp_agent_service}"] -> Cs_colocation['dhcp-with-ovs']
Cs_resource["p_${::quantum::params::dhcp_agent_service}"] -> Cs_order['dhcp-after-ovs']
Cs_resource["p_${::quantum::params::dhcp_agent_service}"] -> Cs_colocation['dhcp-with-metadata']
Cs_resource["p_${::quantum::params::dhcp_agent_service}"] -> Cs_order['dhcp-after-metadata']
cs_shadow { 'dhcp': cib => 'dhcp' }
cs_commit { 'dhcp': cib => 'dhcp' }
cs_colocation { 'dhcp-with-ovs':
ensure => present,
cib => 'dhcp',
primitives => [
"p_${::quantum::params::dhcp_agent_service}",
"clone_p_${::quantum::params::ovs_agent_service}"
],
score => 'INFINITY',
} ->
cs_order { 'dhcp-after-ovs':
ensure => present,
cib => 'dhcp',
first => "clone_p_${::quantum::params::ovs_agent_service}",
second => "p_${::quantum::params::dhcp_agent_service}",
score => 'INFINITY',
} -> Service['quantum-dhcp-service']
cs_colocation { 'dhcp-with-metadata':
ensure => present,
cib => 'dhcp',
primitives => [
"p_${::quantum::params::dhcp_agent_service}",
"clone_p_quantum-metadata-agent"
],
score => 'INFINITY',
} ->
cs_order { 'dhcp-after-metadata':
ensure => present,
cib => 'dhcp',
first => "clone_p_quantum-metadata-agent",
second => "p_${::quantum::params::dhcp_agent_service}",
score => 'INFINITY',
} -> Service['quantum-dhcp-service']
Service['quantum-dhcp-service_stopped'] -> Cs_resource["p_${::quantum::params::dhcp_agent_service}"]
service { 'quantum-dhcp-service_stopped':
name => "${::quantum::params::dhcp_agent_service}",
enable => false,
ensure => stopped,
hasstatus => true,
hasrestart => true,
provider => $::quantum::params::service_provider,
require => [Package[$dhcp_agent_package], Class['quantum']],
}
Quantum::Network::Provider_router<||> -> Service<| title=='quantum-dhcp-service' |>
service { 'quantum-dhcp-service':
name => "p_${::quantum::params::dhcp_agent_service}",
enable => true,
ensure => running,
hasstatus => true,
hasrestart => false,
provider => $service_provider,
require => [Package[$dhcp_agent_package], Class['quantum'], Service['quantum-ovs-agent']],
}
} else {
Quantum_config <| |> ~> Service['quantum-dhcp-service']
Quantum_dhcp_agent_config <| |> ~> Service['quantum-dhcp-service']
File<| title=='quantum-logging.conf' |> ->
service { 'quantum-dhcp-service':
name => $::quantum::params::dhcp_agent_service,
enable => true,
ensure => running,
hasstatus => true,
hasrestart => true,
provider => $::quantum::params::service_provider,
require => [Package[$dhcp_agent_package], Class['quantum'], Service['quantum-ovs-agent']],
}
}
Class[quantum::waistline] -> Service[quantum-dhcp-service]
Anchor['quantum-dhcp-agent'] ->
Quantum_dhcp_agent_config <| |> ->
Cs_resource<| title=="p_${::quantum::params::dhcp_agent_service}" |> ->
Service['quantum-dhcp-service'] ->
Anchor['quantum-dhcp-agent-done']
anchor {'quantum-dhcp-agent-done': }
}
# vim: set ts=2 sw=2 et :

View File

@ -1,244 +0,0 @@
#
class quantum::agents::l3 (
$quantum_config = {},
$verbose = 'False',
$debug = 'False',
$create_networks = true, # ?????????????????
$interface_driver = 'quantum.agent.linux.interface.OVSInterfaceDriver',
$service_provider = 'generic'
) {
include 'quantum::params'
anchor {'quantum-l3': }
Service<| title=='quantum-server' |> -> Anchor['quantum-l3']
if $::operatingsystem == 'Ubuntu' {
if $service_provider == 'pacemaker' {
file { "/etc/init/quantum-l3-agent.override":
replace => "no",
ensure => "present",
content => "manual",
mode => 644,
before => Package['quantum-l3'],
}
}
}
if $::quantum::params::l3_agent_package {
$l3_agent_package = 'quantum-l3'
package { 'quantum-l3':
name => $::quantum::params::l3_agent_package,
ensure => present,
}
# do not move it to outside this IF
Package['quantum-l3'] -> Quantum_l3_agent_config <| |>
} else {
$l3_agent_package = $::quantum::params::package_name
}
include 'quantum::waist_setup'
Quantum_config <| |> -> Quantum_l3_agent_config <| |>
Quantum_l3_agent_config <| |> -> Service['quantum-l3']
# Quantum_l3_agent_config <| |> -> Quantum_router <| |>
# Quantum_l3_agent_config <| |> -> Quantum_net <| |>
# Quantum_l3_agent_config <| |> -> Quantum_subnet <| |>
quantum_l3_agent_config {
'DEFAULT/debug': value => $debug;
'DEFAULT/verbose': value => $verbose;
'DEFAULT/root_helper': value => $quantum_config['root_helper'];
'DEFAULT/auth_url': value => $quantum_config['keystone']['auth_url'];
'DEFAULT/admin_user': value => $quantum_config['keystone']['admin_user'];
'DEFAULT/admin_password': value => $quantum_config['keystone']['admin_password'];
'DEFAULT/admin_tenant_name': value => $quantum_config['keystone']['admin_tenant_name'];
'DEFAULT/metadata_ip': value => $quantum_config['metadata']['metadata_ip'];
'DEFAULT/metadata_port': value => $quantum_config['metadata']['metadata_port'];
'DEFAULT/use_namespaces': value => $quantum_config['L3']['use_namespaces'];
'DEFAULT/send_arp_for_ha': value => $quantum_config['L3']['send_arp_for_ha'];
'DEFAULT/periodic_interval': value => $quantum_config['L3']['resync_interval'];
'DEFAULT/periodic_fuzzy_delay': value => $quantum_config['L3']['resync_fuzzy_delay'];
'DEFAULT/external_network_bridge': value => $quantum_config['L3']['public_bridge'];
}
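# keep router_id unset in the config file; it appears to be assigned at run time
# (note the setup_router_id exec ordered before the l3 service below)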
quantum_l3_agent_config{'DEFAULT/router_id': ensure => absent }
Anchor['quantum-l3'] ->
Quantum_l3_agent_config <| |> ->
Exec<| title=='setup_router_id' |> ->
#Exec<| title=='update_default_route_metric' |> ->
Service<| title=='quantum-l3' |> ->
#Exec<| title=='settle-down-default-route' |> ->
Anchor['quantum-l3-done']
# rootwrap error with L3 agent
# https://bugs.launchpad.net/quantum/+bug/1069966
$iptables_manager = "/usr/lib/${::quantum::params::python_path}/quantum/agent/linux/iptables_manager.py"
exec { 'patch-iptables-manager':
command => "sed -i '272 s|/sbin/||' ${iptables_manager}",
onlyif => "sed -n '272p' ${iptables_manager} | grep -q '/sbin/'",
path => ['/bin', '/sbin', '/usr/bin', '/usr/sbin'],
require => [Anchor['quantum-l3'], Package[$l3_agent_package]],
}
Service<| title == 'quantum-server' |> -> Service['quantum-l3']
if $service_provider == 'pacemaker' {
Service<| title == 'quantum-server' |> -> Cs_shadow['l3']
Quantum_l3_agent_config <||> -> Cs_shadow['l3']
# OCF script for pacemaker
# and its dependencies
file {'quantum-l3-agent-ocf':
path=>'/usr/lib/ocf/resource.d/mirantis/quantum-agent-l3',
mode => 755,
owner => root,
group => root,
source => "puppet:///modules/quantum/ocf/quantum-agent-l3",
}
Package['pacemaker'] -> File['quantum-l3-agent-ocf']
File['quantum-l3-agent-ocf'] -> Cs_resource["p_${::quantum::params::l3_agent_service}"]
File['q-agent-cleanup.py'] -> Cs_resource["p_${::quantum::params::l3_agent_service}"]
cs_resource { "p_${::quantum::params::l3_agent_service}":
ensure => present,
cib => 'l3',
primitive_class => 'ocf',
provided_by => 'mirantis',
primitive_type => 'quantum-agent-l3',
#require => File['quantum-l3-agent'],
parameters => {
'debug' => $debug,
'syslog' => $::use_syslog,
'os_auth_url' => $quantum_config['keystone']['auth_url'],
'tenant' => $quantum_config['keystone']['admin_tenant_name'],
'username' => $quantum_config['keystone']['admin_user'],
'password' => $quantum_config['keystone']['admin_password'],
},
operations => {
'monitor' => {
'interval' => '20',
'timeout' => '30'
}
,
'start' => {
'timeout' => '360'
}
,
'stop' => {
'timeout' => '360'
}
},
}
File<| title=='quantum-logging.conf' |> -> Cs_resource["p_${::quantum::params::l3_agent_service}"]
Exec<| title=='setup_router_id' |> -> Cs_resource["p_${::quantum::params::l3_agent_service}"]
cs_shadow { 'l3': cib => 'l3' }
cs_commit { 'l3': cib => 'l3' }
###
# Remember to be careful with Cs_shadow and Cs_commit ordering:
# only one shadow can be left uncommitted at a time
Cs_commit <| title == 'dhcp' |> -> Cs_shadow <| title == 'l3' |>
Cs_commit <| title == 'ovs' |> -> Cs_shadow <| title == 'l3' |>
Cs_commit <| title == 'quantum-metadata-agent' |> -> Cs_shadow <| title == 'l3' |>
::corosync::cleanup{"p_${::quantum::params::l3_agent_service}": }
Cs_commit['l3'] -> ::Corosync::Cleanup["p_${::quantum::params::l3_agent_service}"]
Cs_commit['l3'] ~> ::Corosync::Cleanup["p_${::quantum::params::l3_agent_service}"]
::Corosync::Cleanup["p_${::quantum::params::l3_agent_service}"] -> Service['quantum-l3']
Cs_resource["p_${::quantum::params::l3_agent_service}"] -> Cs_colocation['l3-with-ovs']
Cs_resource["p_${::quantum::params::l3_agent_service}"] -> Cs_order['l3-after-ovs']
Cs_resource["p_${::quantum::params::l3_agent_service}"] -> Cs_colocation['l3-with-metadata']
Cs_resource["p_${::quantum::params::l3_agent_service}"] -> Cs_order['l3-after-metadata']
cs_colocation { 'l3-with-ovs':
ensure => present,
cib => 'l3',
primitives => ["p_${::quantum::params::l3_agent_service}", "clone_p_${::quantum::params::ovs_agent_service}"],
score => 'INFINITY',
} ->
cs_order { 'l3-after-ovs':
ensure => present,
cib => 'l3',
first => "clone_p_${::quantum::params::ovs_agent_service}",
second => "p_${::quantum::params::l3_agent_service}",
score => 'INFINITY',
} -> Service['quantum-l3']
cs_colocation { 'l3-with-metadata':
ensure => present,
cib => 'l3',
primitives => [
"p_${::quantum::params::l3_agent_service}",
"clone_p_quantum-metadata-agent"
],
score => 'INFINITY',
} ->
cs_order { 'l3-after-metadata':
ensure => present,
cib => "l3",
first => "clone_p_quantum-metadata-agent",
second => "p_${::quantum::params::l3_agent_service}",
score => 'INFINITY',
} -> Service['quantum-l3']
# start DHCP and L3 agents on different controllers if it's possible
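# a finite negative score is a soft anti-affinity: pacemaker prefers separate
# nodes but may still co-locate both agents when no other node is available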
cs_colocation { 'dhcp-without-l3':
ensure => present,
cib => 'l3',
score => '-100',
primitives => [
"p_${::quantum::params::dhcp_agent_service}",
"p_${::quantum::params::l3_agent_service}"
],
}
# Ensure service is stopped and disabled by upstart/init/etc.
Anchor['quantum-l3'] ->
Service['quantum-l3-init_stopped'] ->
Cs_resource["p_${::quantum::params::l3_agent_service}"] ->
Service['quantum-l3'] ->
Anchor['quantum-l3-done']
service { 'quantum-l3-init_stopped':
name => "${::quantum::params::l3_agent_service}",
enable => false,
ensure => stopped,
hasstatus => true,
hasrestart => true,
provider => $::quantum::params::service_provider,
}
service { 'quantum-l3':
name => "p_${::quantum::params::l3_agent_service}",
enable => true,
ensure => running,
hasstatus => true,
hasrestart => false,
provider => "pacemaker",
}
} else {
Quantum_config <| |> ~> Service['quantum-l3']
Quantum_l3_agent_config <| |> ~> Service['quantum-l3']
File<| title=='quantum-logging.conf' |> ->
service { 'quantum-l3':
name => $::quantum::params::l3_agent_service,
enable => true,
ensure => running,
hasstatus => true,
hasrestart => true,
provider => $::quantum::params::service_provider,
}
}
anchor {'quantum-l3-cellar': }
Anchor['quantum-l3-cellar'] -> Anchor['quantum-l3-done']
anchor {'quantum-l3-done': }
Anchor['quantum-l3'] -> Anchor['quantum-l3-done']
}
# vim: set ts=2 sw=2 et :

View File

@ -1,210 +0,0 @@
class quantum::agents::ovs (
$quantum_config = {},
$service_provider = 'generic'
#$bridge_uplinks = ['br-ex:eth2'],
#$bridge_mappings = ['physnet1:br-ex'],
#$integration_bridge = 'br-int',
#$enable_tunneling = true,
) {
include 'quantum::params'
include 'quantum::waist_setup'
if defined(Anchor['quantum-plugin-ovs-done']) {
# install quantum-ovs-agent on the same host as
# quantum-server + quantum-ovs-plugin
Anchor['quantum-plugin-ovs-done'] -> Anchor['quantum-ovs-agent']
}
if defined(Anchor['quantum-server-done']) {
Anchor['quantum-server-done'] -> Anchor['quantum-ovs-agent']
}
anchor {'quantum-ovs-agent': }
if $::operatingsystem == 'Ubuntu' {
if $service_provider == 'pacemaker' {
file { "/etc/init/quantum-plugin-openvswitch-agent.override":
replace => "no",
ensure => "present",
content => "manual",
mode => 644,
before => Package['quantum-plugin-ovs-agent'],
}
}
}
if $::quantum::params::ovs_agent_package {
Package['quantum'] -> Package['quantum-plugin-ovs-agent']
$ovs_agent_package = 'quantum-plugin-ovs-agent'
package { 'quantum-plugin-ovs-agent':
name => $::quantum::params::ovs_agent_package,
}
} else {
$ovs_agent_package = $::quantum::params::ovs_server_package
}
if !defined(Anchor['quantum-server-done']) {
# if it is defined, this dependency has already been declared
Package[$ovs_agent_package] -> Quantum_plugin_ovs <| |>
}
l23network::l2::bridge { $quantum_config['L2']['integration_bridge']:
external_ids => "bridge-id=${quantum_config['L2']['integration_bridge']}",
ensure => present,
skip_existing => true,
}
if $quantum_config['L2']['enable_tunneling'] {
L23network::L2::Bridge<| |> ->
Anchor['quantum-ovs-agent-done']
l23network::l2::bridge { $quantum_config['L2']['tunnel_bridge']:
external_ids => "bridge-id=${quantum_config['L2']['tunnel_bridge']}",
ensure => present,
skip_existing => true,
} ->
Anchor['quantum-ovs-agent-done']
quantum_plugin_ovs { 'OVS/local_ip': value => $quantum_config['L2']['local_ip']; }
} else {
L23network::L2::Bridge[$quantum_config['L2']['integration_bridge']] ->
Anchor['quantum-ovs-agent-done']
quantum::agents::utils::bridges { $quantum_config['L2']['phys_bridges']: } ->
Anchor['quantum-ovs-agent-done']
}
#Quantum_config <| |> ~> Service['quantum-ovs-agent']
#Quantum_plugin_ovs <| |> ~> Service['quantum-ovs-agent']
#Service <| title == 'quantum-server' |> -> Service['quantum-ovs-agent']
if $service_provider == 'pacemaker' {
Quantum_config <| |> -> Cs_shadow['ovs']
Quantum_plugin_ovs <| |> -> Cs_shadow['ovs']
L23network::L2::Bridge <| |> -> Cs_shadow['ovs']
cs_shadow { 'ovs': cib => 'ovs' }
cs_commit { 'ovs': cib => 'ovs' }
::corosync::cleanup { "p_${::quantum::params::ovs_agent_service}": }
Cs_commit['ovs'] -> ::Corosync::Cleanup["p_${::quantum::params::ovs_agent_service}"]
Cs_commit['ovs'] ~> ::Corosync::Cleanup["p_${::quantum::params::ovs_agent_service}"]
::Corosync::Cleanup["p_${::quantum::params::ovs_agent_service}"] -> Service['quantum-ovs-agent']
File<| title=='quantum-logging.conf' |> ->
cs_resource { "p_${::quantum::params::ovs_agent_service}":
ensure => present,
cib => 'ovs',
primitive_class => 'ocf',
provided_by => 'pacemaker',
primitive_type => 'quantum-agent-ovs',
require => File['quantum-ovs-agent'] ,
multistate_hash => {
'type' => 'clone',
},
ms_metadata => {
'interleave' => 'true',
},
parameters => {
},
operations => {
'monitor' => {
'interval' => '20',
'timeout' => '30'
},
'start' => {
'timeout' => '480'
},
'stop' => {
'timeout' => '480'
}
},
}
case $::osfamily {
/(?i)redhat/: {
$started_status = "is running"
}
/(?i)debian/: {
$started_status = "start/running"
}
default: { fail("The $::osfamily operating system is not supported.") }
}
service { 'quantum-ovs-agent_stopped':
name => $::quantum::params::ovs_agent_service,
enable => false,
ensure => stopped,
hasstatus => false,
hasrestart => false
}
if $::osfamily =~ /(?i)debian/ {
exec { 'quantum-ovs-agent_stopped':
#todo: rewrite as a script that returns zero, or waits until it can return zero
name => "bash -c \"service ${::quantum::params::ovs_agent_service} stop || ( kill `pgrep -f quantum-openvswitch-agent` || : )\"",
onlyif => "service ${::quantum::params::ovs_agent_service} status | grep \'${started_status}\'",
path => ['/usr/bin', '/usr/sbin', '/bin', '/sbin'],
returns => [0,""]
}
}
L23network::L2::Bridge<| |> ->
Package[$ovs_agent_package] ->
Service['quantum-ovs-agent_stopped'] ->
Exec<| title=='quantum-ovs-agent_stopped' |> ->
Cs_resource["p_${::quantum::params::ovs_agent_service}"] ->
Service['quantum-ovs-agent']
service { 'quantum-ovs-agent':
name => "p_${::quantum::params::ovs_agent_service}",
enable => true,
ensure => running,
hasstatus => true,
hasrestart => false,
provider => $service_provider,
}
} else {
# NON-HA mode
service { 'quantum-ovs-agent':
name => $::quantum::params::ovs_agent_service,
enable => true,
ensure => running,
hasstatus => true,
hasrestart => true,
provider => $::quantum::params::service_provider,
}
Quantum_config<||> ~> Service['quantum-ovs-agent']
Quantum_plugin_ovs<||> ~> Service['quantum-ovs-agent']
}
Quantum_config<||> -> Service['quantum-ovs-agent']
Quantum_plugin_ovs<||> -> Service['quantum-ovs-agent']
Class[quantum::waistline] -> Service['quantum-ovs-agent']
#todo: this service must be disabled if quantum-ovs-agent is managed by pacemaker
if $::osfamily == 'redhat' {
service { 'quantum-ovs-cleanup':
name => 'quantum-ovs-cleanup',
enable => true,
ensure => stopped, # !!! Warning !!!
hasstatus => false, # !!! 'stopped' is not a mistake
hasrestart => false, # !!! cleanup is a simple script that runs once at OS boot
}
Service['quantum-ovs-agent'] -> # it's not a mistake!
Service['quantum-ovs-cleanup'] -> # the cleanup service runs after the agent.
Anchor['quantum-ovs-agent-done']
}
Anchor['quantum-ovs-agent'] ->
Service['quantum-ovs-agent'] ->
Anchor['quantum-ovs-agent-done']
anchor{'quantum-ovs-agent-done': }
Anchor['quantum-ovs-agent-done'] -> Anchor<| title=='quantum-l3' |>
Anchor['quantum-ovs-agent-done'] -> Anchor<| title=='quantum-dhcp-agent' |>
}
# vim: set ts=2 sw=2 et :

View File

@ -1,11 +0,0 @@
class quantum::client (
$package_ensure = present
) {
include 'quantum::params'
package { 'python-quantumclient':
name => $::quantum::params::client_package_name,
ensure => $package_ensure
}
}
# vim: set ts=2 sw=2 et :

View File

@ -1,39 +0,0 @@
class quantum::keystone::auth (
$quantum_config = {},
$configure_endpoint = true,
$service_type = 'network',
$public_address = '127.0.0.1',
$admin_address = '127.0.0.1',
$internal_address = '127.0.0.1',
) {
keystone_user { $quantum_config['keystone']['admin_user']:
ensure => present,
password => $quantum_config['keystone']['admin_password'],
email => $quantum_config['keystone']['admin_email'],
tenant => $quantum_config['keystone']['admin_tenant_name'],
}
keystone_user_role { "${quantum_config['keystone']['admin_user']}@services":
ensure => present,
roles => 'admin',
}
Keystone_user_role["${quantum_config['keystone']['admin_user']}@services"] ~> Service <| name == 'quantum-server' |>
keystone_service { $quantum_config['keystone']['admin_user']:
ensure => present,
type => $service_type,
description => "Quantum Networking Service",
}
if $configure_endpoint {
# keystone_endpoint { "${region}/$quantum_config['keystone']['admin_user']":
keystone_endpoint { $quantum_config['keystone']['admin_user']:
region => $quantum_config['keystone']['auth_region'],
ensure => present,
public_url => "http://${public_address}:${quantum_config['server']['bind_port']}",
admin_url => "http://${admin_address}:${$quantum_config['server']['bind_port']}",
internal_url => "http://${internal_address}:${$quantum_config['server']['bind_port']}",
}
}
}
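# The class above reads a nested configuration hash; a minimal matching
# declaration might look like this sketch (all values are placeholders):
$example_quantum_config = {
  'keystone' => {
    'admin_user'        => 'quantum',
    'admin_password'    => 'CHANGEME',
    'admin_email'       => 'quantum@localhost',
    'admin_tenant_name' => 'services',
    'auth_region'       => 'RegionOne',
  },
  'server'   => {
    'bind_port' => '9696',
  },
}
class { 'quantum::keystone::auth':
  quantum_config   => $example_quantum_config,
  public_address   => '10.0.0.10',
  admin_address    => '192.168.0.10',
  internal_address => '192.168.0.10',
}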

View File

@ -1,17 +0,0 @@
class quantum::network::predefined_netwoks (
$quantum_config = {},
) {
create_predefined_networks_and_routers($quantum_config)
Keystone_user_role<| title=="$auth_user@$auth_tenant"|> -> Quantum_net<| |>
Service <| title == 'keystone' |> -> Quantum_net <| |>
Anchor['quantum-plugin-ovs-done'] -> Quantum_net <| |>
quantum_floatingip_pool{'admin':
pool_size => get_floatingip_pool_size_for_admin($quantum_config)
}
Quantum_net<||> -> Quantum_floatingip_pool<||>
Quantum_subnet<||> -> Quantum_floatingip_pool<||>
Quantum_router<||> -> Quantum_floatingip_pool<||>
}
# vim: set ts=2 sw=2 et :

View File

@ -1,26 +0,0 @@
#
# Use Case: Provider Router with Private Networks
#
define quantum::network::provider_router (
$quantum_config = {},
$router_subnets = undef,
$router_extnet = undef
) {
Quantum_subnet <| |> -> Quantum_router <| |>
Service <| title == 'keystone' |> -> Quantum_router <| |>
# create router
quantum_router { $title:
#quantum_config => $quantum_config,
ensure => present,
quantum_config=> $quantum_config,
int_subnets => $router_subnets,
ext_net => $router_extnet,
tenant => $quantum_config['keystone']['admin_tenant_name'],
auth_url => $quantum_config['keystone']['auth_url'],
auth_user => $quantum_config['keystone']['admin_user'],
auth_password => $quantum_config['keystone']['admin_password'],
auth_tenant => $quantum_config['keystone']['admin_tenant_name'],
}
}
# vim: set ts=2 sw=2 et :

View File

@ -1,83 +0,0 @@
class quantum::params {
case $::osfamily {
'Debian', 'Ubuntu': {
$package_name = 'quantum-common'
$server_package = 'quantum-server'
$server_service = 'quantum-server'
$ovs_agent_package = 'quantum-plugin-openvswitch-agent'
$ovs_agent_service = 'quantum-plugin-openvswitch-agent'
$ovs_server_package = 'quantum-plugin-openvswitch'
$ovs_cleanup_service = false
$dhcp_agent_package = 'quantum-dhcp-agent'
$dhcp_agent_service = 'quantum-dhcp-agent'
$dnsmasq_packages = ['dnsmasq-base', 'dnsmasq-utils']
$isc_dhcp_packages = ['isc-dhcp-server']
$l3_agent_package = 'quantum-l3-agent'
$l3_agent_service = 'quantum-l3-agent'
$linuxbridge_agent_package = 'quantum-plugin-linuxbridge-agent'
$linuxbridge_agent_service = 'quantum-plugin-linuxbridge-agent'
$linuxbridge_server_package = 'quantum-plugin-linuxbridge'
$linuxbridge_config_file = '/etc/quantum/plugins/linuxbridge/linuxbridge_conf.ini'
$metadata_agent_package = 'quantum-metadata-agent'
$metadata_agent_service = 'quantum-metadata-agent'
$cliff_package = 'python-cliff'
$kernel_headers = "linux-headers-${::kernelrelease}"
$python_path = 'python2.7/dist-packages'
$cidr_package = 'ipcalc'
$vlan_package = 'vlan'
case $::operatingsystem {
'Debian': {
$service_provider = undef
}
default: {
$service_provider = 'upstart'
}
}
}
'RedHat': {
$package_name = 'openstack-quantum'
$server_package = false
$server_service = 'quantum-server'
$ovs_agent_package = false
$ovs_agent_service = 'quantum-openvswitch-agent'
$ovs_server_package = 'openstack-quantum-openvswitch'
$dhcp_agent_package = false
$dhcp_agent_service = 'quantum-dhcp-agent'
$dnsmasq_packages = ['dnsmasq', 'dnsmasq-utils']
$isc_dhcp_packages = ['dhcp']
$l3_agent_package = false
$l3_agent_service = 'quantum-l3-agent'
$cliff_package = 'python-cliff'
$kernel_headers = "linux-headers-${::kernelrelease}"
$python_path = 'python2.6/site-packages'
$cidr_package = "whatmask"
$vlan_package = 'vconfig'
$service_provider = undef
$linuxbridge_agent_package = 'openstack-quantum-linuxbridge'
$linuxbridge_agent_service = 'quantum-linuxbridge-agent'
$linuxbridge_server_package = 'openstack-quantum-linuxbridge'
$linuxbridge_config_file = '/etc/quantum/plugins/linuxbridge/linuxbridge_conf.ini'
$metadata_agent_service = 'quantum-metadata-agent'
}
}
}

Some files were not shown because too many files have changed in this diff.