Merge pull request #23 from bodepd/dev

Dev
Michael Chapman committed 2013-06-18 20:59:24 -07:00
12 changed files with 240 additions and 322 deletions

.gitignore (vendored): 2 changes

@@ -1,8 +1,6 @@
modules
module/*
vendor
templates
manifests
.tmp
.vagrant
*.log*

Puppetfile

@@ -10,7 +10,6 @@ git_protocol=ENV['git_protocol'] || 'git'
# that they are derived from (and should be maintained in sync with)
#
#
# this is just targeting the upstream stackforge modules
# right now, and the logic for using downstream does not
@@ -41,6 +40,24 @@ release = 'grizzly'
manifest_branch = 'master'
mod 'manifests', :git => "#{base_url}/#{user_name}/#{release}-manifests", :ref => manifest_branch
###### module under development #####
# the following modules are still undergoing their initial development
# and have not yet been ported to CiscoSystems.
# This top level module contains the roles that are used to deploy openstack
mod 'CiscoSystems/coi', :git => "#{base_url}/bodepd/puppet-COI", :ref => 'master'
# no existing downstream module
mod 'puppetlabs/postgresql', :git => "#{base_url}/puppetlabs/puppetlabs-postgresql", :ref => 'master'
mod 'puppetlabs/puppetdb', :git => "#{base_url}/puppetlabs/puppetlabs-puppetdb", :ref => 'master'
# do I really need this firewall module?
mod 'puppetlabs/firewall', :git => "#{base_url}/puppetlabs/puppetlabs-firewall", :ref => 'master'
# stephenrjohnson
# this is what I am testing Puppet 3.2 deploys with
# I am pointing it at me until my patch is accepted
mod 'stephenrjohnson/puppet', :git => "#{base_url}/stephenrjohnson/puppetlabs-puppet", :ref => 'master'
###### stackforge openstack modules #####
openstack_repo_prefix = "#{base_url}/#{openstack_module_account}/puppet"
@@ -67,10 +84,11 @@ mod 'CiscoSystems/mysql', :git => "#{base_url}/CiscoSystems/puppet-mysql", :ref
mod 'CiscoSystems/rabbitmq', :git => "#{base_url}/CiscoSystems/puppet-rabbitmq", :ref => branch_name
mod 'CiscoSystems/apache', :git => "#{base_url}/CiscoSystems/puppet-apache", :ref => branch_name
##### modules with other upstreams #####
# upstream is ripienaar
mod 'CiscoSystems/concat', :git => "#{base_url}/CiscoSystems/puppet-concat", :ref => branch_name
mod 'ripienaar/concat', :git => "#{base_url}/CiscoSystems/puppet-concat", :ref => 'origin/grizzly'
# upstream is cprice-puppet/puppetlabs-inifile
mod 'CiscoSystems/inifile', :git => "#{base_url}/CiscoSystems/puppet-inifile", :ref => branch_name
@@ -85,16 +103,19 @@ mod 'CiscoSystems/sysctl', :git => "#{base_url}/CiscoSystems/puppet-sysctl", :re
# unclear who the upstream is
mod 'CiscoSystems/vswitch', :git => "#{base_url}/CiscoSystems/puppet-vswitch", :ref => branch_name
##### Modules without upstreams #####
# TODO - this is still pointing at my fork
mod 'CiscoSystems/coe', :git => "#{base_url}/CiscoSystems/puppet-coe", :ref => 'origin/grizzly'
mod 'CiscoSystems/cobbler', :git => "#{base_url}/bodepd/puppet-cobbler", :ref => 'origin/fix_cobbler_sync_issue'
mod 'CiscoSystems/coe', :git => "#{base_url}/CiscoSystems/puppet-coe", :ref => branch_name
mod 'CiscoSystems/cobbler', :git => "#{base_url}/CiscoSystems/puppet-cobbler", :ref => branch_name
mod 'CiscoSystems/apt-cacher-ng', :git => "#{base_url}/CiscoSystems/puppet-apt-cacher-ng", :ref => branch_name
mod 'CiscoSystems/collectd', :git => "#{base_url}/CiscoSystems/puppet-collectd", :ref => branch_name
mod 'CiscoSystems/graphite', :git => "#{base_url}/CiscoSystems/puppet-graphite", :ref => branch_name
# based on pradeep's fork
# this is forked and needs to be updated
mod 'CiscoSystems/graphite', :git => "#{base_url}/bodepd/puppet-graphite/", :ref => 'master'
mod 'CiscoSystems/monit', :git => "#{base_url}/CiscoSystems/puppet-monit", :ref => branch_name
mod 'CiscoSystems/pip', :git => "#{base_url}/CiscoSystems/puppet-pip", :ref => branch_name
mod 'CiscoSystems/puppet', :git => "#{base_url}/CiscoSystems/puppet-puppet", :ref => branch_name
mod 'CiscoSystems/dnsmasq', :git => "#{base_url}/CiscoSystems/puppet-dnsmasq", :ref => branch_name
mod 'CiscoSystems/naginator', :git => "#{base_url}/CiscoSystems/puppet-naginator", :ref => branch_name

Vagrantfile (vendored): 42 changes

@@ -72,14 +72,6 @@ end
Vagrant::Config.run do |config|
require 'fileutils'
if !File.symlink?("templates")
File.symlink("./modules/manifests/templates", "./templates")
end
if !File.symlink?("manifests")
File.symlink("./modules/manifests/manifests", "./manifests")
end
v_config = parse_vagrant_config
apt_cache_proxy = 'Acquire::http { Proxy \"http://%s:3142\"; };' % v_config['apt_cache']
@@ -98,7 +90,7 @@ Vagrant::Config.run do |config|
get_box(config, 'precise64')
setup_networks(config, '100')
setup_hostname(config, 'build-server')
config.vm.customize ["modifyvm", :id, "--memory", 1024]
# Configure apt mirror
config.vm.provision :shell do |shell|
shell.inline = "sed -i 's/us.archive.ubuntu.com/%s/g' /etc/apt/sources.list" % v_config['apt_mirror']
@@ -113,20 +105,29 @@ Vagrant::Config.run do |config|
# configure apt and basic packages needed for install
config.vm.provision :shell do |shell|
shell.inline = "echo \"%s\" > /etc/apt/apt.conf.d/01apt-cacher-ng-proxy; apt-get update; dhclient -r eth0 && dhclient eth0; apt-get install -y git vim puppet curl; if [ ! -h /etc/puppet/templates ]; then rmdir /etc/puppet/templates;ln -s /vagrant/templates /etc/puppet/; fi" % apt_cache_proxy
shell.inline = "echo \"%s\" > /etc/apt/apt.conf.d/01apt-cacher-ng-proxy; apt-get update; dhclient -r eth0 && dhclient eth0; apt-get install -y git vim curl; " % apt_cache_proxy
end
# pre-import the ubuntu image from an appropriate mirror
config.vm.provision :shell do |shell|
shell.inline = "apt-get install -y cobbler; cobbler-ubuntu-import -m http://%s/ubuntu precise-x86_64;" % v_config['apt_mirror']
end
#config.vm.provision :shell do |shell|
# shell.inline = "apt-get install -y cobbler; cobbler-ubuntu-import -m http://%s/ubuntu precise-x86_64;" % v_config['apt_mirror']
#end
config.vm.share_folder("hiera_data", '/etc/puppet/hiera_data', './hiera_data/')
# perform some initial setup before our 'real' puppet run
config.vm.provision(:puppet, :pp_path => "/etc/puppet") do |puppet|
puppet.manifests_path = 'manifests'
puppet.manifest_file = "setup.pp"
puppet.module_path = 'modules'
puppet.options = ['--verbose', '--trace', '--debug', '--show_diff']
end
# now run puppet to install the build server
config.vm.provision(:puppet, :pp_path => "/etc/puppet") do |puppet|
puppet.manifests_path = 'manifests'
puppet.manifest_file = "site.pp"
puppet.module_path = 'modules'
puppet.options = ['--verbose', '--trace', '--debug']
puppet.options = ['--verbose', '--trace', '--debug', '--show_diff']
end
# Configure puppet
@@ -167,6 +168,13 @@ Vagrant::Config.run do |config|
shell.inline = 'echo "192.168.242.100 build-server build-server.domain.name" >> /etc/hosts;echo "%s" > /etc/apt/apt.conf.d/01apt-cacher-ng-proxy; apt-get update;apt-get install ubuntu-cloud-keyring' % apt_cache_proxy
end
config.vm.provision(:puppet, :pp_path => "/etc/puppet") do |puppet|
puppet.manifests_path = 'manifests'
puppet.manifest_file = "setup.pp"
puppet.module_path = 'modules'
puppet.options = ['--verbose', '--trace', '--debug', '--show_diff']
end
config.vm.provision(:puppet_server) do |puppet|
puppet.puppet_server = 'build-server.domain.name'
puppet.options = ['-t', '--pluginsync', '--trace', "--certname #{node_name}"]
@@ -198,6 +206,12 @@ Vagrant::Config.run do |config|
shell.inline = 'echo "192.168.242.100 build-server build-server.domain.name" >> /etc/hosts;echo "%s" > /etc/apt/apt.conf.d/01apt-cacher-ng-proxy; apt-get update;apt-get install ubuntu-cloud-keyring' % apt_cache_proxy
end
config.vm.provision(:puppet, :pp_path => "/etc/puppet") do |puppet|
puppet.manifests_path = 'manifests'
puppet.manifest_file = "setup.pp"
puppet.module_path = 'modules'
puppet.options = ['--verbose', '--trace', '--debug', '--show_diff']
end
config.vm.provision(:puppet_server) do |puppet|
puppet.puppet_server = 'build-server.domain.name'
puppet.options = ['-t', '--pluginsync', '--trace', "--certname #{node_name}"]

New hiera data file (name not shown): 13 additions

@@ -0,0 +1,13 @@
# set my puppet_master_address to be the fqdn fact
puppet_master_address: "%{fqdn}"
cobbler_node_ip: '192.168.242.100'
node_subnet: '192.168.242.0'
node_netmask: '255.255.255.0'
node_gateway: '192.168.242.1'
admin_user: localadmin
password_crypted: $6$UfgWxrIv$k4KfzAEMqMg.fppmSOTd0usI4j6gfjs0962.JXsoJRWa5wMz8yQk4SfInn4.WZ3L/MCt5u.62tHDGB36EhiKF1
autostart_puppet: true
ucsm_port: 443
install_drive: /dev/sda
#ipv6_ra: 1
#interface_bonding = 'true'
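
The %{fqdn} above is hiera interpolation syntax: it is expanded from the facts of the node doing the lookup, not stored as a literal. A minimal sketch of the manifest-side view, assuming Puppet 3.x with the hiera() function (the notify resource is illustrative only, not part of this change):

# hiera expands %{fqdn} from the requesting node's facts at lookup time,
# so on the build server this resolves to e.g. 'build-server.domain.name'
$master = hiera('puppet_master_address')
notify { "puppet master address resolves to: ${master}": }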

hiera_data/build.yaml (new, empty file)

hiera_data/common.yaml (new file): 13 additions

@@ -0,0 +1,13 @@
# time servers
ntp_servers:
- time-server.domain.name
# domain name
domain_name: domain.name
# interfaces for directing different types of traffic
public_interface: eth1
private_interface: eth1
external_interface: eth2
# build node connection information
build_node_name: build-server
# advanced options
proxy: http://proxy-server:port-number

hiera_data/compute.yaml (new file): 3 additions

@@ -0,0 +1,3 @@
# configuration only relevant to compute node
libvirt_type: qemu
internal_ip: "%{ipaddress_eth3}"

New hiera data file (name not shown): 26 additions

@@ -0,0 +1,26 @@
# configuration only required on the
# openstack controller
admin_email: root@localhost
mysql_root_password: mysql_pass
# networking related config
auto_assign_floating_ip: false
keystone_db_password: keystone_db_password
# glance config
glance_db_password: glance_pass
glance_user_password: glance_pass
glance_backend: file
# for our current deployment
# only the controller needs to connect
# to the quantum db
quantum_db_password: quantum_pass
# horizon
horizon_secret_key: horizon_secret_key
# only the controller needs to authenticate?
cinder_user_password: cinder_pass
metadata_shared_secret: metadata_shared_secret

hiera_data/openstack.yaml (new file): 38 additions

@@ -0,0 +1,38 @@
#
# this config is for values that
# are applicable to multiple openstack roles
# controller information
controller_node_internal: 192.168.242.10
controller_node_public: 192.168.242.10
controller_hostname: control-server
# information for package repos
openstack_release: precise-updates/grizzly
package_repo: cloud_archive
# this may need to be updated b/c we are using nova-conductor now
nova_db_password: nova_pass
nova_user_password: nova_pass
quantum_user_password: quantum_pass
# all services need to authenticate via the MQ
rabbit_password: openstack_rabbit_password
rabbit_user: openstack_rabbit_user
# ovs config
tunnel_ip: "%{ipaddress_eth3}"
cinder_db_password: cinder_pass
# used by test_file
test_file_image_type: cirros
multi_host: true
# used by both auth_file and the controller
admin_password: Cisco123
keystone_admin_token: keystone_admin_token
verbose: false

manifests/setup.pp (new file): 51 additions

@@ -0,0 +1,51 @@
#
# this script is responsible for performing initial setup
# configurations that are only necessary for our virtual
# box based installations.
#
# set up puppetlabs repos and install Puppet 3.2
#
# it makes my life easier if I can assume Puppet 3.2
# b/c then the other manifests can utilize hiera!
# this is not required for bare-metal b/c we can assume
# that Puppet will be installed on the bare-metal nodes
# with the correct version
include puppet::repo::puppetlabs
package { 'puppet':
# is this the real version that I should be using?
ensure => '3.2.2-1puppetlabs1',
}
# dns resolution should be set up correctly
host {
'build-server': ip => '192.168.242.100', host_aliases => 'build-server.domain.name';
}
# set up our hiera-store!
file { "${settings::confdir}/hiera.yaml":
content =>
'
---
:backends:
- yaml
:hierarchy:
- "%{hostname}"
- jenkins
- "%{openstack_role}"
- "%{role}"
- common
:yaml:
:datadir: /etc/puppet/hiera_data'
}
# lay down a file that can be used for subsequent runs of puppet. Often, the
# only thing that you want to do after the initial provisioning of a box is
# to run puppet again. This resource lays down a script that can simply be
# used for subsequent runs
file { '/root/run_puppet.sh':
content =>
"#!/bin/bash
puppet apply --modulepath /etc/puppet/modules-0/ --certname ${clientcert} /etc/puppet/manifests/site.pp $*"
}

manifests/site.pp (new file): 41 additions

@@ -0,0 +1,41 @@
node build-server {
Exec { logoutput => on_failure }
$role = 'build'
include coi::roles::build_server
# I want these nodes to eventually be moved to
# some data format that can just be parsed
# right now, it does make sense to maintain them here
coi::cobbler_node { "control-server":
node_type => "control",
mac => "00:11:22:33:44:55",
ip => "192.168.242.10",
power_address => "192.168.242.110",
power_user => "admin",
power_password => "password",
power_type => "ipmitool"
}
coi::cobbler_node { "compute-server01":
node_type => "compute",
mac => "11:22:33:44:55:66",
ip => "192.168.242.21",
power_address => "192.168.242.121"
}
}
node /control-server/ {
$role = 'openstack'
$openstack_role = 'controller'
include coi::roles::controller
}
node /compute-server\d+/ {
$role = 'openstack'
$openstack_role = 'compute'
include coi::roles::compute
}
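
Additional compute nodes are registered by repeating the coi::cobbler_node block inside node build-server; the /compute-server\d+/ node definition already matches any numbered compute host. A minimal sketch with hypothetical values (the MAC, IP, and power_address below are placeholders):

# hypothetical second compute node; all values below are placeholders
coi::cobbler_node { 'compute-server02':
  node_type     => 'compute',
  mac           => '22:33:44:55:66:77',
  ip            => '192.168.242.22',
  power_address => '192.168.242.122',
}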

site.pp (deleted): 300 deletions

@@ -1,300 +0,0 @@
# This document serves as an example of how to deploy
# basic multi-node openstack environments.
# In this scenario Quantum is using OVS with GRE Tunnels
# Swift is not included.
########### Proxy Configuration ##########
# If you use an HTTP/HTTPS proxy, uncomment this setting and specify the correct proxy URL.
# If you do not use an HTTP/HTTPS proxy, leave this setting commented out.
#$proxy = "http://proxy-server:port-number"
########### package repo configuration ##########
#
# The package repos used to install openstack
$package_repo = 'cisco_repo'
# Alternatively, the upstream Ubuntu packages from the cloud archive can be used
# $package_repo = 'cloud_archive'
# If you are behind a proxy you may choose not to use our ftp distribution, and
# instead try our http distribution location. Note the http location is not
# a permanent location and may change at any time.
$location = "ftp://ftpeng.cisco.com/openstack/cisco"
# Alternatively, uncomment this one and comment out the one above
#$location = "http://128.107.252.163/openstack/cisco"
########### Build Node (Cobbler, Puppet Master, NTP) ######
# Change the following to the host name you have given your build node
$build_node_name = "build-server"
########### NTP Configuration ############
# Change this to the location of a time server in your organization accessible to the build server
# The build server will synchronize with this time server, and will in turn function as the time
# server for your OpenStack nodes
$ntp_servers = ["time-server.domain.name"]
########### Build Node Cobbler Variables ############
# Change these 5 parameters to define the IP address and other network settings of your build node
# The cobbler node *must* have this IP configured and it *must* be on the same network as
# the hosts to install
$cobbler_node_ip = '192.168.242.100'
$node_subnet = '192.168.242.0'
$node_netmask = '255.255.255.0'
# This gateway is optional - if there's a gateway providing a default route, put it here
# If not, comment it out
$node_gateway = '192.168.242.1'
# This domain name will be the name your build and compute nodes use for the local DNS
# It doesn't have to be the name of your corporate DNS - a local DNS server on the build
# node will serve addresses in this domain - but if it is, you can also add entries for
# the nodes in your corporate DNS environment; they will be usable *if* the above addresses
# are routeable from elsewhere in your network.
$domain_name = 'domain.name'
# This setting likely does not need to be changed
# To speed installation of your OpenStack nodes, it configures your build node to function
# as a caching proxy storing the Ubuntu install files used to deploy the OpenStack nodes
$cobbler_proxy = "http://${cobbler_node_ip}:3142/"
####### Preseed File Configuration #######
# This will build a preseed file called 'cisco-preseed' in /etc/cobbler/preseeds/
# The preseed file automates the installation of Ubuntu onto the OpenStack nodes
#
# The following variables may be changed by the system admin:
# 1) admin_user
# 2) password_crypted
# 3) autostart_puppet -- whether the puppet agent will auto start
# Default user is: localadmin
# Default SHA-512 hashed password is "ubuntu": $6$UfgWxrIv$k4KfzAEMqMg.fppmSOTd0usI4j6gfjs0962.JXsoJRWa5wMz8yQk4SfInn4.WZ3L/MCt5u.62tHDGB36EhiKF1
# To generate a new SHA-512 hashed password, run the following replacing
# the word "password" with your new password. Then use the result as the
# $password_crypted variable
# python -c "import crypt, getpass, pwd; print crypt.crypt('password', '\$6\$UfgWxrIv\$')"
$admin_user = 'localadmin'
$password_crypted = '$6$UfgWxrIv$k4KfzAEMqMg.fppmSOTd0usI4j6gfjs0962.JXsoJRWa5wMz8yQk4SfInn4.WZ3L/MCt5u.62tHDGB36EhiKF1'
$autostart_puppet = true
# If the setup uses UCS B-Series blades, enter the port on which the
# UCSM accepts requests. By default the UCSM is enabled to accept requests
# on port 443 (https). If https is disabled and only http is used, set
# $ucsm_port = '80'
$ucsm_port = '443'
########### OpenStack Variables ############
# These values define parameters which will be used to deploy and configure OpenStack
# once Ubuntu is installed on your nodes
#
# Change these next 3 parameters to the network settings of the node which will be your
# OpenStack control node
$controller_node_address = '192.168.242.10'
$controller_node_network = '192.168.242.0'
$controller_hostname = 'control-server'
# Specify the network which should have access to the MySQL database on the OpenStack control
# node. Typically, this will be the same network as defined in the controller_node_network
# parameter above. Use MySQL network wild card syntax to specify the desired network.
$db_allowed_network = '192.168.242.%'
# These next two values typically do not need to be changed. They define the network connectivity
# of the OpenStack controller
# This is the interface used to connect to Horizon dashboard
$controller_node_public = $controller_node_address
# This is the interface used for external backend communication
$controller_node_internal = $controller_node_address
# These next three parameters specify the networking hardware used in each node
# Current assumption is that all nodes have the same network interfaces and are
# cabled identically
#
# Specify which interface in each node is the API Interface
# This is also known as the Management Interface
$public_interface = 'eth1'
# Define the interface used for vm networking connectivity when nova-network is being used.
# Quantum does not require this value, so using eth0 will typically be fine.
$private_interface = 'eth1'
# Specify the interface used for external connectivity such as floating IPs (only in network/controller node)
$external_interface = 'eth2'
# Select the drive on which Ubuntu and OpenStack will be installed in each node. Current assumption is
# that all nodes will be installed on the same device name
$install_drive = '/dev/sda'
########### OpenStack Service Credentials ############
# This block of parameters is used to change the user names and passwords used by the services which
# make up OpenStack. The following defaults should be changed for any production deployment
$admin_email = 'root@localhost'
$admin_password = 'Cisco123'
$keystone_db_password = 'keystone_db_pass'
$keystone_admin_token = 'keystone_admin_token'
$nova_user = 'nova'
$nova_db_password = 'nova_pass'
$nova_user_password = 'nova_pass'
$libvirt_type = 'qemu'
$glance_db_password = 'glance_pass'
$glance_user_password = 'glance_pass'
$glance_sql_connection = "mysql://glance:${glance_db_password}@${controller_node_address}/glance"
$glance_on_swift = false
$cinder_user_password = 'cinder_pass'
$cinder_db_password = 'cinder_pass'
$quantum_user_password = 'quantum_pass'
$quantum_db_password = 'quantum_pass'
$rabbit_password = 'openstack_rabbit_password'
$rabbit_user = 'openstack_rabbit_user'
# Nova DB connection
$sql_connection = "mysql://${nova_user}:${nova_db_password}@${controller_node_address}/nova"
# glance backend configuration, supports file or swift
$glance_backend = 'file'
# image type to use for testing
# this populates the contents of the /tmp/nova_test.sh script
# that is deployed to the controller.
# It accepts kvm or cirros
$test_file_image_type = 'cirros'
#### end shared variables #################
# Storage Configuration
# Set to true to enable Cinder services
$cinder_controller_enabled = true
# Set to true to enable Cinder deployment to all compute nodes
$cinder_compute_enabled = true
# The cinder storage driver to use. Default is iscsi
$cinder_storage_driver = 'iscsi'
# Other drivers exist for cinder. Here are examples on how to enable them.
#
# NetApp iSCSI Driver
# $cinder_storage_driver = 'netapp'
# $netapp_wsdl_url = ''
# $netapp_login = ''
# $netapp_password = ''
#
# NFS
# share information is stored in flat text file specified in $nfs_shares_config
# the format for this file is hostname:/mountpoint eg 192.168.2.55:/myshare, with only one entry per line
#
# $cinder_storage_driver = 'nfs'
# $nfs_shares_config = '/etc/cinder/shares.conf'
####### OpenStack Node Definitions #####
# This section is used to define the hardware parameters of the nodes which will be used
# for OpenStack. Cobbler will automate the installation of Ubuntu onto these nodes using
# these settings
node /build-node/ inherits master-node {
# This block defines the control server. Replace "control_server" with the host name of your
# OpenStack controller, and change the "mac" to the MAC address of the boot interface of your
# OpenStack controller. Change the "ip" to the IP address of your OpenStack controller
cobbler_node { "control-server":
node_type => "control",
mac => "00:11:22:33:44:55",
ip => "192.168.242.10",
power_address => "192.168.242.110",
power_user => "admin",
power_password => "password",
power_type => "ipmitool"
}
# This block defines the first compute server. Replace "compute_server01" with the host name
# of your first OpenStack compute node, and change the "mac" to the MAC address of the boot
# interface of your first OpenStack compute node. Change the "ip" to the IP address of your first
# OpenStack compute node
# Begin compute node
cobbler_node { "compute-server01":
node_type => "compute",
mac => "11:22:33:44:55:66",
ip => "192.168.242.21",
power_address => "192.168.242.121"
}
# Example with UCS blade power_address with a sub-group (in UCSM), and a ServiceProfile for power_id
# you will need to change power_type to 'UCS' in the define macro above
# cobbler_node { "compute-server01":
# node_type => "compute",
# mac => "11:22:33:44:66:77",
# ip => "192.168.242.21",
# power_address => "192.168.242.121:org-cisco",
# power_id => "OpenStack-1"
# }
# End compute node
### Repeat as needed ###
# Make a copy of your compute node block above for each additional OpenStack node in your cluster
# and paste the copy in this section. Be sure to change the host name, mac, ip, and power settings
# for each node
### End repeated nodes ###
# Deploy a script that can be used to test nova
class { 'openstack::test_file':
image_type => 'cirros',
}
# Auth file used by test script
class { 'openstack::auth_file':
admin_password => $admin_password,
keystone_admin_token => $keystone_admin_token,
controller_node => $controller_node_internal,
}
}
### Node types ###
# These lines specify the host names in your OpenStack cluster and what the function of each host is
# Change build_server to the host name of your build node
node build-server inherits build-node { }
# Change control_server to the host name of your control node
node /control-server/ inherits os_base {
class { 'control':
internal_ip => '192.168.242.10',
vm_net_ip => $ipaddress_eth3,
}
}
# Change compute_server01 to the host name of your first compute node
node /compute-server\d+/ inherits os_base {
class { 'compute':
internal_ip => '192.168.242.21',
vm_net_ip => $ipaddress_eth3,
}
}
### Repeat as needed ###
# Copy the compute_server01 line above and paste a copy here for each additional OpenStack node in
# your cluster. Be sure to replace the compute_server01 parameter with the correct host name for
# each additional node
### End repeated nodes ###
########################################################################
### All parameters below this point likely do not need to be changed ###
########################################################################
### Advanced Users Configuration ###
# These four settings typically do not need to be changed
# In the default deployment, the build node functions as the DNS and static DHCP server for
# the OpenStack nodes. These settings can be used if alternate configurations are needed
$node_dns = "${cobbler_node_ip}"
$ip = "${cobbler_node_ip}"
$dns_service = "dnsmasq"
$dhcp_service = "dnsmasq"
# Enable network interface bonding. This will only enable the bonding module in the OS;
# it won't actually bond any interfaces. If bonding is required, set this to true and
# then edit the networking interfaces template to set up interface bonds as needed.
#$interface_bonding = 'true'
# Enable ipv6 router advertisement
#$ipv6_ra = '1'
### Puppet Parameters ###
# These settings load other puppet components. They should not be changed
import 'cobbler-node'
import 'core'
## Define the default node, to capture any un-defined nodes that register
## Simplifies debugging when necessary.
node default {
notify{"Default Node: Perhaps add a node definition to site.pp": }
}