# This document serves as an example of how to deploy
# basic multi-node openstack environments.
# In this scenario Quantum is using OVS with GRE Tunnels
# Swift is not included.
########### Proxy Configuration ##########
# If you use an HTTP/HTTPS proxy, uncomment this setting and specify the correct proxy URL.
# If you do not use an HTTP/HTTPS proxy, leave this setting commented out.
#$proxy = "http://proxy-server:port-number"
########### package repo configuration ##########
#
# The package repo used to install OpenStack
$package_repo = 'cisco_repo'
# Alternatively, the upstream Ubuntu Cloud Archive packages can be used
# $package_repo = 'cloud_archive'
# If you are behind a proxy you may choose not to use our ftp distribution, and
# instead try our http distribution location. Note the http location is not
# a permanent location and may change at any time.
$location = "ftp://ftpeng.cisco.com/openstack/cisco"
# Alternatively, uncomment this one and comment out the one above
#$location = "http://128.107.252.163/openstack/cisco"
########### Build Node (Cobbler, Puppet Master, NTP) ######
# Change the following to the host name you have given your build node
$build_node_name = "build-server"
########### NTP Configuration ############
# Change this to the location of a time server in your organization accessible to the build server
# The build server will synchronize with this time server, and will in turn function as the time
# server for your OpenStack nodes
$ntp_servers = ["time-server.domain.name"]
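# More than one server may be listed if desired, e.g.:
# $ntp_servers = ['ntp1.domain.name', 'ntp2.domain.name']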
########### Build Node Cobbler Variables ############
# Change these 5 parameters to define the IP address and other network settings of your build node
# The cobbler node *must* have this IP configured and it *must* be on the same network as
# the hosts to install
$cobbler_node_ip = '192.168.242.100'
$node_subnet = '192.168.242.0'
$node_netmask = '255.255.255.0'
# This gateway is optional - if there's a gateway providing a default route, put it here
# If not, comment it out
$node_gateway = '192.168.242.1'
# This domain name will be the name your build and compute nodes use for the local DNS
# It doesn't have to be the name of your corporate DNS - a local DNS server on the build
# node will serve addresses in this domain - but if it is, you can also add entries for
# the nodes in your corporate DNS environment, and they will be usable *if* the above
# addresses are routable from elsewhere in your network.
$domain_name = 'domain.name'
# This setting likely does not need to be changed
# To speed installation of your OpenStack nodes, it configures your build node to function
# as a caching proxy storing the Ubuntu install files used to deploy the OpenStack nodes
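# (Port 3142 is the default listening port of apt-cacher-ng, the caching proxy typically used here.)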
$cobbler_proxy = "http://${cobbler_node_ip}:3142/"
####### Preseed File Configuration #######
# This will build a preseed file called 'cisco-preseed' in /etc/cobbler/preseeds/
# The preseed file automates the installation of Ubuntu onto the OpenStack nodes
#
# The following variables may be changed by the system admin:
# 1) admin_user
# 2) password_crypted
# 3) autostart_puppet -- whether the puppet agent will auto start
# Default user is: localadmin
# Default SHA-512 hashed password is "ubuntu": $6$UfgWxrIv$k4KfzAEMqMg.fppmSOTd0usI4j6gfjs0962.JXsoJRWa5wMz8yQk4SfInn4.WZ3L/MCt5u.62tHDGB36EhiKF1
# To generate a new SHA-512 hashed password, run the following, replacing
# the word "password" with your new password, then use the result as the
# $password_crypted variable
# python -c "import crypt; print(crypt.crypt('password', '\$6\$UfgWxrIv\$'))"
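# Alternatively, if the mkpasswd utility (from the whois package) is available,
# the same hash can be produced with:
# mkpasswd --method=sha-512 --salt=UfgWxrIv password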
$admin_user = 'localadmin'
$password_crypted = '$6$UfgWxrIv$k4KfzAEMqMg.fppmSOTd0usI4j6gfjs0962.JXsoJRWa5wMz8yQk4SfInn4.WZ3L/MCt5u.62tHDGB36EhiKF1'
$autostart_puppet = true
# If the setup uses UCS B-Series blades, enter the port on which the
# UCSM accepts requests. By default the UCSM accepts requests
# on port 443 (https). If https is disabled and only http is used, set
# $ucsm_port = '80'
$ucsm_port = '443'
########### OpenStack Variables ############
# These values define parameters which will be used to deploy and configure OpenStack
# once Ubuntu is installed on your nodes
#
# Change these next 3 parameters to the network settings of the node which will be your
# OpenStack control node
$controller_node_address = '192.168.242.10'
$controller_node_network = '192.168.242.0'
$controller_hostname = 'control-server'
# Specify the network which should have access to the MySQL database on the OpenStack control
# node. Typically, this will be the same network as defined in the controller_node_network
# parameter above. Use MySQL network wild card syntax to specify the desired network.
$db_allowed_network = '192.168.242.%'
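# For example, '192.168.242.%' matches any host in 192.168.242.0/24, while a bare
# '%' would allow connections from all hosts (not recommended).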
# These next two values typically do not need to be changed. They define the network connectivity
# of the OpenStack controller
# This is the interface used to connect to Horizon dashboard
$controller_node_public = $controller_node_address
# This is the interface used for external backend communication
$controller_node_internal = $controller_node_address
# These next three parameters specify the networking hardware used in each node
# Current assumption is that all nodes have the same network interfaces and are
# cabled identically
#
# Specify which interface in each node is the API Interface
# This is also known as the Management Interface
$public_interface = 'eth1'
# Define the interface used for VM network connectivity when nova-network is being used.
# Quantum does not require this value, so the default below is typically fine.
$private_interface = 'eth1'
# Specify the interface used for external connectivity such as floating IPs (used only on the network/controller node)
$external_interface = 'eth2'
# Select the drive on which Ubuntu and OpenStack will be installed in each node. Current assumption is
# that all nodes will be installed on the same device name
$install_drive = '/dev/sda'
########### OpenStack Service Credentials ############
# This block of parameters is used to change the user names and passwords used by the services which
# make up OpenStack. The following defaults should be changed for any production deployment
$admin_email = 'root@localhost'
$admin_password = 'Cisco123'
$keystone_db_password = 'keystone_db_pass'
$keystone_admin_token = 'keystone_admin_token'
$nova_user = 'nova'
$nova_db_password = 'nova_pass'
$nova_user_password = 'nova_pass'
$libvirt_type = 'qemu'
$glance_db_password = 'glance_pass'
$glance_user_password = 'glance_pass'
$glance_sql_connection = "mysql://glance:${glance_db_password}@${controller_node_address}/glance"
$glance_on_swift = false
$cinder_user_password = 'cinder_pass'
$cinder_db_password = 'cinder_pass'
$quantum_user_password = 'quantum_pass'
$quantum_db_password = 'quantum_pass'
$rabbit_password = 'openstack_rabbit_password'
$rabbit_user = 'openstack_rabbit_user'
# Nova DB connection
$sql_connection = "mysql://${nova_user}:${nova_db_password}@${controller_node_address}/nova"
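# With the defaults above, this expands to:
#   mysql://nova:nova_pass@192.168.242.10/nova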
# Glance backend configuration; supports 'file' or 'swift'
$glance_backend = 'file'
# image type to use for testing
# this populates the contents of the /tmp/nova_test.sh script
# that is deployed to the controller.
# It accepts kvm or cirros
$test_file_image_type = 'cirros'
#### end shared variables #################
# Storage Configuration
# Set to true to enable Cinder services
$cinder_controller_enabled = true
# Set to true to enable Cinder deployment to all compute nodes
$cinder_compute_enabled = true
# The cinder storage driver to use. Default is iscsi
$cinder_storage_driver = 'iscsi'
# Other drivers exist for cinder. Here are examples on how to enable them.
#
# NetApp iSCSI Driver
# $cinder_storage_driver = 'netapp'
# $netapp_wsdl_url = ''
# $netapp_login = ''
# $netapp_password = ''
#
# NFS
# Share information is stored in a flat text file specified in $nfs_shares_config.
# The format of this file is hostname:/mountpoint (e.g. 192.168.2.55:/myshare), one entry per line
#
# $cinder_storage_driver = 'nfs'
# $nfs_shares_config = '/etc/cinder/shares.conf'
####### OpenStack Node Definitions #####
# This section is used to define the hardware parameters of the nodes which will be used
# for OpenStack. Cobbler will automate the installation of Ubuntu onto these nodes using
# these settings
node /build-node/ inherits master-node {
  # This block defines the control server. Replace "control-server" with the host name of your
  # OpenStack controller, and change the "mac" to the MAC address of the boot interface of your
  # OpenStack controller. Change the "ip" to the IP address of your OpenStack controller
  cobbler_node { "control-server":
    node_type      => "control",
    mac            => "00:11:22:33:44:55",
    ip             => "192.168.242.10",
    power_address  => "192.168.242.110",
    power_user     => "admin",
    power_password => "password",
    power_type     => "ipmitool"
  }
  # This block defines the first compute server. Replace "compute-server01" with the host name
  # of your first OpenStack compute node, and change the "mac" to the MAC address of the boot
  # interface of your first OpenStack compute node. Change the "ip" to the IP address of your first
  # OpenStack compute node
  # Begin compute node
  cobbler_node { "compute-server01":
    node_type     => "compute",
    mac           => "11:22:33:44:55:66",
    ip            => "192.168.242.21",
    power_address => "192.168.242.121"
  }
  # Example with a UCS blade power_address including a sub-group (in UCSM), and a ServiceProfile for power_id.
  # You will need to change the power type to 'ucs' in the cobbler_node resource above
  # cobbler_node { "compute-server01":
  #   node_type     => "compute",
  #   mac           => "11:22:33:44:66:77",
  #   ip            => "192.168.242.21",
  #   power_address => "192.168.242.121:org-cisco",
  #   power_id      => "OpenStack-1"
  # }
  # End compute node
  ### Repeat as needed ###
  # Make a copy of your compute node block above for each additional OpenStack node in your cluster
  # and paste the copy in this section. Be sure to change the host name, mac, ip, and power settings
  # for each node, as in the sketch below
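  # A minimal sketch of a second compute node (the host name, MAC, IP, and power
  # address here are placeholders; substitute your own values):
  # cobbler_node { "compute-server02":
  #   node_type     => "compute",
  #   mac           => "22:33:44:55:66:77",
  #   ip            => "192.168.242.22",
  #   power_address => "192.168.242.122"
  # }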
  ### End repeated nodes ###
  # Deploy a script that can be used to test nova
  class { 'openstack::test_file':
    image_type => 'cirros',
  }
  # Auth file used by test script
  class { 'openstack::auth_file':
    admin_password       => $admin_password,
    keystone_admin_token => $keystone_admin_token,
    controller_node      => $controller_node_internal,
  }
}
### Node types ###
# These lines specify the host names in your OpenStack cluster and what the function of each host is
# Change build-server to the host name of your build node
node build-server inherits build-node { }
# Change control-server to the host name of your control node
node /control-server/ inherits os_base {
  class { 'control':
    tunnel_ip => $ipaddress_eth3,
  }
}
# This regex matches any compute node named compute-serverNN (e.g. compute-server01).
# Note that internal_ip must be unique to each node, so the hard-coded address below
# is only suitable for a single compute node.
node /compute-server\d+/ inherits os_base {
  class { 'compute':
    internal_ip => '192.168.242.21',
    tunnel_ip   => $ipaddress_eth3,
  }
}
### Repeat as needed ###
# The /compute-server\d+/ node definition above already matches any host named
# compute-serverNN. If a compute node uses a different host name, add a node
# definition for it here, as in the sketch below
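# A minimal sketch for a compute node with a non-matching host name
# ("kvm-host-a" is a hypothetical example; the internal_ip is a placeholder):
# node /kvm-host-a/ inherits os_base {
#   class { 'compute':
#     internal_ip => '192.168.242.22',
#     tunnel_ip   => $ipaddress_eth3,
#   }
# }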
### End repeated nodes ###
########################################################################
### All parameters below this point likely do not need to be changed ###
########################################################################
### Advanced Users Configuration ###
# These four settings typically do not need to be changed
# In the default deployment, the build node functions as the DNS and static DHCP server for
# the OpenStack nodes. These settings can be used if alternate configurations are needed
$node_dns = "${cobbler_node_ip}"
$ip = "${cobbler_node_ip}"
$dns_service = "dnsmasq"
$dhcp_service = "dnsmasq"
# Enable network interface bonding. This only enables the bonding module in the OS;
# it won't actually bond any interfaces. Should bonding be required, set this to true
# and then edit the network interfaces template to set up the bonds.
#$interface_bonding = 'true'
# Enable IPv6 router advertisement
#$ipv6_ra = '1'
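# Presumably feeds MySQL's max_connect_errors setting, which blocks a host after
# that many successive interrupted connection attempts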
$max_connect_errors = '10'
### Puppet Parameters ###
# These settings load other puppet components. They should not be changed
import 'cobbler-node'
import 'core'
## Define the default node to capture any undefined nodes that register.
## Simplifies debugging when necessary.
node default {
  notify { "Default Node: Perhaps add a node definition to site.pp": }
}