Merge "* rename site.pp files * site_openstack_ha_minimal.pp with quantum on controller"

Victor Galkin 2013-03-11 18:59:34 +04:00 committed by Gerrit Code Review
commit 03a8bafa8f
7 changed files with 682 additions and 415 deletions

View File

@@ -24,7 +24,7 @@ $internal_virtual_ip = '10.0.0.253'
# Change this IP to an IP routable from your 'public' network,
# e.g. the Internet or your office LAN, in which your public
# interface resides
$public_virtual_ip = '10.0.215.253'
$public_virtual_ip = '10.0.204.253'
$nodes_harr = [
{
@@ -110,7 +110,7 @@ class node_netconfig (
) {
if $quantum {
l23network::l3::create_br_iface {'mgmt':
interface => $internal_interface,
interface => $internal_int,
bridge => $internal_br,
ipaddr => $mgmt_ipaddr,
netmask => $mgmt_netmask,
@@ -118,7 +118,7 @@
save_default_gateway => $save_default_gateway,
}
l23network::l3::create_br_iface {'ex':
interface => $public_interface,
interface => $public_int,
bridge => $public_br,
ipaddr => $public_ipaddr,
netmask => $public_netmask,
@@ -273,7 +273,7 @@ $manage_volumes = true
# Set up the network interface that Cinder uses to export iSCSI targets.
# This interface defines which IP to listen on (iSCSI port) for
# incoming connections from initiators
$cinder_iscsi_bind_iface = $internal_interface
$cinder_iscsi_bind_iface = $internal_int
# Below you can add physical volumes to cinder. Please replace values with the actual names of devices.
# This parameter defines which partitions to aggregate into cinder-volumes or nova-volumes LVM VG
@@ -684,7 +684,7 @@ node /fuel-quantum/ {
service_endpoint => $internal_virtual_ip,
auth_host => $internal_virtual_ip,
internal_address => $internal_address,
public_interface => $public_interface,
public_interface => $public_int,
private_interface => $private_interface,
floating_range => $floating_range,
fixed_range => $fixed_range,

View File

@@ -0,0 +1,635 @@
#
# Parameter values in this file should be changed, taking into consideration your
# networking setup and desired OpenStack settings.
#
# Please consult with the latest Fuel User Guide before making edits.
#
### GENERAL CONFIG ###
# This section sets main parameters such as hostnames and IP addresses of different nodes
# This is the name of the public interface. The public network provides address space for Floating IPs, as well as public IP accessibility to the API endpoints.
$public_interface = 'eth1'
$public_br = 'br-ex'
# This is the name of the internal interface. It will be attached to the management network, where data exchange between components of the OpenStack cluster will happen.
$internal_interface = 'eth0'
$internal_br = 'br-mgmt'
# This is the name of the private interface. All traffic within OpenStack tenants' networks will go through this interface.
$private_interface = 'eth2'
# Public and Internal VIPs. These virtual addresses are required by HA topology and will be managed by keepalived.
$internal_virtual_ip = '10.0.0.253'
# Change this IP to an IP routable from your 'public' network,
# e.g. the Internet or your office LAN, in which your public
# interface resides
$public_virtual_ip = '10.0.204.253'
$nodes_harr = [
{
'name' => 'fuel-cobbler',
'role' => 'cobbler',
'internal_address' => '10.0.0.102',
'public_address' => '10.0.204.102',
},
{
'name' => 'fuel-controller-01',
'role' => 'controller',
'internal_address' => '10.0.0.103',
'public_address' => '10.0.204.103',
},
{
'name' => 'fuel-controller-02',
'role' => 'controller',
'internal_address' => '10.0.0.104',
'public_address' => '10.0.204.104',
},
{
'name' => 'fuel-controller-03',
'role' => 'controller',
'internal_address' => '10.0.0.105',
'public_address' => '10.0.204.105',
},
{
'name' => 'fuel-compute-01',
'role' => 'compute',
'internal_address' => '10.0.0.106',
'public_address' => '10.0.204.106',
},
{
'name' => 'fuel-compute-02',
'role' => 'compute',
'internal_address' => '10.0.0.107',
'public_address' => '10.0.204.107',
},
]
$nodes = $nodes_harr
$default_gateway = '10.0.204.1'
# Specify nameservers here.
# These should point to the Cobbler node IP, or to specially prepared nameservers if you know what you are doing.
$dns_nameservers = ['10.0.204.1','8.8.8.8']
# Specify netmasks for internal and external networks.
$internal_netmask = '255.255.255.0'
$public_netmask = '255.255.255.0'
$node = filter_nodes($nodes,'name',$::hostname)
$internal_address = $node[0]['internal_address']
$public_address = $node[0]['public_address']
$controller_internal_addresses = nodes_to_hash(filter_nodes($nodes,'role','controller'),'name','internal_address')
$controller_public_addresses = nodes_to_hash(filter_nodes($nodes,'role','controller'),'name','public_address')
$controller_hostnames = keys($controller_internal_addresses)
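# For illustration, with the $nodes_harr defined above these lookups should resolve to roughly:
#   $controller_internal_addresses => {'fuel-controller-01' => '10.0.0.103',
#                                      'fuel-controller-02' => '10.0.0.104',
#                                      'fuel-controller-03' => '10.0.0.105'}
#   $controller_hostnames          => ['fuel-controller-01', 'fuel-controller-02', 'fuel-controller-03']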
if $quantum {
$public_int = $public_br
$internal_int = $internal_br
} else {
$public_int = $public_interface
$internal_int = $internal_interface
}
if $::hostname == 'fuel-controller-01' {
$primary_controller = true
} else {
$primary_controller = false
}
#Network configuration
stage {'netconfig':
before => Stage['main'],
}
class {'l23network': stage=> 'netconfig'}
class node_netconfig (
$mgmt_ipaddr,
$mgmt_netmask = '255.255.255.0',
$public_ipaddr = undef,
$public_netmask= '255.255.255.0',
$save_default_gateway=false,
$quantum = $quantum,
) {
if $quantum {
l23network::l3::create_br_iface {'mgmt':
interface => $internal_int,
bridge => $internal_br,
ipaddr => $mgmt_ipaddr,
netmask => $mgmt_netmask,
dns_nameservers => $dns_nameservers,
save_default_gateway => $save_default_gateway,
}
l23network::l3::create_br_iface {'ex':
interface => $public_int,
bridge => $public_br,
ipaddr => $public_ipaddr,
netmask => $public_netmask,
gateway => $default_gateway,
}
L23network::L3::Create_br_iface['mgmt'] -> L23network::L3::Create_br_iface['ex']
} else {
# nova-network mode
l23network::l3::ifconfig {$public_int:
ipaddr => $public_ipaddr,
netmask => $public_netmask,
gateway => $default_gateway,
}
l23network::l3::ifconfig {$internal_int:
ipaddr => $mgmt_ipaddr,
netmask => $mgmt_netmask,
dns_nameservers => $dns_nameservers,
}
}
l23network::l3::ifconfig {$private_interface: ipaddr=>'none' }
}
# Set hostname for master controller of HA cluster.
# It is strongly recommended that the master controller be deployed before all other controllers, since it initializes the new cluster.
# Default is fuel-controller-01.
# Fully qualified domain name is also allowed.
$master_hostname = 'fuel-controller-01'
# Set the Nagios master FQDN
$nagios_master = 'nagios-server.your-domain-name.com'
## proj_name is the environment name used in the Nagios configuration
$proj_name = 'test'
#Specify if your installation contains multiple Nova controllers. Defaults to true as it is the most common scenario.
$multi_host = true
# Specify different DB credentials for various services
$mysql_root_password = 'nova'
$admin_email = 'openstack@openstack.org'
$admin_password = 'nova'
$keystone_db_password = 'nova'
$keystone_admin_token = 'nova'
$glance_db_password = 'nova'
$glance_user_password = 'nova'
$nova_db_password = 'nova'
$nova_user_password = 'nova'
$rabbit_password = 'nova'
$rabbit_user = 'nova'
$quantum_user_password = 'quantum_pass'
$quantum_db_password = 'quantum_pass'
$quantum_db_user = 'quantum'
$quantum_db_dbname = 'quantum'
# End DB credentials section
### GENERAL CONFIG END ###
### NETWORK/QUANTUM ###
# Specify network/quantum specific settings
# Should we use Quantum or nova-network (deprecated)?
# Consult OpenStack documentation for differences between them.
$quantum = true
$quantum_netnode_on_cnt = true
#$quantum_host = $internal_virtual_ip
# Specify network creation criteria:
# Should puppet automatically create networks?
$create_networks = true
# Fixed IP addresses are typically used for communication between VM instances.
$fixed_range = '10.0.198.128/27'
# Floating IP addresses are used for communication of VM instances with the outside world (e.g. Internet).
$floating_range = '10.0.204.128/28'
# These parameters are passed to the previously specified network manager, e.g. nova-manage network create.
# Not used in Quantum.
# Consult the OpenStack docs for the corresponding network manager.
# https://fuel-dev.mirantis.com/docs/0.2/pages/0050-installation-instructions.html#network-setup
$num_networks = 1
$network_size = 31
$vlan_start = 300
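# For illustration only: with nova-network, the values above correspond roughly to a
# command such as the following (flag names are an assumption and vary between OpenStack releases):
#   nova-manage network create --label=private --fixed_range_v4=10.0.198.128/27 \
#     --num_networks=1 --network_size=31 --vlan=300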
# Quantum
# Segmentation type for isolating traffic between tenants
# Consult Openstack Quantum docs
$tenant_network_type = 'gre'
# Which IP address will be used for creating GRE tunnels.
$quantum_gre_bind_addr = $internal_address
# Which host and IP address does the Quantum network node use?
$quantum_net_node_hostname= 'fuel-controller-03'
$quantum_net_node_address = $controller_internal_addresses[$quantum_net_node_hostname]
# If the $external_ipinfo option is not defined, the addresses will be allocated automatically from $floating_range:
# the first address will be defined as an external default router,
# the second address will be attached to an uplink bridge interface,
# the remaining addresses will be utilized for the floating IP address pool.
$external_ipinfo = {}
## $external_ipinfo = {
## 'public_net_router' => '10.0.74.129',
## 'ext_bridge' => '10.0.74.130',
## 'pool_start' => '10.0.74.131',
## 'pool_end' => '10.0.74.142',
## }
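## For illustration, assuming $external_ipinfo stays empty and $floating_range above is
## 10.0.204.128/28, the automatic allocation described above should yield roughly:
##   public_net_router => 10.0.204.129 (first address)
##   ext_bridge        => 10.0.204.130 (second address)
##   floating pool     => 10.0.204.131 - 10.0.204.142 (remaining addresses)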
# Quantum segmentation range.
# For VLAN networks: valid VLAN VIDs can be 1 through 4094.
# For GRE networks: Valid tunnel IDs can be any 32-bit unsigned integer.
$segment_range = '900:999'
# Set up OpenStack network manager. It is used ONLY in nova-network.
# Consult Openstack nova-network docs for possible values.
$network_manager = 'nova.network.manager.FlatDHCPManager'
# Assign floating IPs to VMs on startup automatically?
$auto_assign_floating_ip = false
# Database connection for Quantum configuration (quantum.conf)
$quantum_sql_connection = "mysql://${quantum_db_user}:${quantum_db_password}@${$internal_virtual_ip}/${quantum_db_dbname}"
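# For reference, with the default values above this should interpolate to roughly:
#   mysql://quantum:quantum_pass@10.0.0.253/quantum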
### NETWORK/QUANTUM END ###
# This parameter specifies the identifier of the current cluster. It is needed in the case of a multiple-environment
# installation. Each cluster requires a unique integer value.
# Valid identifier range is 0 to 254.
$deployment_id = '89'
# Below you can enable or disable various services based on the chosen deployment topology:
### CINDER/VOLUME ###
# Should we use Cinder or nova-volume (obsolete)?
# Consult the OpenStack docs for the differences between them.
$cinder = true
# Should we install cinder on compute nodes?
$cinder_on_computes = false
#Set it to true if you want cinder-volume to be installed on the host
#Otherwise only the API and scheduler services will be installed
$manage_volumes = true
# Set up the network interface that Cinder uses to export iSCSI targets.
# This interface defines which IP to listen on (iSCSI port) for
# incoming connections from initiators
$cinder_iscsi_bind_iface = $internal_int
# Below you can add physical volumes to cinder. Please replace values with the actual names of devices.
# This parameter defines which partitions to aggregate into cinder-volumes or nova-volumes LVM VG
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# USE EXTREME CAUTION WITH THIS SETTING! IF THIS PARAMETER IS DEFINED,
# IT WILL AGGREGATE THE VOLUMES INTO AN LVM VOLUME GROUP
# AND ALL THE DATA THAT RESIDES ON THESE VOLUMES WILL BE LOST!
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# Leave this parameter empty if you want to create [cinder|nova]-volumes VG by yourself
$nv_physical_volume = ['/dev/sdz', '/dev/sdy', '/dev/sdx']
### CINDER/VOLUME END ###
### GLANCE ###
# Which backend to use for glance
# Supported backends are "swift" and "file"
$glance_backend = 'file'
# Use loopback device for swift:
# set 'loopback' or false
# This parameter controls where swift partitions are located:
# on physical partitions or inside loopback devices.
$swift_loopback = false
### Glance and swift END ###
### Syslog ###
# Enable error messages reporting to rsyslog. Rsyslog must be installed in this case.
$use_syslog = false
if $use_syslog {
class { "::rsyslog::client":
log_local => true,
log_auth_local => true,
server => '127.0.0.1',
port => '514'
}
}
### Syslog END ###
case $::osfamily {
"Debian": {
$rabbitmq_version_string = '2.8.7-1'
}
"RedHat": {
$rabbitmq_version_string = '2.8.7-2.el6'
}
}
#
# OpenStack packages and customized component versions to be installed.
# Use 'latest' to get the most recent ones, or specify an exact version if you need to install a custom version.
$openstack_version = {
'keystone' => 'latest',
'glance' => 'latest',
'horizon' => 'latest',
'nova' => 'latest',
'novncproxy' => 'latest',
'cinder' => 'latest',
'rabbitmq_version' => $rabbitmq_version_string,
}
# Which package repo mirror to use. Currently "default".
# "custom" is used by Mirantis for testing purposes.
# Local puppet-managed repo option planned for future releases.
# If you want to set up a local repository, you will need to manually adjust mirantis_repos.pp,
# though it is NOT recommended.
$mirror_type = 'default'
$enable_test_repo = false
#$quantum_sql_connection = "mysql://${quantum_db_user}:${quantum_db_password}@${quantum_host}/${quantum_db_dbname}"
# This parameter specifies the verbosity level of log messages
# in openstack components config. Currently, it disables or enables debugging.
$verbose = true
#Rate Limits for cinder and Nova
#Cinder and Nova can rate-limit your requests to API services.
#These limits can be reduced for your installation or usage scenario.
#Change the following variables if you want. They are measured in requests per minute.
$nova_rate_limits = {
'POST' => 1000,
'POST_SERVERS' => 1000,
'PUT' => 1000, 'GET' => 1000,
'DELETE' => 1000
}
$cinder_rate_limits = {
'POST' => 1000,
'POST_SERVERS' => 1000,
'PUT' => 1000, 'GET' => 1000,
'DELETE' => 1000
}
Exec { logoutput => true }
### END OF PUBLIC CONFIGURATION PART ###
# Normally, you do not need to change anything below this line.
# Globally apply an environment-based tag to all resources on each node.
tag("${::deployment_id}::${::environment}")
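# For example, with $deployment_id = '89' and a hypothetical Puppet environment named 'production',
# resources would be tagged roughly as '89::production'.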
stage { 'openstack-custom-repo': before => Stage['netconfig'] }
class { 'openstack::mirantis_repos':
stage => 'openstack-custom-repo',
type=>$mirror_type,
enable_test_repo=>$enable_test_repo,
}
if $::operatingsystem == 'Ubuntu' {
class { 'openstack::apparmor::disable': stage => 'openstack-custom-repo' }
}
sysctl::value { 'net.ipv4.conf.all.rp_filter': value => '0' }
# Dashboard(horizon) https/ssl mode
# false: normal mode with no encryption
# 'default': uses keys supplied with the ssl module package
# 'exist': assumes that the keys (domain name based certificate) are provisioned in advance
# 'custom': requires a fileserver static mount point [ssl_certs] and an existing hostname-based certificate
$horizon_use_ssl = false
class compact_controller (
$quantum_network_node = false
) {
class { 'openstack::controller_ha':
controller_public_addresses => $controller_public_addresses,
controller_internal_addresses => $controller_internal_addresses,
internal_address => $internal_address,
public_interface => $public_int,
internal_interface => $internal_int,
private_interface => $private_interface,
internal_virtual_ip => $internal_virtual_ip,
public_virtual_ip => $public_virtual_ip,
primary_controller => $primary_controller,
floating_range => $floating_range,
fixed_range => $fixed_range,
multi_host => $multi_host,
network_manager => $network_manager,
num_networks => $num_networks,
network_size => $network_size,
network_config => { 'vlan_start' => $vlan_start },
verbose => $verbose,
auto_assign_floating_ip => $auto_assign_floating_ip,
mysql_root_password => $mysql_root_password,
admin_email => $admin_email,
admin_password => $admin_password,
keystone_db_password => $keystone_db_password,
keystone_admin_token => $keystone_admin_token,
glance_db_password => $glance_db_password,
glance_user_password => $glance_user_password,
nova_db_password => $nova_db_password,
nova_user_password => $nova_user_password,
rabbit_password => $rabbit_password,
rabbit_user => $rabbit_user,
rabbit_nodes => $controller_hostnames,
memcached_servers => $controller_hostnames,
export_resources => false,
glance_backend => $glance_backend,
quantum => $quantum,
quantum_user_password => $quantum_user_password,
quantum_db_password => $quantum_db_password,
quantum_db_user => $quantum_db_user,
quantum_db_dbname => $quantum_db_dbname,
quantum_network_node => $quantum_network_node,
quantum_netnode_on_cnt => $quantum_netnode_on_cnt,
quantum_gre_bind_addr => $quantum_gre_bind_addr,
quantum_external_ipinfo => $external_ipinfo,
tenant_network_type => $tenant_network_type,
segment_range => $segment_range,
cinder => $cinder,
cinder_iscsi_bind_iface => $cinder_iscsi_bind_iface,
manage_volumes => $manage_volumes,
galera_nodes => $controller_hostnames,
nv_physical_volume => $nv_physical_volume,
use_syslog => $use_syslog,
nova_rate_limits => $nova_rate_limits,
cinder_rate_limits => $cinder_rate_limits,
horizon_use_ssl => $horizon_use_ssl,
}
}
# Definition of OpenStack controller nodes.
node /fuel-controller-01/ {
class {'::node_netconfig':
mgmt_ipaddr => $::internal_address,
mgmt_netmask => $::internal_netmask,
public_ipaddr => $::public_address,
public_netmask => $::public_netmask,
stage => 'netconfig',
}
class {'nagios':
proj_name => $proj_name,
services => [
'host-alive','nova-novncproxy','keystone', 'nova-scheduler',
'nova-consoleauth', 'nova-cert', 'haproxy', 'nova-api', 'glance-api',
'glance-registry','horizon', 'rabbitmq', 'mysql'
],
whitelist => ['127.0.0.1', $nagios_master],
hostgroup => 'controller',
}
class { compact_controller: }
}
node /fuel-controller-02/ {
class {'::node_netconfig':
mgmt_ipaddr => $::internal_address,
mgmt_netmask => $::internal_netmask,
public_ipaddr => $::public_address,
public_netmask => $::public_netmask,
stage => 'netconfig',
}
class {'nagios':
proj_name => $proj_name,
services => [
'host-alive','nova-novncproxy','keystone', 'nova-scheduler',
'nova-consoleauth', 'nova-cert', 'haproxy', 'nova-api', 'glance-api',
'glance-registry','horizon', 'rabbitmq', 'mysql'
],
whitelist => ['127.0.0.1', $nagios_master],
hostgroup => 'controller',
}
class { compact_controller: }
}
node /fuel-controller-03/ {
class {'::node_netconfig':
mgmt_ipaddr => $::internal_address,
mgmt_netmask => $::internal_netmask,
public_ipaddr => $::public_address,
public_netmask => $::public_netmask,
stage => 'netconfig',
}
class {'nagios':
proj_name => $proj_name,
services => [
'host-alive','nova-novncproxy','keystone', 'nova-scheduler',
'nova-consoleauth', 'nova-cert', 'haproxy', 'nova-api', 'glance-api',
'glance-registry','horizon', 'rabbitmq', 'mysql'
],
whitelist => ['127.0.0.1', $nagios_master],
hostgroup => 'controller',
}
class { 'compact_controller': quantum_network_node => true }
}
# Definition of OpenStack compute nodes.
node /fuel-compute-[\d+]/ {
#class {'::node_netconfig':
# mgmt_ipaddr => $::internal_address,
# mgmt_netmask => $::internal_netmask,
# public_ipaddr => $::public_address,
# public_netmask => $::public_netmask,
# stage => 'netconfig',
#}
class {'nagios':
proj_name => $proj_name,
services => [
'host-alive', 'nova-compute','nova-network','libvirt'
],
whitelist => ['127.0.0.1', $nagios_master],
hostgroup => 'compute',
}
class { 'openstack::compute':
public_interface => $public_int,
private_interface => $private_interface,
internal_address => $internal_address,
libvirt_type => 'qemu',
fixed_range => $fixed_range,
network_manager => $network_manager,
network_config => { 'vlan_start' => $vlan_start },
multi_host => $multi_host,
sql_connection => "mysql://nova:${nova_db_password}@${internal_virtual_ip}/nova",
rabbit_nodes => $controller_hostnames,
rabbit_password => $rabbit_password,
rabbit_user => $rabbit_user,
rabbit_ha_virtual_ip => $internal_virtual_ip,
glance_api_servers => "${internal_virtual_ip}:9292",
vncproxy_host => $public_virtual_ip,
verbose => $verbose,
vnc_enabled => true,
manage_volumes => $manage_volumes,
nova_user_password => $nova_user_password,
cache_server_ip => $controller_hostnames,
service_endpoint => $internal_virtual_ip,
quantum => $quantum,
quantum_sql_connection => $quantum_sql_connection,
quantum_user_password => $quantum_user_password,
quantum_host => $quantum_net_node_address,
tenant_network_type => $tenant_network_type,
segment_range => $segment_range,
cinder => $cinder_on_computes,
cinder_iscsi_bind_iface=> $cinder_iscsi_bind_iface,
nv_physical_volume => $nv_physical_volume,
db_host => $internal_virtual_ip,
ssh_private_key => 'puppet:///ssh_keys/openstack',
ssh_public_key => 'puppet:///ssh_keys/openstack.pub',
use_syslog => $use_syslog,
nova_rate_limits => $nova_rate_limits,
cinder_rate_limits => $cinder_rate_limits
}
}
# Definition of OpenStack Quantum node.
node /fuel-quantum/ {
class {'::node_netconfig':
mgmt_ipaddr => $::internal_address,
mgmt_netmask => $::internal_netmask,
public_ipaddr => 'none',
save_default_gateway => true,
stage => 'netconfig',
}
if ! $quantum_netnode_on_cnt {
class { 'openstack::quantum_router':
db_host => $internal_virtual_ip,
service_endpoint => $internal_virtual_ip,
auth_host => $internal_virtual_ip,
internal_address => $internal_address,
public_interface => $public_int,
private_interface => $private_interface,
floating_range => $floating_range,
fixed_range => $fixed_range,
create_networks => $create_networks,
verbose => $verbose,
rabbit_password => $rabbit_password,
rabbit_user => $rabbit_user,
rabbit_nodes => $controller_hostnames,
rabbit_ha_virtual_ip => $internal_virtual_ip,
quantum => $quantum,
quantum_user_password => $quantum_user_password,
quantum_db_password => $quantum_db_password,
quantum_db_user => $quantum_db_user,
quantum_db_dbname => $quantum_db_dbname,
quantum_netnode_on_cnt=> false,
quantum_network_node => true,
tenant_network_type => $tenant_network_type,
segment_range => $segment_range,
external_ipinfo => $external_ipinfo,
api_bind_address => $internal_address,
use_syslog => $use_syslog,
}
class { 'openstack::auth_file':
admin_password => $admin_password,
keystone_admin_token => $keystone_admin_token,
controller_node => $internal_virtual_ip,
before => Class['openstack::quantum_router'],
}
}
}

View File

@@ -1,396 +0,0 @@
#
# Parameter values in this file should be changed, taking into consideration your
# networking setup and desired OpenStack settings.
#
# Please consult with the latest Fuel User Guide before making edits.
#
# This is the name of the public interface. The public network provides address space for Floating IPs, as well as public IP accessibility to the API endpoints.
$public_interface = 'eth1'
# This is the name of the internal interface. It will be attached to the management network, where data exchange between components of the OpenStack cluster will happen.
$internal_interface = 'eth0'
# This is the name of the private interface. All traffic within OpenStack tenants' networks will go through this interface.
$private_interface = 'eth2'
# Public and Internal VIPs. These virtual addresses are required by HA topology and will be managed by keepalived.
$internal_virtual_ip = '10.0.125.253'
$public_virtual_ip = '10.0.74.253'
$nodes_harr = [
{
'name' => 'fuel-cobbler',
'role' => 'cobbler',
'internal_address' => '10.0.0.102',
'public_address' => '10.0.204.102',
},
{
'name' => 'fuel-controller-01',
'role' => 'controller',
'internal_address' => '10.0.0.103',
'public_address' => '10.0.204.103',
},
{
'name' => 'fuel-controller-02',
'role' => 'controller',
'internal_address' => '10.0.0.104',
'public_address' => '10.0.204.104',
},
{
'name' => 'fuel-controller-03',
'role' => 'controller',
'internal_address' => '10.0.0.105',
'public_address' => '10.0.204.105',
},
{
'name' => 'fuel-compute-01',
'role' => 'compute',
'internal_address' => '10.0.0.106',
'public_address' => '10.0.204.106',
},
{
'name' => 'fuel-compute-02',
'role' => 'compute',
'internal_address' => '10.0.0.107',
'public_address' => '10.0.204.107',
},
]
$nodes = $nodes_harr
$default_gateway = '10.0.204.1'
$dns_nameservers = ['10.0.204.1','8.8.8.8']
$node = filter_nodes($nodes,'name',$::hostname)
$internal_address = $node[0]['internal_address']
$public_address = $node[0]['public_address']
$internal_netmask = '255.255.255.0'
$public_netmask = '255.255.255.0'
$controller_internal_addresses = nodes_to_hash(filter_nodes($nodes,'role','controller'),'name','internal_address')
$controller_public_addresses = nodes_to_hash(filter_nodes($nodes,'role','controller'),'name','public_address')
$controller_hostnames = keys($controller_internal_addresses)
if $::hostname == 'fuel-controller-01' {
$primary_controller = true
} else {
$primary_controller = false
}
# Specify pools for Floating IP and Fixed IP.
# Floating IP addresses are used for communication of VM instances with the outside world (e.g. Internet).
# Fixed IP addresses are typically used for communication between VM instances.
$create_networks = true
$floating_range = '10.0.74.128/28'
$fixed_range = '10.0.161.128/28'
$num_networks = 1
$network_size = 15
$vlan_start = 300
# If the $external_ipinfo option is not defined, the addresses will be calculated automatically from $floating_range:
# the first address will be defined as an external default router,
# the second address will be attached to an uplink bridge interface (br-ex),
# the remaining addresses will be utilized for the floating IP pool.
$external_ipinfo = {}
## $external_ipinfo = {
## 'public_net_router' => '10.0.74.129',
## 'ext_bridge' => '10.0.74.130',
## 'pool_start' => '10.0.74.131',
## 'pool_end' => '10.0.74.142',
## }
# For VLAN networks: valid VLAN VIDs are 1 through 4094.
# For GRE networks: Valid tunnel IDs are any 32 bit unsigned integer.
$segment_range = '900:999'
# The deployment ID should be set to an integer value (valid range is 0..254)
$deployment_id = '89'
# Here you can enable or disable different services, based on the chosen deployment topology.
$multi_host = true
$cinder = true
$cinder_on_computes = false
$manage_volumes = true
$quantum = true
$auto_assign_floating_ip = false
$glance_backend = 'file'
# Set the Nagios master FQDN
$nagios_master = 'nagios-server.your-domain-name.com'
## proj_name is the environment name used in the Nagios configuration
$proj_name = 'test'
# Set up OpenStack network manager
$network_manager = 'nova.network.manager.FlatDHCPManager'
# Set up the network interface that Cinder uses to export iSCSI targets.
$cinder_iscsi_bind_iface = $internal_interface
# Here you can add physical volumes to cinder. Please replace values with the actual names of devices.
$nv_physical_volume = ['/dev/sdz', '/dev/sdy', '/dev/sdx']
# Specify credentials for different services
$mysql_root_password = 'nova'
$admin_email = 'openstack@openstack.org'
$admin_password = 'nova'
$keystone_db_password = 'nova'
$keystone_admin_token = 'nova'
$glance_db_password = 'nova'
$glance_user_password = 'nova'
$nova_db_password = 'nova'
$nova_user_password = 'nova'
$rabbit_password = 'nova'
$rabbit_user = 'nova'
$quantum_user_password = 'quantum_pass'
$quantum_db_password = 'quantum_pass'
$quantum_db_user = 'quantum'
$quantum_db_dbname = 'quantum'
$tenant_network_type = 'gre'
$quantum_host = $internal_virtual_ip
stage {'netconfig':
before => Stage['main'],
}
class {'l23network': stage=> 'netconfig'}
$quantum_gre_bind_addr = $internal_address
$use_syslog = false
if $use_syslog {
class { "::rsyslog::client":
log_local => true,
log_auth_local => true,
server => '127.0.0.1',
port => '514'
}
}
case $::osfamily {
"Debian": {
$rabbitmq_version_string = '2.8.7-1'
}
"RedHat": {
$rabbitmq_version_string = '2.8.7-2.el6'
}
}
# OpenStack packages to be installed
$openstack_version = {
'keystone' => 'latest',
'glance' => 'latest',
'horizon' => 'latest',
'nova' => 'latest',
'novncproxy' => 'latest',
'cinder' => 'latest',
'rabbitmq_version' => $rabbitmq_version_string,
}
$mirror_type = 'default'
$enable_test_repo = false
$quantum_sql_connection = "mysql://${quantum_db_user}:${quantum_db_password}@${quantum_host}/${quantum_db_dbname}"
$verbose = true
Exec { logoutput => true }
# Globally apply an environment-based tag to all resources on each node.
tag("${::deployment_id}::${::environment}")
stage { 'openstack-custom-repo': before => Stage['netconfig'] }
class { 'openstack::mirantis_repos':
stage => 'openstack-custom-repo',
type=>$mirror_type,
enable_test_repo=>$enable_test_repo,
}
if $::operatingsystem == 'Ubuntu'
{
class { 'openstack::apparmor::disable': stage => 'openstack-custom-repo' }
}
#Rate Limits for cinder and Nova
#Cinder and Nova can rate-limit your requests to API services.
#These limits can be reduced for your installation or usage scenario.
#Change the following variables if you want. The unit is requests per minute.
$nova_rate_limits = { 'POST' => 1000,
'POST_SERVERS' => 1000,
'PUT' => 1000, 'GET' => 1000,
'DELETE' => 1000 }
$cinder_rate_limits = { 'POST' => 1000,
'POST_SERVERS' => 1000,
'PUT' => 1000, 'GET' => 1000,
'DELETE' => 1000 }
sysctl::value { 'net.ipv4.conf.all.rp_filter': value => '0' }
# Dashboard(horizon) https/ssl mode
# false: normal mode with no encryption
# 'default': uses keys supplied with the ssl module package
# 'exist': assumes that the keys (domain name based certificate) are provisioned in advance
# 'custom': requires a fileserver static mount point [ssl_certs] and an existing hostname-based certificate
$horizon_use_ssl = false
# Definition of OpenStack controller nodes.
node /fuel-controller-[\d+]/ {
class {'nagios':
proj_name => $proj_name,
services => [
'host-alive','nova-novncproxy','keystone', 'nova-scheduler',
'nova-consoleauth', 'nova-cert', 'haproxy', 'nova-api', 'glance-api',
'glance-registry','horizon', 'rabbitmq', 'mysql'
],
whitelist => ['127.0.0.1', $nagios_master],
hostgroup => 'controller',
}
class { 'openstack::controller_ha':
controller_public_addresses => $controller_public_addresses,
public_interface => $public_interface,
internal_interface => $internal_interface,
private_interface => $private_interface,
internal_virtual_ip => $internal_virtual_ip,
public_virtual_ip => $public_virtual_ip,
controller_internal_addresses => $controller_internal_addresses,
internal_address => $internal_address,
primary_controller => $primary_controller,
floating_range => $floating_range,
fixed_range => $fixed_range,
multi_host => $multi_host,
network_manager => $network_manager,
num_networks => $num_networks,
network_size => $network_size,
network_config => { 'vlan_start' => $vlan_start },
verbose => $verbose,
auto_assign_floating_ip => $auto_assign_floating_ip,
mysql_root_password => $mysql_root_password,
admin_email => $admin_email,
admin_password => $admin_password,
keystone_db_password => $keystone_db_password,
keystone_admin_token => $keystone_admin_token,
glance_db_password => $glance_db_password,
glance_user_password => $glance_user_password,
nova_db_password => $nova_db_password,
nova_user_password => $nova_user_password,
rabbit_password => $rabbit_password,
rabbit_user => $rabbit_user,
rabbit_nodes => $controller_hostnames,
memcached_servers => $controller_hostnames,
export_resources => false,
glance_backend => $glance_backend,
quantum => $quantum,
quantum_user_password => $quantum_user_password,
quantum_db_password => $quantum_db_password,
quantum_db_user => $quantum_db_user,
quantum_db_dbname => $quantum_db_dbname,
tenant_network_type => $tenant_network_type,
segment_range => $segment_range,
cinder => $cinder,
cinder_iscsi_bind_iface => $cinder_iscsi_bind_iface,
galera_nodes => $controller_hostnames,
manage_volumes => $manage_volumes,
nv_physical_volume => $nv_physical_volume,
use_syslog => $use_syslog,
horizon_use_ssl => $horizon_use_ssl,
nova_rate_limits => $nova_rate_limits,
cinder_rate_limits => $cinder_rate_limits
}
}
# Definition of OpenStack compute nodes.
node /fuel-compute-[\d+]/ {
class {'nagios':
proj_name => $proj_name,
services => [
'host-alive', 'nova-compute','nova-network','libvirt'
],
whitelist => ['127.0.0.1', $nagios_master],
hostgroup => 'compute',
}
class { 'openstack::compute':
public_interface => $public_interface,
private_interface => $private_interface,
internal_address => $internal_address,
libvirt_type => 'qemu',
fixed_range => $fixed_range,
network_manager => $network_manager,
network_config => { 'vlan_start' => $vlan_start },
multi_host => $multi_host,
sql_connection => "mysql://nova:${nova_db_password}@${internal_virtual_ip}/nova",
rabbit_nodes => $controller_hostnames,
rabbit_password => $rabbit_password,
rabbit_user => $rabbit_user,
rabbit_ha_virtual_ip => $internal_virtual_ip,
glance_api_servers => "${internal_virtual_ip}:9292",
vncproxy_host => $public_virtual_ip,
verbose => $verbose,
vnc_enabled => true,
manage_volumes => $manage_volumes,
nv_physical_volume => $nv_physical_volume,
nova_user_password => $nova_user_password,
cache_server_ip => $controller_hostnames,
service_endpoint => $internal_virtual_ip,
quantum => $quantum,
quantum_host => $quantum_host,
quantum_sql_connection => $quantum_sql_connection,
quantum_user_password => $quantum_user_password,
tenant_network_type => $tenant_network_type,
segment_range => $segment_range,
cinder => $cinder_on_computes,
cinder_iscsi_bind_iface => $cinder_iscsi_bind_iface,
db_host => $internal_virtual_ip,
ssh_private_key => 'puppet:///ssh_keys/openstack',
ssh_public_key => 'puppet:///ssh_keys/openstack.pub',
use_syslog => $use_syslog,
nova_rate_limits => $nova_rate_limits,
cinder_rate_limits => $cinder_rate_limits
}
}
# Definition of OpenStack Quantum node.
node /fuel-quantum/ {
class { 'openstack::quantum_router':
db_host => $internal_virtual_ip,
service_endpoint => $internal_virtual_ip,
auth_host => $internal_virtual_ip,
internal_address => $internal_address,
public_interface => $public_interface,
private_interface => $private_interface,
floating_range => $floating_range,
fixed_range => $fixed_range,
create_networks => $create_networks,
verbose => $verbose,
rabbit_password => $rabbit_password,
rabbit_user => $rabbit_user,
rabbit_nodes => $controller_hostnames,
rabbit_ha_virtual_ip => $internal_virtual_ip,
quantum => $quantum,
quantum_user_password => $quantum_user_password,
quantum_db_password => $quantum_db_password,
quantum_db_user => $quantum_db_user,
quantum_db_dbname => $quantum_db_dbname,
tenant_network_type => $tenant_network_type,
external_ipinfo => $external_ipinfo,
segment_range => $segment_range,
api_bind_address => $internal_address,
use_syslog => $use_syslog,
}
class { 'openstack::auth_file':
admin_password => $admin_password,
keystone_admin_token => $keystone_admin_token,
controller_node => $internal_virtual_ip,
before => Class['openstack::quantum_router'],
}
}

View File

@@ -7,18 +7,17 @@ from fuel_test.settings import CREATE_SNAPSHOTS
class MinimalTestCase(CobblerTestCase):
def test_minimal(self):
Manifest().write_openstack_manifest(
Manifest().write_openstack_ha_minimal_manifest(
remote=self.remote(),
template=Template.minimal(), ci=self.ci(),
controllers=self.nodes().controllers,
quantums=self.nodes().quantums,
swift=False,
quantum=True)
self.validate(self.nodes().controllers[:1], 'puppet agent --test')
self.validate(self.nodes().controllers[1:], 'puppet agent --test')
self.validate(self.nodes().controllers[:1], 'puppet agent --test')
if is_not_essex():
self.validate(self.nodes().quantums, 'puppet agent --test')
#if is_not_essex():
# self.validate(self.nodes().quantums, 'puppet agent --test')
self.validate(self.nodes().computes, 'puppet agent --test')
if CREATE_SNAPSHOTS:
self.environment().snapshot('minimal', force=True)

View File

@@ -63,22 +63,22 @@ class Template(object):
@classmethod
def stomp(cls):
return cls(root('deployment', 'puppet', 'mcollective', 'examples',
'site.pp'))
'site_openstack_ha_minimal.pp'))
@classmethod
def minimal(cls):
return cls(root('deployment', 'puppet', 'openstack', 'examples',
'site_openstack_minimal.pp'))
'site_openstack_ha_minimal.pp'))
@classmethod
def compact(cls):
return cls(root('deployment', 'puppet', 'openstack', 'examples',
'site_openstack_compact.pp'))
'site_openstack_ha_compact.pp'))
@classmethod
def full(cls):
return cls(root('deployment', 'puppet', 'openstack', 'examples',
'site_openstack_full.pp'))
'site_openstack_ha_full.pp'))
@classmethod
def nagios(cls):
@@ -248,6 +248,41 @@ class Manifest(object):
self.write_manifest(remote, template)
def write_openstack_ha_minimal_manifest(self, remote, template, ci, controllers, quantums,
proxies=None, use_syslog=True,
quantum=True, loopback=True,
cinder=True, cinder_on_computes=False,
):
template.replace(
internal_virtual_ip=ci.internal_virtual_ip(),
public_virtual_ip=ci.public_virtual_ip(),
floating_range=self.floating_network(ci, quantum),
fixed_range=self.fixed_network(ci,quantum),
mirror_type=self.mirror_type(),
public_interface=self.public_interface(),
internal_interface=self.internal_interface(),
private_interface=self.private_interface(),
nv_physical_volume=self.physical_volumes(),
use_syslog=use_syslog,
cinder=cinder,
cinder_on_computes=cinder_on_computes,
nagios_master = controllers[0].name + '.your-domain-name.com',
external_ipinfo=self.external_ip_info(ci, quantums),
nodes=self.generate_nodes_configs_list(ci),
dns_nameservers=self.generate_dns_nameservers_list(ci),
default_gateway=ci.public_router(),
enable_test_repo=TEST_REPO,
deployment_id = self.deployment_id(ci),
)
if is_not_essex():
template.replace(
quantum=quantum,
quantum_netnode_on_cnt=quantum,
)
self.write_manifest(remote, template)
def write_openstack_manifest(self, remote, template, ci, controllers, quantums,
proxies=None, use_syslog=True,
quantum=True, loopback=True,
@@ -330,9 +365,3 @@ class Manifest(object):
return ci.internal_network().split('.')[2]
except:
return '250'