Merge branch 'grizzly' into grizzly_merge

commit 6d8f250736

CHANGELOG (new file, 12 lines)
@@ -0,0 +1,12 @@
+3.0-alpha-174-g94a98d6
+ - Merge from master branch
+ - Rsyslog tuning
+ - Puppet debug output
+ - Centos 6.4
+
+2.1-folsom-docs-324-g61d1599
+ - Grizzly support for centos simple
+ - Option for PKI auth for keystone (grizzly native)
+ - Nova-conductor as generic nova service at compute nodes
+ - CI scripts changes for grizzly tempest (host only routed IP addresses for public pool)
+ -
@@ -77,9 +77,6 @@ if $cinder_rate_limits {
 }
 
 if $keystone_enabled {
-  cinder_config {
-    'DEFAULT/auth_strategy': value => 'keystone' ;
-  }
   cinder_config {
     'keystone_authtoken/auth_protocol': value => $keystone_auth_protocol;
    'keystone_authtoken/auth_host':     value => $keystone_auth_host;
@@ -43,11 +43,6 @@ node fuel-cobbler {
   Class[cobbler::server] ->
   Class[cobbler::distro::centos64_x86_64]
 
-  # class { cobbler::distro::centos63_x86_64:
-  #   http_iso => "http://10.100.0.1/iso/CentOS-6.3-x86_64-netinstall.iso",
-  #   ks_url   => "http://172.18.8.52/~hex/centos/6.3/os/x86_64",
-  # }
-
   class { cobbler::distro::centos64_x86_64:
     http_iso => "http://download.mirantis.com/epel-fuel-folsom-2.1/CentOS-6.4-x86_64-minimal.iso",
     ks_url   => "http://download.mirantis.com/centos-6.4",
@@ -83,11 +83,6 @@ node fuel-cobbler {
   Class['cobbler::server'] ->
   Class['cobbler::distro::centos64_x86_64']
 
-  # class { 'cobbler::distro::centos63_x86_64':
-  #   http_iso => 'http://10.100.0.1/iso/CentOS-6.3-x86_64-netinstall.iso',
-  #   ks_url   => 'http://172.18.8.52/~hex/centos/6.3/os/x86_64',
-  # }
-
   class { 'cobbler::distro::centos64_x86_64':
     http_iso => 'http://download.mirantis.com/epel-fuel-folsom-2.1/CentOS-6.4-x86_64-minimal.iso',
     ks_url   => 'cobbler',
@@ -14,8 +14,8 @@
 
 
 class cobbler::distro::centos64_x86_64(
-  $http_iso = 'http://download.mirantis.com/epel-fuel-folsom-2.1/CentOS-6.4-x86_64-minimal.iso',
-  $ks_url   = 'http://download.mirantis.com/epel-fuel-folsom-2.1'
+  $http_iso = 'http://download.mirantis.com/epel-fuel-grizzly/isos/x86_64/CentOS-6.4-x86_64-minimal.iso',
+  $ks_url   = 'http://download.mirantis.com/epel-fuel-grizzly'
 ) {
 
   Exec {path => '/usr/bin:/bin:/usr/sbin:/sbin'}
@@ -32,7 +32,7 @@ class cobbler::profile::centos64_x86_64(
   },
   {
     "name" => "Mirantis-epel-fuel-install",
-    "url"  => "http://download.mirantis.com/epel-fuel-folsom-2.1",
+    "url"  => "http://download.mirantis.com/epel-fuel-grizzly",
   }
 ],
 
@@ -102,6 +102,8 @@ wget
 crontabs
 cronie
+ruby-augeas
+yum-plugin-versionlock
 
 # COBBLER EMBEDDED SNIPPET: 'puppet_install_if_enabled'
 # LISTS puppet PACKAGE IF puppet_auto_setup VARIABLE IS SET TO 1
 $SNIPPET('puppet_install_if_enabled')
@@ -114,6 +116,7 @@ $SNIPPET('mcollective_install_if_enabled')
 # HERE ARE COMMANDS THAT WILL BE LAUNCHED JUST AFTER
 # INSTALLATION ITSELF COMPLETED
 %post
+yum versionlock puppet
 echo -e "modprobe nf_conntrack_ipv4\nmodprobe nf_conntrack_ipv6" >> /etc/rc.modules
 chmod +x /etc/rc.modules
 echo -e "net.nf_conntrack_max=1048576" >> /etc/sysctl.conf
@@ -100,6 +100,8 @@ wget
 crontabs
 cronie
+ruby-augeas
+yum-plugin-versionlock
 
 # COBBLER EMBEDDED SNIPPET: 'puppet_install_if_enabled'
 # LISTS puppet PACKAGE IF puppet_auto_setup VARIABLE IS SET TO 1
 $SNIPPET('puppet_install_if_enabled')
@@ -112,6 +114,7 @@ $SNIPPET('mcollective_install_if_enabled')
 # HERE ARE COMMANDS THAT WILL BE LAUNCHED JUST AFTER
 # INSTALLATION ITSELF COMPLETED
 %post
+yum versionlock puppet
 echo -e "modprobe nf_conntrack_ipv4\nmodprobe nf_conntrack_ipv6" >> /etc/rc.modules
 chmod +x /etc/rc.modules
 echo -e "net.nf_conntrack_max=1048576" >> /etc/sysctl.conf
@@ -177,20 +177,25 @@ Puppet::Type.type(:service).provide :pacemaker, :parent => Puppet::Provider::Cor
         next unless node[:state] == :online
         debug("getting last ops on #{node[:uname]} for #{@resource[:name]}")
         all_operations = XPath.match(@@cib,"cib/status/node_state[@uname='#{node[:uname]}']/lrm/lrm_resources/lrm_resource/lrm_rsc_op[starts-with(@id,'#{@resource[:name]}')]")
+        debug("ALL OPERATIONS:\n\n #{all_operations.inspect}")
         next if all_operations.nil?
         completed_ops = all_operations.select{|op| op.attributes['op-status'].to_i != -1 }
+        debug("COMPLETED OPERATIONS:\n\n #{completed_ops.inspect}")
         next if completed_ops.nil?
         start_stop_ops = completed_ops.select{|op| ["start","stop","monitor"].include? op.attributes['operation']}
+        debug("START/STOP OPERATIONS:\n\n #{start_stop_ops.inspect}")
         next if start_stop_ops.nil?
         sorted_operations = start_stop_ops.sort do
-          |a,b| a.attributes['call-id'] <=> b.attributes['call-id']
+          |a,b| a.attributes['call-id'].to_i <=> b.attributes['call-id'].to_i
         end
         good_operations = sorted_operations.select do |op|
           op.attributes['rc-code'] == '0' or
           op.attributes['operation'] == 'monitor'
         end
+        debug("GOOD OPERATIONS :\n\n #{good_operations.inspect}")
         next if good_operations.nil?
         last_op = good_operations.last
+        debug("LAST GOOD OPERATION :\n\n '#{last_op.inspect}' '#{last_op.nil?}' '#{last_op}'")
+        next if last_op.nil?
         last_successful_op = nil
         if ['start','stop'].include?(last_op.attributes['operation'])
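The `.to_i` change in the sort above is the substantive fix here: `call-id` comes out of the CIB XML as a string, and lexical comparison mis-orders multi-digit ids, so the provider could pick the wrong "last" operation. A standalone sketch of the difference (plain Ruby; the ids are made up):

    # Lexical vs numeric ordering of CIB call-ids.
    call_ids = ["9", "10", "2"]
    puts call_ids.sort { |a, b| a <=> b }.inspect
    # => ["10", "2", "9"]    -- string order picks "9" as the last op
    puts call_ids.sort { |a, b| a.to_i <=> b.to_i }.inspect
    # => ["2", "9", "10"]    -- numeric order picks "10", as intended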
@@ -204,6 +209,7 @@ Puppet::Type.type(:service).provide :pacemaker, :parent => Puppet::Provider::Cor
             last_successful_op = 'start'
           end
         end
+        debug("LAST SUCCESSFUL OP :\n\n #{last_successful_op.inspect}")
         @last_successful_operations << last_successful_op if !last_successful_op.nil?
       end
       @last_successful_operations
@@ -214,7 +220,7 @@ Puppet::Type.type(:service).provide :pacemaker, :parent => Puppet::Provider::Cor
   # end
 
   def enable
-    crm('resource','manage', @resource[:name])
+    crm('resource','manage', get_service_name)
   end
 
   def enabled?
@@ -223,7 +229,7 @@ Puppet::Type.type(:service).provide :pacemaker, :parent => Puppet::Provider::Cor
   end
 
   def disable
-    crm('resource','unmanage',@resource[:name])
+    crm('resource','unmanage',get_service_name)
   end
 
   #TODO: think about per-node start/stop/restart of services
@@ -138,6 +138,18 @@ class corosync (
     require => Package['corosync']
   }
 
+  if $::osfamily == "RedHat" {
+    file { '/var/lib/pacemaker/cores/root':
+      ensure  => directory,
+      mode    => '0750',
+      owner   => 'hacluster',
+      group   => 'haclient',
+      recurse => true,
+      purge   => true,
+      require => Package['corosync']
+    }
+  }
+
   if $::osfamily == 'Debian' {
     exec { 'enable corosync':
       command => 'sed -i s/START=no/START=yes/ /etc/default/corosync',
@@ -198,7 +198,9 @@ Puppet::Type.type(:firewall).provide :iptables, :parent => Puppet::Provider::Fir
 
     # Normalise all rules to CIDR notation.
     [:source, :destination].each do |prop|
-      hash[prop] = Puppet::Util::IPCidr.new(hash[prop]).cidr unless hash[prop].nil?
+      if hash[prop] =~ /^(\d{1,3}\.){3}\d{1,3}(:?\/(\d+))?$/
+        hash[prop] = Puppet::Util::IPCidr.new(hash[prop]).cidr
+      end
     end
 
     [:dport, :sport, :port, :state].each do |prop|
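The guard added above stops non-address values (negated matches, hostnames, and the like) from being force-converted to CIDR. A quick probe of the same regex (plain Ruby; the sample values are illustrative, not from the module):

    # Which values the new guard lets through to IPCidr normalisation.
    pattern = /^(\d{1,3}\.){3}\d{1,3}(:?\/(\d+))?$/
    ["10.0.0.0/8", "192.168.1.5", "! 10.0.0.0/8", "db.example.com"].each do |v|
      puts "#{v.inspect} => #{v =~ pattern ? 'normalised' : 'left as-is'}"
    end
    # only the first two match; the rest keep their original form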
@@ -252,9 +254,9 @@ Puppet::Type.type(:firewall).provide :iptables, :parent => Puppet::Provider::Fir
     # Proto should equal 'all' if undefined
     hash[:proto] = "all" if !hash.include?(:proto)
 
-    # If the jump parameter is set to one of: ACCEPT, REJECT or DROP then
+    # If the jump parameter is set to one of: ACCEPT, REJECT, NOTRACK or DROP then
     # we should set the action parameter instead.
-    if ['ACCEPT','REJECT','DROP'].include?(hash[:jump]) then
+    if ['ACCEPT','REJECT','DROP','NOTRACK'].include?(hash[:jump]) then
       hash[:action] = hash[:jump].downcase
       hash.delete(:jump)
     end
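The hunk above extends the existing jump-to-action normalisation to NOTRACK. How a parsed rule hash is rewritten (standalone plain Ruby; the hash is a made-up example of the provider's intermediate state):

    # jump => action normalisation, as in the hunk above.
    hash = { :jump => 'NOTRACK', :proto => 'all' }
    if ['ACCEPT', 'REJECT', 'DROP', 'NOTRACK'].include?(hash[:jump])
      hash[:action] = hash[:jump].downcase
      hash.delete(:jump)
    end
    p hash   # => {:proto=>"all", :action=>"notrack"}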
@@ -87,11 +87,12 @@ Puppet::Type.newtype(:firewall) do
         * accept - the packet is accepted
         * reject - the packet is rejected with a suitable ICMP response
         * drop - the packet is dropped
+        * notrack - the packet is excluded from connection tracking
 
         If you specify no value it will simply match the rule but perform no
         action unless you provide a provider specific parameter (such as *jump*).
       EOS
-      newvalues(:accept, :reject, :drop)
+      newvalues(:accept, :reject, :drop, :notrack)
     end
 
     # Generic matching properties
@@ -14,8 +14,8 @@ class galera (
   $node_address         = $ipaddress_eth0,
   $setup_multiple_gcomm = true,
   $skip_name_resolve    = false,
-  $node_addresses       = [
-    $ipaddress_eth0]) {
+  $node_addresses       = [$ipaddress_eth0],
+) {
   include galera::params
 
   $mysql_user = $::galera::params::mysql_user
@@ -25,13 +25,6 @@ class galera (
   case $::osfamily {
     'RedHat' : {
 
-      if (!$::selinux == 'false') and !defined(Class['selinux']) {
-        class { 'selinux':
-          mode   => 'disabled',
-          before => Package['MySQL-server']
-        }
-      }
-
       file { '/etc/init.d/mysql':
         ensure => present,
         mode   => 755,
@@ -65,12 +58,6 @@ class galera (
       }
     }
     'Debian' : {
-      if (!$::selinux == 'false') and !defined(Class['selinux']) {
-        class { 'selinux':
-          mode   => 'disabled',
-          before => Package['MySQL-server']
-        }
-      }
 
       file { '/etc/init.d/mysql':
         ensure => present,
|
||||
|
||||
exec { "wait-initial-sync":
|
||||
logoutput => true,
|
||||
command => "/usr/bin/mysql -Nbe \"show status like 'wsrep_local_state_comment'\" | /bin/grep -q Synced && sleep 10",
|
||||
command => "/usr/bin/mysql -Nbe \"show status like 'wsrep_local_state_comment'\" | /bin/grep -q -e Synced -e Initialized && sleep 10",
|
||||
try_sleep => 5,
|
||||
tries => 60,
|
||||
refreshonly => true,
|
||||
@ -225,4 +212,15 @@ class galera (
|
||||
node_addresses => $node_addresses,
|
||||
node_address => $node_address,
|
||||
}
|
||||
|
||||
if $primary_controller {
|
||||
exec { "start-new-galera-cluster":
|
||||
path => "/usr/bin:/usr/sbin:/bin:/sbin",
|
||||
logoutput => true,
|
||||
command => '/etc/init.d/mysql stop; sleep 10; killall -w mysqld && ( killall -w -9 mysqld_safe || : ) && sleep 10; /etc/init.d/mysql start --wsrep-cluster-address=gcomm:// &',
|
||||
onlyif => "[ -f /var/lib/mysql/grastate.dat ] && (cat /var/lib/mysql/grastate.dat | awk '\$1 == \"uuid:\" {print \$2}' | awk '{if (\$0 == \"00000000-0000-0000-0000-000000000000\") exit 0; else exit 1}')",
|
||||
require => Service["mysql-galera"],
|
||||
before => Exec ["wait-for-synced-state"],
|
||||
}
|
||||
}
|
||||
}
|
||||
|
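The `onlyif` pipeline above bootstraps a new cluster only when `grastate.dat` still carries the all-zero UUID. The same test, sketched in plain Ruby for readability (path and file format as in the command above):

    # True when grastate.dat marks an uninitialised Galera state.
    ZERO_UUID = '00000000-0000-0000-0000-000000000000'
    def needs_new_cluster?(path = '/var/lib/mysql/grastate.dat')
      return false unless File.exist?(path)
      line = File.readlines(path).find { |l| l.start_with?('uuid:') }
      line ? line.split[1] == ZERO_UUID : false
    end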
@@ -50,14 +50,10 @@ wsrep_provider_options="pc.ignore_sb = no;ist.recv_addr=<%= node_address %>;gmca
 # Logical cluster name. Should be the same for all nodes.
 wsrep_cluster_name="<%= cluster_name -%>"
 
-<% if primary_controller -%>
-wsrep_cluster_address="gcomm://"
-<% else -%>
 <% if setup_multiple_gcomm -%>
-wsrep_cluster_address="gcomm://<%= @node_addresses.reject{|ip| ip == hostname || ip == node_address || ip == l3_fqdn_hostname }.collect {|ip| ip + ':' + 4567.to_s }.join ',' %>"
+wsrep_cluster_address="gcomm://<%= @node_addresses.reject{|ip| ip == hostname || ip == node_address || ip == l3_fqdn_hostname }.collect {|ip| ip + ':' + 4567.to_s }.join ',' %>?pc.wait_prim=no"
 <% else -%>
-wsrep_cluster_address="gcomm://<%= @node_addresses.first %>:4567"
+wsrep_cluster_address="gcomm://<%= @node_addresses.first %>:4567?pc.wait_prim=no"
 <% end -%>
-<% end -%>
 
 # Human-readable node name (non-unique). Hostname by default.
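The ERB expression above assembles the `gcomm://` member list by excluding the local node and appending the Galera port. The same chain in plain Ruby, with made-up addresses (the real template also filters by hostname and FQDN):

    node_addresses = ['10.0.0.101', '10.0.0.102', '10.0.0.103']
    node_address   = '10.0.0.101'   # the local node
    members = node_addresses.reject { |ip| ip == node_address }
                            .collect { |ip| ip + ':' + 4567.to_s }
                            .join(',')
    puts "wsrep_cluster_address=\"gcomm://#{members}?pc.wait_prim=no\""
    # => wsrep_cluster_address="gcomm://10.0.0.102:4567,10.0.0.103:4567?pc.wait_prim=no"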
@@ -26,7 +26,7 @@ class horizon(
   $cache_server_port = '11211',
   $swift             = false,
   $quantum           = false,
-  $package_ensure    = present,
+  $package_ensure    = present,
   $horizon_app_links = false,
   $keystone_host     = '127.0.0.1',
   $keystone_port     = 5000,
@@ -37,6 +37,7 @@ class horizon(
   $http_port  = 80,
   $https_port = 443,
   $use_ssl    = false,
+  $log_level  = 'DEBUG',
 ) {
 
   include horizon::params
@@ -66,6 +67,11 @@ class horizon(
     mode => '0644',
   }
 
+  file {'/usr/share/openstack-dashboard/':
+    recurse   => true,
+    subscribe => Package['dashboard'],
+  }
+
   case $use_ssl {
     'exist': { # SSL certificate already exists
       $generate_sslcert_names = true
@@ -109,10 +109,9 @@ LOGGING = {
             'class': 'logging.StreamHandler',
         },
         'file': {
-            # Set the level to "DEBUG" for verbose output logging.
-            'level': 'DEBUG',
+            'level': '<%= log_level %>',
             'class': 'logging.FileHandler',
-            'filename': '/var/log/horizon/horizon.log'
+            'filename': '<%= scope.lookupvar("horizon::params::logdir") %>/horizon.log'
         },
     },
     'loggers': {
@@ -148,12 +147,10 @@ LOGGING = {
         }
     }
 }
 
-LOGIN_URL = '<%= root_url %>/auth/login/'
-LOGOUT_URL = '<%= root_url %>/auth/logout/'
-LOGIN_REDIRECT_URL = '<%= root_url %>/syspanel/'
-
+LOGIN_URL='<%= scope.lookupvar("horizon::params::root_url") %>/auth/login/'
+LOGIN_REDIRECT_URL='<%= scope.lookupvar("horizon::params::root_url") %>'
 
 # The Ubuntu package includes pre-compressed JS and compiled CSS to allow
 # offline compression by default. To enable online compression, install
 # the node-less package and enable the following option.
-COMPRESS_OFFLINE = True
+COMPRESS_OFFLINE = False
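Both horizon template changes replace hard-coded values with ERB placeholders that Puppet fills from class parameters. A minimal self-contained illustration of that substitution (plain Ruby; the value is just an example):

    require 'erb'
    log_level = 'DEBUG'   # would come from the horizon class parameter
    template  = "'level': '<%= log_level %>',"
    puts ERB.new(template).result(binding)
    # => 'level': 'DEBUG',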
@@ -21,6 +21,10 @@
 #   Defaults to False.
 # [catalog_type] Type of catalog that keystone uses to store endpoints, services. Optional.
 #   Defaults to sql. (Also accepts template)
+# [token_format] Format keystone uses for tokens. Optional. Defaults to UUID (PKI is the grizzly-native mode, though).
+#   Supports PKI and UUID.
+# [cache_dir] Directory created when token_format is PKI. Optional.
+#   Defaults to /var/cache/keystone.
 # [enabled] If the keystone services should be enabled. Optional. Defaults to true.
 # [sql_connection] URL used to connect to the database.
 # [idle_timeout] Timeout when db connections should be reaped.
@@ -54,15 +58,20 @@ class keystone(
   $debug          = 'False',
   $use_syslog     = false,
   $catalog_type   = 'sql',
+  $token_format   = 'UUID',
+  # $token_format = 'PKI',
+  $cache_dir      = '/var/cache/keystone',
   $enabled        = true,
   $sql_connection = 'sqlite:////var/lib/keystone/keystone.db',
   $idle_timeout   = '200'
 ) {
 
   validate_re($catalog_type, 'template|sql')
+  validate_re($token_format, 'UUID|PKI')
 
   Keystone_config<||> ~> Service['keystone']
   Keystone_config<||> ~> Exec<| title == 'keystone-manage db_sync'|>
+  Package['keystone'] ~> Exec<| title == 'keystone-manage pki_setup'|> ~> Service['keystone']
 
   # TODO implement syslog features
   if $use_syslog {
@@ -72,7 +81,7 @@ class keystone(
       path    => "/etc/keystone/logging.conf",
       owner   => "keystone",
       group   => "keystone",
-      require => [User['keystone'],Group['keystone'],File['/etc/keystone']]
+      require => File['/etc/keystone'],
     }
     ##TODO add rsyslog module config
   } else {
@@ -86,7 +95,6 @@ class keystone(
       owner  => 'keystone',
       group  => 'keystone',
       mode   => '0644',
-      #require => Package['keystone'],
       notify => Service['keystone'],
     }
 
@@ -226,10 +234,26 @@ class keystone(
     provider => $::keystone::params::service_provider,
   }
+
+  keystone_config { 'signing/token_format': value => $token_format }
+  if($token_format == 'PKI') {
+    file { $cache_dir:
+      ensure => directory,
+    }
+
+    # keystone-manage pki_setup should be run as the same system user that will be running the keystone service, to ensure
+    # proper ownership of the private key file and the associated certificates
+    exec { 'keystone-manage pki_setup':
+      path        => '/usr/bin',
+      user        => 'keystone',
+      refreshonly => true,
+    }
+  }
 
   if $enabled {
     # this probably needs to happen more often than just when the db is
     # created
     exec { 'keystone-manage db_sync':
       user        => 'keystone',
       path        => '/usr/bin',
       refreshonly => true,
       notify      => Service['keystone'],
@@ -1,24 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<projectDescription>
-  <name>l23network</name>
-  <comment></comment>
-  <projects>
-    <project>stdlib</project>
-  </projects>
-  <buildSpec>
-    <buildCommand>
-      <name>org.cloudsmith.geppetto.pp.dsl.ui.modulefileBuilder</name>
-      <arguments>
-      </arguments>
-    </buildCommand>
-    <buildCommand>
-      <name>org.eclipse.xtext.ui.shared.xtextBuilder</name>
-      <arguments>
-      </arguments>
-    </buildCommand>
-  </buildSpec>
-  <natures>
-    <nature>org.cloudsmith.geppetto.pp.dsl.ui.puppetNature</nature>
-    <nature>org.eclipse.xtext.ui.shared.xtextNature</nature>
-  </natures>
-</projectDescription>
@@ -1,2 +0,0 @@
---color
---format progress
@@ -1,14 +0,0 @@
-#source :rubygems
-source 'https://rubygems.org'
-
-gem 'rake'
-gem 'puppet-lint'
-gem 'rspec'
-gem 'rspec-puppet'
-
-## Will come in handy later on. But you could just use
-# gem 'puppet'
-puppetversion = ENV.key?('PUPPET_VERSION') ? "~> #{ENV['PUPPET_VERSION']}" : ['>= 2.7']
-gem 'puppet', puppetversion
-gem 'puppetlabs_spec_helper'
-
@@ -1,8 +1,10 @@
 L23network
 ==========
-Puppet module for configuring network interfaces, 802.1q vlans and bondings on 2 and 3 level.
-Can work together with open vSwitch or standart linux way. At this moment support CentOS 6.3 (RHEL6) and Ubuntu 12.04 or above.
+Puppet module for configuring network interfaces at the 2nd and 3rd OSI layers (802.1q vlans, access ports, NIC bonding, IP address assignment, DHCP, and interfaces without IP addresses).
+It can work together with Open vSwitch or with the standard Linux tools.
+At this moment it supports CentOS 6.3+ (RHEL6) and Ubuntu 12.04 or above.
+
 The L23network module has the same behavior on both operating systems.
 
 
 Usage
@@ -22,7 +24,9 @@ If you do not plan to use open vSwitch you can disable it:
     class {'l23network': use_ovs=>false, stage=> 'netconfig'}
 
 
-L2 network configuation
+L2 network configuration (Open vSwitch only)
 -----------------------
 
 Current layout is:
@@ -52,24 +56,102 @@ If you do not define type for port (or define '') then ovs-vsctl will work by de
 
 You can use the skip_existing option if you do not want to interrupt the configuration when adding an existing port or bridge.
 
-L3 network configuration
------------------------
-
-    l23network::l3::ifconfig {"some_name0": interface=>'eth0', ipaddr=>'192.168.0.1', netmask=>'255.255.255.0'}
-    l23network::l3::ifconfig {"some_name1": interface=>'br-ex', ipaddr=>'192.168.10.1', netmask=>'255.255.255.0', ifname_order_prefix='ovs'}
-    l23network::l3::ifconfig {"some_name2": interface=>'aaa0', ipaddr=>'192.168.10.1', netmask=>'255.255.255.0', ifname_order_prefix='zzz'}
-
-Option 'ipaddr' can contain IP address, 'dhcp', or 'none' (for interface with no IP address).
-When CentOS or Ubuntu starts they initialize and configure network interfaces in alphabetical order.
-In example above we change the order of configuration process by ifname_order_prefix keyword. The order will be:
-
-    ifcfg-eth0
-
+L3 network configuration
+------------------------
+
+### Simple IP address definition, DHCP or address-less interfaces
+
+    l23network::l3::ifconfig {"eth0": ipaddr=>'192.168.1.1/24'}
+    l23network::l3::ifconfig {"xXxXxXx":
+      interface => 'eth1',
+      ipaddr    => '192.168.2.1',
+      netmask   => '255.255.255.0'
+    }
+    l23network::l3::ifconfig {"eth2": ipaddr=>'dhcp'}
+    l23network::l3::ifconfig {"eth3": ipaddr=>'none'}
+
+The *ipaddr* option can contain an IP address, 'dhcp', or 'none'. In this example we describe the configuration of 4 network interfaces:
+* Interface *eth0* uses the short, CIDR-notated form of IP address definition.
+* Interface *eth1* uses the classic *ipaddr* plus *netmask* definition.
+* Interface *eth2* will be configured to use the DHCP protocol.
+* Interface *eth3* will be configured as an interface without an IP address.
+  This is often needed to create a "master" interface for 802.1q vlans (in the native Linux implementation)
+  or a slave interface for bonding.
+
+The CIDR-notated form of the IP address takes priority over the classic *ipaddr* and *netmask* definition.
+If you omit *netmask* and do not use the CIDR-notated form, the default *netmask* value '255.255.255.0' will be used.
+
+### Multiple IP addresses for one interface (aliases)
+
+    l23network::l3::ifconfig {"eth0":
+      ipaddr => ['192.168.0.1/24', '192.168.1.1/24', '192.168.2.1/24']
+    }
+
+You can pass a list of CIDR-notated IP addresses to the *ipaddr* parameter to assign many IP addresses to one interface.
+In this case aliases (not subinterfaces) will be created. The array can contain one or more elements.
+
+### UP and DOWN interface order
+
+    l23network::l3::ifconfig {"eth1":
+      ipaddr=>'192.168.1.1/24'
+    }
+    l23network::l3::ifconfig {"br-ex":
+      ipaddr=>'192.168.10.1/24',
+      ifname_order_prefix='ovs'
+    }
+    l23network::l3::ifconfig {"aaa0":
+      ipaddr=>'192.168.20.1/24',
+      ifname_order_prefix='zzz'
+    }
+
+At startup, CentOS and Ubuntu bring up and configure network interfaces in the alphabetical order of their
+interface configuration file names. In the example above we change the configuration order
+with the *ifname_order_prefix* keyword. We will get this order:
+
+    ifcfg-eth1
+    ifcfg-ovs-br-ex
+    ifcfg-zzz-aaa0
+
+And the OS will configure interfaces br-ex and aaa0 after eth1.
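The prefix trick works because the init scripts process `ifcfg-*` files in lexical order; a one-line check of the ordering claimed above (plain Ruby, editorial illustration):

    puts ['ifcfg-zzz-aaa0', 'ifcfg-ovs-br-ex', 'ifcfg-eth1'].sort.inspect
    # => ["ifcfg-eth1", "ifcfg-ovs-br-ex", "ifcfg-zzz-aaa0"]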
+### Default gateway
+
+    l23network::l3::ifconfig {"eth1":
+      ipaddr                => '192.168.2.5/24',
+      gateway               => '192.168.2.1',
+      check_by_ping         => '8.8.8.8',
+      check_by_ping_timeout => '30'
+    }
+
+In this example we define the default *gateway* and options for waiting until the network comes up.
+The *check_by_ping* parameter defines an IP address that will be pinged. Puppet blocks and waits for a
+response for up to *check_by_ping_timeout* seconds.
+*check_by_ping* can be an IP address, 'gateway', or 'none' to disable the check.
+By default the gateway will be pinged.
+
+### DNS-specific options
+
+    l23network::l3::ifconfig {"eth1":
+      ipaddr          => '192.168.2.5/24',
+      dns_nameservers => ['8.8.8.8','8.8.4.4'],
+      dns_search      => ['aaa.com','bbb.com'],
+      dns_domain      => 'qqq.com'
+    }
+
+We can also specify DNS nameservers and a search list that will be inserted (by the resolvconf lib) into /etc/resolv.conf.
+The *dns_domain* option is implemented only on Ubuntu.
+
+### DHCP-specific options
+
+    l23network::l3::ifconfig {"eth2":
+      ipaddr        => 'dhcp',
+      dhcp_hostname => 'compute312',
+      dhcp_nowait   => false,
+    }
+
 
 
 Bonding
 -------
 ### Using standard Linux ifenslave bonding
@@ -96,9 +178,13 @@ More information about bonding of network interfaces you can find in manuals for
 * https://help.ubuntu.com/community/UbuntuBonding
 * http://wiki.centos.org/TipsAndTricks/BondingInterfaces
 
-### Using open vSwitch
-In open vSwitch for bonding of two network interfaces you need to add a special resource "bond" to bridge.
-In this example we add "eth1" and "eth2" interfaces to bridge "bridge0":
+### Using Open vSwitch
+For bonding two interfaces you need to:
+* Specify an OVS bridge.
+* Specify the special resource "bond" and add it to the bridge, with its bond-specific parameters.
+* Assign an IP address to the newly created network interface (if needed).
+
+In this example we add the "eth1" and "eth2" interfaces to bridge "bridge0" as bond "bond1".
 
     l23network::l2::bridge{'bridge0': } ->
     l23network::l2::bond{'bond1':
@@ -108,6 +194,10 @@ In this example we add "eth1" and "eth2" interfaces to bridge "bridge0":
         'lacp=active',
         'other_config:lacp-time=fast'
       ],
+    } ->
+    l23network::l3::ifconfig {'bond1':
+      ipaddr  => '192.168.232.1',
+      netmask => '255.255.255.0',
     }
 
 Open vSwitch provides a lot of parameters for different configurations.
@@ -115,16 +205,19 @@ We can specify them in "properties" option as list of parameter=value
 (or parameter:key=value) strings.
 You can find more parameters in the [open vSwitch documentation page](http://openvswitch.org/support/).
 
 
 
 802.1q vlan access ports
 ------------------------
 ### Using the standard Linux way
 
 We can use tagged vlans over ordinary network interfaces and over bonds.
+The L23network module supports two types of vlan interface naming:
+* *vlanXXX* -- the 802.1q tag XXX is taken from the vlan interface name. You must specify the
+  parent interface name in the **vlandev** parameter.
+* *eth0.XXX* -- the 802.1q tag XXX and the parent interface name are taken from the vlan interface name.
 
-If you are using 802.1q vlans over bonds it is recommended to use the first one.
+If you are using 802.1q vlans over bonds it is strongly recommended to use the first one.
 
 In this example we can see both types:
 
@@ -155,19 +248,30 @@ In this example we can see both types:
 ### Using open vSwitch
 In the open vSwitch all internal traffic is virtually tagged.
 To create a 802.1q tagged access port you need to specify a vlan tag when adding a port to the bridge.
-In example above we create two ports with tags 10 and 20:
+In the example below we create two ports with tags 10 and 20, and assign an IP address to the interface with tag 10:
 
     l23network::l2::bridge{'bridge0': } ->
     l23network::l2::port{'vl10':
-      bridge          => 'bridge0',
-      type            => 'internal',
-      port_properties => ['tag=10'],
+      bridge          => 'bridge0',
+      type            => 'internal',
+      port_properties => [
+        'tag=10'
+      ],
     } ->
     l23network::l2::port{'vl20':
-      bridge          => 'bridge0',
-      type            => 'internal',
-      port_properties => ['tag=20'],
-    }
+      bridge          => 'bridge0',
+      type            => 'internal',
+      port_properties => [
+        'tag=20'
+      ],
+    } ->
+    l23network::l3::ifconfig {'vl10':
+      ipaddr => '192.168.101.1/24',
+    } ->
+    l23network::l3::ifconfig {'vl20':
+      ipaddr => 'none',
+    }
 
 You can get more details about vlans in open vSwitch at the [open vSwitch documentation page](http://openvswitch.org/support/config-cookbooks/vlan-configuration-cookbook/).
 
@@ -85,7 +85,7 @@ define l23network::l3::create_br_iface (
     skip_existing => $se,
     require       => L23network::L2::Bridge["$bridge"]
   } ->
-  l23network::l3::ifconfig {$interface: # no quotes here, $interface is an array!!!
+  l23network::l3::ifconfig {$interface: # no quotes here, $interface may be an array!!!
     ipaddr              => 'none',
     ifname_order_prefix => '0',
     require             => L23network::L2::Bond["$ovs_bond_name"],
@@ -98,7 +98,7 @@ define l23network::l3::create_br_iface (
     skip_existing => $se,
     require       => L23network::L2::Bridge["$bridge"]
   } ->
-  l23network::l3::ifconfig {"$interface": # USE quotes since the only one interface name is provided!!!!!
+  l23network::l3::ifconfig {"$interface": # USE quotes!!!!!
     ipaddr    => 'none',
     vlandev   => $lnx_interface_vlandev,
     bond_mode => $lnx_interface_bond_mode,
deployment/puppet/nova/Gemfile (new symbolic link)
@@ -0,0 +1 @@
+.gemfile
@@ -13,11 +13,16 @@ Puppet::Type.type(:nova_floating).provide(:nova_manage) do
   end
 
   def create
-    nova_manage("floating", "create", resource[:network])
+    nova_manage("floating", "create", resource[:network])
   end
 
   def destroy
     nova_manage("floating", "delete", resource[:network])
   end
 
+  def parse
+    /([0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3})(\/([0-9]{1,2}))?/ =~ resource[:network]
+    [Regexp.last_match(1), Regexp.last_match(3)]
+  end
+
 end
@@ -5,8 +5,8 @@ Puppet::Type.newtype(:nova_floating) do
   ensurable
 
   newparam(:network, :namevar => true) do
-    desc "Network (ie, 192.168.1.0/24 or 192.168.1.128/25 etc.)"
-    newvalues(/^[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\/[0-9]{1,2}$/)
+    desc "Network or ip (ie, 192.168.1.0/24, 192.168.1.128/25, 192.168.1.15 etc.)"
+    newvalues(/^[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}(\/[0-9]{1,2})?$/)
   end
 
 end
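The widened `newvalues` pattern now accepts a bare IPv4 address as well as a CIDR network. Probing the new regex directly (plain Ruby; the sample strings mirror the spec file further down):

    pattern = /^[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}(\/[0-9]{1,2})?$/
    ['192.168.1.0/24', '192.168.1.15', '192.168.1.0/245', 'qweqweqweqwe'].each do |v|
      puts "#{v} => #{v =~ pattern ? 'accepted' : 'rejected'}"
    end
    # the last two are rejected, as the nova_floating type spec expects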
@@ -22,6 +22,7 @@ class nova::api(
   $auth_protocol      = 'http',
   $admin_tenant_name  = 'services',
   $admin_user         = 'nova',
+  $cinder             = true,
   $enabled_apis       = 'ec2,osapi_compute,metadata',
   $nova_rate_limits   = undef,
   $nova_user_password = undef, # an empty password generates an error and saves you from a non-working installation
@@ -80,10 +81,10 @@ class nova::api(
     service_name => $::nova::params::api_service_name,
   }
 
-  if $enabled_apis =~ /osapi_volume/ {
-    $volume_api_class = 'nova.volume.api.API'
-  } else {
+  if $cinder {
     $volume_api_class = 'nova.volume.cinder.API'
+  } else {
+    $volume_api_class = 'nova.volume.api.API'
   }
 
   nova_config {
@@ -56,6 +56,13 @@ class nova::compute::libvirt (
     ensure => present,
   }
 
+  file_line { 'no_qemu_selinux':
+    path    => '/etc/libvirt/qemu.conf',
+    line    => 'security_driver="none"',
+    require => Package[$::nova::params::libvirt_package_name],
+    notify  => Service['libvirt']
+  }
+
   service { 'libvirt' :
     name   => $::nova::params::libvirt_service_name,
     ensure => running,
deployment/puppet/nova/manifests/conductor.pp (new file, 17 lines)
@@ -0,0 +1,17 @@
+#
+# installs nova conductor package and service
+#
+class nova::conductor(
+  $enabled        = false,
+  $ensure_package = 'present'
+) {
+
+  include nova::params
+
+  nova::generic_service { 'conductor':
+    enabled        => $enabled,
+    package_name   => $::nova::params::conductor_package_name,
+    service_name   => $::nova::params::conductor_service_name,
+    ensure_package => $ensure_package,
+  }
+}
@@ -1,7 +1,10 @@
-define nova::manage::floating ( $network ) {
+define nova::manage::floating (
+  $network = $name
+) {
 
-  File['/etc/nova/nova.conf'] -> Nova_floating[$name]
-  Exec<| title == 'nova-db-sync' |> -> Nova_floating[$name]
+  File['/etc/nova/nova.conf'] ->
+  Exec<| title == 'nova-db-sync' |> ->
+  Nova_floating[$name]
 
   nova_floating { $name:
     ensure => present,
@@ -11,6 +11,7 @@ $libvirt_type_kvm = 'qemu-kvm'
     $cert_package_name        = 'openstack-nova-cert'
     $common_package_name      = 'openstack-nova-common'
     $compute_package_name     = 'openstack-nova-compute'
+    $conductor_package_name   = 'openstack-nova-conductor'
     $consoleauth_package_name = 'openstack-nova-console'
     $doc_package_name         = 'openstack-nova-doc'
     $libvirt_package_name     = 'libvirt'
@@ -27,6 +28,7 @@ $libvirt_type_kvm = 'qemu-kvm'
     $api_service_name         = 'openstack-nova-api'
     $cert_service_name        = 'openstack-nova-cert'
     $compute_service_name     = 'openstack-nova-compute'
+    $conductor_service_name   = 'openstack-nova-conductor'
     $consoleauth_service_name = 'openstack-nova-consoleauth'
     $console_service_name     = 'openstack-nova-console'
     $libvirt_service_name     = 'libvirtd'
@@ -51,6 +53,7 @@ $libvirt_type_kvm = 'qemu-kvm'
     $cert_package_name        = 'nova-cert'
     $common_package_name      = 'nova-common'
     $compute_package_name     = 'nova-compute'
+    $conductor_package_name   = 'nova-conductor'
     $doc_package_name         = 'nova-doc'
     $libvirt_package_name     = 'libvirt-bin'
     $network_package_name     = 'nova-network'
@@ -66,6 +69,7 @@ $libvirt_type_kvm = 'qemu-kvm'
     $api_service_name         = 'nova-api'
     $cert_service_name        = 'nova-cert'
     $compute_service_name     = 'nova-compute'
+    $conductor_service_name   = 'nova-conductor'
     $consoleauth_service_name = 'nova-consoleauth'
     $console_service_name     = 'nova-console'
     $libvirt_service_name     = 'libvirt-bin'
@@ -0,0 +1,38 @@
+require 'spec_helper'
+
+describe Puppet::Type.type(:nova_floating).provider(:nova_manage) do
+
+  let(:resource) { Puppet::Type.type(:nova_floating).new(:name => '192.168.1.1' ) }
+  let(:provider) { resource.provider }
+
+  describe "#create_by_name" do
+    it "should create floating" do
+      provider.parse().should == ["192.168.1.1", nil]
+    end
+  end
+
+  for net in ['10.0.0.1', '10.0.0.0/16'] do
+    describe "#create #{net}" do
+      it "should create floating for #{net}" do
+        resource[:network] = net
+        provider.expects(:nova_manage).with("floating", "create", net)
+        provider.create()
+      end
+    end
+    describe "#destroy #{net}" do
+      it "should destroy floating for #{net}" do
+        resource[:network] = net
+        provider.expects(:nova_manage).with("floating", "delete", net)
+        provider.destroy()
+      end
+    end
+    describe "#check masklen #{net}" do
+      it "should return the right values for #{net}" do
+        resource[:network] = net
+        /([0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3})(\/([0-9]{1,2}))?/ =~ net
+        provider.parse().should == [Regexp.last_match(1), Regexp.last_match(3)]
+      end
+    end
+  end
+
+end
deployment/puppet/nova/spec/unit/type/nova_floating_spec.rb (new file, 26 lines)
@@ -0,0 +1,26 @@
+require 'puppet'
+require 'puppet/type/nova_floating'
+describe 'Puppet::Type.type(:nova_floating)' do
+  before :each do
+    @nova_floating = Puppet::Type.type(:nova_floating).new(:name => 'test_IP', :network => '192.168.1.2')
+  end
+
+  it 'should accept a valid IP address' do
+    @nova_floating[:network] = '192.168.1.1'
+    @nova_floating[:network] == '192.168.1.1'
+  end
+  it 'should accept a valid CIDR subnet' do
+    @nova_floating[:network] = '192.168.1.0/24'
+    @nova_floating[:network] == '192.168.1.0/24'
+  end
+  it 'should not accept a masklen of more than 2 digits' do
+    expect {
+      @nova_floating[:network] = '192.168.1.0/245'
+    }.to raise_error(Puppet::Error, /Invalid value/)
+  end
+  it 'should not accept invalid network values' do
+    expect {
+      @nova_floating[:network] = 'qweqweqweqwe'
+    }.to raise_error(Puppet::Error, /Invalid value/)
+  end
+end
@@ -472,6 +472,7 @@ Exec { logoutput => true }
 # Globally apply an environment-based tag to all resources on each node.
 tag("${::deployment_id}::${::environment}")
 
+
 stage { 'openstack-custom-repo': before => Stage['netconfig'] }
 class { 'openstack::mirantis_repos':
   stage => 'openstack-custom-repo',
@@ -480,6 +481,15 @@ class { 'openstack::mirantis_repos':
   repo_proxy=>$repo_proxy,
 }
 
+if !defined(Class['selinux']) and ($::osfamily == 'RedHat') {
+  class { 'selinux':
+    mode=>"disabled",
+    stage=>"openstack-custom-repo"
+  }
+}
+
+
+
 if $::operatingsystem == 'Ubuntu' {
   class { 'openstack::apparmor::disable': stage => 'openstack-custom-repo' }
 }
@@ -473,6 +473,7 @@ Exec { logoutput => true }
 # Globally apply an environment-based tag to all resources on each node.
 tag("${::deployment_id}::${::environment}")
 
+
 stage { 'openstack-custom-repo': before => Stage['netconfig'] }
 class { 'openstack::mirantis_repos':
   stage => 'openstack-custom-repo',
@@ -484,7 +485,15 @@ class { 'openstack::mirantis_repos':
 class { '::openstack::firewall':
   stage => 'openstack-firewall'
 }
 
+if !defined(Class['selinux']) and ($::osfamily == 'RedHat') {
+  class { 'selinux':
+    mode=>"disabled",
+    stage=>"openstack-custom-repo"
+  }
+}
+
+
 if $::operatingsystem == 'Ubuntu' {
   class { 'openstack::apparmor::disable': stage => 'openstack-custom-repo' }
 }
@@ -548,9 +557,9 @@ class compact_controller (
   quantum_external_ipinfo => $external_ipinfo,
   tenant_network_type     => $tenant_network_type,
   segment_range           => $segment_range,
-  cinder                  => $is_cinder_node,
+  cinder                  => $cinder,
   cinder_iscsi_bind_addr  => $cinder_iscsi_bind_addr,
-  manage_volumes          => $manage_volumes,
+  manage_volumes          => $cinder ? { false => $manage_volumes, default => $is_cinder_node },
   galera_nodes            => $controller_hostnames,
   nv_physical_volume      => $nv_physical_volume,
   use_syslog              => $use_syslog,
@@ -604,9 +613,11 @@ node /fuel-controller-[\d+]/ {
   swift_local_net_ip      => $swift_local_net_ip,
   master_swift_proxy_ip   => $master_swift_proxy_ip,
   sync_rings              => ! $primary_proxy,
-  cinder                  => $is_cinder_node,
+  # disable cinder on the storage node in order to avoid
+  # duplicate class calls with different parameters
+  cinder                  => false,
   cinder_iscsi_bind_addr  => $cinder_iscsi_bind_addr,
-  manage_volumes          => $manage_volumes,
+  manage_volumes          => false,
   nv_physical_volume      => $nv_physical_volume,
   db_host                 => $internal_virtual_ip,
   service_endpoint        => $internal_virtual_ip,
@@ -689,7 +700,7 @@ node /fuel-compute-[\d+]/ {
   tenant_network_type    => $tenant_network_type,
   segment_range          => $segment_range,
   cinder                 => $cinder,
-  manage_volumes         => $is_cinder_node ? { true => $manage_volumes, false => false},
+  manage_volumes         => $cinder ? { false => $manage_volumes, default => $is_cinder_node },
   cinder_iscsi_bind_addr => $cinder_iscsi_bind_addr,
   nv_physical_volume     => $nv_physical_volume,
   db_host                => $internal_virtual_ip,
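The repeated selector `$cinder ? { false => $manage_volumes, default => $is_cinder_node }` is easy to misread: the global `$manage_volumes` only passes through when `$cinder` is exactly false; otherwise the per-node `$is_cinder_node` decides. Its truth table, sketched in plain Ruby (an editorial illustration, not part of the commit):

    # Effective manage_volumes value for the Puppet selector used above.
    def manage_volumes(cinder, global_manage_volumes, is_cinder_node)
      cinder == false ? global_manage_volumes : is_cinder_node
    end
    [[false, true, false], [true, true, false], [true, false, true]].each do |c, mv, icn|
      puts "cinder=#{c} manage_volumes=#{mv} is_cinder_node=#{icn} -> #{manage_volumes(c, mv, icn)}"
    end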
@@ -494,6 +494,7 @@ Exec { logoutput => true }
 # Globally apply an environment-based tag to all resources on each node.
 tag("${::deployment_id}::${::environment}")
 
+
 stage { 'openstack-custom-repo': before => Stage['netconfig'] }
 class { 'openstack::mirantis_repos':
   stage => 'openstack-custom-repo',
@@ -506,6 +507,14 @@ class { 'openstack::mirantis_repos':
   stage => 'openstack-firewall'
 }
 
+if !defined(Class['selinux']) and ($::osfamily == 'RedHat') {
+  class { 'selinux':
+    mode=>"disabled",
+    stage=>"openstack-custom-repo"
+  }
+}
+
+
 if $::operatingsystem == 'Ubuntu' {
   class { 'openstack::apparmor::disable': stage => 'openstack-custom-repo' }
 }
@@ -588,9 +597,9 @@ class ha_controller (
   quantum_external_ipinfo => $external_ipinfo,
   tenant_network_type     => $tenant_network_type,
   segment_range           => $segment_range,
-  cinder                  => $is_cinder_node,
+  cinder                  => $cinder,
   cinder_iscsi_bind_addr  => $cinder_iscsi_bind_addr,
-  manage_volumes          => $manage_volumes,
+  manage_volumes          => $cinder ? { false => $manage_volumes, default => $is_cinder_node },
   galera_nodes            => $controller_hostnames,
   nv_physical_volume      => $nv_physical_volume,
   use_syslog              => $use_syslog,
@@ -671,7 +680,7 @@ node /fuel-compute-[\d+]/ {
   tenant_network_type    => $tenant_network_type,
   segment_range          => $segment_range,
   cinder                 => $cinder,
-  manage_volumes         => $is_cinder_node ? { true => $manage_volumes, false => false},
+  manage_volumes         => $cinder ? { false => $manage_volumes, default => $is_cinder_node },
   cinder_iscsi_bind_addr => $cinder_iscsi_bind_addr,
   nv_physical_volume     => $nv_physical_volume,
   db_host                => $internal_virtual_ip,
@@ -714,9 +723,9 @@ node /fuel-swift-[\d+]/ {
   swift_zone             => $swift_zone,
   swift_local_net_ip     => $swift_local_net_ip,
   master_swift_proxy_ip  => $master_swift_proxy_ip,
-  cinder                 => $is_cindernode,
+  cinder                 => $cinder,
   cinder_iscsi_bind_addr => $cinder_iscsi_bind_addr,
-  manage_volumes         => $manage_volumes,
+  manage_volumes         => $cinder ? { false => $manage_volumes, default => $is_cinder_node },
   nv_physical_volume     => $nv_physical_volume,
   db_host                => $internal_virtual_ip,
   service_endpoint       => $internal_virtual_ip,
@@ -172,7 +172,7 @@ $vlan_start = 300
 
 # Segmentation type for isolating traffic between tenants
 # Consult Openstack Quantum docs
-$tenant_network_type = 'gre'
+$tenant_network_type = 'vlan'
 
 # Which IP address will be used for creating GRE tunnels.
 $quantum_gre_bind_addr = $internal_address
@@ -192,7 +192,7 @@ $external_ipinfo = {}
 # Quantum segmentation range.
 # For VLAN networks: valid VLAN VIDs can be 1 through 4094.
 # For GRE networks: Valid tunnel IDs can be any 32-bit unsigned integer.
-$segment_range = '900:999'
+$segment_range = '300:349'
 
 # Set up OpenStack network manager. It is used ONLY in nova-network.
 # Consult Openstack nova-network docs for possible values.
@@ -441,6 +441,7 @@ Exec<| title == 'clocksync' |>->Exec<| title == 'post-nova_config' |>
 # Globally apply an environment-based tag to all resources on each node.
 tag("${::deployment_id}::${::environment}")
 
+
 stage { 'openstack-custom-repo': before => Stage['netconfig'] }
 class { 'openstack::mirantis_repos':
   stage => 'openstack-custom-repo',
@@ -453,6 +454,15 @@ class { 'openstack::mirantis_repos':
   stage => 'openstack-firewall'
 }
 
+if !defined(Class['selinux']) and ($::osfamily == 'RedHat') {
+  class { 'selinux':
+    mode=>"disabled",
+    stage=>"openstack-custom-repo"
+  }
+}
+
+
+
 if $::operatingsystem == 'Ubuntu' {
   class { 'openstack::apparmor::disable': stage => 'openstack-custom-repo' }
 }
@@ -515,9 +525,9 @@ class compact_controller (
   quantum_external_ipinfo => $external_ipinfo,
   tenant_network_type     => $tenant_network_type,
   segment_range           => $segment_range,
-  cinder                  => $is_cinder_node,
+  cinder                  => $cinder,
   cinder_iscsi_bind_addr  => $cinder_iscsi_bind_addr,
-  manage_volumes          => $manage_volumes,
+  manage_volumes          => $cinder ? { false => $manage_volumes, default => $is_cinder_node },
   galera_nodes            => $controller_hostnames,
   nv_physical_volume      => $nv_physical_volume,
   use_syslog              => $use_syslog,
@@ -602,7 +612,7 @@ node /fuel-compute-[\d+]/ {
   segment_range          => $segment_range,
   cinder                 => $cinder,
   cinder_iscsi_bind_addr => $cinder_iscsi_bind_addr,
-  manage_volumes         => $is_cinder_node ? { true => $manage_volumes, false => false},
+  manage_volumes         => $cinder ? { false => $manage_volumes, default => $is_cinder_node },
   nv_physical_volume     => $nv_physical_volume,
   db_host                => $internal_virtual_ip,
   ssh_private_key        => 'puppet:///ssh_keys/openstack',
@@ -140,7 +140,7 @@ $vlan_start = 300
 
 # Segmentation type for isolating traffic between tenants
 # Consult Openstack Quantum docs
-$tenant_network_type = 'gre'
+$tenant_network_type = 'vlan'
 
 # Which IP address will be used for creating GRE tunnels.
 $quantum_gre_bind_addr = $internal_address
@@ -160,7 +160,7 @@ $external_ipinfo = {}
 # Quantum segmentation range.
 # For VLAN networks: valid VLAN VIDs can be 1 through 4094.
 # For GRE networks: Valid tunnel IDs can be any 32-bit unsigned integer.
-$segment_range = '900:999'
+$segment_range = '300:349'
 
 # Set up OpenStack network manager. It is used ONLY in nova-network.
 # Consult Openstack nova-network docs for possible values.
@@ -389,6 +389,7 @@ Exec<| title == 'clocksync' |>->Service<| title == 'cinder-volume' |>
 Exec<| title == 'clocksync' |>->Service<| title == 'cinder-api' |>
 Exec<| title == 'clocksync' |>->Service<| title == 'cinder-scheduler' |>
 Exec<| title == 'clocksync' |>->Exec<| title == 'keystone-manage db_sync' |>
+Exec<| title == 'clocksync' |>->Exec<| title == 'keystone-manage pki_setup' |>
 Exec<| title == 'clocksync' |>->Exec<| title == 'glance-manage db_sync' |>
 Exec<| title == 'clocksync' |>->Exec<| title == 'nova-manage db sync' |>
 Exec<| title == 'clocksync' |>->Exec<| title == 'initial-db-sync' |>
@@ -403,6 +404,7 @@ Exec<| title == 'clocksync' |>->Exec<| title == 'post-nova_config' |>
 # Globally apply an environment-based tag to all resources on each node.
 tag("${::deployment_id}::${::environment}")
 
+
 stage { 'openstack-custom-repo': before => Stage['netconfig'] }
 class { 'openstack::mirantis_repos':
   stage => 'openstack-custom-repo',
@@ -416,6 +418,14 @@ class { 'openstack::mirantis_repos':
   stage => 'openstack-firewall'
 }
 
+if !defined(Class['selinux']) and ($::osfamily == 'RedHat') {
+  class { 'selinux':
+    mode=>"disabled",
+    stage=>"openstack-custom-repo"
+  }
+}
+
+
 if $::operatingsystem == 'Ubuntu' {
   class { 'openstack::apparmor::disable': stage => 'openstack-custom-repo' }
 }
@@ -479,9 +489,9 @@ class simple_controller (
   quantum_external_ipinfo => $external_ipinfo,
   tenant_network_type     => $tenant_network_type,
   segment_range           => $segment_range,
-  cinder                  => $is_cinder_node,
+  cinder                  => $cinder,
   cinder_iscsi_bind_addr  => $cinder_iscsi_bind_addr,
-  manage_volumes          => $manage_volumes,
+  manage_volumes          => $cinder ? { false => $manage_volumes, default => $is_cinder_node },
   nv_physical_volume      => $nv_physical_volume,
   use_syslog              => $use_syslog,
   nova_rate_limits        => $nova_rate_limits,
@@ -597,8 +607,8 @@ node /fuel-compute-[\d+]/ {
   verbose                => $verbose,
   segment_range          => $segment_range,
   cinder                 => $cinder,
-  manage_volumes         => $is_cinder_node ? { true => $manage_volumes, false => false},
-  nv_physical_volume     => $nv_physical_volume,
+  manage_volumes         => $cinder ? { false => $manage_volumes, default => $is_cinder_node },
+  nv_physical_volume     => $nv_physical_volume,
   cinder_iscsi_bind_addr => $cinder_iscsi_bind_addr,
   use_syslog             => $use_syslog,
   nova_rate_limits       => $nova_rate_limits,
@@ -1,92 +1,380 @@
 #
+# Example of how to deploy a basic single-node openstack environment.
+# Parameter values in this file should be changed, taking into consideration your
+# networking setup and desired OpenStack settings.
+#
+# Please consult the latest Fuel User Guide before making edits.
+#
+
+### GENERAL CONFIG ###
+# This section sets main parameters such as hostnames and IP addresses of different nodes
+
-# deploy a script that can be used to test nova
-class { 'openstack::test_file': }
-
-####### shared variables ##################
+# This is the name of the public interface. The public network provides address space for Floating IPs, as well as public IP accessibility to the API endpoints.
+$public_interface = 'eth1'
+
+# This is the name of the internal interface. It will be attached to the management network, where data exchange between components of the OpenStack cluster will happen.
+$internal_interface = 'eth0'
+
+# This is the name of the private interface. All traffic within OpenStack tenants' networks will go through this interface.
+$private_interface = 'eth2'
+
+$nodes_harr = [
+  {
+    'name'             => 'fuel-cobbler',
+    'role'             => 'cobbler',
+    'internal_address' => '10.0.0.102',
+    'public_address'   => '10.0.204.102',
+  },
+  {
+    'name'             => 'fuel-controller-01',
+    'role'             => 'controller',
+    'internal_address' => '10.0.0.103',
+    'public_address'   => '10.0.204.103',
+  },
+  {
+    'name'             => 'fuel-controller-01',
+    'role'             => 'compute',
+    'internal_address' => '10.0.0.103',
+    'public_address'   => '10.0.204.103',
+  },
+]
+$nodes = $nodes_harr
+$default_gateway = '10.0.204.1'
+
+# Specify nameservers here.
+# These should point to the cobbler node IP, or to specially prepared nameservers if you know what you are doing.
+$dns_nameservers = ['10.0.204.1','8.8.8.8']
+
+# Specify netmasks for internal and external networks.
+$internal_netmask = '255.255.255.0'
+$public_netmask = '255.255.255.0'
+
+
-# this section is used to specify global variables that will
-# be used in the deployment of multi and single node openstack
-# environments
+$node = filter_nodes($nodes,'name',$::hostname)
+$internal_address = $node[0]['internal_address']
+$public_address = $node[0]['public_address']
+
-# assumes that eth0 is the public interface
-$public_interface = 'eth0'
-# assumes that eth1 is the interface that will be used for the vm network
-# this configuration assumes this interface is active but does not have an
-# ip address allocated to it.
-$private_interface = 'eth1'
-# credentials
-$admin_email = 'root@localhost'
+$controllers = merge_arrays(filter_nodes($nodes,'role','primary-controller'), filter_nodes($nodes,'role','controller'))
+$controller_internal_address = $controllers[0]['internal_address']
+$controller_public_address = $controllers[0]['public_address']
+
+$ha_provider = 'generic'
+
+# Set nagios master fqdn
+$nagios_master = 'nagios-server.localdomain'
+## proj_name is the name of the environment in the nagios configuration
+$proj_name = 'test'
+
+# Specify if your installation contains multiple Nova controllers. Defaults to true as it is the most common scenario.
+$multi_host = false
+
+# Specify different DB credentials for various services
+$mysql_root_password = 'nova'
+$admin_email = 'openstack@openstack.org'
+$admin_password = 'nova'
-$keystone_db_password = 'keystone_db_pass'
-$keystone_admin_token = 'keystone_admin_token'
-$nova_db_password = 'nova_pass'
-$nova_user_password = 'nova_pass'
-$glance_db_password = 'glance_pass'
-$glance_user_password = 'glance_pass'
-$horizon_secret_key = 'dummy_secret_key'
-$mysql_root_password = 'sql_pass'
-$rabbit_password = 'openstack_rabbit_password'
-$rabbit_user = 'openstack_rabbit_user'
-$fixed_range = '10.0.58.0/24'
-$floating_range = '10.0.75.128/27'
-$vlan_start = 300
-# switch this to true to have all service log at verbose
-$verbose = true
-# by default it does not enable atomatically adding floating IPs
+$keystone_db_password = 'nova'
+$keystone_admin_token = 'nova'
+
+$glance_db_password = 'nova'
+$glance_user_password = 'nova'
+
+$nova_db_password = 'nova'
+$nova_user_password = 'nova'
+
+$rabbit_password = 'nova'
+$rabbit_user = 'nova'
+
+# End DB credentials section
+
+### GENERAL CONFIG END ###
+
+### NETWORK/QUANTUM ###
+# Specify network/quantum specific settings
+
+# Should we use quantum or nova-network(deprecated).
+# Consult OpenStack documentation for differences between them.
+$quantum = false
+$quantum_netnode_on_cnt = true
+
+# Specify network creation criteria:
+# Should puppet automatically create networks?
+$create_networks = true
+# Fixed IP addresses are typically used for communication between VM instances.
+$fixed_range = '10.0.198.128/27'
+# Floating IP addresses are used for communication of VM instances with the outside world (e.g. Internet).
+$floating_range = '10.0.204.128/28'
+
+# These parameters are passed to the previously specified network manager, e.g. nova-manage network create.
+# Not used in Quantum.
+# Consult openstack docs for the corresponding network manager.
+# https://fuel-dev.mirantis.com/docs/0.2/pages/0050-installation-instructions.html#network-setup
+$num_networks = 1
+$network_size = 31
+$vlan_start = 300
+
+# Quantum
+
+# Segmentation type for isolating traffic between tenants
+# Consult Openstack Quantum docs
+$tenant_network_type = 'gre'
+
+# Which IP address will be used for creating GRE tunnels.
+$quantum_gre_bind_addr = $internal_address
+
+# If $external_ipinfo option is not defined, the addresses will be allocated automatically from $floating_range:
+# the first address will be defined as an external default router,
+# the second address will be attached to an uplink bridge interface,
+# the remaining addresses will be utilized for the floating IP address pool.
+$external_ipinfo = {}
+## $external_ipinfo = {
+##   'public_net_router' => '10.0.74.129',
+##   'ext_bridge'        => '10.0.74.130',
+##   'pool_start'        => '10.0.74.131',
+##   'pool_end'          => '10.0.74.142',
+## }
+
+# Quantum segmentation range.
+# For VLAN networks: valid VLAN VIDs can be 1 through 4094.
+# For GRE networks: Valid tunnel IDs can be any 32-bit unsigned integer.
+$segment_range = '900:999'
+
+# Set up OpenStack network manager. It is used ONLY in nova-network.
+# Consult Openstack nova-network docs for possible values.
+$network_manager = 'nova.network.manager.FlatDHCPManager'
+
+# Assign floating IPs to VMs on startup automatically?
+$auto_assign_floating_ip = false
+
-# Cinder service
-$cinder = false
-$quantum = false
-$swift = false
-$use_syslog = false
+# Database connections
+$sql_connection = "mysql://nova:${nova_db_password}@${controller_internal_address}/nova"
+
+$public_int   = $public_interface
+$internal_int = $internal_interface
+
 #Network configuration
 stage {'netconfig':
   before => Stage['main'],
 }
-class {'l23network': stage=> 'netconfig'}
-$quantum_gre_bind_addr = $internal_address
-
 # Packages repo setup
+class {'l23network': use_ovs=>$quantum, stage=> 'netconfig'}
+class node_netconfig (
+  $mgmt_ipaddr,
+  $mgmt_netmask  = '255.255.255.0',
+  $public_ipaddr = undef,
+  $public_netmask= '255.255.255.0',
+  $save_default_gateway=false,
+  $quantum = $quantum,
+) {
+  if $quantum {
+    l23network::l3::create_br_iface {'mgmt':
+      interface            => $internal_interface, # !!! NO $internal_int /sv !!!
+      bridge               => $internal_br,
+      ipaddr               => $mgmt_ipaddr,
+      netmask              => $mgmt_netmask,
+      dns_nameservers      => $dns_nameservers,
+      save_default_gateway => $save_default_gateway,
+    } ->
+    l23network::l3::create_br_iface {'ex':
+      interface => $public_interface, # !! NO $public_int /sv !!!
+      bridge    => $public_br,
+      ipaddr    => $public_ipaddr,
+      netmask   => $public_netmask,
+      gateway   => $default_gateway,
+    }
+  } else {
+    # nova-network mode
+    l23network::l3::ifconfig {$public_int:
+      ipaddr  => $public_ipaddr,
+      netmask => $public_netmask,
+      gateway => $default_gateway,
+    }
+    l23network::l3::ifconfig {$internal_int:
+      ipaddr          => $mgmt_ipaddr,
+      netmask         => $mgmt_netmask,
+      dns_nameservers => $dns_nameservers,
+    }
+  }
+  l23network::l3::ifconfig {$private_interface: ipaddr=>'none' }
+  class { cobbler::checksum_bootpc: }
+}
+### NETWORK/QUANTUM END ###
+
+
# This parameter specifies the identifier of the current cluster. It is needed in case of a
# multiple-environment installation. Each cluster requires a unique integer value.
# Valid identifier range is 1 to 254.
$deployment_id = '69'

# Below you can enable or disable various services based on the chosen deployment topology:
### CINDER/VOLUME ###

# Should we use cinder or nova-volume (obsolete)?
# Consult openstack docs for differences between them.
$cinder = true

# Choose which nodes to install cinder onto:
# 'compute' -> compute nodes will run cinder
# 'controller' -> controller nodes will run cinder
# 'storage' -> storage nodes will run cinder
# 'fuel-controller-XX' -> specify particular host(s) by hostname
# 'XXX.XXX.XXX.XXX' -> specify particular host(s) by IP address
# 'all' -> compute, controller, and storage nodes will run cinder (excluding swift and proxy nodes)

$cinder_nodes = ['controller']
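
# A hypothetical mixed selection for illustration only (placeholder hostname
# and address, combining the role, hostname and IP forms described above):
## $cinder_nodes = ['controller', 'fuel-controller-01', '10.0.0.5']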

# Set this to true if you want cinder-volume to be installed on the host.
# Otherwise only the API and scheduler services will be installed.
$manage_volumes = true

# Set up the network interface that Cinder uses to export iSCSI targets.
$cinder_iscsi_bind_addr = $internal_address

# Below you can add physical volumes to cinder. Please replace values with the actual names of devices.
# This parameter defines which partitions to aggregate into the cinder-volumes or nova-volumes LVM VG.
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# USE EXTREME CAUTION WITH THIS SETTING! IF THIS PARAMETER IS DEFINED,
# IT WILL AGGREGATE THE VOLUMES INTO AN LVM VOLUME GROUP
# AND ALL THE DATA THAT RESIDES ON THESE VOLUMES WILL BE LOST!
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# Leave this parameter empty if you want to create the [cinder|nova]-volumes VG yourself.
$nv_physical_volume = ['/dev/sdz', '/dev/sdy', '/dev/sdx']

# Evaluate cinder node selection
if ($cinder) {
if (member($cinder_nodes,'all')) {
$is_cinder_node = true
} elsif (member($cinder_nodes,$::hostname)) {
$is_cinder_node = true
} elsif (member($cinder_nodes,$internal_address)) {
$is_cinder_node = true
} elsif ($node[0]['role'] =~ /controller/ ) {
$is_cinder_node = member($cinder_nodes,'controller')
} else {
$is_cinder_node = member($cinder_nodes,$node[0]['role'])
}
} else {
$is_cinder_node = false
}

### CINDER/VOLUME END ###

### GLANCE and SWIFT ###

# Which backend to use for glance
# Supported backends are "swift" and "file"
$glance_backend = 'file'

# Use loopback device for swift:
# set 'loopback' or false
# This parameter controls where swift partitions are located:
# on physical partitions or inside loopback devices.
$swift_loopback = false
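
# For illustration, the loopback variant would be (hypothetical):
## $swift_loopback = 'loopback'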

### Glance and swift END ###

### Syslog ###
# Enable error message reporting to rsyslog. Rsyslog must be installed in this case.
$use_syslog = false
if $use_syslog {
class { "::rsyslog::client":
log_local => true,
log_auth_local => true,
server => '127.0.0.1',
port => '514'
}
}

### Syslog END ###

case $::osfamily {
"Debian": {
$rabbitmq_version_string = '2.8.7-1'
}
"RedHat": {
$rabbitmq_version_string = '2.8.7-2.el6'
}
}

# OpenStack packages to be installed
$openstack_version = {
'keystone' => 'latest',
'glance' => 'latest',
'horizon' => 'latest',
'nova' => 'latest',
'novncproxy' => 'latest',
'cinder' => 'latest',
'rabbitmq_version' => $rabbitmq_version_string,
}

# Which package repo mirror to use. Currently "default".
# "custom" is used by Mirantis for testing purposes.
# A local puppet-managed repo option is planned for future releases.
# If you want to set up a local repository, you will need to manually adjust mirantis_repos.pp,
# though it is NOT recommended.
$mirror_type = 'default'
$enable_test_repo = false
$repo_proxy = undef
$use_upstream_mysql = true

# This parameter specifies the verbosity level of log messages
# in openstack components config. Currently, it disables or enables debugging.
$verbose = true

# Rate limits for Cinder and Nova.
# Cinder and Nova can rate-limit your requests to API services.
# These limits can be reduced for your installation or usage scenario.
# Change the following variables if you want. They are measured in requests per minute.
$nova_rate_limits = {
'POST' => 1000,
'POST_SERVERS' => 1000,
'PUT' => 1000, 'GET' => 1000,
'DELETE' => 1000
}
$cinder_rate_limits = {
'POST' => 1000,
'POST_SERVERS' => 1000,
'PUT' => 1000, 'GET' => 1000,
'DELETE' => 1000
}

Exec { logoutput => true }
# Specify desired NTP servers here.
# If you leave it undef, pool.ntp.org will be used.

$ntp_servers = ['pool.ntp.org']

# This parameter specifies the identifier of the current cluster. It is needed in case of a
# multiple-environment installation. Each cluster requires a unique integer value.
# Valid identifier range is 1 to 254.
$deployment_id = '59'

# Globally apply an environment-based tag to all resources on each node.
tag("${::deployment_id}::${::environment}")

class {'openstack::clocksync': ntp_servers=>$ntp_servers}

# Exec clocksync from openstack::clocksync before services
# connecting to the AMQP server are started.

Exec<| title == 'clocksync' |>->Nova::Generic_service<| |>
Exec<| title == 'clocksync' |>->Service<| title == 'quantum-l3' |>
Exec<| title == 'clocksync' |>->Service<| title == 'quantum-dhcp-service' |>
Exec<| title == 'clocksync' |>->Service<| title == 'quantum-ovs-plugin-service' |>
Exec<| title == 'clocksync' |>->Service<| title == 'cinder-volume' |>
Exec<| title == 'clocksync' |>->Service<| title == 'cinder-api' |>
Exec<| title == 'clocksync' |>->Service<| title == 'cinder-scheduler' |>
Exec<| title == 'clocksync' |>->Exec<| title == 'keystone-manage db_sync' |>
Exec<| title == 'clocksync' |>->Exec<| title == 'keystone-manage pki_setup' |>
Exec<| title == 'clocksync' |>->Exec<| title == 'glance-manage db_sync' |>
Exec<| title == 'clocksync' |>->Exec<| title == 'nova-manage db sync' |>
Exec<| title == 'clocksync' |>->Exec<| title == 'initial-db-sync' |>
Exec<| title == 'clocksync' |>->Exec<| title == 'post-nova_config' |>

### END OF PUBLIC CONFIGURATION PART ###
# Normally, you do not need to change anything below this line.

# Globally apply an environment-based tag to all resources on each node.
tag("${::deployment_id}::${::environment}")

stage { 'openstack-custom-repo': before => Stage['netconfig'] }
class { 'openstack::mirantis_repos':
stage => 'openstack-custom-repo',
@ -100,27 +388,29 @@ class { 'openstack::mirantis_repos':
stage => 'openstack-firewall'
}

# OpenStack packages and customized component versions to be installed.
# Use 'latest' to get the most recent ones, or specify an exact version if you need to install a custom one.
case $::osfamily {
"Debian": {
$rabbitmq_version_string = '2.8.7-1'
}
"RedHat": {
$rabbitmq_version_string = '2.8.7-2.el6'
if !defined(Class['selinux']) and ($::osfamily == 'RedHat') {
class { 'selinux':
mode=>"disabled",
stage=>"openstack-custom-repo"
}
}

$openstack_version = {
'keystone' => 'latest',
'glance' => 'latest',
'horizon' => 'latest',
'nova' => 'latest',
'novncproxy' => 'latest',
'cinder' => 'latest',
'rabbitmq_version' => $rabbitmq_version_string,

if $::operatingsystem == 'Ubuntu' {
class { 'openstack::apparmor::disable': stage => 'openstack-custom-repo' }
}

sysctl::value { 'net.ipv4.conf.all.rp_filter': value => '0' }

# Dashboard (horizon) https/ssl mode
# false: normal mode with no encryption
# 'default': uses keys supplied with the ssl module package
# 'exist': assumes that the keys (domain-name-based certificate) are provisioned in advance
# 'custom': requires a fileserver static mount point [ssl_certs] and an existing hostname-based certificate
$horizon_use_ssl = false
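
# For illustration, serving the dashboard with pre-provisioned certificates
# would be (hypothetical):
## $horizon_use_ssl = 'exist'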
$horizon_secret_key = 'dummy_secret_key'

# Every node should be deployed as an all-in-one OpenStack installation.
node default {
include stdlib
@ -128,40 +418,69 @@ node default {
stage => 'setup'
}

class {'::node_netconfig':
mgmt_ipaddr => $::internal_address,
mgmt_netmask => $::internal_netmask,
public_ipaddr => $::public_address,
public_netmask => $::public_netmask,
stage => 'netconfig',
}

class {'nagios':
proj_name => $proj_name,
services => [
'host-alive','nova-novncproxy','keystone', 'nova-scheduler',
'nova-consoleauth', 'nova-cert', 'nova-api', 'glance-api',
'glance-registry','horizon', 'rabbitmq', 'mysql',
],
whitelist => ['127.0.0.1', $nagios_master],
hostgroup => 'controller',
}

class { 'openstack::all':
public_address => $ipaddress_eth0,
public_interface => $public_interface,
admin_address => $controller_internal_address,
service_endpoint => $controller_internal_address,
public_address => $controller_public_address,
public_interface => $public_int,
private_interface => $private_interface,
internal_address => $controller_internal_address,
floating_range => $floating_range,
fixed_range => $fixed_range,
network_manager => $network_manager,
num_networks => $num_networks,
network_size => $network_size,
network_config => { 'vlan_start' => $vlan_start },
verbose => $verbose,
auto_assign_floating_ip => $auto_assign_floating_ip,
mysql_root_password => $mysql_root_password,
admin_email => $admin_email,
admin_password => $admin_password,
keystone_db_password => $keystone_db_password,
keystone_admin_token => $keystone_admin_token,
nova_db_password => $nova_db_password,
nova_user_password => $nova_user_password,
glance_db_password => $glance_db_password,
glance_user_password => $glance_user_password,
nova_db_password => $nova_db_password,
nova_user_password => $nova_user_password,
secret_key => $horizon_secret_key,
mysql_root_password => $mysql_root_password,
rabbit_password => $rabbit_password,
rabbit_user => $rabbit_user,
libvirt_type => 'kvm',
floating_range => $floating_range,
fixed_range => $fixed_range,
verbose => $verbose,
auto_assign_floating_ip => $auto_assign_floating_ip,
network_config => { 'vlan_start' => $vlan_start },
purge_nova_config => false,
cinder => $cinder,
cinder_iscsi_bind_addr => $cinder_iscsi_bind_addr,
manage_volumes => $cinder ? { false => $manage_volumes, default => $is_cinder_node },
nv_physical_volume => $nv_physical_volume,
use_syslog => $use_syslog,
nova_rate_limits => $nova_rate_limits,
cinder_rate_limits => $cinder_rate_limits,
quantum => $quantum,
swift => $swift,
glance_backend => $glance_backend,
}

class { 'openstack::auth_file':
admin_password => $admin_password,
keystone_admin_token => $keystone_admin_token,
controller_node => '127.0.0.1',
controller_node => $controller_internal_address,
}

}

@ -109,7 +109,7 @@ class QuantumXxx(object):
return []
rv = []
for i in self.get_ports_by_owner(port_owner, activeonly=activeonly):
rv.append("{0}{1}".format(port_name_prefix, i['id'][:port_id_part_len]))
rv.append("{0}{1} {2}".format(port_name_prefix, i['id'][:port_id_part_len], i['fixed_ips'][0]['ip_address']))
return rv

@ -129,4 +129,4 @@ if __name__ == '__main__':
Qu = QuantumXxx(get_authconfig(options.authconf), retries=options.retries)
for i in Qu.get_ifnames_for(args[0].strip(" \"\'"), activeonly=options.activeonly):
print(i)
###
###

@ -250,7 +250,7 @@ quantum_dhcp_agent_status() {

clean_up()
{
filter_quantum_ports.py "network:dhcp" | while read port; do
filter_quantum_ports.py "network:dhcp" | while read port ip; do
ocf_log info "Cleaning up port ${port}"
ovs-vsctl -- --if-exists del-port ${port};
rc=$?

@ -41,6 +41,7 @@ OCF_RESKEY_os_auth_url_default="http://localhost:5000/v2.0"
OCF_RESKEY_username_default="quantum"
OCF_RESKEY_password_default="quantum_pass"
OCF_RESKEY_tenant_default="services"
OCF_RESKEY_external_bridge_default="br-ex"

: ${OCF_RESKEY_os_auth_url=${OCF_RESKEY_os_auth_url_default}}
@ -52,6 +53,7 @@ OCF_RESKEY_tenant_default="services"
: ${OCF_RESKEY_plugin_config=${OCF_RESKEY_plugin_config_default}}
: ${OCF_RESKEY_user=${OCF_RESKEY_user_default}}
: ${OCF_RESKEY_pid=${OCF_RESKEY_pid_default}}
: ${OCF_RESKEY_external_bridge=${OCF_RESKEY_external_bridge_default}}

@ -170,6 +172,15 @@ Admin tenant name
<content type="string" default="${OCF_RESKEY_tenant_default}" />
</parameter>

<parameter name="external_bridge" unique="0" required="0">
<longdesc lang="en">
External bridge for l3-agent
</longdesc>
<shortdesc lang="en">External bridge</shortdesc>
<content type="string" />
</parameter>

<parameter name="additional_parameters" unique="0" required="0">
<longdesc lang="en">
@ -260,8 +271,9 @@ quantum_l3_agent_status() {

clean_up()
{
filter_quantum_ports.py "network:router_gateway" | while read port; do
filter_quantum_ports.py "network:router_gateway" | while read port ip; do
ocf_log info "Cleaning up port ${port}"
( ip address delete ${ip} dev $OCF_RESKEY_external_bridge || : )
ovs-vsctl -- --if-exists del-port ${port};
rc=$?
if [ $rc -ne 0 ]; then
@ -269,7 +281,7 @@ clean_up()
exit $OCF_ERR_GENERIC
fi
done
filter_quantum_ports.py "network:router_interface" | while read port; do
filter_quantum_ports.py "network:router_interface" | while read port ip; do
ocf_log info "Cleaning up port ${port}"
ovs-vsctl -- --if-exists del-port ${port};
rc=$?

@ -0,0 +1,11 @@
require 'shellwords'
module Puppet::Parser::Functions
newfunction(:shellescape, :type => :rvalue, :doc => <<-EOS
Escapes shell characters.
EOS
) do |arguments|
raise(Puppet::ParseError, "shellescape(): Wrong number of arguments " +
"given (#{arguments.size} for 1)") if arguments.size != 1
return Shellwords.escape(arguments[0])
end
end
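
A minimal usage sketch of this parser function from a Puppet manifest
(hypothetical resource and variable names; assumes the function above is
available on the modulepath):

$raw_password  = 'p@ss w0rd;true'            # untrusted input (illustrative)
$safe_password = shellescape($raw_password)  # backslash-escapes shell metacharacters
file { '/tmp/example_rc':
  content => "export EXAMPLE_PASSWORD=${safe_password}\n",
}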

@ -75,6 +75,7 @@ class openstack::all (
$nova_user_password,
$secret_key,
$internal_address = '127.0.0.1',
$admin_address = '127.0.0.1',
# cinder and quantum password are not required b/c they are
# optional. Not sure what to do about this.
$cinder_user_password = 'cinder_pass',
@ -103,11 +104,13 @@ class openstack::all (
$floating_range = false,
$create_networks = true,
$num_networks = 1,
$network_size = 255,
$auto_assign_floating_ip = false,
$network_config = {},
$quantum = false,
# Rabbit
$rabbit_user = 'nova',
$rabbit_nodes = ['127.0.0.1'],
# Horizon
$horizon = true,
$cache_server_ip = '127.0.0.1',
@ -118,8 +121,11 @@ class openstack::all (
$cinder = false,
$cinder_db_user = 'cinder',
$cinder_db_dbname = 'cinder',
$volume_group = 'cinder-volumes',
$cinder_test = false,
$cinder_iscsi_bind_addr = false,
$cinder_volume_group = 'cinder-volumes',
$nv_physical_volume = undef,
$manage_volumes = false,
$cinder_rate_limits = undef,
#
$quantum_db_user = 'quantum',
$quantum_db_dbname = 'quantum',
@ -129,14 +135,21 @@ class openstack::all (
$vnc_enabled = true,
# General
$enabled = true,
$verbose = 'False'
$verbose = 'False',
$service_endpoint = '127.0.0.1',
$glance_backend = 'file',
$use_syslog = false,
$nova_rate_limits = undef,
) {

# Ensure things are run in order
Class['openstack::db::mysql'] -> Class['openstack::keystone']
Class['openstack::db::mysql'] -> Class['openstack::glance']

# set up mysql server
if defined(Class['openstack::cinder']) {
Class['openstack::db::mysql'] -> Class['openstack::cinder']
}
# set up mysql server
if ($db_type == 'mysql') {
if ($enabled) {
Class['glance::db::mysql'] -> Class['glance::registry']
@ -184,8 +197,8 @@ class openstack::all (
admin_email => $admin_email,
admin_password => $admin_password,
public_address => $public_address,
internal_address => '127.0.0.1',
admin_address => '127.0.0.1',
internal_address => $internal_address,
admin_address => $admin_address,
#region => $region,
glance_user_password => $glance_user_password,
nova_user_password => $nova_user_password,
@ -193,6 +206,7 @@ class openstack::all (
cinder_user_password => $cinder_user_password,
quantum => $quantum,
quantum_user_password => $quantum_user_password,
use_syslog => $use_syslog,
}

######## GLANCE ##########
@ -205,7 +219,12 @@ class openstack::all (
glance_db_dbname => $glance_db_dbname,
glance_db_password => $glance_db_password,
glance_user_password => $glance_user_password,
auth_uri => "http://${service_endpoint}:5000/",
keystone_host => $service_endpoint,
enabled => $enabled,
glance_backend => $glance_backend,
registry_host => $service_endpoint,
use_syslog => $use_syslog,
}

######## NOVA ###########
@ -227,27 +246,26 @@ class openstack::all (
}
}

######### Cinder Controller Services ########
$enabled_apis_ = 'ec2,osapi_compute,metadata'

if ($cinder) {
class { "cinder::base":
verbose => $verbose,
sql_connection => "mysql://${cinder_db_user}:${cinder_db_password}@127.0.0.1/${cinder_db_dbname}?charset=utf8",
rabbit_password => $rabbit_password,
}
$enabled_apis = 'ec2,osapi_compute'
}
else {
$enabled_apis = 'ec2,osapi_compute,osapi_volume'
}

class { 'cinder::api':
keystone_password => $cinder_user_password,
######### Cinder Controller Services ########
if !defined(Class['openstack::cinder']) {
class {'openstack::cinder':
sql_connection => "mysql://${cinder_db_user}:${cinder_db_password}@127.0.0.1/${cinder_db_dbname}?charset=utf8",
rabbit_password => $rabbit_password,
cinder_user_password => $cinder_user_password,
volume_group => $cinder_volume_group,
physical_volume => $nv_physical_volume,
manage_volumes => true,
enabled => true,
iscsi_bind_host => $cinder_iscsi_bind_addr,
cinder_rate_limits => $cinder_rate_limits,
}

class { 'cinder::scheduler': }
class { 'cinder::volume': }
class { 'cinder::volume::iscsi':
iscsi_ip_address => '127.0.0.1',
}

$enabled_apis = $enabled_apis_
} else {
# Set up nova-volume
class { 'lvm':
@ -263,8 +281,6 @@ class openstack::all (
}

class { 'nova::volume::iscsi': }

$enabled_apis = "${enabled_apis_},osapi_volume"
}

# Install / configure rabbitmq
@ -280,7 +296,7 @@ class openstack::all (
rabbit_userid => $rabbit_user,
rabbit_password => $rabbit_password,
image_service => 'nova.image.glance.GlanceImageService',
glance_api_servers => '127.0.0.1:9292',
glance_api_servers => "$internal_address:9292",
verbose => $verbose,
rabbit_host => '127.0.0.1',
}
@ -289,8 +305,16 @@ class openstack::all (
class { 'nova::api':
enabled => $enabled,
admin_password => $nova_user_password,
auth_host => 'localhost',
auth_host => $service_endpoint,
enabled_apis => $enabled_apis,
nova_rate_limits => $nova_rate_limits,
cinder => $cinder,
}

# Configure nova-conductor
class {'nova::conductor':
enabled => $enabled,
ensure_package => $ensure_package,
}

# Configure nova-quota
@ -313,6 +337,7 @@ class openstack::all (
config_overrides => $network_config,
create_networks => $really_create_networks,
num_networks => $num_networks,
network_size => $network_size,
enabled => $enabled,
}
} else {
@ -355,12 +380,12 @@ class openstack::all (
quantum_admin_password => $quantum_user_password,
#$use_dhcp = 'True',
#$public_interface = undef,
quantum_connection_host => 'localhost',
quantum_connection_host => $service_endpoint,
quantum_auth_strategy => 'keystone',
quantum_url => "http://127.0.0.1:9696",
quantum_url => "http://$internal_address:9696",
quantum_admin_tenant_name => 'services',
#quantum_admin_username => 'quantum',
quantum_admin_auth_url => "http://127.0.0.1:35357/v2.0",
quantum_admin_auth_url => "http://${admin_address}:35357/v2.0",
public_interface => $public_interface,
}
}
@ -402,7 +427,7 @@ class openstack::all (
######## Horizon ########
if ($horizon) {
class { 'memcached':
listen_ip => '127.0.0.1',
listen_ip => '0.0.0.0',
}

class { 'openstack::horizon':
@ -412,6 +437,7 @@ class openstack::all (
swift => $swift,
quantum => $quantum,
horizon_app_links => $horizon_app_links,
bind_address => $public_address,
}
}

@ -10,12 +10,15 @@ class openstack::auth_file(
$admin_user = 'admin',
$admin_tenant = 'admin'
) {
$escaped_tenant = shellescape($admin_tenant)
$escaped_user = shellescape($admin_user)
$escaped_password = shellescape($admin_password)
file { '/root/openrc':
content =>
"
export OS_TENANT_NAME=${admin_tenant}
export OS_USERNAME=${admin_user}
export OS_PASSWORD=${admin_password}
export OS_TENANT_NAME=${escaped_tenant}
export OS_USERNAME=${escaped_user}
export OS_PASSWORD=${escaped_password}
export OS_AUTH_URL=\"http://${controller_node}:5000/v2.0/\"
export OS_AUTH_STRATEGY=keystone
export SERVICE_TOKEN=${keystone_admin_token}

@ -22,6 +22,14 @@ class openstack::cinder(
# purge => true,
# }
#}
# There are two assumptions: everyone should use keystone auth, and
# glance_api_servers is set globally in every mode except single,
# where the service authenticates itself against localhost anyway.

cinder_config { 'DEFAULT/auth_strategy': value => 'keystone' }
cinder_config { 'DEFAULT/glance_api_servers': value => $glance_api_servers }

if $rabbit_nodes and !$rabbit_ha_virtual_ip {
$rabbit_hosts = inline_template("<%= @rabbit_nodes.map {|x| x + ':5672'}.join ',' %>")
Cinder_config['DEFAULT/rabbit_ha_queues']->Service<| title == 'cinder-api'|>

@ -90,13 +90,12 @@ class openstack::compute (
$ssh_private_key = undef,
$cache_server_ip = ['127.0.0.1'],
$cache_server_port = '11211',
$nova_volume = 'nova-volumes',
$ssh_public_key = undef,
# if the cinder management components should be installed
$manage_volumes = false,
$nv_physical_volume = undef,
$cinder_volume_group = 'cinder-volumes',
$cinder = false,
$cinder = true,
$cinder_user_password = 'cinder_user_pass',
$cinder_db_password = 'cinder_db_pass',
$cinder_db_user = 'cinder',
@ -104,9 +103,9 @@ class openstack::compute (
$cinder_iscsi_bind_addr = false,
$db_host = '127.0.0.1',
$use_syslog = false,
$nova_rate_limits = undef,
$cinder_rate_limits = undef,
$create_networks = false
) {

#
@ -159,45 +158,43 @@ class openstack::compute (
}

class { 'nova':
ensure_package => $::openstack_version['nova'],
sql_connection => $sql_connection,
rabbit_nodes => $rabbit_nodes,
rabbit_userid => $rabbit_user,
rabbit_password => $rabbit_password,
image_service => 'nova.image.glance.GlanceImageService',
glance_api_servers => $glance_api_servers,
verbose => $verbose,
rabbit_host => $rabbit_host,
use_syslog => $use_syslog,
api_bind_address => $internal_address,
rabbit_ha_virtual_ip => $rabbit_ha_virtual_ip,
}

# Cinder setup
if ($cinder) {
$enabled_apis = 'metadata'
package {'python-cinderclient': ensure => present}
if $cinder and $manage_volumes {
class {'openstack::cinder':
sql_connection => "mysql://${cinder_db_user}:${cinder_db_password}@${db_host}/${cinder_db_dbname}?charset=utf8",
rabbit_password => $rabbit_password,
rabbit_host => false,
rabbit_nodes => $rabbit_nodes,
volume_group => $cinder_volume_group,
physical_volume => $nv_physical_volume,
manage_volumes => $manage_volumes,
enabled => true,
auth_host => $service_endpoint,
bind_host => false,
iscsi_bind_host => $cinder_iscsi_bind_addr,
cinder_user_password => $cinder_user_password,
use_syslog => $use_syslog,
cinder_rate_limits => $cinder_rate_limits,
rabbit_ha_virtual_ip => $rabbit_ha_virtual_ip,
}
}

} else {
$enabled_apis = 'metadata,osapi_volume'
}

# Install / configure nova-compute
@ -270,6 +267,7 @@ class openstack::compute (
admin_user => 'nova',
admin_password => $nova_user_password,
enabled_apis => $enabled_apis,
cinder => $cinder,
auth_host => $service_endpoint,
nova_rate_limits => $nova_rate_limits,
}

@ -127,7 +127,7 @@ class openstack::controller (
$cache_server_ip = ['127.0.0.1'],
$cache_server_port = '11211',
$swift = false,
$cinder = false,
$cinder = true,
$horizon_app_links = undef,
# General
$verbose = 'False',
@ -175,7 +175,7 @@ class openstack::controller (
Class['openstack::db::mysql'] -> Class['openstack::glance']
Class['openstack::db::mysql'] -> Class['openstack::nova::controller']
if defined(Class['openstack::cinder']) {
Class['openstack::db::mysql'] -> Class['openstack::cinder']
}

$rabbit_addresses = inline_template("<%= @rabbit_nodes.map {|x| x + ':5672'}.join ',' %>")
@ -281,9 +281,10 @@ class openstack::controller (
}
if ($cinder) {
$enabled_apis = 'ec2,osapi_compute'
} else {
$enabled_apis = 'ec2,osapi_compute,osapi_volume'
}
else {
$enabled_apis = 'ec2,osapi_compute,osapi_volume'
}

class { 'openstack::nova::controller':
# Database
@ -338,11 +339,13 @@ class openstack::controller (
ensure_package => $::openstack_version['nova'],
use_syslog => $use_syslog,
nova_rate_limits => $nova_rate_limits,
cinder => $cinder
}

######### Cinder Controller Services ########
if ($cinder) {
class {'openstack::cinder':
if $cinder {
if !defined(Class['openstack::cinder']) {
class {'openstack::cinder':
sql_connection => "mysql://${cinder_db_user}:${cinder_db_password}@${db_host}/${cinder_db_dbname}?charset=utf8",
rabbit_password => $rabbit_password,
rabbit_host => false,
@ -359,21 +362,22 @@ class openstack::controller (
cinder_rate_limits => $cinder_rate_limits,
rabbit_ha_virtual_ip => $rabbit_ha_virtual_ip,
}
} else {
if $manage_volumes {

class { 'nova::volume':
ensure_package => $::openstack_version['nova'],
enabled => true,
}

class { 'nova::volume::iscsi':
iscsi_ip_address => $api_bind_address,
physical_volume => $nv_physical_volume,
}
}
# Set up nova-volume
}
else {
if $manage_volumes {

class { 'nova::volume':
ensure_package => $::openstack_version['nova'],
enabled => true,
}
class { 'nova::volume::iscsi':
iscsi_ip_address => $api_bind_address,
physical_volume => $nv_physical_volume,
}
}
# Set up nova-volume
}

if !defined(Class['memcached']){
class { 'memcached':

@ -22,6 +22,7 @@ class openstack::firewall (
$nova_vncproxy_port = 6080,
$erlang_epmd_port = 4369,
$erlang_rabbitmq_port = 5672,
$erlang_rabbitmq_backend_port = 5673,
$erlang_inet_dist_port = 41055,
$memcached_port = 11211,
$rsync_port = 873,
@ -116,7 +117,7 @@ class openstack::firewall (
}

firewall {'106 rabbitmq ':
port => [$erlang_epmd_port, $erlang_rabbitmq_port, $erlang_inet_dist_port],
port => [$erlang_epmd_port, $erlang_rabbitmq_port, $erlang_rabbitmq_backend_port, $erlang_inet_dist_port],
proto => 'tcp',
action => 'accept',
}
@ -199,6 +200,19 @@ class openstack::firewall (
action => 'accept',
}

firewall {'118 vnc ports':
port => "5900-6100",
proto => 'tcp',
action => 'accept',
}

firewall { '333 accept gre':
chain => 'PREROUTING',
table => 'raw',
proto => 'gre',
action => 'notrack',
}

firewall { '999 drop all other requests':
action => 'drop',
}

@ -9,15 +9,18 @@ class openstack::mirantis_repos (
$deb_updates = 'http://172.18.67.168/ubuntu-repo/mirror.yandex.ru/ubuntu',
$deb_security = 'http://172.18.67.168/ubuntu-repo/mirror.yandex.ru/ubuntu',
$deb_fuel_folsom_repo = 'http://172.18.67.168/ubuntu-repo/precise-fuel-folsom',
$deb_fuel_grizzly_repo = 'http://osci-gbp.srt.mirantis.net/ubuntu/fuel/',
$deb_cloud_archive_repo = 'http://172.18.67.168/ubuntu-cloud.archive.canonical.com/ubuntu',
$deb_rabbit_repo = 'http://172.18.67.168/ubuntu-repo/precise-fuel-folsom',
$enable_epel = false,
$fuel_mirrorlist = 'http://download.mirantis.com/epel-fuel-folsom-2.1/mirror.internal-stage.list',
$mirrorlist_base = 'http://172.18.67.168/centos-repo/mirror-6.3-os.list',
$mirrorlist_updates = 'http://172.18.67.168/centos-repo/mirror-6.3-updates.list',
$grizzly_baseurl = 'http://download.mirantis.com/epel-fuel-grizzly/',
$enable_test_repo = false,
$repo_proxy = undef,
$use_upstream_mysql = false,) {
$use_upstream_mysql = false,
) {
case $::osfamily {
'Debian' : {
class { 'apt::proxy':
@ -25,11 +28,11 @@ class openstack::mirantis_repos (
stage => $::openstack::mirantis_repos::stage
}

apt::pin { 'mirantis-releases':
order => 20,
priority => 1001,
originator => $originator
}
# apt::pin { 'mirantis-releases':
# order => 20,
# priority => 1001,
# originator => $originator
# }

if $use_upstream_mysql {
apt::pin { 'upstream-mysql':
@ -75,29 +78,20 @@ class openstack::mirantis_repos (

# Below we set our internal repos for testing purposes. Some of them may match with external ones.
if $type == 'custom' {
if $enable_test_repo {
apt::source { 'precise-fuel-folsom':
location => $deb_fuel_folsom_repo,
release => 'precise-2.1.0.1',
repos => 'main',
key => 'F8AF89DD',
key_source => 'http://172.18.67.168/ubuntu-repo/precise-fuel-folsom/Mirantis.key',
include_src => false,
}
} else {
apt::source { 'precise-fuel-folsom':
location => $deb_fuel_folsom_repo,
release => 'precise-2.1.0.1',
repos => 'main',
key => 'F8AF89DD',
key_source => 'http://172.18.67.168/ubuntu-repo/precise-fuel-folsom/Mirantis.key',
include_src => false,

apt::pin { 'precise-fuel-grizzly':
order => 19,
priority => 1001,
}

apt::pin { 'cloud-archive':
order => 20,
priority => 1002,
}

apt::source { 'cloud-archive':
location => $deb_cloud_archive_repo,
release => 'precise-updates/folsom',
release => 'precise-updates/grizzly',
repos => 'main',
key => '5EDB1B62EC4926EA',
key_source => 'http://172.18.67.168/ubuntu-repo/precise-fuel-folsom/cloud-archive.key',
@ -105,6 +99,15 @@ class openstack::mirantis_repos (
include_src => false,
}

apt::source { 'precise-fuel-grizzly':
location => $deb_fuel_grizzly_repo,
release => 'precise-3.0',
repos => 'main',
key => 'F8AF89DD',
key_source => 'http://osci-gbp.srt.mirantis.net/ubuntu/key.gpg',
include_src => false,
}

apt::source { 'rabbit-3.0':
location => $deb_rabbit_repo,
release => 'precise-rabbitmq-3.0',
@ -151,62 +154,72 @@ class openstack::mirantis_repos (
# ############### End of forced apt-get update block ###############
}

'RedHat' : {
'RedHat': {

Yumrepo {
proxy => $repo_proxy, }
proxy => $repo_proxy,
}

yumrepo { 'centos-extras':
descr => 'Local extras mirror repository',
name => 'extras',
enabled => 0,
baseurl => "http://archive.kernel.org/centos/6.3/os/x86_64/",
mirrorlist => absent
}

# added internal/external network mirror
# added internal (custom)/external (default) network mirror
if $type == 'default' {
yumrepo { 'openstack-epel-fuel':
descr => 'Mirantis OpenStack Custom Packages',
mirrorlist => 'http://download.mirantis.com/epel-fuel-folsom-2.1/mirror.external.list',
gpgcheck => '1',
gpgkey => 'http://download.mirantis.com/epel-fuel-folsom-2.1/epel.key http://download.mirantis.com/epel-fuel-folsom-2.1/centos.key http://download.mirantis.com/epel-fuel-folsom-2.1/rabbit.key http://download.mirantis.com/epel-fuel-folsom-2.1/mirantis.key http://download.mirantis.com/epel-fuel-folsom-2.1/mysql.key http://download.mirantis.com/epel-fuel-folsom-2.1/nginx.key',
}

yumrepo { 'centos-base':
descr => 'Mirantis-CentOS',
descr => 'Mirantis-CentOS-Base',
name => 'base',
baseurl => "http://download.mirantis.com/centos-6.4",
mirrorlist => absent
}
yumrepo { 'vault6.3-base':
descr => 'Vault 6.3 base mirror repository',
name => 'v6.3-base',
enabled => 0,
baseurl => "http://vault.centos.org/6.3/os/x86_64/",
mirrorlist => absent
baseurl => 'http://download.mirantis.com/centos-6.4',
gpgcheck => '1',
gpgkey => 'file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-6',
mirrorlist => absent,
}

yumrepo { 'openstack-epel-fuel-grizzly':
descr => 'Mirantis OpenStack grizzly Custom Packages',
baseurl => 'http://download.mirantis.com/epel-fuel-grizzly',
gpgcheck => '1',
gpgkey => 'http://download.mirantis.com/epel-fuel-grizzly/mirantis.key',
mirrorlist => absent,
}
# completely disable additional out-of-box repos
yumrepo { 'extras':
descr => 'CentOS-$releasever - Extras',
mirrorlist => 'http://mirrorlist.centos.org/?release=$releasever&arch=$basearch&repo=extras',
gpgcheck => '1',
baseurl => absent,
gpgkey => 'file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-6',
enabled => '0',
}

yumrepo { 'updates':
descr => 'CentOS-$releasever - Updates',
mirrorlist => 'http://mirrorlist.centos.org/?release=$releasever&arch=$basearch&repo=updates',
gpgcheck => '1',
baseurl => absent,
gpgkey => 'file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-6',
enabled => '0',
}
}

if $type == 'custom' {
yumrepo { 'openstack-epel-fuel':
descr => 'Mirantis OpenStack Custom Packages',
mirrorlist => $fuel_mirrorlist,
gpgcheck => '1',
gpgkey => 'http://download.mirantis.com/epel-fuel-folsom-2.1/epel.key http://download.mirantis.com/epel-fuel-folsom-2.1/centos.key http://download.mirantis.com/epel-fuel-folsom-2.1/rabbit.key http://download.mirantis.com/epel-fuel-folsom-2.1/mirantis.key http://download.mirantis.com/epel-fuel-folsom-2.1/mysql.key http://download.mirantis.com/epel-fuel-folsom-2.1/nginx.key',

yumrepo { 'openstack-epel-fuel-grizzly':
descr => 'Mirantis OpenStack grizzly Custom Packages',
baseurl => 'http://download.mirantis.com/epel-fuel-grizzly/',
gpgcheck => '0',
}

if $upstream_mirror == true {
yumrepo { 'centos-base':
descr => 'Local base mirror repository',
name => 'base',
gpgcheck => '1',
mirrorlist => $mirrorlist_base,
gpgkey => 'http://centos.srt.mirantis.net/RPM-GPG-KEY-CentOS-6',
}

yumrepo { 'centos-updates':
descr => 'Local updates mirror repository',
name => 'updates',
gpgcheck => '1',
mirrorlist => $mirrorlist_updates,
gpgkey => 'http://centos.srt.mirantis.net/RPM-GPG-KEY-CentOS-6',
}
}
}
@ -214,16 +227,16 @@ class openstack::mirantis_repos (
if $enable_test_repo {
yumrepo { 'openstack-osci-repo':
descr => 'Mirantis OpenStack OSCI Packages',
baseurl => 'http://osci-koji.srt.mirantis.net/mash/fuel-folsom/x86_64/',
baseurl => 'http://osci-koji.srt.mirantis.net/mash/fuel-3.0/x86_64/',
gpgcheck => '1',
gpgkey => 'http://download.mirantis.com/epel-fuel-folsom/epel.key http://download.mirantis.com/epel-fuel-folsom/centos.key http://download.mirantis.com/epel-fuel-folsom/rabbit.key http://download.mirantis.com/epel-fuel-folsom/mirantis.key http://download.mirantis.com/epel-fuel-folsom/mysql.key http://download.mirantis.com/epel-fuel-folsom/nginx.key',
gpgkey => 'http://download.mirantis.com/epel-fuel-grizzly/mirantis.key',
}
}

if $enable_epel {
Yumrepo {
failovermethod => 'priority',
gpgkey => 'http://dl.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-6',
gpgkey => 'file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-6',
gpgcheck => 1,
enabled => 1,
}
@ -231,6 +244,7 @@ class openstack::mirantis_repos (
yumrepo { 'epel-testing':
descr => 'Extra Packages for Enterprise Linux 6 - Testing - $basearch',
mirrorlist => 'http://mirrors.fedoraproject.org/metalink?repo=testing-epel6&arch=$basearch',
enabled => 1,
}

yumrepo { 'epel':

@ -82,6 +82,7 @@ class openstack::nova::controller (
$api_bind_address = '0.0.0.0',
$use_syslog = false,
$nova_rate_limits = undef,
$cinder = true
) {

# Configure the db string
@ -257,10 +258,16 @@ class openstack::nova::controller (
auth_host => $keystone_host,
enabled_apis => $_enabled_apis,
ensure_package => $ensure_package,
nova_rate_limits => $nova_rate_limits
nova_rate_limits => $nova_rate_limits,
cinder => $cinder
}

if $auto_assign_floating_ip {
class {'nova::conductor':
enabled => $enabled,
ensure_package => $ensure_package,
}

if $auto_assign_floating_ip {
nova_config { 'DEFAULT/auto_assign_floating_ip': value => 'True' }
}

@ -64,8 +64,8 @@ class openstack::quantum_router (

if $quantum_network_node {
class { 'quantum::agents::ovs':
bridge_uplinks => ["br-ex:${public_interface}","br-prv:${private_interface}"],
bridge_mappings => ['physnet1:br-ex', 'physnet2:br-prv'],
bridge_uplinks => ["br-prv:${private_interface}"],
bridge_mappings => ['physnet2:br-prv'],
enable_tunneling => $enable_tunneling,
local_ip => $internal_address,
service_provider => $service_provider

@ -1,39 +1,42 @@
class openstack::swift::storage_node (
$swift_zone,
$swift_hash_suffix = 'swift_secret',
$swift_local_net_ip = $::ipaddress_eth0,
$storage_type = 'loopback',
$storage_base_dir = '/srv/loopback-device',
$storage_mnt_base_dir = '/srv/node',
$storage_devices = ['1', '2'],
$storage_weight = 1,
$package_ensure = 'present',
$loopback_size = '1048756',
$swift_hash_suffix = 'swift_secret',
$swift_local_net_ip = $::ipaddress_eth0,
$storage_type = 'loopback',
$storage_base_dir = '/srv/loopback-device',
$storage_mnt_base_dir = '/srv/node',
$storage_devices = [
'1',
'2'],
$storage_weight = 1,
$package_ensure = 'present',
$loopback_size = '1048756',
$master_swift_proxy_ip,
$rings = ['account', 'object', 'container'],
$sync_rings = true,
$loopback_size = '1048756',
$rings = [
'account',
'object',
'container'],
$sync_rings = true,
# if the cinder management components should be installed
$cinder = false,
$manage_volumes = false,
$nv_physical_volume = undef,
$cinder_volume_group = 'cinder-volumes',
$cinder_user_password = 'cinder_user_pass',
$cinder_db_password = 'cinder_db_pass',
$cinder_db_user = 'cinder',
$cinder_db_dbname = 'cinder',
$cinder_iscsi_bind_addr = false,
$cinder_rate_limits = false,
$db_host = '127.0.0.1',
$service_endpoint = '127.0.0.1',
$use_syslog = false,
$cinder = true,
$manage_volumes = false,
$nv_physical_volume = undef,
$cinder_volume_group = 'cinder-volumes',
$cinder_user_password = 'cinder_user_pass',
$cinder_db_password = 'cinder_db_pass',
$cinder_db_user = 'cinder',
$cinder_db_dbname = 'cinder',
$cinder_iscsi_bind_addr = false,
$cinder_rate_limits = false,
$db_host = '127.0.0.1',
$service_endpoint = '127.0.0.1',
$use_syslog = false,
# Rabbit details necessary for cinder
$rabbit_nodes = false,
$rabbit_password = 'rabbit_pw',
$rabbit_host = false,
$rabbit_user = 'nova',
$rabbit_ha_virtual_ip = false,
) {
$rabbit_nodes = false,
$rabbit_password = 'rabbit_pw',
$rabbit_host = false,
$rabbit_user = 'nova',
$rabbit_ha_virtual_ip = false,) {
if !defined(Class['swift']) {
class { 'swift':
swift_hash_suffix => $swift_hash_suffix,
@ -58,46 +61,43 @@ class openstack::swift::storage_node (
}

validate_string($master_swift_proxy_ip)

if $sync_rings {
if member($rings, 'account') and ! defined(Swift::Ringsync['account']) {
if member($rings, 'account') and !defined(Swift::Ringsync['account']) {
swift::ringsync { 'account': ring_server => $master_swift_proxy_ip }
}

if member($rings, 'object') and ! defined(Swift::Ringsync['object']) {

if member($rings, 'object') and !defined(Swift::Ringsync['object']) {
swift::ringsync { 'object': ring_server => $master_swift_proxy_ip }
}

if member($rings, 'container') and ! defined(Swift::Ringsync['container']) {

if member($rings, 'container') and !defined(Swift::Ringsync['container']) {
swift::ringsync { 'container': ring_server => $master_swift_proxy_ip }
}
Swift::Ringsync <| |> ~> Class["swift::storage::all"]
}

$enabled_apis = 'ec2,osapi_compute'
if ($cinder) and !defined(Class['swift']) {
package {'python-cinderclient': ensure => present}
class {'openstack::cinder':
sql_connection => "mysql://${cinder_db_user}:${cinder_db_password}@${db_host}/${cinder_db_dbname}?charset=utf8",
rabbit_password => $rabbit_password,
rabbit_host => false,
rabbit_nodes => $rabbit_nodes,
volume_group => $cinder_volume_group,
physical_volume => $nv_physical_volume,
manage_volumes => $manage_volumes,
enabled => true,
auth_host => $service_endpoint,
bind_host => false,
iscsi_bind_host => $cinder_iscsi_bind_addr,
cinder_user_password => $cinder_user_password,
use_syslog => $use_syslog,
cinder_rate_limits => $cinder_rate_limits,
rabbit_ha_virtual_ip => $rabbit_ha_virtual_ip,
if ($cinder and $manage_volumes) {
if !(defined(Class['openstack::cinder'])) {
class { 'openstack::cinder':
sql_connection => "mysql://${cinder_db_user}:${cinder_db_password}@${db_host}/${cinder_db_dbname}?charset=utf8",
rabbit_password => $rabbit_password,
rabbit_host => false,
rabbit_nodes => $rabbit_nodes,
volume_group => $cinder_volume_group,
physical_volume => $nv_physical_volume,
manage_volumes => $manage_volumes,
enabled => true,
auth_host => $service_endpoint,
bind_host => false,
iscsi_bind_host => $cinder_iscsi_bind_addr,
cinder_user_password => $cinder_user_password,
use_syslog => $use_syslog,
cinder_rate_limits => $cinder_rate_limits,
rabbit_ha_virtual_ip => $rabbit_ha_virtual_ip,
}
}
}

}

}

@ -36,7 +36,6 @@ Puppet::Type.type(:quantum_net).provide(
|
||||
optional_opts = []
|
||||
{
|
||||
:router_ext => '--router:external',
|
||||
:shared => '--shared',
|
||||
:network_type => '--provider:network_type',
|
||||
:physnet => '--provider:physical_network',
|
||||
:segment_id => '--provider:segmentation_id'
|
||||
@ -45,6 +44,9 @@ Puppet::Type.type(:quantum_net).provide(
|
||||
optional_opts.push(opt).push(@resource[param])
|
||||
end
|
||||
end
|
||||
if @resource[:shared] == 'True'
|
||||
optional_opts.push("--shared")
|
||||
end
|
||||
|
||||
auth_quantum('net-create',
|
||||
'--tenant_id', tenant_id[@resource[:tenant]],
|
||||
|
@ -134,14 +134,14 @@ class quantum::agents::dhcp (
|
||||
cs_colocation { 'dhcp-with-ovs':
|
||||
ensure => present,
|
||||
cib => 'dhcp',
|
||||
primitives => ["p_${::quantum::params::dhcp_agent_service}", "p_${::quantum::params::ovs_agent_service}"],
|
||||
primitives => ["p_${::quantum::params::dhcp_agent_service}", "clone_p_${::quantum::params::ovs_agent_service}"],
|
||||
score => 'INFINITY',
|
||||
}
|
||||
|
||||
cs_order { 'dhcp-after-ovs':
|
||||
ensure => present,
|
||||
cib => 'dhcp',
|
||||
first => "p_${::quantum::params::ovs_agent_service}",
|
||||
first => "clone_p_${::quantum::params::ovs_agent_service}",
|
||||
second => "p_${::quantum::params::dhcp_agent_service}",
|
||||
score => 'INFINITY',
|
||||
}
|
||||
|
@ -145,6 +145,7 @@ class quantum::agents::l3 (
|
||||
subnet_gw => $external_gateway, # undef,
|
||||
alloc_pool => $external_alloc_pool, # undef,
|
||||
enable_dhcp => 'False', # 'True',
|
||||
shared => 'True',
|
||||
}
|
||||
Quantum_l3_agent_config <| |> -> Quantum::Network::Setup['net04_ext']
|
||||
|
||||
@ -273,18 +274,25 @@ class quantum::agents::l3 (
|
||||
cs_colocation { 'l3-with-ovs':
|
||||
ensure => present,
|
||||
cib => 'l3',
|
||||
primitives => ["p_${::quantum::params::l3_agent_service}", "p_${::quantum::params::ovs_agent_service}"],
|
||||
primitives => ["p_${::quantum::params::l3_agent_service}", "clone_p_${::quantum::params::ovs_agent_service}"],
|
||||
score => 'INFINITY',
|
||||
}
|
||||
|
||||
cs_order { 'l3-after-ovs':
|
||||
ensure => present,
|
||||
cib => 'l3',
|
||||
first => "p_${::quantum::params::ovs_agent_service}",
|
||||
first => "clone_p_${::quantum::params::ovs_agent_service}",
|
||||
second => "p_${::quantum::params::l3_agent_service}",
|
||||
score => 'INFINITY',
|
||||
}
|
||||
|
||||
# start DHCP and L3 agents on different controllers if it's possible
|
||||
cs_colocation { 'dhcp-without-l3':
|
||||
ensure => present,
|
||||
cib => 'l3',
|
||||
primitives => ["p_${::quantum::params::dhcp_agent_service}", "p_${::quantum::params::l3_agent_service}"],
|
||||
score => '-100',
|
||||
}
|
||||
|
||||
# Ensure service is stopped and disabled by upstart/init/etc.
|
||||
Service['quantum-l3-init_stopped'] -> Cs_resource["p_${::quantum::params::l3_agent_service}"]
@ -2,8 +2,7 @@ class quantum::agents::ovs (
$package_ensure = 'present',
$enabled = true,
$bridge_uplinks = ['br-ex:eth2'],
$bridge_mappings = [
'physnet1:br-ex'],
$bridge_mappings = ['physnet1:br-ex'],
$integration_bridge = 'br-int',
$enable_tunneling = true,
$local_ip = undef,
@ -46,14 +45,14 @@ class quantum::agents::ovs (
skip_existing => true,
# require => Service['quantum-plugin-ovs-service'],
}

quantum_plugin_ovs { 'OVS/local_ip': value => $local_ip; }
} else {
quantum::plugins::ovs::bridge { $bridge_mappings: # require => Service['quantum-plugin-ovs-service'],
}

quantum::plugins::ovs::port { $bridge_uplinks: # require => Service['quantum-plugin-ovs-service'],
}
quantum::plugins::ovs::bridge { $bridge_mappings: # Do not quote!!! may be array!
#require => Service['quantum-plugin-ovs-service'],
}
quantum::plugins::ovs::port { $bridge_uplinks: # Do not quote!!! may be array!
#require => Service['quantum-plugin-ovs-service'],
}
}

if $enabled {
@ -91,7 +90,13 @@ class quantum::agents::ovs (
primitive_class => 'ocf',
provided_by => 'pacemaker',
primitive_type => 'quantum-agent-ovs',
require => File['quantum-ovs-agent'] ,
require => File['quantum-ovs-agent'] ,
multistate_hash => {
'type' => 'clone',
},
ms_metadata => {
'interleave' => 'true',
},
parameters => {
}
,
@ -108,7 +113,6 @@ class quantum::agents::ovs (
'stop' => {
'timeout' => '480'
}

}
,
}
@ -122,20 +126,21 @@ class quantum::agents::ovs (
}
default: { fail("The $::osfamily operating system is not supported.") }
}
service { 'quantum-plugin-ovs-service_stopped':
service { 'quantum-ovs-agent-service_stopped':
name => $::quantum::params::ovs_agent_service,
enable => false,
hasstatus => false,
}
exec { 'quantum-plugin-ovs-service_stopped':
exec { 'quantum-ovs-agent-service_stopped':
#todo: rewrite as script, that returns zero or wait, when it can return zero
name => "bash -c \"service ${::quantum::params::ovs_agent_service} stop || ( kill `pgrep -f quantum-openvswitch-agent` || : )\"",
onlyif => "service ${::quantum::params::ovs_agent_service} status | grep \'${started_status}\'",
path => ['/usr/bin', '/usr/sbin', '/bin', '/sbin'],
returns => [0,""]
}
Package[$ovs_agent_package] ->
Service['quantum-plugin-ovs-service_stopped'] ->
Exec['quantum-plugin-ovs-service_stopped'] ->
Service['quantum-ovs-agent-service_stopped'] ->
Exec['quantum-ovs-agent-service_stopped'] ->
Cs_resource["p_${::quantum::params::ovs_agent_service}"]

service { 'quantum-plugin-ovs-service':
@ -159,4 +164,14 @@ class quantum::agents::ovs (
}
Class[quantum::waistline] -> Service[quantum-plugin-ovs-service]
Package[$ovs_agent_package] -> Service[quantum-plugin-ovs-service]

service { 'quantum-ovs-agent-cleanup':
name => 'quantum-ovs-cleanup',
enable => $enabled,
ensure => false,
hasstatus => false,
hasrestart => false,
}
Service['quantum-plugin-ovs-service'] -> Service['quantum-ovs-agent-cleanup']

}
@ -101,8 +101,4 @@ class quantum (
}

# SELINUX=permissive
if !defined(Class['selinux']) and ($::osfamily == 'RedHat') {
class { 'selinux' : }
}

}
@ -13,6 +13,7 @@ define quantum::network::setup (
$alloc_pool = undef,
$enable_dhcp = 'True',
$nameservers = undef,
$shared = 'False',
) {

Quantum_l3_agent_config <||> ->Quantum_net <||>
@ -28,6 +29,7 @@ define quantum::network::setup (
network_type => $network_type,
segment_id => $segment_id,
router_ext => $router_external,
shared => $shared,
}

# validate allocation pool
@ -17,6 +17,16 @@ class rsyslog::config {
require => Class["rsyslog::install"],
notify => Class["rsyslog::service"],
}

file { '/var/lib/rsyslog' :
owner => root,
group => $::rsyslog::params::run_group,
ensure => directory,
path => $::rsyslog::params::rsyslog_queues_dir,
require => Class["rsyslog::install"],
notify => Class["rsyslog::service"],
}

if $osfamily == "Debian"
{
file { $rsyslog::params::rsyslog_default:
@ -6,6 +6,7 @@ class rsyslog::params {
$package_status = 'latest'
$rsyslog_d = '/etc/rsyslog.d/'
$rsyslog_conf = '/etc/rsyslog.conf'
$rsyslog_queues_dir = '/var/lib/rsyslog'
$rsyslog_default = '/etc/default/rsyslog'
$run_user = 'root'
$run_group = 'root'
@ -22,6 +23,7 @@ class rsyslog::params {
$package_status = 'present'
$rsyslog_d = '/etc/syslog.d/'
$rsyslog_conf = '/etc/syslog.conf'
$rsyslog_queues_dir = '/var/lib/rsyslog'
$rsyslog_default = '/etc/defaults/syslogd'
$run_user = 'root'
$run_group = 'wheel'
@ -22,6 +22,25 @@ $DirCreateMode 0755
$PrivDropToUser <%= scope.lookupvar('rsyslog::params::run_user') %>
$PrivDropToGroup <%= scope.lookupvar('rsyslog::params::run_group') %>

#
# Disk-Assisted Memory Queues, async writes, no escape chars
#
$OMFileASyncWriting on
#$EscapeControlCharactersOnReceive off
$MainMsgQueueType LinkedList
$WorkDirectory <%= scope.lookupvar('rsyslog::params::spool_dir') %>
$MainMsgQueueFileName mainmsgqueue
$MainMsgQueueSaveOnShutdown on
$MainMsgQueueDequeueSlowdown 1000
$MainMsgQueueWorkerThreads 2
$MainMsgQueueDequeueBatchSize 128
$ActionQueueType LinkedList
$WorkDirectory <%= scope.lookupvar('rsyslog::params::spool_dir') %>
$ActionQueueFileName acsdbq
$ActionQueueDequeueSlowdown 1000
$ActionQueueWorkerThreads 2
$ActionQueueDequeueBatchSize 128

#
# Include all config files in <%= scope.lookupvar('rsyslog::params::rsyslog_d') %>
#
@ -14,13 +14,14 @@ class Puppet::Provider::SwiftRingBuilder < Puppet::Provider
if File.exists?(builder_file_path)
if rows = swift_ring_builder(builder_file_path).split("\n")[4..-1]
rows.each do |row|
if row =~ /^\s+(\d+)\s+(\d+)\s+(\S+)\s+(\d+)\s+(\S+)\s+(\d+\.\d+)\s+(\d+)\s+(-?\d+\.\d+)\s+(\S*)$/
object_hash["#{$3}:#{$4}"] = {
if row =~ /^\s+(\d+)\s+(\d+)\s+(\d+)\s+(\S+)\s+(\d+)\s+(\S+)\s+(\d+\.\d+)\s+(\d+)\s+(-?\d+\.\d+)\s+(\S*)$/
object_hash["#{$4}:#{$5}"] = {
:id => $1,
:zone => $2,
:partitions => $7,
:balance => $8,
:meta => $9
:region => $2,
:zone => $3,
:partitions => $8,
:balance => $9,
:meta => $10,
}
else
Puppet.warning("Unexpected line: #{row}")
@ -89,7 +90,7 @@ class Puppet::Provider::SwiftRingBuilder < Puppet::Provider
def used_devs
if devs = swift_ring_builder(builder_file_path).split("\n")[4..-1]
@used_devices = devs.collect do |line|
line.strip.split(/\s+/)[4] if line.match(/#{resource[:name].split(':')[0]}/)
line.strip.split(/\s+/)[5] if line.match(/#{resource[:name].split(':')[0]}/)
end.compact.sort
else
[]
@ -51,7 +51,7 @@ class swift::proxy::authtoken(
auth_host => $auth_host,
auth_port => $auth_port,
auth_protocol => $auth_protocol,
signing_dir => '/tmp/keystone_signing_swift',
signing_dir => '/etc/swift',
}

}
@ -1,6 +1,6 @@

[filter:keystone]
paste.filter_factory = keystone.middleware.swift_auth:filter_factory
use = egg:swift#keystoneauth
operator_roles = <%= operator_roles.to_a.join(', ') %>
is_admin = <%= is_admin %>
cache = <%= cache %>
@ -48,10 +48,10 @@ copyright = u'2013, Mirantis'
# built documents.
#
# The short X.Y version.
version = '2.2'
version = '3.0'

# The full version, including alpha/beta/rc tags.
release = '2.2'
release = '3.0'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
@ -185,7 +185,7 @@ latex_elements = {
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'fuel.tex', u'Fuel Documentation',
u'Nick Bogdanov', 'manual'),
u'Mirantis', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
@ -10,8 +10,8 @@ Reference Architecture
.. include:: /pages/reference-architecture/0020-logical-setup.rst
.. include:: /pages/reference-architecture/0030-cluster-sizing.rst
.. include:: /pages/reference-architecture/0040-network-setup.rst
.. include:: /pages/reference-architecture/0010-technical-considerations-overview.rst
.. include:: /pages/reference-architecture/0050-technical-considerations-overview.rst
.. include:: /pages/reference-architecture/0060-quantum-vs-nova-network.rst
.. include:: /pages/reference-architecture/0050-cinder-vs-nova-volume.rst
.. include:: /pages/reference-architecture/0070-swift-notes.rst
.. include:: /pages/reference-architecture/0070-cinder-vs-nova-volume.rst
.. include:: /pages/reference-architecture/0080-swift-notes.rst
@ -10,12 +10,9 @@ Create a multi-node OpenStack cluster using Fuel
.. include:: /pages/installation-instructions/0015-before-you-start.rst
.. include:: /pages/installation-instructions/0020-machines.rst
.. include:: /pages/installation-instructions/0040-installing-configuring-puppet-master.rst
.. include:: /pages/installation-instructions/0042-installing-the-iso.rst
.. include:: /pages/installation-instructions/0045-configuring-the-iso.rst
.. include:: /pages/installation-instructions/0050-configuring-cobbler.rst
.. include:: /pages/installation-instructions/0055-installing-os-using-cobbler.rst
.. include:: /pages/installation-instructions/0057-prepare-for-deployment.rst
.. include:: /pages/installation-instructions/0060-deploying-openstack.rst
.. include:: /pages/installation-instructions/0062-orchestration.rst
.. include:: /pages/installation-instructions/0065-testing-openstack.rst
.. include:: /pages/installation-instructions/0060-understand-the-manifest.rst
.. include:: /pages/installation-instructions/0070-orchestration.rst
.. include:: /pages/installation-instructions/0080-testing-openstack.rst
11
docs/pages/0058-advanced-configuration.rst
Normal file
11
docs/pages/0058-advanced-configuration.rst
Normal file
@ -0,0 +1,11 @@
.. _Production:

Advanced Configuration Topics
=============================

.. contents:: :local:

.. include:: /pages/advanced-topics/0010-introduction.rst
.. include:: /pages/advanced-topics/0020-custom-plug-ins.rst
.. include:: /pages/advanced-topics/0030-quantum-HA.rst
.. include:: /pages/advanced-topics/0040-bonding.rst
@ -11,5 +11,5 @@ Known Issues and Workarounds
.. include:: /pages/frequently-asked-questions/0010-rabbitmq.rst
.. include:: /pages/frequently-asked-questions/0020-galera.rst
.. include:: /pages/frequently-asked-questions/0070-common-technical-issues.rst
.. include:: /pages/frequently-asked-questions/0020-other-questions.rst
.. include:: /pages/frequently-asked-questions/0080-other-questions.rst
1
docs/pages/advanced-topics/0010-introduction.rst
Normal file
1
docs/pages/advanced-topics/0010-introduction.rst
Normal file
@ -0,0 +1 @@
This section explains how to perform tasks that go beyond a simple OpenStack cluster, from configuring OpenStack Networking for high availability to adding your own custom components to your cluster using Fuel.
322
docs/pages/advanced-topics/0020-custom-plug-ins.rst
Normal file
322
docs/pages/advanced-topics/0020-custom-plug-ins.rst
Normal file
@ -0,0 +1,322 @@
Adding and configuring custom services
--------------------------------------

Fuel is designed to help you easily install a standard OpenStack cluster, but what if your cluster is not standard? What if you need services or components that are not included with the standard Fuel distribution? This document is designed to give you all of the information you need in order to add custom services and packages to a Fuel-deployed cluster.

Fuel usage scenarios and how they affect installation
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Two basic Fuel usage scenarios exist.

In the first scenario, a deployment engineer takes the Fuel ISO image, deploys the master node, makes necessary changes to configuration files, and then deploys OpenStack. In this scenario, each node gets a clean OpenStack installation.

In the second scenario, the master node and other nodes in the cluster have already been installed, and the deployment engineer has to deploy OpenStack to an existing configuration.

For the purposes of this discussion, the main difference between these two scenarios is that services in the second scenario may be running on an operating system that has already been customized; for the clean install of the first scenario, any customizations have to be performed on-the-fly, as part of the deployment.

In most cases, best practices dictate that you deploy and test OpenStack first, and then add any custom services. Fuel works using puppet manifests, so the simplest way to install a new service is to edit the current site.pp file on the Puppet master machine and start an additional deployment pass on the target node.

While that is the ideal means for installing a new service or component, it's not an option in situations in which OpenStack actually requires the new service or component. For example, hardware drivers and management software often must be installed before OpenStack itself. You still, however, have the option to create a separate customized site.pp file and run a deployment pass before installing OpenStack. One advantage to this method is that any version mismatches between the component and OpenStack dependencies should be easy to isolate.

Finally, if this is not an option, you can inject a custom component installation into the existing Fuel manifests. If you elect to go this route, you'll need to be aware of software source compatibility issues, as well as installation stages, component versions, incompatible dependencies, and declared resource names.

In short, simple custom component installation may be accomplished by editing the site.pp file, but more complex components should be added as new Fuel components.
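
For the simple case, a minimal sketch of such a site.pp addition might look like the following (the package is a placeholder chosen for illustration; the node regex follows the controller naming used later in this document)::

    node /fuel-controller-[\d+]/ {
      # Hypothetical extra component layered on top of a deployed cluster
      package { 'sysstat':
        ensure => present,
      }
    }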

Let's look at what you need to know.

Installing the new service along with Fuel
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

When it comes to installing your new service or component alongside Fuel, you have several options. How you go about it depends on where in the process the component needs to be available. Let's look at each step and how it can impact your installation.

**Boot the master node**

In most cases, you will be installing the master node from the Fuel ISO. This is a semiautomatic step, and doesn't allow for any custom components. If for some reason you need to install a node at this level, you will need to use the manual Fuel installation procedure.

**Cobbler configuration**

If your customizations need to take place before the install of the operating system, or even as part of the operating system install, you can do them at this step. This is also where you would make customizations to other services. At this level, you are making changes to the operating system kickstart/pre-seed files, and may include any custom software source and components required to install the operating system for a node. Anything that needs to be installed before OpenStack should be configured during this step.
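
For example, a sketch of a kickstart ``%post`` addition that registers a custom package source before OpenStack deployment (the repository name and URL are placeholders)::

    %post
    # Hypothetical: make a custom repository available on the provisioned node
    cat > /etc/yum.repos.d/custom.repo <<EOF
    [custom-components]
    name=Custom components
    baseurl=http://example.com/custom/
    gpgcheck=0
    enabled=1
    EOF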

**OpenStack installation**

It is during this step that you perform any Puppet, Astute, or mCollective configuration. In most cases, this means customizing the Puppet site.pp file to add any custom components during the actual OpenStack installation.

This step actually includes several different stages. (In fact, Puppet STDLib defines several additional default stages that Fuel does not use.) These stages include:

0. ``Puppetlabs-repo``. mCollective uses this stage to add the Puppetlabs repositories during operating system and Puppet deployment.

1. ``Openstack-custom-repo``. Additional repositories required by OpenStack are configured at this stage. Additionally, to avoid compatibility issues, the Puppetlabs repositories are switched off at this stage. As a general rule, it is a good idea to turn off any unnecessary software repositories defined for operating system installation.

2. ``FUEL``. During this stage, Fuel performs any actions defined for the current operating system.

3. ``Netconfig``. During this stage, Fuel performs all network configuration actions. This means that you should include any custom components that are related to the network in this stage.

4. ``Main``. The actual OpenStack installation process happens during this stage. Install any non-network-related components during this stage or after it.

**Post-OpenStack install**

At this point, OpenStack is installed. You may add any components you like at this point, as long as they don't break OpenStack itself.

Defining a new component
^^^^^^^^^^^^^^^^^^^^^^^^

In general, we recommend you follow these steps to define a new component:

#. **Custom stages. Optional.**

   Declare a custom stage or stages to help Puppet understand the required installation sequence.
   Stages are special markers indicating the sequence of actions. Best practice is to use the ``before`` input parameter for every stage, to help define the correct sequence. The default built-in stage is "main". Every Puppet action is automatically assigned to the main stage if no stage is explicitly specified for the action. However, because Fuel installs almost all of OpenStack during the main stage, custom stages may not help, so future plans include breaking the OpenStack installation into several stages.

   Don't forget to take into account other existing stages; creating several parallel sequences of stages increases the chances that Puppet will order them incorrectly if you do not explicitly specify the order.

   *Example*::

     stage {'Custom stage 1':
       before => Stage['Custom stage 2'],
     }
     stage {'Custom stage 2':
       before => Stage['main'],
     }

   Note that there are several limitations to stages, and they should be used with caution and only with the simplest of classes. You can find more information here: http://docs.puppetlabs.com/puppet/2.7/reference/lang_run_stages.html.

#. **Custom repositories. Optional.**

   If the custom component requires a custom software source, you may declare a new repository and add it during one of the early stages of the installation.
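
   As a sketch (the class name and URL are placeholders; ``yumrepo`` is the stock Puppet type, and the stage name is one of the stages listed earlier)::

     class custom_repo {
       yumrepo { 'custom-components':
         descr    => 'Custom components',
         baseurl  => 'http://example.com/custom/',
         enabled  => 1,
         gpgcheck => 0,
       }
     }

     # Attach the repository class to an early stage
     class { 'custom_repo': stage => 'Openstack-custom-repo' }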

#. **Common variable definition**

   It is a good idea to have all common variables defined in a single place. Unlike variables in many other languages, Puppet variables are actually constants, and may be assigned only once inside a given scope.
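
   A small sketch of the single-assignment rule (the variable name is illustrative)::

     $internal_address = '10.0.0.100'   # assigned once for this scope
     # A second assignment in the same scope would be a compilation error:
     # $internal_address = '10.0.0.101'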

#. **OS and condition-dependent variable definition**

   It is also a good idea to assign all common operating system or condition-dependent variables to a single location, preferably near the other common variables. Also, be sure to always use a default section when defining conditional operators.

   *Example*::

     case $::osfamily {
       # RedHat in most cases should work for CentOS and Fedora as well
       'RedHat': {
         # List of packages to get from URL/path.
         # Separate list should be defined for each separate URL!
         $custom_package_list_from_url = ['qpid-cpp-server-0.14-16.el6.x86_64.rpm']
       }
       'Debian': {
         # List of packages to get from URL/path.
         # Separate list should be defined for each separate URL!
         $custom_package_list_from_url = [ "qpidd_0.14-2_amd64.deb" ]
       }
       default: {
         fail("Module install_custom_package does not support ${::operatingsystem}")
       }
     }

#. **Define installation procedures for independent custom components as classes**

   You can think of public classes as singleton collections, or simply as named blocks of code with their own namespaces. Each class should be defined only once, but every class may be used with different input variable sets. The best practice is to define a separate class for every component, define required sub-classes for sub-components, and include class-dependent required resources within the actual class/subclass.

   *Example*::

     class add_custom_service (
       # Input parameter definitions:
       # Name of the service to place behind HAProxy. Mandatory.
       # This name appears as a new HAProxy configuration block in /etc/haproxy/haproxy.cfg.
       $service_name_in_haproxy_config,
       $custom_package_download_url,
       $custom_package_list_from_url,
       # The list of remaining input parameters
       ...
     ) {
       # HAProxy::params is a container class holding default parameters for the haproxy class. It adds and populates the Global and Default sections in /etc/haproxy/haproxy.cfg.
       # If you install a custom service over the already deployed HAProxy configuration, it is probably better to comment out the following string:
       include haproxy::params

       # Class resources definitions:
       # Define the list of package names to be installed
       define install_custom_package_from_url (
         $custom_package_download_url,
         $package_provider = undef
       ) {
         exec { "download-${name}" :
           command => "/usr/bin/wget -P/tmp ${custom_package_download_url}/${name}",
           creates => "/tmp/${name}",
         } ->
         install_custom_package { "${name}" :
           package_provider => $package_provider,
           package_source   => "/tmp/${name}",
         }
       }
       define install_custom_package (
         $package_provider = undef,
         $package_source = undef
       ) {
         package { "custom-${name}" :
           ensure   => present,
           provider => $package_provider,
           source   => $package_source
         }
       }

       # Here we actually install all the packages from a single URL.
       if is_array($custom_package_list_from_url) {
         install_custom_package_from_url { $custom_package_list_from_url :
           package_provider            => $package_provider,
           custom_package_download_url => $custom_package_download_url,
         }
       }
     }

#. **Target nodes**

   Every component should be explicitly assigned to a particular target node or nodes.
   To do that, declare the node or nodes within site.pp. When Puppet runs the manifest for each node, it compares each node definition with the name of the current hostname and applies only the classes assigned to the current node. Node definitions may include regular expressions. For example, you can apply the class ``add_custom_service`` to all controller nodes with hostnames fuel-controller-00 to fuel-controller-xxx, where xxx is any integer value, using the following definition:

   *Example*::

     node /fuel-controller-[\d+]/ {
       include stdlib
       class { 'add_custom_service':
         stage                          => 'Custom stage 1',
         service_name_in_haproxy_config => $service_name_in_haproxy_config,
         custom_package_download_url    => $custom_package_download_url,
         custom_package_list_from_url   => $custom_package_list_from_url,
       }
     }

Fuel API Reference
^^^^^^^^^^^^^^^^^^

**add_haproxy_service**

Location: Top level

As the name suggests, this function enables you to create a new HAProxy service. The service is defined in the ``/etc/haproxy/haproxy.cfg`` file, and generally looks something like this::

    listen keystone-2
      bind 10.0.74.253:35357
      bind 10.0.0.110:35357
      balance roundrobin
      option httplog
      server fuel-controller-01.example.com 10.0.0.101:35357 check
      server fuel-controller-02.example.com 10.0.0.102:35357 check

To accomplish this, you might create a Fuel statement such as::

    add_haproxy_service { 'keystone-2' :
      order                  => 30,
      balancers              => {'fuel-controller-01.example.com' => '10.0.0.101',
                                 'fuel-controller-02.example.com' => '10.0.0.102'},
      virtual_ips            => {'10.0.74.253', '10.0.0.110'},
      port                   => '35357',
      haproxy_config_options => { 'option' => ['httplog'], 'balance' => 'roundrobin' },
      balancer_port          => '35357',
      balancermember_options => 'check',
      mode                   => 'tcp',
      define_cookies         => false,
      define_backend         => false,
      collect_exported       => false
    }

Let's look at how the command works.

**Usage:** ::

    add_haproxy_service { '<SERVICE_NAME>' :
      order                  => $order,
      balancers              => $balancers,
      virtual_ips            => $virtual_ips,
      port                   => $port,
      haproxy_config_options => $haproxy_config_options,
      balancer_port          => $balancer_port,
      balancermember_options => $balancermember_options,
      mode                   => $mode,             # Optional. Default is 'tcp'.
      define_cookies         => $define_cookies,   # Optional. Default false.
      define_backend         => $define_backend,   # Optional. Default false.
      collect_exported       => $collect_exported, # Optional. Default false.
    }

**Parameters:**

``<'Service name'>``

The name of the new HAProxy listener section. In our example it was ``keystone-2``. If you want to include an IP address or port in the listener name, you have the option to use a name such as::

    'stats 0.0.0.0:9000 #Listen on all IP's on port 9000'

``order``

This parameter determines the order of the file fragments. It is optional, but we strongly recommend setting it manually.
Fuel already has several different order values from 1 to 100 hardcoded for HAProxy configuration. So if your HAProxy configuration fragments appear in the wrong places in ``/etc/haproxy/haproxy.cfg``, it is probably because of an incorrect order value. It is safe to set order values greater than 100 in order to place your custom configuration block at the end of ``haproxy.cfg``.

Puppet assembles configuration files from fragments. First it creates several configuration fragments and temporarily stores all of them as separate files. Every fragment has a name such as ``${order}-${fragment_name}``, so the order determines the number of the current fragment in the fragment sequence.
After all the fragments are created, Puppet reads the fragment names and sorts them in ascending order, concatenating all the fragments in that order. So a fragment with a smaller order value always goes before all fragments with a greater order value.

The ``keystone-2`` fragment from the example above has ``order = 30``, so it's placed after the ``keystone-1`` section (``order = 20``) and before the ``nova-api-1`` section (``order = 40``).
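
For instance, the temporary fragment files from this example would sort as follows (names illustrative, following the ``${order}-${fragment_name}`` pattern)::

    20-keystone-1
    30-keystone-2
    40-nova-api-1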

``balancers``

Balancers (or **Backends** in HAProxy terms) are a hash of ``{ "$::hostname" => $::ipaddress }`` values.
The default is ``{ "<current hostname>" => <current ipaddress> }``, but that value is set for compatibility only, and may not work correctly in HA mode. Instead, the default for HA mode is to explicitly set the balancers as ::

    Haproxy_service {
      balancers => $controller_internal_addresses
    }

with ``$controller_internal_addresses`` representing a hash of all the controllers with a corresponding internal IP address; this value is set in ``site.pp``.

So the ``balancers`` parameter is a list of HAProxy listener balance members (hostnames) with corresponding IP addresses. The following strings from the ``keystone-2`` listener example represent balancers::

    server fuel-controller-01.example.com 10.0.0.101:35357 check
    server fuel-controller-02.example.com 10.0.0.102:35357 check

Every key pair in the ``balancers`` hash adds a new string to the list of listener section balancers. Different options may be set for every string.

``virtual_ips``

This parameter represents an array of IP addresses (or **Frontends** in HAProxy terms) of the current listener. Every IP address in this array adds a new string to the bind section of the current listeners. The following strings from the ``keystone-2`` listener example represent virtual IPs::

    bind 10.0.74.253:35357
    bind 10.0.0.110:35357

``port``

This parameter specifies the frontend port for the listener. Currently you must set the same port for all frontends.
The following strings from the ``keystone-2`` listener example represent the frontend port, where the port is 35357::

    bind 10.0.74.253:35357
    bind 10.0.0.110:35357

``haproxy_config_options``

This parameter represents a hash of key pairs of HAProxy listener options in the form ``{ 'option name' => 'option value' }``. Every key pair from this hash adds a new string to the listener options.
Please note: Every HAProxy option may require a different input value type, such as strings or a list of multiple options per single string.

The ``keystone-2`` listener example has the ``{ 'option' => ['httplog'], 'balance' => 'roundrobin' }`` option array, and this array is represented as follows in the resulting /etc/haproxy/haproxy.cfg::

    balance roundrobin
    option httplog

``balancer_port``

This parameter represents the balancer (backend) port. By default, the balancer_port is the same as the frontend ``port``. The following strings from the ``keystone-2`` listener example represent ``balancer_port``, where port is ``35357``::

    server fuel-controller-01.example.com 10.0.0.101:35357 check
    server fuel-controller-02.example.com 10.0.0.102:35357 check

``balancermember_options``

This is a string of options added to each balancer (backend) member. The ``keystone-2`` listener example has the single ``check`` option::

    server fuel-controller-01.example.com 10.0.0.101:35357 check
    server fuel-controller-02.example.com 10.0.0.102:35357 check

``mode``

This optional parameter represents the HAProxy listener mode. The default value is ``tcp``, but Fuel writes ``mode http`` to the defaults section of ``/etc/haproxy/haproxy.cfg``. You can set the same option via ``haproxy_config_options``. A separate mode parameter is required to set some modes by default on every new listener addition. The ``keystone-2`` listener example has no ``mode`` option and so it works in the default Fuel-configured HTTP mode.

``define_cookies``

This optional boolean parameter is a Fuel-only feature. The default is ``false``, but if set to ``true``, Fuel directly adds ``cookie ${hostname}`` to every balance member (backend).

The ``keystone-2`` listener example has no ``define_cookies`` option. Typically, frontend cookies are added with ``haproxy_config_options`` and backend cookies with ``balancermember_options``.

``collect_exported``

This optional boolean parameter has a default value of ``false``. True means 'collect exported @@balancermember resources' (when every balancermember node exports itself), while false means 'rely on the existing declared balancermember resources' (for when you know the full set of balancermembers in advance and use ``haproxy::balancermember`` with array arguments, which allows you to deploy everything in one run).
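
As a sketch of the second, declared-in-advance style (the parameter names here are assumptions based on the puppetlabs-haproxy module of this era, not verified against Fuel's bundled copy)::

    haproxy::balancermember { 'keystone-2':
      listening_service => 'keystone-2',
      ports             => '35357',
      server_names      => ['fuel-controller-01', 'fuel-controller-02'],
      ipaddresses       => ['10.0.0.101', '10.0.0.102'],
      options           => 'check',
    }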
7
docs/pages/advanced-topics/0030-quantum-HA.rst
Normal file
7
docs/pages/advanced-topics/0030-quantum-HA.rst
Normal file
@ -0,0 +1,7 @@
OpenStack Networking HA
-----------------------

Fuel 2.1 introduces support for OpenStack Networking (formerly known as Quantum) in a high-availability configuration. To accomplish this, Fuel uses a combination of Pacemaker and Corosync to ensure that if the networking service goes down, it will be restarted, either on the existing node or on a separate node.

This document explains how to configure these options in your own installation.
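
For orientation, the manifests earlier in this commit declare the OVS agent as a cloned Pacemaker resource; a condensed sketch of that ``cs_resource`` declaration looks like this::

    cs_resource { "p_${::quantum::params::ovs_agent_service}":
      primitive_class => 'ocf',
      provided_by     => 'pacemaker',
      primitive_type  => 'quantum-agent-ovs',
      multistate_hash => { 'type' => 'clone' },
      ms_metadata     => { 'interleave' => 'true' },
    }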
283
docs/pages/advanced-topics/0040-bonding.rst
Normal file
283
docs/pages/advanced-topics/0040-bonding.rst
Normal file
@ -0,0 +1,283 @@
L23network
----------

NOTE: THIS DOCUMENT HAS NOT BEEN EDITED AND IS NOT READY FOR PUBLIC CONSUMPTION.

A Puppet module for configuring network interfaces at layers 2 and 3 (802.1q vlans, access ports, NIC bonding, IP address assignment, dhcp, and interfaces without IP addresses).

It can work together with Open vSwitch or in the standard Linux way.

At the moment we support CentOS 6.3 (RHEL6) and Ubuntu 12.04 or above.


Usage
^^^^^

Place this module in /etc/puppet/modules or another path that contains your Puppet modules.

Include the L23network module and initialize it. We recommend doing this at an early stage::

    # Network configuration
    stage {'netconfig':
      before => Stage['main'],
    }
    class {'l23network': stage => 'netconfig'}

If you do not plan to use Open vSwitch -- you can disable it::

    class {'l23network': use_ovs => false, stage => 'netconfig'}


L2 network configuration (Open vSwitch only)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

The current layout is:

* *bridges* -- A "bridge" is a virtual ethernet L2 switch. You can plug ports into it.
* *ports* -- A "port" is a virtual interface you plug into the bridge (switch).
* *interface* -- A physical implementation of a port.

Then, in your manifest, you can use these as parameterized classes::

    class {"l23network": }

    l23network::l2::bridge{"br-mgmt": }
    l23network::l2::port{"eth0": bridge => "br-mgmt"}
    l23network::l2::port{"mmm0": bridge => "br-mgmt"}
    l23network::l2::port{"mmm1": bridge => "br-mgmt"}

    l23network::l2::bridge{"br-ex": }
    l23network::l2::port{"eth0": bridge => "br-ex"}
    l23network::l2::port{"eth1": bridge => "br-ex", ifname_order_prefix => 'ovs'}
    l23network::l2::port{"eee0": bridge => "br-ex", skip_existing => true}
    l23network::l2::port{"eee1": bridge => "br-ex", type => 'internal'}

You can define a type for the port. The port type can be one of
'system', 'internal', 'tap', 'gre', 'ipsec_gre', 'capwap', 'patch', 'null'.
If you do not define a type for the port (or define '') -- ovs-vsctl will use its default behavior
(see http://openvswitch.org/cgi-bin/ovsman.cgi?page=utilities%2Fovs-vsctl.8).

You can use the *skip_existing* option if you do not want an already existing port or bridge to interrupt configuration.


L3 network configuration
^^^^^^^^^^^^^^^^^^^^^^^^
::

    ### Simple IP address definition, DHCP or address-less interfaces
    l23network::l3::ifconfig {"eth0": ipaddr=>'192.168.1.1/24'}
    l23network::l3::ifconfig {"xXxXxXx":
      interface => 'eth1',
      ipaddr    => '192.168.2.1',
      netmask   => '255.255.255.0'
    }
    l23network::l3::ifconfig {"eth2": ipaddr=>'dhcp'}
    l23network::l3::ifconfig {"eth3": ipaddr=>'none'}

The *ipaddr* option can contain an IP address, 'dhcp', or the 'none' string. In this example we describe the configuration of 4 network interfaces:

* Interface *eth0* has the short CIDR-notated form of IP address definition.
* Interface *eth1* uses the classic *ipaddr* plus *netmask* definition.
* Interface *eth2* will be configured to use the dhcp protocol.
* Interface *eth3* will be configured as an interface without an IP address. This is often needed for a "master" interface for 802.1q vlans (in the native Linux implementation) or a slave interface for bonding.

The CIDR-notated form of the IP address takes priority over the classic *ipaddr* and *netmask* definition.
If you omit *netmask* and do not use the CIDR-notated form -- the default *netmask* value of '255.255.255.0' will be used. ::

    ### Multiple IP addresses for one interface (aliases)

    l23network::l3::ifconfig {"eth0":
      ipaddr => ['192.168.0.1/24', '192.168.1.1/24', '192.168.2.1/24']
    }

You can pass a list of CIDR-notated IP addresses to the *ipaddr* parameter to assign many IP addresses to one interface. This will create aliases (not subinterfaces). The array can contain one or more elements. ::

    ### UP and DOWN interface order

    l23network::l3::ifconfig {"eth1":
      ipaddr=>'192.168.1.1/24'
    }
    l23network::l3::ifconfig {"br-ex":
      ipaddr=>'192.168.10.1/24',
      ifname_order_prefix => 'ovs'
    }
    l23network::l3::ifconfig {"aaa0":
      ipaddr=>'192.168.20.1/24',
      ifname_order_prefix => 'zzz'
    }

At OS startup, CentOS and Ubuntu start and configure network interfaces in alphabetical order
of their interface configuration file names. In the example above we change the configuration order with the *ifname_order_prefix* keyword. We will have this order::

    ifcfg-eth1
    ifcfg-ovs-br-ex
    ifcfg-zzz-aaa0

and the OS will configure interfaces br-ex and aaa0 after eth1. ::

    ### Default gateway

    l23network::l3::ifconfig {"eth1":
      ipaddr                => '192.168.2.5/24',
      gateway               => '192.168.2.1',
      check_by_ping         => '8.8.8.8',
      check_by_ping_timeout => '30'
    }

In this example we define the default *gateway* and options for waiting until the network comes up.
The *check_by_ping* parameter defines an IP address that will be pinged. Puppet will block and wait for a response for up to *check_by_ping_timeout* seconds.
*check_by_ping* can be an IP address, 'gateway', or the 'none' string to disable checking.
By default the gateway will be pinged.
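
For instance, to disable the reachability check entirely (a sketch based on the parameters just described)::

    l23network::l3::ifconfig {"eth1":
      ipaddr        => '192.168.2.5/24',
      gateway       => '192.168.2.1',
      check_by_ping => 'none',
    }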

::

    ### DNS-specific options

    l23network::l3::ifconfig {"eth1":
      ipaddr          => '192.168.2.5/24',
      dns_nameservers => ['8.8.8.8','8.8.4.4'],
      dns_search      => ['aaa.com','bbb.com'],
      dns_domain      => 'qqq.com'
    }

We can also specify DNS nameservers and a search list that will be inserted (by the resolvconf library) into /etc/resolv.conf.
The *dns_domain* option is implemented only on Ubuntu. ::

    ### DHCP-specific options

    l23network::l3::ifconfig {"eth2":
      ipaddr        => 'dhcp',
      dhcp_hostname => 'compute312',
      dhcp_nowait   => false,
    }


Bonding
^^^^^^^

### Using standard linux bond (ifenslave)
For bonding two interfaces you need to:

* Specify these interfaces as interfaces without IP addresses
* Specify that the interfaces depend on the master-bond-interface
* Assign an IP address to the master-bond-interface.
* Specify bond-specific properties for the master-bond-interface (if the defaults are not suitable for you)

For example (defaults included)::

    l23network::l3::ifconfig {'eth1': ipaddr=>'none', bond_master=>'bond0'} ->
    l23network::l3::ifconfig {'eth2': ipaddr=>'none', bond_master=>'bond0'} ->
    l23network::l3::ifconfig {'bond0':
      ipaddr         => '192.168.232.1',
      netmask        => '255.255.255.0',
      bond_mode      => 0,
      bond_miimon    => 100,
      bond_lacp_rate => 1,
    }

You can find more information about bonding network interfaces in the manuals for your operating system:

* https://help.ubuntu.com/community/UbuntuBonding
* http://wiki.centos.org/TipsAndTricks/BondingInterfaces

### Using Open vSwitch
For bonding two interfaces you need to:

* Specify an OVS bridge
* Specify the special resource "bond" and add it to the bridge. Specify bond-specific parameters.
* Assign an IP address to the newly created network interface (if needed).

In this example we add the "eth1" and "eth2" interfaces to bridge "bridge0" as bond "bond1". ::

    l23network::l2::bridge{'bridge0': } ->
    l23network::l2::bond{'bond1':
      bridge     => 'bridge0',
      ports      => ['eth1', 'eth2'],
      properties => [
        'lacp=active',
        'other_config:lacp-time=fast'
      ],
    } ->
    l23network::l3::ifconfig {'bond1':
      ipaddr  => '192.168.232.1',
      netmask => '255.255.255.0',
    }

Open vSwitch provides a lot of parameters for different configurations.
We can specify them in the "properties" option as a list of parameter=value
(or parameter:key=value) strings.
Most of them are described on the [Open vSwitch documentation page](http://openvswitch.org/support/).


802.1q vlan access ports
^^^^^^^^^^^^^^^^^^^^^^^^

### Using standard linux way
We can use tagged vlans over ordinary network interfaces (or over bonds).
L23network supports two variants of naming vlan interfaces:

* *vlanXXX* -- the 802.1q tag is taken from the vlan interface name, but you need to specify
  the parent interface name in the **vlandev** parameter.
* *eth0.101* -- both the 802.1q tag and the parent interface name are taken from the vlan interface name

If you need to use 802.1q vlans over bonds -- you can use only the first variant.

In this example we can see both variants: ::

    l23network::l3::ifconfig {'vlan6':
      ipaddr  => '192.168.6.1',
      netmask => '255.255.255.0',
      vlandev => 'bond0',
    }
    l23network::l3::ifconfig {'vlan5':
      ipaddr  => 'none',
      vlandev => 'bond0',
    }
    L23network::L3::Ifconfig['bond0'] -> L23network::L3::Ifconfig['vlan6'] -> L23network::L3::Ifconfig['vlan5']

    l23network::l3::ifconfig {'eth0':
      ipaddr  => '192.168.0.5',
      netmask => '255.255.255.0',
      gateway => '192.168.0.1',
    } ->
    l23network::l3::ifconfig {'eth0.101':
      ipaddr  => '192.168.101.1',
      netmask => '255.255.255.0',
    } ->
    l23network::l3::ifconfig {'eth0.102':
      ipaddr => 'none',
    }

### Using Open vSwitch
In Open vSwitch, all internal traffic is virtually tagged.
To create an 802.1q tagged access port, you need to specify a vlan tag when adding a port to a bridge.
In this example we create two ports with tags 10 and 20, and assign an IP address to the interface with tag 10::

    l23network::l2::bridge{'bridge0': } ->
    l23network::l2::port{'vl10':
      bridge          => 'bridge0',
      type            => 'internal',
      port_properties => [
        'tag=10'
      ],
    } ->
    l23network::l2::port{'vl20':
      bridge          => 'bridge0',
      type            => 'internal',
      port_properties => [
        'tag=20'
      ],
    } ->
    l23network::l3::ifconfig {'vl10':
      ipaddr => '192.168.101.1/24',
    } ->
    l23network::l3::ifconfig {'vl20':
      ipaddr => 'none',
    }

You can find information about vlans in Open vSwitch on the [Open vSwitch documentation page](http://openvswitch.org/support/config-cookbooks/vlan-configuration-cookbook/).

**IMPORTANT:** You can't use vlan interface names like vlanXXX if you do not want double-tagging of your network traffic.

---
When I began writing this module, I looked at https://github.com/ekarlso/puppet-vswitch. Elcarso, big thanks...

@ -1,8 +1,4 @@

If you already have Puppet Master installed, you can skip this
installation step and go directly to :ref:`Installing the OS Using Fuel <Install-OS-Using-Fuel>`.


Installing Puppet Master is a one-time procedure for the entire
infrastructure. Once done, Puppet Master will act as a single point of
@ -13,7 +9,41 @@ these installation steps again.
Initial Setup
-------------

For VirtualBox, follow these steps to create the virtual hardware:
On VirtualBox (https://www.virtualbox.org/wiki/Downloads), please create or make sure the following
host-only adapters exist and are configured correctly:

* VirtualBox -> File -> Preferences...

  * Network -> Add HostOnly Adapter (vboxnet0)

    * IPv4 Address: 10.0.0.1
    * IPv4 Network Mask: 255.255.255.0
    * DHCP server: disabled

  * Network -> Add HostOnly Adapter (vboxnet1)

    * IPv4 Address: 10.0.1.1
    * IPv4 Network Mask: 255.255.255.0
    * DHCP server: disabled

  * Network -> Add HostOnly Adapter (vboxnet2)

    * IPv4 Address: 0.0.0.0
    * IPv4 Network Mask: 255.255.255.0
    * DHCP server: disabled

In this example, only the first two adapters will be used, but you can choose to use the third to handle your storage network traffic.

After creating these interfaces, reboot the host machine to make sure that
DHCP isn't running in the background.

Installing on Windows isn't recommended, but if you're attempting it,
you will also need to set up the IP address & network mask under
Control Panel > Network and Internet > Network and Sharing Center for the
Virtual HostOnly Network adapter.

Next, follow these steps to create the virtual hardware:

* Machine -> New
@ -29,16 +59,13 @@ For VirtualBox, follow these steps to create the virtual hardware:

* Machine -> Settings -> Network

  * Adapter 1

  * Adapter 1

    * Enable Network Adapter
    * Attached to: Host-only Adapter
    * Name: vboxnet0

  * Adapter 2
  * Adapter 2

    * Enable Network Adapter
    * Attached to: Bridged Adapter
    * Name: eth0 (or whichever physical network has your internet connection)
@ -50,13 +77,12 @@ OS Installation
---------------

* Pick and download an operating system image. This image will be used as the base OS for the Puppet master node. These instructions assume that you are using CentOS 6.3, but you can also use Ubuntu 12.04 or RHEL 6.3.
* Pick and download an operating system image. This image will be used as the base OS for the Puppet master node. These instructions assume that you are using CentOS 6.4, but you can also use Ubuntu 12.04.

**PLEASE NOTE**: These are the only operating systems on which Fuel has been certified. Using other operating systems can, and in many cases will, produce unpredictable results.
**PLEASE NOTE**: These are the only operating systems on which Fuel 3.0 has been certified. Using other operating systems can, and in many cases will, produce unpredictable results.

* `CentOS 6.3 <http://isoredirect.centos.org/centos/6/isos/x86_64/>`_: download CentOS-6.3-x86_64-minimal.iso
* `CentOS 6.4 <http://isoredirect.centos.org/centos/6/isos/x86_64/>`_: download CentOS-6.4-x86_64-minimal.iso
* `Ubuntu 12.04 Precise Pangolin <https://help.ubuntu.com/community/Installation/MinimalCD>`_: download the Ubuntu Minimal CD

* Mount the downloaded ISO to the machine's CD/DVD drive. In case of VirtualBox, mount it to the fuel-pm virtual machine:
@ -72,16 +98,13 @@ OS Installation

* Boot the server (or VM) from the CD/DVD drive and install the chosen OS. Be sure to choose the root password carefully.

* Set up the eth0 interface. This interface will be used for communication between the Puppet Master and Puppet clients, as well as for Cobbler.

``vi/etc/sysconfig/network-scripts/ifcfg-eth0``::
``vi /etc/sysconfig/network-scripts/ifcfg-eth0``::

    DEVICE="eth0"
    BOOTPROTO="static"
    IPADDR="10.20.0.100"
    IPADDR="10.0.0.100"
    NETMASK="255.255.255.0"
    ONBOOT="yes"
    TYPE="Ethernet"
@ -116,7 +139,7 @@ OS Installation

* Add DNS for Internet hostnames resolution::

    vi/etc/resolv.conf
    vi /etc/resolv.conf

@ -130,7 +153,7 @@ OS Installation

* Check that a ping to your host machine works. This means that the management segment is available::

    ping 10.20.0.1
    ping 10.0.0.1

@ -147,30 +170,34 @@ OS Installation

* Next, set up the packages repository:

``vi /etc/yum.repos.d/puppet.repo``::

``vi/etc/yum.repos.d/puppet.repo``::

    [puppetlabs-dependencies]
    name=Puppet Labs Dependencies
    baseurl=http://yum.puppetlabs.com/el/$releasever/dependencies/$basearch/
    enabled=1
    gpgcheck=0

    [puppetlabs]
    name=Puppet Labs Packages
    baseurl=http://yum.puppetlabs.com/el/$releasever/products/$basearch/
    enabled=1 gpgcheck=1 gpgkey=http://yum.puppetlabs.com/RPM-GPG-KEY-puppetlabs

    enabled=1
    gpgcheck=0

* Install Puppet Master::

    rpm -Uvh http://download.fedoraproject.org/pub/epel/6/x86_64/epel-release-6-8.noarch.rpm
    rpm -Uvh http://dl.fedoraproject.org/pub/epel/6/x86_64/epel-release-6-8.noarch.rpm
    yum upgrade
    yum install puppet-server-2.7.19
    service puppetmaster
    start chkconfig puppetmaster on
    service puppetmaster start
    chkconfig puppetmaster on
    service iptables stop
    chkconfig iptables off

* Install PuppetDB::

    yum install puppetdb puppetdb-terminus
    chkconfig puppetdb on

@ -106,7 +106,7 @@ Puppet to use a technique called stored configuration.

* Finally, set up SSL for PuppetDB and restart the puppetmaster and puppetdb services::
* Set up SSL for PuppetDB and restart the puppetmaster and puppetdb services::

    service puppetmaster restart
@ -116,18 +116,31 @@ Puppet to use a technique called stored configuration.

* Finally, if you are planning to install Cobbler on the Puppet Master node as well (as we are in this example), make configuration changes on the Puppet Master so that it actually knows how to provision software onto itself:

``vi /etc/puppet/puppet.conf``::

    [main]
    # server
    server = fuel-pm.localdomain

    # enable plugin sync
    pluginsync = true

* **IMPORTANT**: Note that while these operations appear to finish quickly, it can actually take several minutes for puppetdb to complete its startup process. You'll know it has finished starting up when you can successfully telnet to port 8081::

    telnet pm.localdomain 8081
    yum install telnet
    telnet fuel-pm.localdomain 8081

Testing Puppet
^^^^^^^^^^^^^^

Put a simple configuration into Puppet -- replace localdomain
with your domain name -- so that when you run puppet on various nodes,
it will display the appropriate Hello world message:

Add a simple configuration to Puppet so that when you run puppet on various nodes,
it will display a "Hello world" message:

``vi /etc/puppet/manifests/site.pp``::

@ -135,38 +148,9 @@ it will display the appropriate Hello world message:
    node /fuel-pm.localdomain/ {
      notify{"Hello world from fuel-pm": }
    }
    node /fuel-controller-01.localdomain/ {
      notify{"Hello world from fuel-controller-01": }
    }
    node /fuel-controller-02.localdomain/ {
      notify{"Hello world from fuel-controller-02": }
    }
    node /fuel-controller-03.localdomain/ {
      notify{"Hello world from fuel-controller-03": }
    }
    node /fuel-compute-01.localdomain/ {
      notify{"Hello world from fuel-compute-01": }
    }

If you are planning to install Cobbler on the Puppet Master node as
well (as we are in this example), make configuration changes on the
Puppet Master so that it actually knows how to provision software onto
itself (replace your-domain-name.com with your domain name):

``vi /etc/puppet/puppet.conf``::

    [main]
    # server
    server = fuel-pm.localdomain

    # enable plugin sync
    pluginsync = true

Finally, to make sure everything is working properly, run puppet agent
and see the ``Hello World from fuel-pm`` output::
@ -1,4 +1,4 @@
Installing & Configuring Cobbler
Installing Fuel and Cobbler
--------------------------------

Cobbler performs bare metal provisioning and initial installation of

@ -0,0 +1,4 @@
.. include:: /pages/creating-fuel-pm/0010-creating-fuel-pm-from-scratch.rst
.. include:: /pages/creating-fuel-pm/0045-configuring-fuel-pm.rst
.. include:: /pages/creating-fuel-pm/0050-installing-configuring-cobbler.rst
.. include:: /pages/creating-fuel-pm/0060-register-with-fuel.rst
@ -0,0 +1,6 @@
Known Issues and Workarounds
----------------------------

.. include:: /pages/frequently-asked-questions/0010-rabbitmq.rst
.. include:: /pages/frequently-asked-questions/0020-galera.rst
@ -125,3 +125,12 @@ In most cases, Fuel creates the XFS partition for you. If for some reason you n
    noatime,nodiratime,nobarrier,logbufs=8 0 0" >> /etc/fstab
    mount -a

Redeploying a node from scratch
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Compute and Cinder nodes in an HA configuration, and controllers in any configuration, cannot be redeployed without completely redeploying the cluster. However, in a non-HA situation you can redeploy a compute or Cinder node. Simply follow these steps (a worked example follows the list):

#. Remove the certificate for the node by executing the command ``puppet cert clean <hostname>`` on fuel-pm.
#. Re-boot the node over the network so it can be picked up by Cobbler.
#. Run the puppet agent on the target node using ``puppet agent --test``.
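
For a hypothetical non-HA compute node named fuel-compute-01.localdomain, the sequence sketches out as::

    # On fuel-pm:
    puppet cert clean fuel-compute-01.localdomain
    # PXE-boot the node so Cobbler reinstalls the OS, then on the node:
    puppet agent --test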
@ -1,13 +1,13 @@
How installation works
----------------------

While version 2.0 of Fuel provided the ability to simplify installation of OpenStack, version 2.1 includes orchestration capabilities that simplify deployment of an OpenStack cluster. The deployment process follows this general procedure:
While version 2.0 of Fuel provided the ability to simplify installation of OpenStack, versions 2.1 and above include orchestration capabilities that simplify deployment of an OpenStack cluster. The deployment process follows this general procedure:

#. Design your architecture.
#. Install Fuel onto the fuel-pm machine.
#. Configure Fuel.
#. Create the basic configuration and load it into Cobbler.
#. PXE-boot the servers so Cobbler can install the operating system.
#. PXE-boot the servers so Cobbler can install the operating system and prepare them for orchestration.
#. Use Fuel's included templates and the configuration to populate Puppet's site.pp file.
#. Customize the site.pp file if necessary.
#. Use the orchestrator to coordinate the installation of the appropriate OpenStack components on each node.
@ -5,18 +5,19 @@ Before you start
Before you begin your installation, you will need to make a number of important
decisions:

* **OpenStack features.** You must choose which of the optional OpenStack features you want. For example, you must decide whether you want to install Swift, whether you want Glance to use Swift for image storage, whether you want Cinder for block storage, and whether you want nova-network or Quantum to handle your network connectivity. In the case of this example, we will be installing Swift, and Glance will be using it. We'll also be using Cinder for block storage. Because it can be easily installed using orchestration, we will also be using Quantum.
* **OpenStack features.** Your first decision is which of the optional OpenStack features you want. For example, you must decide whether you want to install Swift, whether you want Glance to use Swift for image storage, whether you want Cinder for block storage, and whether you want nova-network or Quantum to handle your network connectivity. In the case of this example, we will be installing Swift, and Glance will be using it. We'll also be using Cinder for block storage. Because it can be easily installed using orchestration, we will also be using Quantum.

* **Deployment configuration.** The first decision is whether your deployment requires high availability. If you do choose to do an HA deployment, you have a choice regarding the number of controllers you want to have. Following the recommendations in the previous section for a typical HA deployment configuration, we will use 3 OpenStack controllers.
* **Deployment configuration.** Next you need to decide whether your deployment requires high availability. If you do choose to do an HA deployment, you have a choice regarding the number of controllers you want to include. Following the recommendations in the previous section for a typical HA deployment configuration, we will use 3 OpenStack controllers.

* **Cobbler server and Puppet Master.** The heart of a Fuel install is the combination of Puppet Master and Cobbler used to create your resources. Although Cobbler and Puppet Master can be installed on separate machines, it is common practice to install both on a single machine for small to medium size clouds, and that's what we'll be doing in this example. (By default, the Fuel ISO creates a single server with both services.)

* **Domain name.** Puppet clients generate a Certificate Signing Request (CSR), which is then signed by Puppet Master. The signed certificate can then be used to authenticate the client during provisioning. Certificate generation requires a fully qualified hostname, so you must choose a domain name to be used in your installation. We'll leave this up to you.
* **Network addresses.** OpenStack requires a minimum of three networks. If you are deploying on physical hardware two of them -- the public network and the internal, or management network -- must be routable in your networking infrastructure. Also, if you intend for your cluster to be accessible from the Internet, you'll want the public network to be on the proper network segment. For simplicity in this case, this example assumes an Ineternet router at 192.168.0.1. Additionally, a set of private network addresses should be selected for automatic assignment to guest VMs. (These are fixed IPs for the private network). In our case, we are allocating network addresses as follows:
* **Domain name.** Puppet clients generate a Certificate Signing Request (CSR), which is then signed by Puppet Master. The signed certificate can then be used to authenticate the client during provisioning. Certificate generation requires a fully qualified hostname, so you must choose a domain name to be used in your installation. Future versions of Fuel will enable you to choose this domain name on your own; by default, Fuel 3.0 uses ``localdomain``.

* **Network addresses.** OpenStack requires a minimum of three networks. If you are deploying on physical hardware, two of them -- the public network and the internal, or management network -- must be routable in your networking infrastructure. Also, if you intend for your cluster to be accessible from the Internet, you'll want the public network to be on the proper network segment. For simplicity in this case, this example assumes an Internet router at 192.168.0.1. Additionally, a set of private network addresses should be selected for automatic assignment to guest VMs. (These are fixed IPs for the private network). In our case, we are allocating network addresses as follows:

  * Public network: 192.168.0.0/24
  * Internal network: 10.20.0.0/24
  * Private network: 10.20.1.0/24
  * Internal network: 10.0.0.0/24
  * Private network: 10.0.1.0/24

* **Network interfaces.** All of those networks need to be assigned to the available NIC cards on the allocated machines. Additionally, if a fourth NIC is available, Cinder or block storage traffic can also be separated and delegated to the fourth NIC. In our case, we're assigning networks as follows:
@ -1,5 +1,5 @@

Infrastructure allocation
-------------------------
Infrastructure allocation and installation
------------------------------------------

The next step is to make sure that you have all of the required
hardware and software in place.

@ -10,54 +10,69 @@ Software

You can download the latest release of the Fuel ISO from http://fuel.mirantis.com/your-downloads/.

Alternatively, if you can't use the pre-built ISO, Mirantis also offers the Fuel Library as a tar.gz file downloadable from `Downloads <http://fuel.mirantis.com/your-downloads/>`_ section of the Fuel portal.
Alternatively, if you can't use the pre-built ISO, Mirantis also offers the Fuel Library as a tar.gz file downloadable from the `Downloads <http://fuel.mirantis.com/your-downloads/>`_ section of the Fuel portal. Using this file requires a bit more manual effort, but will yield the same results as using the ISO.

Hardware for a virtual installation
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Network setup
^^^^^^^^^^^^^

For a virtual installation, you need only a single machine. You can get
by on 8GB of RAM, but 16GB will be better.

To actually perform the
installation, you need a way to create Virtual Machines. This guide
assumes that you are using version 4.2.6 of VirtualBox, which you can download from

https://www.virtualbox.org/wiki/Downloads

Make sure to also install the Extension Pack.

You'll need to run VirtualBox on a stable host system. Mac OS 10.7.x,
CentOS 6.3, or Ubuntu 12.04 are preferred; results in other operating
systems are unpredictable.

You will need to allocate the following resources:

* 1 server to host both Puppet Master and Cobbler. The minimum configuration for this server is:

  * 32-bit or 64-bit architecture
  * 1+ CPU or vCPU
  * 1024+ MB of RAM
  * 16+ GB of HDD for OS, and Linux distro storage

* 3 servers to act as OpenStack controllers (called fuel-controller-01, fuel-controller-02, and fuel-controller-03). The minimum configuration for a controller in Compact mode is:

  * 64-bit architecture
  * 1+ CPU
  * 1024+ MB of RAM
  * 8+ GB of HDD for base OS
  * 10+ GB of HDD for Swift

* 1 server to act as the OpenStack compute node (called fuel-compute-01). The minimum configuration for a compute node with Cinder deployed on it is:

  * 64-bit architecture
  * 2048+ MB of RAM
  * 50+ GB of HDD for OS, instances, and ephemeral storage
  * 50+ GB of HDD for Cinder

Instructions for creating these resources will be provided in :ref:`Installing the OS using Fuel <Install-OS-Using-Fuel>`.

OpenStack requires a minimum of three distinct networks: internal (or
management), public, and private. The simplest and best mapping is to
assign each network to a different physical interface. However, not
all machines have three NICs, and OpenStack can be configured and
deployed with only two physical NICs, collapsing the internal and
public traffic onto a single NIC.

Hardware for a physical infrastructure installation
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

If you are deploying to a simulation environment, however, it makes
sense to just allocate three NICs to each VM in your OpenStack
infrastructure, one each for the internal, public, and private networks respectively.

Finally, we must assign network ranges to the internal, public, and private
networks, and IP addresses to fuel-pm, fuel-controllers, and fuel-compute nodes. For a real deployment using physical infrastructure you must work with your IT department to determine which IPs to use, but
for the purposes of this exercise we will assume the below network and
IP assignments:

#. 10.0.0.0/24: management or internal network, for communication between Puppet master and Puppet clients, as well as PXE/TFTP/DHCP for Cobbler.
#. 192.168.0.0/24: public network, for the High Availability (HA) Virtual IP (VIP), as well as floating IPs assigned to OpenStack guest VMs
#. 10.0.1.0/24: private network, fixed IPs automatically assigned to guest VMs by OpenStack upon their creation

Next we need to allocate a static IP address from the internal network
to eth0 for fuel-pm, and eth1 for the controller, compute, and (if necessary) quantum
nodes. For High Availability (HA) we must choose and assign an IP
address from the public network to HAProxy running on the controllers.
You can configure network addresses/network mask according to your
needs, but our instructions will assume the following network settings
on the interfaces:

#. eth0: internal management network, where each machine will have a static IP address

   * 10.0.0.100 for Puppet Master
   * 10.0.0.101-10.0.0.103 for the controller nodes
   * 10.0.0.110-10.0.0.126 for the compute nodes
   * 10.0.0.10 internal Virtual IP for component access
   * 255.255.255.0 network mask

#. eth1: public network

   * 192.168.0.10 public Virtual IP for access to the Horizon GUI (OpenStack management interface)

#. eth2: for communication between OpenStack VMs; no IP address, with promiscuous mode enabled.

Physical installation infrastructure
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

The amount of hardware necessary for an installation depends on the
choices you have made above. This sample installation requires the
@ -74,7 +89,7 @@ following hardware:

  * 64-bit architecture
  * 1+ CPU
  * 1024+ MB of RAM
  * 1024+ MB of RAM (2048+ MB preferred)
  * 400+ GB of HDD

* 1 server to act as the OpenStack compute node (called fuel-compute-01). The minimum configuration for a compute node with Cinder deployed on it is:

@ -88,14 +103,121 @@ following hardware:

additional server with specifications comparable to the controller
nodes.)

Make sure your hardware is capable of PXE booting over the network from Cobbler. You'll also need each server's MAC address.
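
On Linux servers you can read a NIC's MAC address directly from the OS before wiring up Cobbler; ``ip link`` is standard on CentOS 6 (the interface name here is just an example)::

    ip link show eth0 | awk '/link\/ether/ {print $2}'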

For a list of certified hardware configurations, please `contact the
Mirantis Services team <http://www.mirantis.com/contact/>`_.

Providing the OpenStack nodes
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Virtual installation infrastructure
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

For a virtual installation, you need only a single machine. You can get
by on 8GB of RAM, but 16GB will be better.

To actually perform the
installation, you need a way to create Virtual Machines. This guide
assumes that you are using version 4.2.12 of VirtualBox, which you can download from

https://www.virtualbox.org/wiki/Downloads

Make sure to also install the Extension Pack.

You'll need to run VirtualBox on a stable host system. Mac OS 10.7.x,
CentOS 6.3+, or Ubuntu 12.04 are preferred; results in other operating
systems are unpredictable.

Configuring VirtualBox
++++++++++++++++++++++

If you are on VirtualBox, please create or make sure the following
hostonly adapters exist and are configured correctly:

* VirtualBox -> File -> Preferences...

  * Network -> Add HostOnly Adapter (vboxnet0)

    * IPv4 Address: 10.0.0.1
    * IPv4 Network Mask: 255.255.255.0
    * DHCP server: disabled

  * Network -> Add HostOnly Adapter (vboxnet1)

    * IPv4 Address: 10.0.1.1
    * IPv4 Network Mask: 255.255.255.0
    * DHCP server: disabled

  * Network -> Add HostOnly Adapter (vboxnet2)

    * IPv4 Address: 0.0.0.0
    * IPv4 Network Mask: 255.255.255.0
    * DHCP server: disabled

In this example, only the first two adapters will be used, but you can choose to use the third to handle your storage network traffic.
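
If you prefer the command line, the same adapters can be created with ``VBoxManage``; this is a sketch that assumes VirtualBox 4.2 names the new interfaces vboxnet0 and vboxnet1 in order::

    # Each "hostonlyif create" adds the next vboxnetN interface
    VBoxManage hostonlyif create
    VBoxManage hostonlyif ipconfig vboxnet0 --ip 10.0.0.1 --netmask 255.255.255.0
    # Removing the DHCP server may fail harmlessly if none was created
    VBoxManage dhcpserver remove --ifname vboxnet0

    VBoxManage hostonlyif create
    VBoxManage hostonlyif ipconfig vboxnet1 --ip 10.0.1.1 --netmask 255.255.255.0
    VBoxManage dhcpserver remove --ifname vboxnet1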

After creating these interfaces, reboot the host machine to make sure that
DHCP isn't running in the background.

Installing on Windows isn't recommended, but if you're attempting it,
you will also need to set up the IP address & network mask under
Control Panel > Network and Internet > Network and Sharing Center for the
Virtual HostOnly Network adapter.

Creating fuel-pm
++++++++++++++++

The process of creating a virtual machine to host Fuel in VirtualBox depends on
whether your deployment is purely virtual or consists of a physical or virtual
fuel-pm controlling physical hardware. If your deployment is purely
virtual then Adapter 1 may be a Hostonly adapter attached to
vboxnet0, but if your deployment infrastructure consists of a virtual
fuel-pm controlling physical machines, Adapter 1 must be a Bridged
Adapter, connected to whatever network interface of the host machine
is connected to your physical machines.

To create fuel-pm, start up VirtualBox and create a new machine as follows:

* Machine -> New...

  * Name: fuel-pm
  * Type: Linux
  * Version: Red Hat (64 Bit)
  * Memory: 2048 MB
  * Drive space: 16 GB HDD

* Machine -> Settings... -> Network

  * Adapter 1

    * Physical network

      * Enable Network Adapter
      * Attached to: Bridged Adapter
      * Name: The host machine's network with access to the network on which the physical machines reside

    * VirtualBox installation

      * Enable Network Adapter
      * Attached to: Hostonly Adapter
      * Name: vboxnet0

  * Adapter 2

    * Enable Network Adapter
    * Attached to: Bridged Adapter
    * Name: eth0 (or whichever physical network is attached to the Internet)

* Machine -> Storage

  * Attach the downloaded ISO as a drive

If you can't (or would rather not) install from the ISO, you can find instructions for installing from the Fuel Library in :ref:`Appendix A <Create-PM>`.
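
The equivalent ``VBoxManage`` steps look roughly like this -- a sketch for the purely virtual case, where the ISO filename and the SATA controller name are assumptions::

    VBoxManage createvm --name fuel-pm --ostype RedHat_64 --register
    VBoxManage modifyvm fuel-pm --memory 2048 \
        --nic1 hostonly --hostonlyadapter1 vboxnet0 \
        --nic2 bridged --bridgeadapter2 eth0
    VBoxManage createhd --filename fuel-pm.vdi --size 16384
    VBoxManage storagectl fuel-pm --name SATA --add sata
    VBoxManage storageattach fuel-pm --storagectl SATA --port 0 --device 0 \
        --type hdd --medium fuel-pm.vdi
    # Attach the downloaded ISO (hypothetical filename) as a DVD drive
    VBoxManage storageattach fuel-pm --storagectl SATA --port 1 --device 0 \
        --type dvddrive --medium Mirantis-Fuel.iso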

Creating the OpenStack nodes
++++++++++++++++++++++++++++

If you are using hardware, make sure it is capable of PXE booting over
the network from Cobbler. You'll also need each server's MAC address.
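
For VirtualBox-based nodes, the generated MAC for each adapter can be read back from the CLI once the VM exists (VM names as used in this example)::

    VBoxManage showvminfo fuel-controller-01 | grep -i 'NIC 1'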

@ -119,11 +241,11 @@ record the corresponding mac address.

* Name: fuel-controller-01 (you will need to repeat these steps for fuel-controller-02, fuel-controller-03, and fuel-compute-01)
* Type: Linux
* Version: Red Hat (64 Bit)
* Memory: 1024MB
* Memory: 2048MB
* Drive space: 8GB

* Machine -> System -> Motherboard...
* Machine -> Settings -> System

  * Check Network in Boot sequence

@ -131,38 +253,28 @@ record the corresponding mac address.

* Controller: SATA

  * Click the Add icon at the bottom of the Storage Tree pane
  * Click the Add icon at the bottom of the Storage Tree pane and choose Add Disk
  * Add a second VDI disk of 10GB for storage

* Machine -> Settings... -> Network
* Machine -> Settings -> Network

  * Adapter 1

    * Enable Network Adapter
    * Attached to: Hostonly Adapter
    * Name: vboxnet0

  * Adapter 2

    * Enable Network Adapter
    * Attached to: Bridged Adapter
    * Name: eth0 (physical network attached to the Internet. You can also use a gateway.)

  * Adapter 3

    * Enable Network Adapter
    * Attached to: Hostonly Adapter
    * Name: vboxnet2
    * Name: vboxnet1
    * Advanced -> Promiscuous mode: Allow All

@ -1,6 +1,6 @@

Installing & Configuring Puppet Master
--------------------------------------
Installing & Configuring Fuel
-----------------------------
Now that you know what you're going to install and where you're going to
install it, it's time to begin putting the pieces together. To do that,
you'll need to create the Puppet master and Cobbler servers, which will
@ -18,160 +18,54 @@ network presence on the same network the physical machines will
ultimately PXE boot from. In a simulation environment fuel-pm only
needs virtual network (hostonlyif) connectivity.

The easiest way to create an instance of fuel-pm is to download the
Mirantis ISO from http://fuel.mirantis.com/your-downloads/
At this point, you should have either a physical or virtual machine that
can be booted from the Mirantis ISO, downloaded from http://fuel.mirantis.com/your-downloads/ .

This ISO can be used to create fuel-pm on a physical or virtual
machine based on CentOS6.3x86_64minimal.iso. If for some reason you
machine based on CentOS 6.4. If for some reason you
can't use this ISO, follow the instructions in :ref:`Creating the Puppet master <Create-PM>` to create
your own fuel-pm, then skip ahead to :ref:`Configuring fuel-pm <Configuring-Fuel-PM>`.

Installing Fuel from the ISO
^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Network setup
^^^^^^^^^^^^^
Start the new machine to install the ISO. The only real installation decision you will need to make is to specify the interface through which the installer can access the Internet. Choose eth1, as it's connected to the Internet-connected interface.

OpenStack requires a minimum of three distinct networks: internal (or
management), public, and private. The simplest and best mapping is to
assign each network to a different physical interface. However, not
all machines have three NICs, and OpenStack can be configured and
deployed with only two physical NICs, collapsing the internal and
public traffic onto a single NIC.

Configuring fuel-pm from the ISO installation
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Once fuel-pm finishes installing, you'll be presented with a basic menu. You can use this menu to set the basic information Fuel will need to configure your installation. You can customize these steps for your own situation, of course, but here are the steps to take for the example installation:

#. Future versions of Fuel will enable you to change the hostname and domain name for your admin node and cluster, respectively. For now, your admin node must be called ``fuel-pm``, and your domain name must be ``localdomain``.
#. To configure the management interface, choose 2.

If you are deploying to a simulation environment, however, it makes
sense to just allocate three NICs to each VM in your OpenStack
infrastructure. For VirtualBox, this means creating three Host Only
interfaces, vboxnet0, vboxnet1, and vboxnet2, for the internal,
public, and private networks respectively.

   * The example specifies eth0 as the internal, or management interface, so enter that.
   * The management network in the example is using static IP addresses, so specify no for using DHCP.
   * Enter the IP address of 10.0.0.100 for the Puppet Master, and the netmask of 255.255.255.0. Future versions of Fuel will enable you to choose a different IP range for your management interface.
   * Set the gateway and DNS servers if desired. In this example, we'll use the router at 192.168.0.1 as the gateway.

#. To configure the external interface, which VMs will use to send traffic to and from the internet, choose 3. Set the interface to eth1. By default, this interface uses DHCP, which is what the example calls for.

#. To choose the start and end addresses to be used during PXE boot, choose 4. In the case of this example, the start address is 10.0.0.201 and the end address is 10.0.0.254. Later, these nodes will receive IP addresses from Cobbler.

Finally, we must assign network ranges to the internal, public, and private
networks, and ip addresses to fuel-pm, fuel-controllers, and fuel-compute nodes. For a real deployment using physical infrastructure you must work with your IT department to determine which IPs to use, but
for the purposes of this exercise we will assume the below network and
ip assignments:
#. Future versions of Fuel will enable you to choose a custom set of repositories.

#. If you need to specify a proxy through which fuel-pm will access the Internet, press 6.

#. 10.20.0.0/24: management or internal network, for communication between Puppet master and Puppet clients, as well as PXE/TFTP/DHCP for Cobbler
#. 192.168.0.0/24: public network, for the High Availability (HA) Virtual IP (VIP), as well as floating IPs assigned to OpenStack guest VMs
#. 10.20.1.0/24: private network, fixed IPs automatically assigned to guest VMs by OpenStack upon their creation
#. Once you've finished editing, choose 9 to save your changes and exit the menu.

Please note: Even though defaults are shown, you must set actual values; if you simply press "enter" you will wind up with empty values.

To re-enter the menu at any time, type::

    bootstrap_admin_node.sh

Next we need to allocate a static IP address from the internal network
to eth0 for fuel-pm, and eth1 for the controller, compute, and (if necessary) quantum
nodes. For High Availability (HA) we must choose and assign an IP
address from the public network to HAProxy running on the controllers.
You can configure network addresses/network mask according to your
needs, but our instructions will assume the following network settings
on the interfaces:

#. eth0: internal management network, where each machine will have a static IP address

   * 10.20.0.100 for Puppet Master
   * 10.20.0.101-10.20.0.103 for the controller nodes
   * 10.20.0.110-10.20.0.126 for the compute nodes
   * 10.20.0.10 internal Virtual IP for component access
   * 255.255.255.0 network mask

#. eth1: public network

   * 192.168.0.10 public Virtual IP for access to the Horizon GUI (OpenStack management interface)

#. eth2: for communication between OpenStack VMs; no IP address, with promiscuous mode enabled.

If you are on VirtualBox, please create or make sure the following
hostonly adapters exist and are configured correctly:

If you are on VirtualBox, create the following adapters:

* VirtualBox -> Preferences...

  * Network -> Add HostOnly Adapter (vboxnet0)

    * IPv4 Address: 10.20.0.1
    * IPv4 Network Mask: 255.255.255.0
    * DHCP server: disabled

  * Network -> Add HostOnly Adapter (vboxnet1)

    * IPv4 Address: 10.20.1.1
    * IPv4 Network Mask: 255.255.255.0
    * DHCP server: disabled

  * Network -> Add HostOnly Adapter (vboxnet2)

    * IPv4 Address: 0.0.0.0
    * IPv4 Network Mask: 255.255.255.0
    * DHCP server: disabled

After creating these interfaces, reboot the host machine to make sure that
DHCP isn't running in the background.

Installing on Windows isn't recommended, but if you're attempting it,
you will also need to set up the IP address & network mask under
Control Panel > Network and Internet > Network and Sharing Center for the
Virtual HostOnly Network adapter.

Creating fuel-pm on a Physical Machine
--------------------------------------

If you plan to provision the Puppet master on hardware, you need to
create a bootable DVD or USB disk from the downloaded ISO, then make
sure that you can boot your server from the DVD or USB drive.

Creating fuel-pm on a Virtual Machine
-------------------------------------

The process of creating a virtual machine to host Fuel in VirtualBox depends on
whether your deployment is purely virtual or consists of a virtual
fuel-pm controlling physical hardware. If your deployment is purely
virtual then Adapter 1 may be a Hostonly adapter attached to
vboxnet0, but if your deployment infrastructure consists of a virtual
fuel-pm controlling physical machines, Adapter 1 must be a Bridged
Adapter, connected to whatever network interface of the host machine
is connected to your physical machines.

Start up VirtualBox and create a new machine as follows:

* Machine -> New...

  * Name: fuel-pm
  * Type: Linux
  * Version: Red Hat (32 or 64 Bit)
  * Memory: 2048 MB
  * Drive space: 16 GB HDD

* Machine -> Settings... -> Network

  * Adapter 1

    * Physical network

      * Enable Network Adapter
      * Attached to: Bridged Adapter
      * Name: The host machine's network with access to the network on which the physical machines reside

    * VirtualBox installation

      * Enable Network Adapter
      * Attached to: Hostonly Adapter
      * Name: vboxnet0

  * Adapter 2

    * Enable Network Adapter
    * Attached to: Bridged Adapter
    * Name: eth0 (or whichever physical network is attached to the Internet)

* Machine -> Storage

  * Attach the downloaded ISO as a drive

If you can't (or would rather not) install from the ISO, you can find instructions for installing from the Fuel Library in :ref:`Appendix A <Create-PM>`.

@ -1,5 +0,0 @@

Installing Fuel from the ISO
----------------------------

Start the new machine to install the ISO. The only real installation decision you will need to make is to specify the interface through which the installer can access the Internet. Choose eth1, as it's connected to the Internet-connected interface.

@ -1,36 +0,0 @@

Configuring fuel-pm from the ISO installation
---------------------------------------------

Once fuel-pm finishes installing, you'll be presented with a basic menu. You can use this menu to set the basic information Fuel will need to configure your installation. You can customize these steps for your own situation, of course, but here are the steps to take for the example installation:

#. To set the fully-qualified domain name for the master node and cloud domain, choose 1.

   * Type ``fuel-pm`` for the hostname.
   * Set your own domain name.

#. To configure the management interface, choose 2.

   * The example specifies eth0 as the internal, or management interface, so enter that.
   * The management network in the example is using static IP addresses, so specify no for using DHCP.
   * Enter the IP address of 10.20.0.100 for the Puppet Master, and the netmask of 255.255.255.0.
   * Set the gateway and DNS servers if desired.

#. To configure the external interface, which will be used to send traffic to and from the internet, choose 3. Set the interface to eth1. By default, this interface uses DHCP, which is what the example calls for.

#. To choose the start and end addresses to be used during PXE boot, choose 4. In the case of this example, the start address is 10.20.0.110 and the end address is 10.20.0.126. Later, these nodes will receive IP addresses from Cobbler.

Future versions of Fuel will enable you to choose a custom set of repositories.

Please note: You must set actual values; if you simply press "enter" you will wind up with empty values.

5. Once you've finished editing, choose 6 to save your changes and exit the menu.

To re-enter the menu at any time, type::

    bootstrap_admin_node.sh

@ -1,7 +1,15 @@

.. _Install-OS-Using-Fuel:

Installing the OS using Fuel
----------------------------

The first step in creating the actual OpenStack nodes is to let Fuel's Cobbler kickstart and preseed files assist in the installation of operating systems on the target servers.

.. _Configuring-Cobbler:

Configuring Cobbler
-------------------
Configuring Cobbler with config.yaml
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Fuel uses a single file, ``config.yaml``, to both configure Cobbler and assist in the configuration of the ``site.pp`` file. This file appears in the ``/root`` directory when the master node (fuel-pm) is provisioned and configured.

@ -10,16 +18,16 @@ You'll want to configure this example for your own situation, but the example lo

    common:
      orchestrator_common:
        attributes:
          deployment_mode: ha_compute
          deployment_mode: ha_compact
          deployment_engine: simplepuppet
        task_uuid: deployment_task

Possible values for ``deployment_mode`` are ``singlenode_compute``, ``multinode_compute``, ``ha_compute``, ``ha_compact``, ``ha_full``, and ``ha_minimal``. Change the ``deployment_mode`` to ``ha_compute`` to tell Fuel to use HA architecture. The ``simplepuppet`` ``deployment_engine`` means that the orchestrator will be calling Puppet on each of the nodes.
Possible values for ``deployment_mode`` are ``singlenode_compute``, ``multinode_compute``, ``ha_compute``, ``ha_compact``, ``ha_full``, and ``ha_minimal``. Change the ``deployment_mode`` to ``ha_compact`` to tell Fuel to use HA architecture. Specifying the ``simplepuppet`` deployment engine means that the orchestrator will be calling Puppet on each of the nodes.

Next you'll need to set OpenStack's networking information::

    openstack_common:
      internal_virtual_ip: 10.20.0.10
      internal_virtual_ip: 10.0.0.10
      public_virtual_ip: 192.168.0.10
      create_networks: true
      fixed_range: 172.16.0.0/16

@ -31,7 +39,7 @@ Change the virtual IPs to match the target networks, and set the fixed and float

      nv_physical_volumes:
        - /dev/sdb

By setting the ``nv_physical_volumes`` value, you are not only telling OpenStack to use this value for Cinder (you'll see more about that in the ``site.pp`` file), you're also telling Fuel where Cinder should be storing data.
By setting the ``nv_physical_volumes`` value, you are not only telling OpenStack to use this value for Cinder (you'll see more about that in the ``site.pp`` file), but also where Cinder should store its data.

Later, we'll set up a new partition for Cinder, so tell Cobbler to create it here. ::

@ -41,9 +49,15 @@ Later, we'll set up a new partition for Cinder, so tell Cobbler to create it her

      pool_start: 192.168.0.110
      pool_end: 192.168.0.126

Set the ``public_net_router`` to point to the real router at the public network. The ``ext_bridge`` is the ip of the Quantum bridge. It should assigned to any available free IP on the public network that's outside the floating range. You also have the option to simply set it to ``0.0.0.0``. The ``pool_start`` and ``pool_end`` values represent the public addresses of your nodes, and should be within the ``floating_range``. ::
Set the ``public_net_router`` to point to the real router at the public network. The ``ext_bridge`` is the IP of the Quantum bridge. It should be assigned to any available free IP on the public network that's outside the floating range. You also have the option to simply set it to ``0.0.0.0``. The ``pool_start`` and ``pool_end`` values represent the public addresses of your nodes, and should be within the ``floating_range``. ::

      segment_range: 900:999
      network_manager: nova.network.manager.FlatDHCPManager
      auto_assign_floating_ip: true
      quantum_netnode_on_cnt: true

Fuel provides two choices for your network manager: FlatDHCPManager, and VlanManager. By default, the system uses FlatDHCPManager. Here you can see that we're also telling OpenStack to automatically assign a floating IP to an instance when it's created, and to put the Quantum services on the controllers rather than a separate node. ::

      use_syslog: false
      syslog_server: 127.0.0.1
      mirror_type: default

@ -66,79 +80,82 @@ Depending on how you've set up your network, you can either set the ``default_ga

      nagios_master: fuel-controller-01.localdomain
      loopback: loopback
      cinder: true
      cinder_nodes: [ 'controller' ]
      cinder_nodes:
        - controller
      swift: true

The loopback setting determines how Swift stores data. If you set the value to ``loopback``, Swift will use 1 GB files as storage devices. If you tuned Cobbler to create a partition for Swift and mounted it to ``/srv/nodes/``, then you should set ``loopback`` to ``false``.

In this example, you're using Cinder and including it on the controller nodes, so note that appropriately. Also, you're using Swift, so turn that on here. ::

      repo_proxy: http://10.20.0.100:3128
      repo_proxy: http://10.0.0.100:3128

One improvement in Fuel 2.1 was the ability for the master node to cache downloads in order to speed up installs; by default the ``repo_proxy`` is set to point to fuel-pm in order to let that happen. ::
One improvement in Fuel 2.1 was the ability for the master node to cache downloads in order to speed up installs; by default the ``repo_proxy`` is set to point to fuel-pm in order to let that happen. One consequence of that is that your deployment will actually go faster if you let one install complete, then do all the others, rather than running all of them concurrently. ::

      deployment_id: '53'

Fuel enables you to manage multiple clusters; setting the ``deployment_id`` will let Fuel know which deployment you're working with. ::

      dns_nameservers:
        - 10.20.0.100
        - 10.0.0.100
        - 8.8.8.8

The slave nodes should first look to the master node for DNS, so mark that as your first nameserver.
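
Once fuel-pm is up and serving DNS, a quick sanity check from any slave node might look like this (``dig`` is part of the bind-utils package on CentOS)::

    dig @10.0.0.100 fuel-pm.localdomain +short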

The next step is to define the nodes themselves. To do that, you'll list each node once for each role that needs to be installed. ::
The next step is to define the nodes themselves. To do that, you'll list each node once for each role that needs to be installed. Note that by default the first node is called ``fuel-cobbler``; change it to ``fuel-pm``. ::

    nodes:
      - name: fuel-pm
        role: cobbler
        internal_address: 10.20.0.100
        internal_address: 10.0.0.100
        public_address: 192.168.0.100
      - name: fuel-controller-01
        role: controller
        internal_address: 10.20.0.101
        internal_address: 10.0.0.101
        public_address: 192.168.0.101
        swift_zone: 1
      - name: fuel-controller-02
        role: controller
        internal_address: 10.20.0.102
        internal_address: 10.0.0.102
        public_address: 192.168.0.102
        swift_zone: 2
      - name: fuel-controller-03
        role: controller
        internal_address: 10.20.0.103
        internal_address: 10.0.0.103
        public_address: 192.168.0.103
        swift_zone: 3
      - name: fuel-controller-01
        role: quantum
        internal_address: 10.20.0.101
        internal_address: 10.0.0.101
        public_address: 192.168.0.101
      - name: fuel-compute-01
        role: compute
        internal_address: 10.20.0.110
        internal_address: 10.0.0.110
        public_address: 192.168.0.110

Notice that each node is listed multiple times; this is because each node fulfills multiple roles.
Notice that each node can be listed multiple times; this is because each node fulfills multiple roles. Notice also that the IP address for fuel-compute-01 is *.110, not *.105.

The ``cobbler_common`` section applies to all machines::

    cobbler_common:
      # for Centos
      profile: "centos63_x86_64"
      profile: "centos64_x86_64"
      # for Ubuntu
      # profile: "ubuntu_1204_x86_64"

Fuel can install CentOS or Ubuntu on your servers, or you can add a profile of your own. By default, ``config.yaml`` uses Ubuntu, but for our example we'll use CentOS. ::
Fuel can install CentOS or Ubuntu on your servers, or you can add a profile of your own. By default, ``config.yaml`` uses CentOS. ::

      netboot-enabled: "1"
      # for Ubuntu
      # ksmeta: "puppet_version=2.7.19-1puppetlabs2 \
      # for Centos
      name-servers: "10.20.0.100"
      name-servers: "10.0.0.100"
      name-servers-search: "localdomain"
      gateway: 10.20.0.100
      gateway: 192.168.0.1

Set the default nameserver to be fuel-pm, and change the domain name to your own domain name. Set the ``gateway`` to the public network's default gateway. Alternatively, if you don't plan to use your public networks actual gateway, you can set this value to be the IP address of the master node. **Please note:** You must specify a working gateway (or proxy) in order to install OpenStack, because the system will need to communicate with public repositories. ::
Set the default nameserver to be fuel-pm, and change the domain name to your own domain name. Set the ``gateway`` to the public network's default gateway. Alternatively, if you don't plan to use your public network's actual gateway, you can set this value to be the IP address of the master node.

**Please note:** You must specify a working gateway (or proxy) in order to install OpenStack, because the system will need to communicate with public repositories. ::

      ksmeta: "puppet_version=2.7.19-1puppetlabs2 \
        puppet_auto_setup=1 \

@ -150,7 +167,7 @@ Change the fully-qualified domain name for the Puppet Master to reflect your own

        ntp_enable=1 \
        mco_auto_setup=1 \
        mco_pskey=un0aez2ei9eiGaequaey4loocohjuch4Ievu3shaeweeg5Uthi \
        mco_stomphost=10.20.0.100 \
        mco_stomphost=10.0.0.100 \

Make sure the ``mco_stomphost`` is set for the master node so that the orchestrator can find the nodes. ::

@ -170,7 +187,7 @@ Next you'll define the actual servers. ::

        eth0:
          mac: "08:00:27:BD:3A:7D"
          static: "1"
          ip-address: "10.20.0.101"
          ip-address: "10.0.0.101"
          netmask: "255.255.255.0"
          dns-name: "fuel-controller-01.localdomain"
          management: "1"

@ -198,10 +215,10 @@ Also, make sure the ``ip-address`` is correct, and that the ``dns-name`` has you

In this example, IP addresses should be assigned as follows::

    fuel-controller-01: 10.20.0.101
    fuel-controller-02: 10.20.0.102
    fuel-controller-03: 10.20.0.103
    fuel-compute-01: 10.20.0.110
    fuel-controller-01: 10.0.0.101
    fuel-controller-02: 10.0.0.102
    fuel-controller-03: 10.0.0.103
    fuel-compute-01: 10.0.0.110

Repeat this step for each of the other controllers, and for the compute node. Note that the compute node has its own role::

@ -212,7 +229,7 @@ Repeat this step for each of the other controllers, and for the compute node. N

        eth0:
          mac: "08:00:27:AE:A9:6E"
          static: "1"
          ip-address: "10.20.0.110"
          ip-address: "10.0.0.110"
          netmask: "255.255.255.0"
          dns-name: "fuel-compute-01.localdomain"
          management: "1"

@ -233,8 +250,8 @@ Repeat this step for each of the other controllers, and for the compute node. N

          peerdns: "no"

Load the configuration
^^^^^^^^^^^^^^^^^^^^^^
Loading the configuration
^^^^^^^^^^^^^^^^^^^^^^^^^

Once you've completed the changes to ``config.yaml``, you need to load the information into Cobbler. To do that, use the ``cobbler_system`` script::
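
    # The exact flags vary between Fuel releases -- this invocation is an
    # assumption; run the script with no arguments to see its usage.
    cobbler_system -f config.yaml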

@ -242,4 +259,39 @@ Once you've completed the changes to ``config.yaml``, you need to load the infor

Now you're ready to start spinning up the controllers and compute nodes.
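
Before booting anything, you can confirm what Cobbler now knows about using the standard Cobbler CLI::

    cobbler system list
    cobbler profile list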

Installing the operating system
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Now that Cobbler has the correct configuration, the only thing you
need to do is to PXE-boot your nodes. This means that they will boot over the network, with
DHCP/TFTP provided by Cobbler, and will be provisioned accordingly,
with the specified operating system and configuration.

If you installed Fuel from the ISO, start fuel-controller-01 first and let the installation finish before starting the other nodes; Fuel will cache the downloads so subsequent installs will go faster.

The process for each node looks like this:

#. Start the VM.
#. Press F12 immediately and select l (LAN) as the boot media.
#. Wait for the installation to complete.
#. Log into the new machine using root/r00tme.
#. **Change the root password.**
#. Check that networking is set up correctly and the machine can reach the Internet::

    ping fuel-pm.localdomain
    ping www.mirantis.com

If you're unable to ping outside addresses, add the fuel-pm server as a default gateway::

    route add default gw 10.0.0.100

**It is important to note** that if you use VLANs in your network
configuration, you always have to keep in mind the fact that PXE
booting does not work on tagged interfaces. Therefore, all your nodes,
including the one where the Cobbler service resides, must share one
untagged VLAN (also called native VLAN). If necessary, you can use the
``dhcp_interface`` parameter of the ``cobbler::server`` class to bind the DHCP
service to the appropriate interface.

@ -1,40 +0,0 @@

.. _Install-OS-Using-Fuel:

Installing the OS using Fuel
----------------------------

The first step in creating the actual OpenStack nodes is to let Fuel's Cobbler kickstart and preseed files assist in the installation of operating systems on the target servers.

Now that Cobbler has the correct configuration, the only thing you
need to do is to PXE-boot your nodes. This means that they will boot over the network, with
DHCP/TFTP provided by Cobbler, and will be provisioned accordingly,
with the specified operating system and configuration.

If you installed Fuel from the ISO, start fuel-controller-01 first and let the installation finish before starting the other nodes; Fuel will cache the downloads so subsequent installs will go faster.

The process for each node looks like this:

#. Start the VM.
#. Press F12 immediately and select l (LAN) as the boot media.
#. Wait for the installation to complete.
#. Log into the new machine using root/r00tme.
#. **Change the root password.**
#. Check that networking is set up correctly and the machine can reach the Internet::

    ping fuel-pm.localdomain
    ping www.mirantis.com

If you're unable to ping outside addresses, add the fuel-pm server as a default gateway::

    route add default gw 10.20.0.100

**It is important to note** that if you use VLANs in your network
configuration, you always have to keep in mind the fact that PXE
booting does not work on tagged interfaces. Therefore, all your nodes,
including the one where the Cobbler service resides, must share one
untagged VLAN (also called native VLAN). If necessary, you can use the
``dhcp_interface`` parameter of the ``cobbler::server`` class to bind the DHCP
service to the appropriate interface.

@ -1,5 +1,5 @@

Preparing for deployment
------------------------
Generating the Puppet manifest
------------------------------

Before you can deploy OpenStack, you will need to configure the site.pp file. While previous versions of Fuel required you to manually configure ``site.pp``, version 2.1 includes the ``openstack_system`` script, which uses both the ``config.yaml`` and template files for the various reference architectures to create the appropriate Puppet manifest. To create ``site.pp``, execute this command::
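
    # Flags and paths here are assumptions based on Fuel 2.1-era tooling;
    # substitute the reference-architecture template matching your
    # deployment_mode and check the script's own usage output.
    openstack_system -c config.yaml \
                     -t /etc/puppet/modules/openstack/examples/site_openstack_ha_compact.pp \
                     -o /etc/puppet/manifests/site.pp \
                     -a astute.yaml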

@ -37,7 +37,7 @@ In this case, we don't need to make any changes to the interface

settings, because they match what we've already set up. ::

    # Public and Internal VIPs. These virtual addresses are required by HA topology and will be managed by keepalived.
    $internal_virtual_ip = '10.20.0.10'
    $internal_virtual_ip = '10.0.0.10'

    # Change this IP to IP routable from your 'public' network,
    # e. g. Internet or your office LAN, in which your public

@ -54,26 +54,26 @@ The next section sets up the servers themselves. If you are setting up Fuel man

    {
      'name' => 'fuel-pm',
      'role' => 'cobbler',
      'internal_address' => '10.20.0.100',
      'internal_address' => '10.0.0.100',
      'public_address' => '192.168.0.100',
      'mountpoints'=> "1 1\n2 1",
      'storage_local_net_ip' => '10.20.0.100',
      'storage_local_net_ip' => '10.0.0.100',
    },
    {
      'name' => 'fuel-controller-01',
      'role' => 'primary-controller',
      'internal_address' => '10.20.0.101',
      'internal_address' => '10.0.0.101',
      'public_address' => '192.168.0.101',
      'mountpoints'=> "1 1\n2 1",
      'storage_local_net_ip' => '10.20.0.101',
      'storage_local_net_ip' => '10.0.0.101',
    },
    {
      'name' => 'fuel-controller-02',
      'role' => 'controller',
      'internal_address' => '10.20.0.102',
      'internal_address' => '10.0.0.102',
      'public_address' => '192.168.0.102',
      'mountpoints'=> "1 1\n2 1",
      'storage_local_net_ip' => '10.20.0.102',
      'storage_local_net_ip' => '10.0.0.102',
    },
    {
      'name' => 'fuel-controller-03',

@ -81,7 +81,7 @@ The next section sets up the servers themselves. If you are setting up Fuel man

      'internal_address' => '10.0.0.105',
      'public_address' => '192.168.0.105',
      'mountpoints'=> "1 1\n2 1",
      'storage_local_net_ip' => '10.20.0.105',
      'storage_local_net_ip' => '10.0.0.105',
    },
    {
      'name' => 'fuel-compute-01',

@ -89,7 +89,7 @@ The next section sets up the servers themselves. If you are setting up Fuel man

      'internal_address' => '10.0.0.106',
      'public_address' => '192.168.0.106',
      'mountpoints'=> "1 1\n2 1",
      'storage_local_net_ip' => '10.20.0.106',
      'storage_local_net_ip' => '10.0.0.106',
    }
    ]

@ -97,36 +97,32 @@ Because this section comes from a template, it will likely include a number of s

Next the ``site.pp`` file lists all of the nodes and roles you defined in the ``config.yaml`` file::

    $nodes = [{'public_address' => '10.20.1.101','name' => 'fuel-controller-01','role' =>
      'controller','internal_address' => '10.20.0.101'},
      {'public_address' => '10.20.1.102','name' => 'fuel-controller-02','role' =>
      'controller','internal_address' => '10.20.0.102'},
      {'public_address' => '10.20.1.101','name' => 'fuel-controller-01','role' =>
      'compute','internal_address' => '10.20.0.101'},
      {'public_address' => '10.20.1.102','name' => 'fuel-controller-02','role' =>
      'compute','internal_address' => '10.20.0.102'},
      {'public_address' => '10.20.1.101','name' => 'fuel-controller-01','role' =>
      'storage','internal_address' => '10.20.0.101'},
      {'public_address' => '10.20.1.102','name' => 'fuel-controller-02','role' =>
      'storage','internal_address' => '10.20.0.102'},
      {'public_address' => '10.20.1.101','name' => 'fuel-controller-01','role' =>
      'swift-proxy','internal_address' => '10.20.0.101'},
      {'public_address' => '10.20.1.102','name' => 'fuel-controller-02','role' =>
      'swift-proxy','internal_address' => '10.20.0.102'},
      {'public_address' => '10.20.1.101','name' => 'fuel-controller-01','role' =>
      'quantum','internal_address' => '10.20.0.101'}]
    $nodes = [{'public_address' => '192.168.0.101','name' => 'fuel-controller-01','role' =>
      'primary-controller','internal_address' => '10.0.0.101',
      'storage_local_net_ip' => '10.0.0.101', 'mountpoints' => '1 2\n2 1',
      'swift-zone' => 1 },
      {'public_address' => '192.168.0.102','name' => 'fuel-controller-02','role' =>
      'controller','internal_address' => '10.0.0.102',
      'storage_local_net_ip' => '10.0.0.102', 'mountpoints' => '1 2\n2 1',
      'swift-zone' => 2},
      {'public_address' => '192.168.0.103','name' => 'fuel-controller-03','role' =>
      'storage','internal_address' => '10.0.0.103',
      'storage_local_net_ip' => '10.0.0.103', 'mountpoints' => '1 2\n2 1',
      'swift-zone' => 3},
      {'public_address' => '192.168.0.110','name' => 'fuel-compute-01','role' =>
      'compute','internal_address' => '10.0.0.110'}]

Possible roles include ‘compute’, ‘controller’, ‘storage’, ‘swift-proxy’, ‘quantum’, ‘master’, and ‘cobbler’. Compute nodes cannot be described because it is required for them to disable network configuration. Alternatively, you can force DHCP configuration to ensure proper configuration of IP addresses, default gateways, and DNS servers. IMPORTANT: DNS servers must contain information about all nodes of the cluster. At the time of deployment of the cluster in a standard scenario, the cobbler node contains this information.
Possible roles include ‘compute’, ‘controller’, ‘primary-controller’, ‘storage’, ‘swift-proxy’, ‘quantum’, ‘master’, and ‘cobbler’. Check the IP addresses for each node and make sure that they mesh with what's in this array.

The file also specifies the default gateway::

    $default_gateway = '10.20.0.10'
    $default_gateway = '192.168.0.1'

Next ``site.pp`` defines DNS servers and provides netmasks::

    # Specify nameservers here.
    # This should point to the Cobbler node's IP, or to specially prepared nameservers if you know what you are doing.
    $dns_nameservers = ['10.20.0.10','8.8.8.8']
    $dns_nameservers = ['10.0.0.100','8.8.8.8']

    # Specify netmasks for internal and external networks.
    $internal_netmask = '255.255.255.0'

@ -138,7 +134,7 @@ Next ``site.pp`` defines DNS servers and provides netmasks::

    $ha_provider = 'pacemaker'
    $use_unicast_corosync = false

Next specify the main controller. ::
Next specify the main controller as the Nagios master. ::

    # Set nagios master fqdn
    $nagios_master = 'fuel-controller-01.localdomain'

@ -231,17 +227,18 @@ These values don't actually relate to Quantum; they are used by nova-network. I

    # Which IP address will be used for creating GRE tunnels.
    $quantum_gre_bind_addr = $internal_address

    # Which IP does the Quantum network node have?
    $quantum_net_node_hostname = 'fuel-controller-03'
    $quantum_net_node_address = $controller_internal_addresses[$quantum_net_node_hostname]

If you are installing Quantum in non-HA mode, you will need to specify which single controller controls Quantum. ::

    # If $external_ipinfo option is not defined, the addresses will be allocated automatically from $floating_range:
    # the first address will be defined as an external default router,
    # the second address will be attached to an uplink bridge interface,
    # the remaining addresses will be utilized for the floating IP address pool.
    $external_ipinfo = {'pool_start' => '192.168.56.30','public_net_router' => '192.168.0.1', 'pool_end' => '192.168.56.60','ext_bridge' => '192.168.0.1'}
    $external_ipinfo = {
      'pool_start' => '192.168.0.115',
      'public_net_router' => '192.168.0.1',
      'pool_end' => '192.168.0.126',
      'ext_bridge' => '0.0.0.0'
    }

    # Quantum segmentation range.
    # For VLAN networks: valid VLAN VIDs can be 1 through 4094.

@ -274,7 +271,7 @@ The remaining configuration is used to define classes that will be added to each

    stage {'netconfig':
      before => Stage['main'],
    }
    class {'l23network': stage=> 'netconfig'}
    class {'l23network': use_ovs => $quantum, stage=> 'netconfig'}
    class node_netconfig (
      $mgmt_ipaddr,
      $mgmt_netmask = '255.255.255.0',

@ -359,9 +356,7 @@ We want Cinder to be on the controller nodes, so set this value to ``['controlle

    $manage_volumes = true

    # Setup network interface, which Cinder uses to export iSCSI targets.
    # This interface defines which IP to use to listen on iscsi port for
    # incoming connections of initiators
    $cinder_iscsi_bind_iface = $internal_int
    $cinder_iscsi_bind_addr = $internal_address

@ -380,6 +375,23 @@ and specify that rather than ``$internal_int``. ::

    # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
    # Leave this parameter empty if you want to create [cinder|nova]-volumes VG by yourself
    $nv_physical_volume = ['/dev/sdb']

    # Evaluate cinder node selection
    if ($cinder) {
      if (member($cinder_nodes,'all')) {
        $is_cinder_node = true
      } elsif (member($cinder_nodes,$::hostname)) {
        $is_cinder_node = true
      } elsif (member($cinder_nodes,$internal_address)) {
        $is_cinder_node = true
      } elsif ($node[0]['role'] =~ /controller/) {
        $is_cinder_node = member($cinder_nodes, 'controller')
      } else {
        $is_cinder_node = member($cinder_nodes, $node[0]['role'])
      }
    } else {
      $is_cinder_node = false
    }

    ### CINDER/VOLUME END ###

@ -405,8 +417,7 @@ Enabling Glance and Swift

There aren't many changes that you will need to make to the default
configuration in order to enable Swift to work properly in Swift
Compact mode, but you will need to adjust for the fact that we are
running Swift on physical partitions ::
Compact mode, but you will need to adjust if you want to run Swift on physical partitions ::

    ...

@ -420,7 +431,7 @@ running Swift on physical partitions ::

    # set 'loopback' or false
    # This parameter controls where swift partitions are located:
    # on physical partitions or inside loopback devices.
    $swift_loopback = false
    $swift_loopback = loopback

The default value is ``loopback``, which tells Swift to use a loopback storage device, which is basically a file that acts like a drive, rather than an actual physical drive. You can also set this value to ``false``, which tells OpenStack to use a physical partition instead. ::

@ -509,7 +520,7 @@ To tell Fuel to download packages from external repos provided by Mirantis and y

    # though it is NOT recommended.
    $mirror_type = 'default'
    $enable_test_repo = false
    $repo_proxy = 'http://10.20.0.100:3128'
    $repo_proxy = 'http://10.0.0.100:3128'

Once again, the ``$mirror_type`` **must** be set to ``default``. If you set it correctly in ``config.yaml`` and ran ``openstack_system`` this will already be taken care of. Otherwise, **make sure** to set this value yourself.
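
A quick way to double-check the generated manifest (assuming it was written to the default Puppet manifest path) is::

    grep -E 'mirror_type|repo_proxy' /etc/puppet/manifests/site.pp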

@ -548,6 +559,7 @@ There are two hashes describing these limits: ``$nova_rate_limits`` and ``$cinde

      'PUT' => 1000, 'GET' => 1000,
      'DELETE' => 1000
    }
    ...

Enabling Horizon HTTPS/SSL mode

@ -555,6 +567,7 @@ Enabling Horizon HTTPS/SSL mode

Using the ``$horizon_use_ssl`` variable, you have the option to decide whether the OpenStack dashboard (Horizon) uses HTTP or HTTPS::

    ...
    # 'custom': require fileserver static mount point [ssl_certs] and hostname based certificate existence
    $horizon_use_ssl = false

@ -592,9 +605,8 @@ Defining the node configurations

Now that we've set all of the global values, it's time to make sure that
the actual node definitions are correct. For example, by default all
nodes will enable Cinder on ``/dev/sdb``, but we don't want that for the
controllers, so set ``nv_physical_volume`` to ``null``, and ``manage_volumes`` to ``false``. ::
nodes will enable Cinder on ``/dev/sdb``. If you didn't want that for all
controllers, you could set ``nv_physical_volume`` to ``null`` for a specific node or nodes. ::

    ...

@ -608,13 +620,6 @@ controllers, so set ``nv_physical_volume`` to ``null``, and ``manage_volumes`` t

      public_interface => $public_int,
      internal_interface => $internal_int,
      ...
      manage_volumes => false,
      galera_nodes => $controller_hostnames,
      nv_physical_volume => null,
      use_syslog => $use_syslog,
      nova_rate_limits => $nova_rate_limits,
      cinder_rate_limits => $cinder_rate_limits,
      horizon_use_ssl => $horizon_use_ssl,
      use_unicast_corosync => $use_unicast_corosync,
      ha_provider => $ha_provider
    }

@ -629,16 +634,8 @@ controllers, so set ``nv_physical_volume`` to ``null``, and ``manage_volumes`` t

Fortunately, Fuel includes a class for the controllers, so you don't
have to make these changes for each individual controller. As you can
see, the controllers generally use the global values, but in this case
you're telling the controllers not to manage_volumes, and not to use
``/dev/sdb`` for Cinder.

If you look down a little further, this class then goes on to help
specify the individual controllers and compute nodes::
Fortunately, as you can see here, Fuel includes a class for the controllers, so you don't
have to make global changes for each individual controller. If you look down a little further, this class then goes on to help specify the individual controllers and compute nodes::

    ...

@ -680,15 +677,16 @@ specify the individual controllers and compute nodes::
|
||||
class { 'openstack::swift::proxy':
|
||||
swift_user_password => $swift_user_password,
|
||||
swift_proxies => $swift_proxies,
|
||||
primary_proxy => $primary_proxy,
|
||||
controller_node_address => $internal_virtual_ip,
|
||||
swift_local_net_ip => $internal_address,
|
||||
...
|
||||
rabbit_ha_virtual_ip => $internal_virtual_ip,
|
||||
}
|
||||
}
|
||||
|
||||
Notice also that each controller has a ``swift_zone`` specified, so each
|
||||
of the three controllers represents one of the three Swift zones.
|
||||
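Schematically, and omitting everything else in the node definitions, that looks like this (a hedged fragment, not a complete manifest)::

    node /fuel-controller-01/ { ... swift_zone => 1, ... }
    node /fuel-controller-02/ { ... swift_zone => 2, ... }
    node /fuel-controller-03/ { ... swift_zone => 3, ... }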
|
||||
Similarly, ``site.pp`` defines a class for the compute nodes.
|
||||
|
||||
Installing Nagios Monitoring using Puppet
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
@ -696,14 +694,14 @@ Fuel provides a way to deploy Nagios for monitoring your OpenStack cluster. It w
|
||||
|
||||
|
||||
Nagios Agent
|
||||
~~~~~~~~~~~~
|
||||
++++++++++++
|
||||
|
||||
In order to install Nagios NRPE on a compute or controller node, the node should have the following settings applied: ::
|
||||
|
||||
class {'nagios':
|
||||
proj_name => 'test',
|
||||
services => ['nova-compute','nova-network','libvirt'],
|
||||
whitelist => ['127.0.0.1','10.0.97.5'],
|
||||
whitelist => ['127.0.0.1', $nagios_master],
|
||||
hostgroup => 'compute',
|
||||
}
|
||||
|
||||
@ -713,7 +711,7 @@ In order to install Nagios NRPE on a compute or controller node, a node should h
|
||||
* ``hostgroup``: The group to be used on the Nagios master (do not forget to create the group on the Nagios master; one way to do that is sketched below).
|
||||
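If the master itself is managed by Puppet, one hedged way to guarantee the group exists is Puppet's built-in ``nagios_hostgroup`` resource type; this is a sketch rather than part of the Fuel manifests, and the target path is an assumption::

    nagios_hostgroup { 'compute':
      ensure => present,
      alias  => 'Compute nodes',
      target => '/etc/nagios/conf.d/hostgroups.cfg',
    }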
|
||||
Nagios Server
|
||||
~~~~~~~~~~~~~
|
||||
+++++++++++++
|
||||
|
||||
In order to install the Nagios master on any convenient node, the node should have the following applied: ::
|
||||
|
||||
@ -737,9 +735,9 @@ In order to install Nagios Master on any convenient node, a node should have the
|
||||
|
||||
|
||||
Health Checks
|
||||
~~~~~~~~~~~~~
|
||||
+++++++++++++
|
||||
|
||||
The complete definition of the available services to monitor and their health checks can be viewed at ``deployment/puppet/nagios/manifests/params.pp``.
|
||||
You can see the complete definition of the available services to monitor and their health checks at ``deployment/puppet/nagios/manifests/params.pp``.
|
||||
|
||||
Here is the list: ::
|
||||
|
||||
@ -779,76 +777,10 @@ Here is the list: ::
|
||||
'host-alive' => 'check-host-alive',
|
||||
}
|
||||
|
||||
Finally, back in ``site.pp``, you define the compute nodes::
|
||||
Node definitions
|
||||
^^^^^^^^^^^^^^^^
|
||||
|
||||
# Definition of OpenStack compute nodes.
|
||||
node /fuel-compute-[\d+]/ {
|
||||
## Uncomment the lines below if you want
|
||||
## to configure the network of these nodes
|
||||
## via Puppet.
|
||||
class {'::node_netconfig':
|
||||
mgmt_ipaddr => $::internal_address,
|
||||
mgmt_netmask => $::internal_netmask,
|
||||
public_ipaddr => $::public_address,
|
||||
public_netmask => $::public_netmask,
|
||||
stage => 'netconfig',
|
||||
}
|
||||
include stdlib
|
||||
class { 'operatingsystem::checksupported':
|
||||
stage => 'setup'
|
||||
}
|
||||
|
||||
class {'nagios':
|
||||
proj_name => $proj_name,
|
||||
services => [
|
||||
'host-alive', 'nova-compute','nova-network','libvirt'
|
||||
],
|
||||
whitelist => ['127.0.0.1', $nagios_master],
|
||||
hostgroup => 'compute',
|
||||
}
|
||||
|
||||
class { 'openstack::compute':
|
||||
public_interface => $public_int,
|
||||
private_interface => $private_interface,
|
||||
internal_address => $internal_address,
|
||||
libvirt_type => 'kvm',
|
||||
fixed_range => $fixed_range,
|
||||
network_manager => $network_manager,
|
||||
network_config => { 'vlan_start' => $vlan_start },
|
||||
multi_host => $multi_host,
|
||||
sql_connection => "mysql://nova:${nova_db_password}@${internal_virtual_ip}/nova",
|
||||
rabbit_nodes => $controller_hostnames,
|
||||
rabbit_password => $rabbit_password,
|
||||
rabbit_user => $rabbit_user,
|
||||
rabbit_ha_virtual_ip => $internal_virtual_ip,
|
||||
glance_api_servers => "${internal_virtual_ip}:9292",
|
||||
vncproxy_host => $public_virtual_ip,
|
||||
verbose => $verbose,
|
||||
vnc_enabled => true,
|
||||
nova_user_password => $nova_user_password,
|
||||
cache_server_ip => $controller_hostnames,
|
||||
service_endpoint => $internal_virtual_ip,
|
||||
quantum => $quantum,
|
||||
quantum_sql_connection => $quantum_sql_connection,
|
||||
quantum_user_password => $quantum_user_password,
|
||||
quantum_host => $quantum_net_node_address,
|
||||
tenant_network_type => $tenant_network_type,
|
||||
segment_range => $segment_range,
|
||||
cinder => $cinder,
|
||||
manage_volumes => $is_cinder_node ? { true => $manage_volumes, false => false},
|
||||
cinder_iscsi_bind_iface => $cinder_iscsi_bind_iface,
|
||||
nv_physical_volume => $nv_physical_volume,
|
||||
db_host => $internal_virtual_ip,
|
||||
ssh_private_key => 'puppet:///ssh_keys/openstack',
|
||||
ssh_public_key => 'puppet:///ssh_keys/openstack.pub',
|
||||
use_syslog => $use_syslog,
|
||||
nova_rate_limits => $nova_rate_limits,
|
||||
cinder_rate_limits => $cinder_rate_limits
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
In the ``openstack/examples/site_openstack_full.pp`` example, the following nodes are specified:
|
||||
These are the node definitions generated for a Compact HA deployment. Other deployment configurations generate other definitions. For example, the ``openstack/examples/site_openstack_full.pp`` template specifies the following nodes:
|
||||
|
||||
* fuel-controller-01
|
||||
* fuel-controller-02
|
||||
@ -865,97 +797,3 @@ Using this architecture, the system includes three stand-alone swift-storage ser
|
||||
With ``site.pp`` prepared, you're ready to perform the actual installation.
|
||||
|
||||
|
||||
Installing OpenStack using Puppet directly
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
Now that you've set all of your configurations, all that's left to stand
|
||||
up your OpenStack cluster is to run Puppet on each of your nodes; the
|
||||
Puppet Master knows what to do for each of them.
|
||||
|
||||
You have two options for performing this step. The first, and by far the easiest, is to use the orchestrator. If you're going to take that option, skip ahead to :ref:`Deploying OpenStack via Orchestration <orchestration>`. If you choose not to use orchestration, or if for some reason you want to reload only one or two nodes, you can run Puppet manually on the target nodes.
|
||||
|
||||
If you're starting from scratch, start by logging in to fuel-controller-01 and running the Puppet
|
||||
agent.
|
||||
|
||||
One optional step would be to use the ``script`` command to log all
|
||||
of your output so you can check for errors if necessary::
|
||||
|
||||
|
||||
|
||||
script agent-01.log
|
||||
puppet agent --test
|
||||
|
||||
You will see a great number of messages scroll by, and the
|
||||
installation will take a significant amount of time. When the process
|
||||
has completed, press CTRL-D to stop logging and grep for errors::
|
||||
|
||||
|
||||
|
||||
grep err: agent-01.log
|
||||
|
||||
|
||||
|
||||
If you find any errors relating to other nodes, ignore them for now.
|
||||
|
||||
|
||||
|
||||
Now you can run the same installation procedure on fuel-controller-02
|
||||
and fuel-controller-03, as well as fuel-compute-01.
|
||||
|
||||
|
||||
|
||||
Note that the controllers must be installed sequentially due to the
|
||||
nature of assembling a MySQL cluster based on Galera, which means that
|
||||
one must complete its installation before the next begins, but that
|
||||
compute nodes can be installed concurrently once the controllers are
|
||||
in place.
|
||||
|
||||
|
||||
|
||||
In some cases, you may find errors related to resources that are not
|
||||
yet available when the installation takes place. To solve that
|
||||
problem, simply re-run the puppet agent on the affected node after running the other controllers, and
|
||||
again grep for error messages.
|
||||
|
||||
|
||||
|
||||
When you see no errors on any of your nodes, your OpenStack cluster is
|
||||
ready to go.
|
||||
|
||||
|
||||
Examples of OpenStack installation sequences
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
When running Puppet manually, the exact sequence depends on what you're trying to achieve. In most cases, you'll need to run Puppet more than once; with every deployment pass, Puppet collects any missing information, adds it to the OpenStack configuration, stores it in PuppetDB, and applies the necessary changes.
|
||||
|
||||
**Note:** *Sequentially run* means you don't start the next node deployment until the previous one is finished.
|
||||
|
||||
**Example 1:** **Full OpenStack deployment with standalone storage nodes**
|
||||
|
||||
* Create necessary volumes on storage nodes as described in :ref:`create-the-XFS-partition`.
|
||||
* Sequentially run a deployment pass on every SwiftProxy node (``fuel-swiftproxy-01 ... fuel-swiftproxy-xx``), starting with the ``primary-swift-proxy`` node. Node names are set by the ``$swift_proxies`` variable in ``site.pp``; there are two Swift proxies by default.
|
||||
* Sequentially run a deployment pass on every storage node (``fuel-swift-01`` ... ``fuel-swift-xx``).
|
||||
* Sequentially run a deployment pass on the controller nodes (``fuel-controller-01 ... fuel-controller-xx``), starting with the ``primary-controller`` node.
|
||||
* Run a deployment pass on the Quantum node (``fuel-quantum``) to install the Quantum router.
|
||||
* Run a deployment pass on every compute node (``fuel-compute-01 ... fuel-compute-xx``) - unlike the controllers, these nodes may be deployed in parallel.
|
||||
* Run an additional deployment pass on Controller 1 only (``fuel-controller-01``) to finalize the Galera cluster configuration.
|
||||
|
||||
**Example 2:** **Compact OpenStack deployment with storage and swift-proxy combined with nova-controller on the same nodes**
|
||||
|
||||
* Create the necessary volumes on controller nodes as described in :ref:`create-the-XFS-partition`.
|
||||
* Sequentially run a deployment pass on the controller nodes (``fuel-controller-01 ... fuel-controller-xx``), starting with the ``primary-controller`` node. Errors in Swift storage such as */Stage[main]/Swift::Storage::Container/Ring_container_device[<device address>]: Could not evaluate: Device not found check device on <device address>* are expected during the deployment passes until the very final pass.
|
||||
* Run an additional deployment pass on Controller 1 only (``fuel-controller-01``) to finalize the Galera cluster configuration.
|
||||
* Run a deployment pass on the Quantum node (``fuel-quantum``) to install the Quantum router.
|
||||
* Run a deployment pass on every compute node (``fuel-compute-01 ... fuel-compute-xx``) - unlike the controllers, these nodes may be deployed in parallel.
|
||||
|
||||
**Example 3:** **OpenStack HA installation without Swift**
|
||||
|
||||
* Sequentially run a deployment pass on the controller nodes (``fuel-controller-01 ... fuel-controller-xx``), starting with the primary controller. No errors should appear during this deployment pass.
|
||||
* Run an additional deployment pass on the primary controller only (``fuel-controller-01``) to finalize the Galera cluster configuration.
|
||||
* Run a deployment pass on the Quantum node (``fuel-quantum``) to install the Quantum router.
|
||||
* Run a deployment pass on every compute node (``fuel-compute-01 ... fuel-compute-xx``) - unlike the controllers, these nodes may be deployed in parallel.
|
||||
|
||||
**Example 4:** **The simplest OpenStack installation: Controller + Compute on the same node**
|
||||
|
||||
* Set the ``node /fuel-controller-[\d+]/`` variable in ``site.pp`` to match the hostname of the node on which you are going to deploy OpenStack. Set the ``node /fuel-compute-[\d+]/`` variable to **mismatch** the node name. Run a deployment pass on this node. No errors should appear during this deployment pass.
|
||||
* Set the ``node /fuel-compute-[\d+]/`` variable in ``site.pp`` to match the hostname of the node on which you are going to deploy OpenStack. Set the ``node /fuel-controller-[\d+]/`` variable to **mismatch** the node name. Run a deployment pass on this node. No errors should appear during this deployment pass.
|
@ -1,24 +0,0 @@
|
||||
.. _orchestration:
|
||||
|
||||
Deploying via orchestration
|
||||
----------------------------
|
||||
|
||||
Manually installing a handful of servers might be manageable, but repeatable installations, or those that involve a large number of servers, require automated orchestration. Now you can use orchestration with Fuel through the ``astute`` script. This script is configured using the ``astute.yaml`` file you created when you ran ``openstack_system``.
|
||||
|
||||
To run the orchestrator, log in to ``fuel-pm`` and execute::
|
||||
|
||||
astute -f astute.yaml
|
||||
|
||||
You will see a message on ``fuel-pm`` stating that the installation has started on ``fuel-controller-01``. To see what's happening on the target node, type::
|
||||
|
||||
tail -f /var/log/syslog
|
||||
|
||||
for Ubuntu, or::
|
||||
|
||||
tail -f /var/log/messages
|
||||
|
||||
for CentOS/Red Hat.
|
||||
|
||||
Note that Puppet requires several runs to install all the different roles, so the first time it runs, the orchestrator will report an error; that just means the installation isn't complete yet. Also, after the first run on each server, the orchestrator stops outputting messages on ``fuel-pm``; when it has finished running, it returns you to the command prompt. In the meantime, you can see what's going on by watching the logs on each individual machine.
|
||||
|
||||
|
119
docs/pages/installation-instructions/0070-orchestration.rst
Normal file
@ -0,0 +1,119 @@
|
||||
Deploying OpenStack
|
||||
-------------------
|
||||
|
||||
You have two options for deploying OpenStack. The easier method is to use orchestration, but you can also deploy your nodes manually.
|
||||
|
||||
.. _orchestration:
|
||||
|
||||
Deploying via orchestration
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
Manually installing a handful of servers might be manageable, but repeatable installations, or those that involve a large number of servers, require automated orchestration. Now you can use orchestration with Fuel through the ``astute`` script. This script is configured using the ``astute.yaml`` file you created when you ran ``openstack_system``.
|
||||
|
||||
To confirm that your servers are ready for orchestration, execute the command::
|
||||
|
||||
mco ping
|
||||
|
||||
You should see all three controllers, plus the compute node, answer the call::
|
||||
|
||||
fuel-compute-01 time=107.26 ms
|
||||
fuel-controller-01 time=120.14 ms
|
||||
fuel-controller-02 time=135.94 ms
|
||||
fuel-controller-03 time=139.33 ms
|
||||
|
||||
To run the orchestrator, log in to ``fuel-pm`` and execute::
|
||||
|
||||
astute -f astute.yaml
|
||||
|
||||
You will see a message on ``fuel-pm`` stating that the installation has started on ``fuel-controller-01``. To see what's happening on the target node, type::
|
||||
|
||||
tail -f /var/log/syslog
|
||||
|
||||
for Ubuntu, or::
|
||||
|
||||
tail -f /var/log/messages
|
||||
|
||||
for CentOS/Red Hat.
|
||||
|
||||
Note that Puppet requires several runs to install all the different roles, so the first time it runs, the orchestrator will report an error; that just means the installation isn't complete yet. Also, after the first run on each server, the orchestrator stops outputting messages on ``fuel-pm``; when it has finished running, it returns you to the command prompt. In the meantime, you can see what's going on by watching the logs on each individual machine.
|
||||
|
||||
|
||||
Installing OpenStack using Puppet directly
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
If for some reason you don't wish to use orchestration -- for example, if you are adding a new node to an existing (non-HA) cluster -- you have the option to install on an individual node or nodes using Puppet directly.
|
||||
|
||||
Start by logging in to the target server -- fuel-controller-01 to start, if you're starting from scratch -- and running the Puppet agent.
|
||||
|
||||
One optional step would be to use the ``script`` command to log all
|
||||
of your output so you can check for errors if necessary::
|
||||
|
||||
script agent-01.log
|
||||
puppet agent --test
|
||||
|
||||
You will see a great number of messages scroll by, and the
|
||||
installation will take a significant amount of time. When the process
|
||||
has completed, press CTRL-D to stop logging and grep for errors::
|
||||
|
||||
grep err: agent-01.log
|
||||
|
||||
If you find any errors relating to other nodes, ignore them for now.
|
||||
|
||||
Now you can run the same installation procedure on fuel-controller-02
|
||||
and fuel-controller-03, as well as fuel-compute-01.
|
||||
|
||||
Note that the controllers must be installed sequentially due to the
|
||||
nature of assembling a MySQL cluster based on Galera, which means that
|
||||
one must complete its installation before the next begins, but that
|
||||
compute nodes can be installed concurrently once the controllers are
|
||||
in place.
|
||||
|
||||
|
||||
|
||||
In some cases, you may find errors related to resources that are not
|
||||
yet available when the installation takes place. To solve that
|
||||
problem, simply re-run the puppet agent on the affected node after running the other controllers, and
|
||||
again grep for error messages.
|
||||
|
||||
|
||||
|
||||
When you see no errors on any of your nodes, your OpenStack cluster is
|
||||
ready to go.
|
||||
|
||||
|
||||
Examples of OpenStack installation sequences
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
When running Puppet manually, the exact sequence depends on what you're trying to achieve. In most cases, you'll need to run Puppet more than once; with every deployment pass, Puppet collects any missing information, adds it to the OpenStack configuration, stores it in PuppetDB, and applies the necessary changes.
|
||||
|
||||
**Note:** *Sequentially run* means you don't start the next node deployment until the previous one is finished.
|
||||
|
||||
**Example 1:** **Full OpenStack deployment with standalone storage nodes**
|
||||
|
||||
* Create necessary volumes on storage nodes as described in :ref:`create-the-XFS-partition`.
|
||||
* Sequentially run a deployment pass on every SwiftProxy node (``fuel-swiftproxy-01 ... fuel-swiftproxy-xx``), starting with the ``primary-swift-proxy`` node. Node names are set by the ``$swift_proxies`` variable in ``site.pp``; there are two Swift proxies by default (see the sketch after this list).
|
||||
* Sequentially run a deployment pass on every storage node (``fuel-swift-01`` ... ``fuel-swift-xx``).
|
||||
* Sequentially run a deployment pass on the controller nodes (``fuel-controller-01 ... fuel-controller-xx``), starting with the ``primary-controller`` node.
|
||||
* Run a deployment pass on the Quantum node (``fuel-quantum``) to install the Quantum router.
|
||||
* Run a deployment pass on every compute node (``fuel-compute-01 ... fuel-compute-xx``) - unlike the controllers, these nodes may be deployed in parallel.
|
||||
* Run an additional deployment pass on Controller 1 only (``fuel-controller-01``) to finalize the Galera cluster configuration.
|
||||
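The ``$swift_proxies`` hash referenced in the first step above might look like this in ``site.pp`` (a hedged illustration; the hostnames follow this guide, but the addresses are hypothetical)::

    $swift_proxies = {
      'fuel-swiftproxy-01' => '10.0.0.140',
      'fuel-swiftproxy-02' => '10.0.0.141',
    }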
|
||||
**Example 2:** **Compact OpenStack deployment with storage and swift-proxy combined with nova-controller on the same nodes**
|
||||
|
||||
* Create the necessary volumes on controller nodes as described in :ref:`create-the-XFS-partition`.
|
||||
* Sequentially run a deployment pass on the controller nodes (``fuel-controller-01 ... fuel-controller-xx``), starting with the ``primary-controller`` node. Errors in Swift storage such as */Stage[main]/Swift::Storage::Container/Ring_container_device[<device address>]: Could not evaluate: Device not found check device on <device address>* are expected during the deployment passes until the very final pass.
|
||||
* Run an additional deployment pass on Controller 1 only (``fuel-controller-01``) to finalize the Galera cluster configuration.
|
||||
* Run a deployment pass on the Quantum node (``fuel-quantum``) to install the Quantum router.
|
||||
* Run a deployment pass on every compute node (``fuel-compute-01 ... fuel-compute-xx``) - unlike the controllers, these nodes may be deployed in parallel.
|
||||
|
||||
**Example 3:** **OpenStack HA installation without Swift**
|
||||
|
||||
* Sequentially run a deployment pass on the controller nodes (``fuel-controller-01 ... fuel-controller-xx``), starting with the primary controller. No errors should appear during this deployment pass.
|
||||
* Run an additional deployment pass on the primary controller only (``fuel-controller-01``) to finalize the Galera cluster configuration.
|
||||
* Run a deployment pass on the Quantum node (``fuel-quantum``) to install the Quantum router.
|
||||
* Run a deployment pass on every compute node (``fuel-compute-01 ... fuel-compute-xx``) - unlike the controllers, these nodes may be deployed in parallel.
|
||||
|
||||
**Example 4:** **The simplest OpenStack installation: Controller + Compute on the same node**
|
||||
|
||||
* Set the ``node /fuel-controller-[\d+]/`` variable in ``site.pp`` to match the hostname of the node on which you are going to deploy OpenStack. Set the ``node /fuel-compute-[\d+]/`` variable to **mismatch** the node name. Run a deployment pass on this node. No errors should appear during this deployment pass. (A sketch of this arrangement follows the list.)
|
||||
* Set the ``node /fuel-compute-[\d+]/`` variable in ``site.pp`` to match the hostname of the node on which you are going to deploy OpenStack. Set the ``node /fuel-controller-[\d+]/`` variable to **mismatch** the node name. Run a deployment pass on this node. No errors should appear during this deployment pass.
|
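As a hedged sketch of what Example 4's first pass implies, with ``fuel-01`` standing in as a hypothetical hostname for your single node::

    # Pass 1: the controller definition matches the target host...
    node /fuel-01/ {
      # ... controller classes, as in the examples above ...
    }

    # ...while the compute definition deliberately mismatches it:
    node /fuel-compute-[\d+]/ {
      # ... compute classes ...
    }

For the second pass, swap the two: make the compute definition match ``fuel-01`` and the controller definition mismatch it.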
@ -4,30 +4,45 @@ Supported Software
|
||||
Fuel has been tested and is guaranteed to work with the following software components:
|
||||
|
||||
* Operating Systems
|
||||
* CentOS 6.3 (x86_64 architecture only)
|
||||
* RHEL 6.3 (x86_64 architecture only)
|
||||
* Ubuntu 12.04 (x86_64 architecture only)
|
||||
* CentOS 6.4 (x86_64 architecture only)
|
||||
* RHEL 6.4 (x86_64 architecture only)
|
||||
|
||||
* Puppet (IT automation tool)
|
||||
* 2.7.19
|
||||
* 3.0.0
|
||||
|
||||
* MCollective
|
||||
* 2.2.4
|
||||
|
||||
* Cobbler (bare-metal provisioning tool)
|
||||
* 2.2.3
|
||||
|
||||
* OpenStack
|
||||
* Folsom release
|
||||
* Grizzly release 2013.1
|
||||
|
||||
* Hypervisor
|
||||
* KVM
|
||||
|
||||
* Open vSwitch
|
||||
* 1.10.0
|
||||
|
||||
* HA Proxy
|
||||
* 1.4.19
|
||||
|
||||
* Galera
|
||||
* 23.2.1
|
||||
* 23.2.2
|
||||
|
||||
* RabbitMQ
|
||||
* 2.8.7
|
||||
|
||||
* Pacemaker
|
||||
* 1.1.8
|
||||
|
||||
* Corosync
|
||||
* 1.4.3
|
||||
|
||||
* Keepalived
|
||||
* 1.2.4
|
||||
|
||||
* Nagios
|
||||
* 3.4.4
|
||||
|
||||
|
@ -4,8 +4,7 @@ Download Fuel
|
||||
The first step in installing Fuel is to download the version
|
||||
appropriate for your environment.
|
||||
|
||||
Fuel is available for both Essex and Folsom OpenStack installations, and will be available for Grizzly
|
||||
shortly after Grizzly's release.
|
||||
Fuel is available for Essex, Folsom and Grizzly OpenStack installations, and will be available for Havana shortly after Havana's release.
|
||||
|
||||
To make your installation easier, we also offer a pre-built ISO for installing the master node with Puppet Master and Cobbler. You can mount this ISO in a physical or VirtualBox machine in order to
|
||||
easily create your master node. (Instructions for performing this step
|
||||
|
@ -1,7 +1,7 @@
|
||||
Release Notes
|
||||
-------------
|
||||
|
||||
.. include:: /pages/introduction/release-notes/v2-2-folsom.rst
|
||||
.. include:: /pages/introduction/release-notes/v3-0-grizzly.rst
|
||||
.. include:: /pages/introduction/release-notes/v2-1-folsom.rst
|
||||
.. include:: /pages/introduction/release-notes/v2-0-folsom.rst
|
||||
.. include:: /pages/introduction/release-notes/v1-0-essex.rst
|
||||
|
34
docs/pages/introduction/release-notes/v3-0-grizzly.rst
Normal file
@ -0,0 +1,34 @@
|
||||
v3.0-grizzly
|
||||
^^^^^^^^^^^^
|
||||
|
||||
**New Features in Fuel and Fuel Web 3.0**
|
||||
|
||||
* Support for OpenStack Grizzly
|
||||
* Support for CentOS 6.4
|
||||
* Deployment improvements
|
||||
|
||||
* Deployment of Cinder as a standalone node
|
||||
* Users may now choose where to store Cinder volumes
|
||||
* User-defined disk space allocation for the base OS, Cinder and Virtual Machines
|
||||
* Ability to add new compute nodes without redeployment of the whole environment
|
||||
* Swift installation occurs in a single pass instead of multiple passes
|
||||
|
||||
* Network configuration enhancements
|
||||
|
||||
* Support for NIC bonding
|
||||
* Ability to map logical networks to physical interfaces
|
||||
* Improved firewall module
|
||||
|
||||
**Support for OpenStack Grizzly**
|
||||
|
||||
OpenStack Grizzly is the seventh release of the open source software for building public, private, and hybrid clouds. Fuel now supports deploying the Grizzly version of OpenStack in a variety of configurations including High Availability (HA). For a list of known limitations, please refer to the Known Issues section below.
|
||||
|
||||
**Support for CentOS 6.4**
|
||||
|
||||
CentOS 6.4 is now the base operating system for the Fuel master node, as well as the deployed slave nodes.
|
||||
|
||||
**Deployment Improvements**
|
||||
|
||||
* Deployment of Cinder as a standalone node / User choice
|
||||
|
||||
Previously, Cinder could only be deployed onto a compute node. Now you may choose to deploy Cinder as a standalone node, separate from a compute node. Both options, standalone or combined with a compute node, are available.
|
@ -0,0 +1,2 @@
|
||||
.. include:: /pages/production-considerations/0010-introduction.rst
|
||||
.. include:: /pages/production-considerations/0015-sizing-hardware.rst
|
@ -1,12 +1,14 @@
|
||||
|
||||
Logical Setup
|
||||
-------------
|
||||
^^^^^^^^^^^^^
|
||||
|
||||
An OpenStack HA cluster involves, at a minimum, three types of nodes:
|
||||
controller nodes, compute nodes, and storage nodes.
|
||||
|
||||
Controller Nodes
|
||||
^^^^^^^^^^^^^^^^
|
||||
++++++++++++++++
|
||||
|
||||
|
||||
The first order of business in achieving high availability (HA) is
|
||||
redundancy, so the first step is to provide multiple controller nodes.
|
||||
You must keep in mind, however, that the database uses Galera to
|
||||
@ -45,7 +47,7 @@ mechanism for achieving HA:
|
||||
|
||||
|
||||
Compute Nodes
|
||||
^^^^^^^^^^^^^
|
||||
+++++++++++++
|
||||
|
||||
OpenStack compute nodes are, in many ways, the foundation of your
|
||||
cluster; they are the servers on which your users will create their
|
||||
@ -60,7 +62,7 @@ controller nodes using the VIP and going through HAProxy.
|
||||
|
||||
|
||||
Storage Nodes
|
||||
^^^^^^^^^^^^^
|
||||
+++++++++++++
|
||||
|
||||
|
||||
In this OpenStack cluster reference architecture, shared storage acts
|
||||
|
@ -1,6 +1,6 @@
|
||||
|
||||
Cluster Sizing
|
||||
--------------
|
||||
^^^^^^^^^^^^^^
|
||||
|
||||
This reference architecture is well suited for production-grade
|
||||
OpenStack deployments on a medium and large scale when you can afford
|
||||
|