diff --git a/CHANGELOG b/CHANGELOG
new file mode 100644
index 0000000000..f3b1e407fa
--- /dev/null
+++ b/CHANGELOG
@@ -0,0 +1,12 @@
+3.0-alpha-174-g94a98d6
+- Merge from master branch
+- Rsyslog tuning
+- Puppet debug output
+- Centos 6.4
+
+2.1-folsom-docs-324-g61d1599
+- Grizzly support for centos simple
+- Option for PKI auth for keystone (grizzly native)
+- Nova-conductor as generic nova service at compute nodes
+- CI scripts changes for grizzly tempest (host only routed IP addresses for public pool)
diff --git a/deployment/puppet/cinder/manifests/api.pp b/deployment/puppet/cinder/manifests/api.pp
index 0bcbb9e591..31caccfbd7 100644
--- a/deployment/puppet/cinder/manifests/api.pp
+++ b/deployment/puppet/cinder/manifests/api.pp
@@ -77,9 +77,6 @@ if $cinder_rate_limits {
}
if $keystone_enabled {
- cinder_config {
- 'DEFAULT/auth_strategy': value => 'keystone' ;
- }
cinder_config {
'keystone_authtoken/auth_protocol': value => $keystone_auth_protocol;
'keystone_authtoken/auth_host': value => $keystone_auth_host;
diff --git a/deployment/puppet/cobbler/examples/server_site.pp b/deployment/puppet/cobbler/examples/server_site.pp
index 75a9c683e3..8b9319b3ca 100644
--- a/deployment/puppet/cobbler/examples/server_site.pp
+++ b/deployment/puppet/cobbler/examples/server_site.pp
@@ -43,11 +43,6 @@ node fuel-cobbler {
Class[cobbler::server] ->
Class[cobbler::distro::centos64_x86_64]
- # class { cobbler::distro::centos63_x86_64:
- # http_iso => "http://10.100.0.1/iso/CentOS-6.3-x86_64-netinstall.iso",
- # ks_url => "http://172.18.8.52/~hex/centos/6.3/os/x86_64",
- # }
-
class { cobbler::distro::centos64_x86_64:
http_iso => "http://download.mirantis.com/epel-fuel-folsom-2.1/CentOS-6.4-x86_64-minimal.iso",
ks_url => "http://download.mirantis.com/centos-6.4",
diff --git a/deployment/puppet/cobbler/examples/site.pp b/deployment/puppet/cobbler/examples/site.pp
index c8f15bd058..56a02b4b30 100644
--- a/deployment/puppet/cobbler/examples/site.pp
+++ b/deployment/puppet/cobbler/examples/site.pp
@@ -83,11 +83,6 @@ node fuel-cobbler {
Class['cobbler::server'] ->
Class['cobbler::distro::centos64_x86_64']
- # class { 'cobbler::distro::centos63_x86_64':
- # http_iso => 'http://10.100.0.1/iso/CentOS-6.3-x86_64-netinstall.iso',
- # ks_url => 'http://172.18.8.52/~hex/centos/6.3/os/x86_64',
- # }
-
class { 'cobbler::distro::centos64_x86_64':
http_iso => 'http://download.mirantis.com/epel-fuel-folsom-2.1/CentOS-6.4-x86_64-minimal.iso',
ks_url => 'cobbler',
diff --git a/deployment/puppet/cobbler/manifests/distro/centos64_x86_64.pp b/deployment/puppet/cobbler/manifests/distro/centos64_x86_64.pp
index a7cf03b520..e96b69a753 100644
--- a/deployment/puppet/cobbler/manifests/distro/centos64_x86_64.pp
+++ b/deployment/puppet/cobbler/manifests/distro/centos64_x86_64.pp
@@ -14,8 +14,8 @@
class cobbler::distro::centos64_x86_64(
- $http_iso = 'http://download.mirantis.com/epel-fuel-folsom-2.1/CentOS-6.4-x86_64-minimal.iso',
- $ks_url = 'http://download.mirantis.com/epel-fuel-folsom-2.1'
+ $http_iso = 'http://download.mirantis.com/epel-fuel-grizzly/isos/x86_64/CentOS-6.4-x86_64-minimal.iso',
+ $ks_url = 'http://download.mirantis.com/epel-fuel-grizzly'
) {
Exec {path => '/usr/bin:/bin:/usr/sbin:/sbin'}
diff --git a/deployment/puppet/cobbler/manifests/profile/centos64_x86_64.pp b/deployment/puppet/cobbler/manifests/profile/centos64_x86_64.pp
index d48619933c..4104bcfbd7 100644
--- a/deployment/puppet/cobbler/manifests/profile/centos64_x86_64.pp
+++ b/deployment/puppet/cobbler/manifests/profile/centos64_x86_64.pp
@@ -32,7 +32,7 @@ class cobbler::profile::centos64_x86_64(
},
{
"name" => "Mirantis-epel-fuel-install",
- "url" => "http://download.mirantis.com/epel-fuel-folsom-2.1",
+ "url" => "http://download.mirantis.com/epel-fuel-grizzly",
}
],
diff --git a/deployment/puppet/cobbler/templates/kickstart/centos.ks.erb b/deployment/puppet/cobbler/templates/kickstart/centos.ks.erb
index cb1864bf75..6a88399533 100644
--- a/deployment/puppet/cobbler/templates/kickstart/centos.ks.erb
+++ b/deployment/puppet/cobbler/templates/kickstart/centos.ks.erb
@@ -102,6 +102,8 @@ wget
crontabs
cronie
ruby-augeas
+yum-plugin-versionlock
+
# COBBLER EMBEDDED SNIPPET: 'puppet_install_if_enabled'
# LISTS puppet PACKAGE IF puppet_auto_setup VARIABLE IS SET TO 1
$SNIPPET('puppet_install_if_enabled')
@@ -114,6 +116,7 @@ $SNIPPET('mcollective_install_if_enabled')
# HERE ARE COMMANDS THAT WILL BE LAUNCHED JUST AFTER
# INSTALLATION ITSELF COMPLETED
%post
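+# lock puppet at the version installed above so later 'yum update' runs cannot change it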
+yum versionlock puppet
echo -e "modprobe nf_conntrack_ipv4\nmodprobe nf_conntrack_ipv6" >> /etc/rc.modules
chmod +x /etc/rc.modules
echo -e "net.nf_conntrack_max=1048576" >> /etc/sysctl.conf
diff --git a/deployment/puppet/cobbler/templates/kickstart/rhel.ks.erb b/deployment/puppet/cobbler/templates/kickstart/rhel.ks.erb
index 8214068e9a..910e35d51d 100644
--- a/deployment/puppet/cobbler/templates/kickstart/rhel.ks.erb
+++ b/deployment/puppet/cobbler/templates/kickstart/rhel.ks.erb
@@ -100,6 +100,8 @@ wget
crontabs
cronie
ruby-augeas
+yum-plugin-versionlock
+
# COBBLER EMBEDDED SNIPPET: 'puppet_install_if_enabled'
# LISTS puppet PACKAGE IF puppet_auto_setup VARIABLE IS SET TO 1
$SNIPPET('puppet_install_if_enabled')
@@ -112,6 +114,7 @@ $SNIPPET('mcollective_install_if_enabled')
# HERE ARE COMMANDS THAT WILL BE LAUNCHED JUST AFTER
# INSTALLATION ITSELF COMPLETED
%post
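+# lock puppet at the version installed above so later 'yum update' runs cannot change it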
+yum versionlock puppet
echo -e "modprobe nf_conntrack_ipv4\nmodprobe nf_conntrack_ipv6" >> /etc/rc.modules
chmod +x /etc/rc.modules
echo -e "net.nf_conntrack_max=1048576" >> /etc/sysctl.conf
diff --git a/deployment/puppet/corosync/lib/puppet/provider/service/pacemaker.rb b/deployment/puppet/corosync/lib/puppet/provider/service/pacemaker.rb
index c40c9ad1d5..ce4b74617b 100644
--- a/deployment/puppet/corosync/lib/puppet/provider/service/pacemaker.rb
+++ b/deployment/puppet/corosync/lib/puppet/provider/service/pacemaker.rb
@@ -177,20 +177,25 @@ Puppet::Type.type(:service).provide :pacemaker, :parent => Puppet::Provider::Cor
next unless node[:state] == :online
debug("getting last ops on #{node[:uname]} for #{@resource[:name]}")
all_operations = XPath.match(@@cib,"cib/status/node_state[@uname='#{node[:uname]}']/lrm/lrm_resources/lrm_resource/lrm_rsc_op[starts-with(@id,'#{@resource[:name]}')]")
+ debug("ALL OPERATIONS:\n\n #{all_operations.inspect}")
next if all_operations.nil?
completed_ops = all_operations.select{|op| op.attributes['op-status'].to_i != -1 }
+ debug("COMPLETED OPERATIONS:\n\n #{completed_ops.inspect}")
next if completed_ops.nil?
start_stop_ops = completed_ops.select{|op| ["start","stop","monitor"].include? op.attributes['operation']}
+ debug("START/STOP OPERATIONS:\n\n #{start_stop_ops.inspect}")
next if start_stop_ops.nil?
sorted_operations = start_stop_ops.sort do
- |a,b| a.attributes['call-id'] <=> b.attributes['call-id']
+ |a,b| a.attributes['call-id'].to_i <=> b.attributes['call-id'].to_i
end
good_operations = sorted_operations.select do |op|
op.attributes['rc-code'] == '0' or
op.attributes['operation'] == 'monitor'
end
+ debug("GOOD OPERATIONS :\n\n #{good_operations.inspect}")
next if good_operations.nil?
last_op = good_operations.last
+ debug("LAST GOOD OPERATION :\n\n '#{last_op.inspect}' '#{last_op.nil?}' '#{last_op}'")
next if last_op.nil?
last_successful_op = nil
if ['start','stop'].include?(last_op.attributes['operation'])
@@ -204,6 +209,7 @@ Puppet::Type.type(:service).provide :pacemaker, :parent => Puppet::Provider::Cor
last_successful_op = 'start'
end
end
+ debug("LAST SUCCESSFUL OP :\n\n #{last_successful_op.inspect}")
@last_successful_operations << last_successful_op if !last_successful_op.nil?
end
@last_successful_operations
@@ -214,7 +220,7 @@ Puppet::Type.type(:service).provide :pacemaker, :parent => Puppet::Provider::Cor
# end
def enable
- crm('resource','manage', @resource[:name])
+ crm('resource','manage', get_service_name)
end
def enabled?
@@ -223,7 +229,7 @@ Puppet::Type.type(:service).provide :pacemaker, :parent => Puppet::Provider::Cor
end
def disable
- crm('resource','unmanage',@resource[:name])
+ crm('resource','unmanage',get_service_name)
end
#TODO: think about per-node start/stop/restart of services
diff --git a/deployment/puppet/corosync/manifests/init.pp b/deployment/puppet/corosync/manifests/init.pp
index 399e748a67..536900fdaf 100644
--- a/deployment/puppet/corosync/manifests/init.pp
+++ b/deployment/puppet/corosync/manifests/init.pp
@@ -138,6 +138,18 @@ class corosync (
require => Package['corosync']
}
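+  # Pacemaker daemons write core dumps under /var/lib/pacemaker/cores/<user>;
+  # pre-create root's directory with cluster ownership on RedHat systems.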
+ if $::osfamily == "RedHat" {
+ file { '/var/lib/pacemaker/cores/root':
+ ensure => directory,
+ mode => '0750',
+ owner => 'hacluster',
+ group => 'haclient',
+ recurse => true,
+ purge => true,
+ require => Package['corosync']
+ }
+ }
+
if $::osfamily == 'Debian' {
exec { 'enable corosync':
command => 'sed -i s/START=no/START=yes/ /etc/default/corosync',
diff --git a/deployment/puppet/firewall/lib/puppet/provider/firewall/iptables.rb b/deployment/puppet/firewall/lib/puppet/provider/firewall/iptables.rb
index c8e9c94789..f859df6791 100644
--- a/deployment/puppet/firewall/lib/puppet/provider/firewall/iptables.rb
+++ b/deployment/puppet/firewall/lib/puppet/provider/firewall/iptables.rb
@@ -198,7 +198,9 @@ Puppet::Type.type(:firewall).provide :iptables, :parent => Puppet::Provider::Fir
# Normalise all rules to CIDR notation.
[:source, :destination].each do |prop|
- hash[prop] = Puppet::Util::IPCidr.new(hash[prop]).cidr unless hash[prop].nil?
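+      # only normalise values that already look like plain IPv4 (optionally /len);
+      # leave nil and other non-address strings untouched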
+      if hash[prop] =~ /^(\d{1,3}\.){3}\d{1,3}(?:\/(\d+))?$/
+        hash[prop] = Puppet::Util::IPCidr.new(hash[prop]).cidr
+      end
end
[:dport, :sport, :port, :state].each do |prop|
@@ -252,9 +254,9 @@ Puppet::Type.type(:firewall).provide :iptables, :parent => Puppet::Provider::Fir
# Proto should equal 'all' if undefined
hash[:proto] = "all" if !hash.include?(:proto)
- # If the jump parameter is set to one of: ACCEPT, REJECT or DROP then
+ # If the jump parameter is set to one of: ACCEPT, REJECT, NOTRACK or DROP then
# we should set the action parameter instead.
- if ['ACCEPT','REJECT','DROP'].include?(hash[:jump]) then
+ if ['ACCEPT','REJECT','DROP','NOTRACK'].include?(hash[:jump]) then
hash[:action] = hash[:jump].downcase
hash.delete(:jump)
end
diff --git a/deployment/puppet/firewall/lib/puppet/type/firewall.rb b/deployment/puppet/firewall/lib/puppet/type/firewall.rb
index e451699ba0..b94e2a064c 100644
--- a/deployment/puppet/firewall/lib/puppet/type/firewall.rb
+++ b/deployment/puppet/firewall/lib/puppet/type/firewall.rb
@@ -87,11 +87,12 @@ Puppet::Type.newtype(:firewall) do
* accept - the packet is accepted
* reject - the packet is rejected with a suitable ICMP response
* drop - the packet is dropped
+    * notrack - the packet is exempted from connection tracking
If you specify no value it will simply match the rule but perform no
action unless you provide a provider specific parameter (such as *jump*).
EOS
- newvalues(:accept, :reject, :drop)
+ newvalues(:accept, :reject, :drop, :notrack)
end
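+  # A minimal sketch (hypothetical rule) using the new value; NOTRACK is only
+  # valid in the raw table:
+  #
+  #   firewall { '002 skip conntrack for dns':
+  #     table  => 'raw',
+  #     chain  => 'PREROUTING',
+  #     proto  => 'udp',
+  #     dport  => '53',
+  #     action => 'notrack',
+  #   }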
# Generic matching properties
diff --git a/deployment/puppet/galera/manifests/init.pp b/deployment/puppet/galera/manifests/init.pp
index 6ede63b8bb..22fba23311 100644
--- a/deployment/puppet/galera/manifests/init.pp
+++ b/deployment/puppet/galera/manifests/init.pp
@@ -14,8 +14,8 @@ class galera (
$node_address = $ipaddress_eth0,
$setup_multiple_gcomm = true,
$skip_name_resolve = false,
- $node_addresses = [
- $ipaddress_eth0]) {
+ $node_addresses = [$ipaddress_eth0],
+ ) {
include galera::params
$mysql_user = $::galera::params::mysql_user
@@ -25,13 +25,6 @@ class galera (
case $::osfamily {
'RedHat' : {
- if (!$::selinux == 'false') and !defined(Class['selinux']) {
- class { 'selinux':
- mode => 'disabled',
- before => Package['MySQL-server']
- }
- }
-
file { '/etc/init.d/mysql':
ensure => present,
mode => 755,
@@ -65,12 +58,6 @@ class galera (
}
}
'Debian' : {
- if (!$::selinux == 'false') and !defined(Class['selinux']) {
- class { 'selinux':
- mode => 'disabled',
- before => Package['MySQL-server']
- }
- }
file { '/etc/init.d/mysql':
ensure => present,
@@ -187,7 +174,7 @@ class galera (
exec { "wait-initial-sync":
logoutput => true,
- command => "/usr/bin/mysql -Nbe \"show status like 'wsrep_local_state_comment'\" | /bin/grep -q Synced && sleep 10",
+ command => "/usr/bin/mysql -Nbe \"show status like 'wsrep_local_state_comment'\" | /bin/grep -q -e Synced -e Initialized && sleep 10",
try_sleep => 5,
tries => 60,
refreshonly => true,
@@ -225,4 +212,15 @@ class galera (
node_addresses => $node_addresses,
node_address => $node_address,
}
+
+ if $primary_controller {
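+    # Bootstrap a brand-new cluster from the primary controller: restart mysqld
+    # with an empty gcomm:// address, but only while grastate.dat still carries
+    # the all-zero UUID, i.e. this node has never synced with any cluster.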
+ exec { "start-new-galera-cluster":
+ path => "/usr/bin:/usr/sbin:/bin:/sbin",
+ logoutput => true,
+ command => '/etc/init.d/mysql stop; sleep 10; killall -w mysqld && ( killall -w -9 mysqld_safe || : ) && sleep 10; /etc/init.d/mysql start --wsrep-cluster-address=gcomm:// &',
+ onlyif => "[ -f /var/lib/mysql/grastate.dat ] && (cat /var/lib/mysql/grastate.dat | awk '\$1 == \"uuid:\" {print \$2}' | awk '{if (\$0 == \"00000000-0000-0000-0000-000000000000\") exit 0; else exit 1}')",
+ require => Service["mysql-galera"],
+ before => Exec ["wait-for-synced-state"],
+ }
+ }
}
diff --git a/deployment/puppet/galera/templates/wsrep.cnf.erb b/deployment/puppet/galera/templates/wsrep.cnf.erb
index 52629a871e..84e6184714 100644
--- a/deployment/puppet/galera/templates/wsrep.cnf.erb
+++ b/deployment/puppet/galera/templates/wsrep.cnf.erb
@@ -50,14 +50,10 @@ wsrep_provider_options="pc.ignore_sb = no;ist.recv_addr=<%= node_address %>;gmca
# Logical cluster name. Should be the same for all nodes.
wsrep_cluster_name="<%= cluster_name -%>"
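+# pc.wait_prim=no lets nodes start without waiting for a primary component;
+# the primary controller bootstraps the new cluster itself (see the galera class).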
-<% if primary_controller -%>
-wsrep_cluster_address="gcomm://"
+<% if setup_multiple_gcomm -%>
+wsrep_cluster_address="gcomm://<%= @node_addresses.reject{|ip| ip == hostname || ip == node_address || ip == l3_fqdn_hostname }.collect {|ip| ip + ':' + 4567.to_s }.join ',' %>?pc.wait_prim=no"
<% else -%>
- <% if setup_multiple_gcomm -%>
-wsrep_cluster_address="gcomm://<%= @node_addresses.reject{|ip| ip == hostname || ip == node_address || ip == l3_fqdn_hostname }.collect {|ip| ip + ':' + 4567.to_s }.join ',' %>"
- <% else -%>
-wsrep_cluster_address="gcomm://<%= @node_addresses.first %>:4567"
- <% end -%>
+wsrep_cluster_address="gcomm://<%= @node_addresses.first %>:4567?pc.wait_prim=no"
<% end -%>
# Human-readable node name (non-unique). Hostname by default.
diff --git a/deployment/puppet/horizon/manifests/init.pp b/deployment/puppet/horizon/manifests/init.pp
index 9e189f10a3..77364e5c78 100644
--- a/deployment/puppet/horizon/manifests/init.pp
+++ b/deployment/puppet/horizon/manifests/init.pp
@@ -26,7 +26,7 @@ class horizon(
$cache_server_port = '11211',
$swift = false,
$quantum = false,
- $package_ensure = present,
+ $package_ensure = present,
$horizon_app_links = false,
$keystone_host = '127.0.0.1',
$keystone_port = 5000,
@@ -37,6 +37,7 @@ class horizon(
$http_port = 80,
$https_port = 443,
$use_ssl = false,
+ $log_level = 'DEBUG',
) {
include horizon::params
@@ -66,6 +67,11 @@ class horizon(
mode => '0644',
}
+ file {'/usr/share/openstack-dashboard/':
+ recurse => true,
+ subscribe => Package['dashboard'],
+ }
+
case $use_ssl {
'exist': { # SSL certificate already exists
$generate_sslcert_names = true
diff --git a/deployment/puppet/horizon/templates/local_settings.py.erb b/deployment/puppet/horizon/templates/local_settings.py.erb
index 14d62e90c4..ea89c6e6ea 100644
--- a/deployment/puppet/horizon/templates/local_settings.py.erb
+++ b/deployment/puppet/horizon/templates/local_settings.py.erb
@@ -109,10 +109,9 @@ LOGGING = {
'class': 'logging.StreamHandler',
},
'file': {
- # Set the level to "DEBUG" for verbose output logging.
- 'level': 'DEBUG',
+ 'level': '<%= log_level %>',
'class': 'logging.FileHandler',
- 'filename': '/var/log/horizon/horizon.log'
+ 'filename': '<%= scope.lookupvar("horizon::params::logdir") %>/horizon.log'
},
},
'loggers': {
@@ -148,12 +147,10 @@ LOGGING = {
}
}
}
-
-LOGIN_URL = '<%= root_url %>/auth/login/'
-LOGOUT_URL = '<%= root_url %>/auth/logout/'
-LOGIN_REDIRECT_URL = '<%= root_url %>/syspanel/'
+LOGIN_URL='<%= scope.lookupvar("horizon::params::root_url") %>/auth/login/'
+LOGIN_REDIRECT_URL='<%= scope.lookupvar("horizon::params::root_url") %>'
# The Ubuntu package includes pre-compressed JS and compiled CSS to allow
# offline compression by default. To enable online compression, install
# the node-less package and enable the following option.
-COMPRESS_OFFLINE = True
+COMPRESS_OFFLINE = False
diff --git a/deployment/puppet/keystone/manifests/init.pp b/deployment/puppet/keystone/manifests/init.pp
index fd437add37..c8b1d1de3d 100644
--- a/deployment/puppet/keystone/manifests/init.pp
+++ b/deployment/puppet/keystone/manifests/init.pp
@@ -21,6 +21,10 @@
# Defaults to False.
# [catalog_type] Type of catalog that keystone uses to store endpoints,services. Optional.
# Defaults to sql. (Also accepts template)
+# [token_format] Format keystone uses for tokens. Optional. Defaults to UUID (PKI is the Grizzly-native mode).
+# Supports PKI and UUID.
+# [cache_dir] Directory created when token_format is PKI. Optional.
+# Defaults to /var/cache/keystone.
# [enabled] If the keystone services should be enabled. Optional. Defaults to true.
# [sql_connection] URL used to connect to database.
# [idle_timeout] Timeout when db connections should be reaped.
@@ -54,15 +58,20 @@ class keystone(
$debug = 'False',
$use_syslog = false,
$catalog_type = 'sql',
+ $token_format = 'UUID',
+# $token_format = 'PKI',
+ $cache_dir = '/var/cache/keystone',
$enabled = true,
$sql_connection = 'sqlite:////var/lib/keystone/keystone.db',
$idle_timeout = '200'
) {
validate_re($catalog_type, 'template|sql')
+ validate_re($token_format, 'UUID|PKI')
Keystone_config<||> ~> Service['keystone']
Keystone_config<||> ~> Exec<| title == 'keystone-manage db_sync'|>
+ Package['keystone'] ~> Exec<| title == 'keystone-manage pki_setup'|> ~> Service['keystone']
# TODO implement syslog features
if $use_syslog {
@@ -72,7 +81,7 @@ class keystone(
path => "/etc/keystone/logging.conf",
owner => "keystone",
group => "keystone",
- require => [User['keystone'],Group['keystone'],File['/etc/keystone']]
+ require => File['/etc/keystone'],
}
##TODO add rsyslog module config
} else {
@@ -86,7 +95,6 @@ class keystone(
owner => 'keystone',
group => 'keystone',
mode => '0644',
- #require => Package['keystone'],
notify => Service['keystone'],
}
@@ -226,10 +234,26 @@ class keystone(
provider => $::keystone::params::service_provider,
}
+ keystone_config { 'signing/token_format': value => $token_format }
+ if($token_format == 'PKI') {
+ file { $cache_dir:
+ ensure => directory,
+ }
+
+    # keystone-manage pki_setup should be run as the same system user that will
+    # run the Keystone service, to ensure proper ownership of the private key
+    # file and the associated certificates
+ exec { 'keystone-manage pki_setup':
+ path => '/usr/bin',
+ user => 'keystone',
+ refreshonly => true,
+ }
+ }
+
if $enabled {
# this probably needs to happen more often than just when the db is
# created
exec { 'keystone-manage db_sync':
+ user => 'keystone',
path => '/usr/bin',
refreshonly => true,
notify => Service['keystone'],
diff --git a/deployment/puppet/l23network/.project b/deployment/puppet/l23network/.project
deleted file mode 100644
index dc553cb641..0000000000
--- a/deployment/puppet/l23network/.project
+++ /dev/null
@@ -1,24 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<projectDescription>
-  <name>l23network</name>
-  <comment></comment>
-  <projects>
-    <project>stdlib</project>
-  </projects>
-  <buildSpec>
-    <buildCommand>
-      <name>org.cloudsmith.geppetto.pp.dsl.ui.modulefileBuilder</name>
-      <arguments>
-      </arguments>
-    </buildCommand>
-    <buildCommand>
-      <name>org.eclipse.xtext.ui.shared.xtextBuilder</name>
-      <arguments>
-      </arguments>
-    </buildCommand>
-  </buildSpec>
-  <natures>
-    <nature>org.cloudsmith.geppetto.pp.dsl.ui.puppetNature</nature>
-    <nature>org.eclipse.xtext.ui.shared.xtextNature</nature>
-  </natures>
-</projectDescription>
diff --git a/deployment/puppet/l23network/.rspec b/deployment/puppet/l23network/.rspec
deleted file mode 100644
index 5f1647637a..0000000000
--- a/deployment/puppet/l23network/.rspec
+++ /dev/null
@@ -1,2 +0,0 @@
---color
---format progress
diff --git a/deployment/puppet/l23network/Gemfile b/deployment/puppet/l23network/Gemfile
deleted file mode 100644
index 957feb6514..0000000000
--- a/deployment/puppet/l23network/Gemfile
+++ /dev/null
@@ -1,14 +0,0 @@
-#source :rubygems
-source 'https://rubygems.org'
-
-gem 'rake'
-gem 'puppet-lint'
-gem 'rspec'
-gem 'rspec-puppet'
-
-## Will come in handy later on. But you could just use
-# gem 'puppet'
-puppetversion = ENV.key?('PUPPET_VERSION') ? "~> #{ENV['PUPPET_VERSION']}" : ['>= 2.7']
-gem 'puppet', puppetversion
-gem 'puppetlabs_spec_helper'
-
diff --git a/deployment/puppet/l23network/README.md b/deployment/puppet/l23network/README.md
index e3e12b977b..75785ce363 100644
--- a/deployment/puppet/l23network/README.md
+++ b/deployment/puppet/l23network/README.md
@@ -1,8 +1,10 @@
L23network
==========
-Puppet module for configuring network interfaces, 802.1q vlans and bondings on 2 and 3 level.
+Puppet module for configuring network interfaces on the 2nd and 3rd OSI layers (802.1q vlans, access ports, NIC bonding, IP address assignment, DHCP, and interfaces without IP addresses).
+Can work with Open vSwitch or the standard Linux tools.
+Currently supports CentOS 6.3+ (RHEL 6) and Ubuntu 12.04 or above.

-Can work together with open vSwitch or standart linux way. At this moment support CentOS 6.3 (RHEL6) and Ubuntu 12.04 or above.
+The L23network module behaves the same on both operating systems.
Usage
@@ -22,7 +24,9 @@ If you do not plan to use open vSwitch you can disable it:
class {'l23network': use_ovs=>false, stage=> 'netconfig'}
-L2 network configuation
+
+
+L2 network configuration (Open vSwitch only)
-----------------------
Current layout is:
@@ -52,24 +56,102 @@ If you do not define type for port (or define '') then ovs-vsctl will work by de
You can use skip_existing option if you do not want to interrupt the configuration during adding of existing port or bridge.
- L3 network configuration
- -----------------------
-
- l23network::l3::ifconfig {"some_name0": interface=>'eth0', ipaddr=>'192.168.0.1', netmask=>'255.255.255.0'}
- l23network::l3::ifconfig {"some_name1": interface=>'br-ex', ipaddr=>'192.168.10.1', netmask=>'255.255.255.0', ifname_order_prefix='ovs'}
- l23network::l3::ifconfig {"some_name2": interface=>'aaa0', ipaddr=>'192.168.10.1', netmask=>'255.255.255.0', ifname_order_prefix='zzz'}
-
- Option 'ipaddr' can contain IP address, 'dhcp', or 'none' (for interface with no IP address).
+L3 network configuration
+------------------------
-When CentOS or Ubuntu starts they initialize and configure network interfaces in alphabetical order.
-In example above we change the order of configuration process by ifname_order_prefix keyword. The order will be:
+### Simple IP address definition, DHCP or address-less interfaces
- ifcfg-eth0
+ l23network::l3::ifconfig {"eth0": ipaddr=>'192.168.1.1/24'}
+ l23network::l3::ifconfig {"xXxXxXx":
+ interface => 'eth1',
+ ipaddr => '192.168.2.1',
+ netmask => '255.255.255.0'
+ }
+ l23network::l3::ifconfig {"eth2": ipaddr=>'dhcp'}
+ l23network::l3::ifconfig {"eth3": ipaddr=>'none'}
+
+Option *ipaddr* can contain an IP address, the string 'dhcp', or 'none'. In this example we describe the configuration of four network interfaces:
+* Interface *eth0* uses the short CIDR-notated form of IP address definition.
+* Interface *eth1* uses the classic *ipaddr* plus *netmask* definition.
+* Interface *eth2* will be configured via DHCP.
+* Interface *eth3* will be configured as an interface without an IP address.
+  This is often needed to create a "master" interface for 802.1q vlans (in the native linux implementation)
+  or a slave interface for bonding.
+
+The CIDR-notated form takes priority over the classic *ipaddr* and *netmask* definition.
+If you omit *netmask* and do not use the CIDR-notated form, the default
+*netmask* value '255.255.255.0' is used.
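+
+For example, in this hypothetical resource the /25 prefix from the CIDR form
+wins over the classic *netmask* parameter:
+
+    l23network::l3::ifconfig {"eth4":
+      ipaddr  => '192.168.5.1/25',
+      netmask => '255.255.255.0', # ignored, the CIDR form takes priority
+    }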
+
+### Multiple IP addresses for one interface (aliases)
+
+ l23network::l3::ifconfig {"eth0":
+ ipaddr => ['192.168.0.1/24', '192.168.1.1/24', '192.168.2.1/24']
+ }
+
+You can pass a list of CIDR-notated IP addresses to the *ipaddr* parameter to assign multiple IP addresses to one interface.
+In this case aliases (not subinterfaces) will be created. The array can contain one or more elements.
+
+### UP and DOWN interface order
+
+ l23network::l3::ifconfig {"eth1":
+ ipaddr=>'192.168.1.1/24'
+ }
+ l23network::l3::ifconfig {"br-ex":
+ ipaddr=>'192.168.10.1/24',
+     ifname_order_prefix=>'ovs'
+ }
+ l23network::l3::ifconfig {"aaa0":
+ ipaddr=>'192.168.20.1/24',
+     ifname_order_prefix=>'zzz'
+ }
+
+At startup, CentOS and Ubuntu bring up and configure network interfaces in the alphabetical
+order of their configuration file names. In the example above we change the configuration
+order with the *ifname_order_prefix* keyword. We will get this order:
+
+ ifcfg-eth1
ifcfg-ovs-br-ex
ifcfg-zzz-aaa0
And the OS will configure interfaces br-ex and aaa0 after eth1
+### Default gateway
+
+ l23network::l3::ifconfig {"eth1":
+ ipaddr => '192.168.2.5/24',
+ gateway => '192.168.2.1',
+ check_by_ping => '8.8.8.8',
+ check_by_ping_timeout => '30'
+ }
+
+In this example we define the default *gateway* and options for waiting until the network is up.
+Parameter *check_by_ping* defines an IP address that will be pinged. Puppet will block, waiting
+for a response, for up to *check_by_ping_timeout* seconds.
+Parameter *check_by_ping* can be an IP address, 'gateway', or 'none' to disable the check.
+By default the gateway is pinged.
+
+### DNS-specific options
+
+ l23network::l3::ifconfig {"eth1":
+ ipaddr => '192.168.2.5/24',
+ dns_nameservers => ['8.8.8.8','8.8.4.4'],
+ dns_search => ['aaa.com','bbb.com'],
+ dns_domain => 'qqq.com'
+ }
+
+We can also specify DNS nameservers and a search list that will be inserted (by the resolvconf library) into /etc/resolv.conf.
+Option *dns_domain* is implemented only on Ubuntu.
+
+### DHCP-specific options
+
+ l23network::l3::ifconfig {"eth2":
+ ipaddr => 'dhcp',
+ dhcp_hostname => 'compute312',
+ dhcp_nowait => false,
+ }
+
+
+
Bonding
-------
### Using standard linux ifenslave bonding
@@ -96,9 +178,13 @@ More information about bonding of network interfaces you can find in manuals for
* https://help.ubuntu.com/community/UbuntuBonding
* http://wiki.centos.org/TipsAndTricks/BondingInterfaces
-### Using open vSwitch
-In open vSwitch for bonding of two network interfaces you need to add a special resource "bond" to bridge.
-In this example we add "eth1" and "eth2" interfaces to bridge "bridge0":
+### Using Open vSwitch
+To bond two interfaces you need to:
+* Specify an OVS bridge.
+* Specify the special resource "bond", add it to the bridge, and set any bond-specific parameters.
+* Assign an IP address to the newly created network interface (if needed).
+
+In this example we add the "eth1" and "eth2" interfaces to bridge "bridge0" as bond "bond1":
l23network::l2::bridge{'bridge0': } ->
l23network::l2::bond{'bond1':
@@ -108,6 +194,10 @@ In this example we add "eth1" and "eth2" interfaces to bridge "bridge0":
'lacp=active',
'other_config:lacp-time=fast'
],
+ } ->
+ l23network::l3::ifconfig {'bond1':
+ ipaddr => '192.168.232.1',
+ netmask => '255.255.255.0',
}
Open vSwitch provides a lot of parameter for different configurations.
@@ -115,16 +205,19 @@ We can specify them in "properties" option as list of parameter=value
(or parameter:key=value) strings.
You can find more parameters in [open vSwitch documentation page](http://openvswitch.org/support/).
+
+
802.1q vlan access ports
------------------------
### Using standard linux way
+
We can use tagged vlans over ordinary network interfaces and over bonds.
L23networks module supports two types of vlan interface namings:
* *vlanXXX* -- 802.1q tag XXX from the vlan interface name. You must specify the
parent interface name in the **vlandev** parameter.
* *eth0.XXX* -- 802.1q tag XXX and parent interface name from the vlan interface name
-If you are using 802.1q vlans over bonds it is recommended to use the first one.
+If you are using 802.1q vlans over bonds it is strongly recommended to use the first one.
In this example we can see both types:
@@ -155,19 +248,30 @@ In this example we can see both types:
### Using open vSwitch
In the open vSwitch all internal traffic is virtually tagged.
To create a 802.1q tagged access port you need to specify a vlan tag when adding a port to the bridge.
-In example above we create two ports with tags 10 and 20:
+In the following example we create two ports with tags 10 and 20, and assign an IP address to the interface with tag 10:
+
l23network::l2::bridge{'bridge0': } ->
l23network::l2::port{'vl10':
- bridge => 'bridge0',
- type => 'internal',
- port_properties => ['tag=10'],
+ bridge => 'bridge0',
+ type => 'internal',
+ port_properties => [
+ 'tag=10'
+ ],
} ->
l23network::l2::port{'vl20':
- bridge => 'bridge0',
- type => 'internal',
- port_properties => ['tag=20'],
- }
+ bridge => 'bridge0',
+ type => 'internal',
+ port_properties => [
+ 'tag=20'
+ ],
+ } ->
+ l23network::l3::ifconfig {'vl10':
+ ipaddr => '192.168.101.1/24',
+ } ->
+ l23network::l3::ifconfig {'vl20':
+ ipaddr => 'none',
+ }
You can get more details about vlans in open vSwitch at [open vSwitch documentation page](http://openvswitch.org/support/config-cookbooks/vlan-configuration-cookbook/).
diff --git a/deployment/puppet/l23network/manifests/l3/create_br_iface.pp b/deployment/puppet/l23network/manifests/l3/create_br_iface.pp
index da186c75ef..25686b33fa 100644
--- a/deployment/puppet/l23network/manifests/l3/create_br_iface.pp
+++ b/deployment/puppet/l23network/manifests/l3/create_br_iface.pp
@@ -85,7 +85,7 @@ define l23network::l3::create_br_iface (
skip_existing => $se,
require => L23network::L2::Bridge["$bridge"]
} ->
- l23network::l3::ifconfig {$interface: # no quotes here, $interface is an array!!!
+  l23network::l3::ifconfig {$interface: # no quotes here, $interface may be an array!!!
ipaddr => 'none',
ifname_order_prefix => '0',
require => L23network::L2::Bond["$ovs_bond_name"],
@@ -98,7 +98,7 @@ define l23network::l3::create_br_iface (
skip_existing => $se,
require => L23network::L2::Bridge["$bridge"]
} ->
- l23network::l3::ifconfig {"$interface": # USE quotes since the only one interface name is provided!!!!!
+ l23network::l3::ifconfig {"$interface": # USE quotes!!!!!
ipaddr => 'none',
vlandev => $lnx_interface_vlandev,
bond_mode => $lnx_interface_bond_mode,
diff --git a/deployment/puppet/nova/Gemfile b/deployment/puppet/nova/Gemfile
new file mode 120000
index 0000000000..065890d84c
--- /dev/null
+++ b/deployment/puppet/nova/Gemfile
@@ -0,0 +1 @@
+.gemfile
\ No newline at end of file
diff --git a/deployment/puppet/nova/lib/puppet/provider/nova_floating/nova_manage.rb b/deployment/puppet/nova/lib/puppet/provider/nova_floating/nova_manage.rb
index d21c7e4e60..692a184f0b 100644
--- a/deployment/puppet/nova/lib/puppet/provider/nova_floating/nova_manage.rb
+++ b/deployment/puppet/nova/lib/puppet/provider/nova_floating/nova_manage.rb
@@ -13,11 +13,16 @@ Puppet::Type.type(:nova_floating).provide(:nova_manage) do
end
def create
- nova_manage("floating", "create", resource[:network])
+ nova_manage("floating", "create", resource[:network])
end
def destroy
nova_manage("floating", "delete", resource[:network])
end
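+  # Split "A.B.C.D[/len]" into [address, masklen]; masklen is nil when absent.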
+ def parse
+ /([0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3})(\/([0-9]{1,2}))?/ =~ resource[:network]
+ [Regexp.last_match(1), Regexp.last_match(3)]
+ end
+
end
diff --git a/deployment/puppet/nova/lib/puppet/type/nova_floating.rb b/deployment/puppet/nova/lib/puppet/type/nova_floating.rb
index 4a497883af..969e76c639 100644
--- a/deployment/puppet/nova/lib/puppet/type/nova_floating.rb
+++ b/deployment/puppet/nova/lib/puppet/type/nova_floating.rb
@@ -5,8 +5,8 @@ Puppet::Type.newtype(:nova_floating) do
ensurable
newparam(:network, :namevar => true) do
- desc "Network (ie, 192.168.1.0/24 or 192.168.1.128/25 etc.)"
- newvalues(/^[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\/[0-9]{1,2}$/)
+ desc "Network or ip (ie, 192.168.1.0/24, 192.168.1.128/25, 192.168.1.15 etc.)"
+ newvalues(/^[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}(\/[0-9]{1,2})?$/)
end
end
diff --git a/deployment/puppet/nova/manifests/api.pp b/deployment/puppet/nova/manifests/api.pp
index 71b7b855ec..16d18e08f0 100644
--- a/deployment/puppet/nova/manifests/api.pp
+++ b/deployment/puppet/nova/manifests/api.pp
@@ -22,6 +22,7 @@ class nova::api(
$auth_protocol = 'http',
$admin_tenant_name = 'services',
$admin_user = 'nova',
+ $cinder = true,
$enabled_apis = 'ec2,osapi_compute,metadata',
$nova_rate_limits = undef,
$nova_user_password= undef, #Empty password generates error and saves from non-working installation
@@ -80,10 +81,10 @@ class nova::api(
service_name => $::nova::params::api_service_name,
}
- if $enabled_apis =~ /osapi_volume/ {
- $volume_api_class = 'nova.volume.api.API'
- } else {
+ if $cinder {
$volume_api_class = 'nova.volume.cinder.API'
+ } else {
+ $volume_api_class = 'nova.volume.api.API'
}
nova_config {
diff --git a/deployment/puppet/nova/manifests/compute/libvirt.pp b/deployment/puppet/nova/manifests/compute/libvirt.pp
index 5721d659ae..2fe048da0d 100644
--- a/deployment/puppet/nova/manifests/compute/libvirt.pp
+++ b/deployment/puppet/nova/manifests/compute/libvirt.pp
@@ -56,6 +56,13 @@ class nova::compute::libvirt (
ensure => present,
}
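+  # SELinux is disabled in these deployments, so tell qemu not to apply
+  # sVirt security labels to guests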
+ file_line { 'no_qemu_selinux':
+ path => '/etc/libvirt/qemu.conf',
+ line => 'security_driver="none"',
+ require => Package[$::nova::params::libvirt_package_name],
+ notify => Service['libvirt']
+ }
+
service { 'libvirt' :
name => $::nova::params::libvirt_service_name,
ensure => running,
diff --git a/deployment/puppet/nova/manifests/conductor.pp b/deployment/puppet/nova/manifests/conductor.pp
new file mode 100644
index 0000000000..6768cab315
--- /dev/null
+++ b/deployment/puppet/nova/manifests/conductor.pp
@@ -0,0 +1,17 @@
+#
+# installs nova conductor package and service
+#
+class nova::conductor(
+ $enabled = false,
+ $ensure_package = 'present'
+) {
+
+ include nova::params
+
+ nova::generic_service { 'conductor':
+ enabled => $enabled,
+ package_name => $::nova::params::conductor_package_name,
+ service_name => $::nova::params::conductor_service_name,
+ ensure_package => $ensure_package,
+ }
+}
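+
+# A minimal usage sketch (hypothetical node scope), enabling the service so
+# compute nodes reach the database through the conductor:
+#
+#   class { 'nova::conductor':
+#     enabled => true,
+#   }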
diff --git a/deployment/puppet/nova/manifests/manage/floating.pp b/deployment/puppet/nova/manifests/manage/floating.pp
index 5afa71535b..0ccb631e67 100644
--- a/deployment/puppet/nova/manifests/manage/floating.pp
+++ b/deployment/puppet/nova/manifests/manage/floating.pp
@@ -1,7 +1,10 @@
-define nova::manage::floating ( $network ) {
+define nova::manage::floating (
+ $network = $name
+) {
- File['/etc/nova/nova.conf'] -> Nova_floating[$name]
- Exec<| title == 'nova-db-sync' |> -> Nova_floating[$name]
+ File['/etc/nova/nova.conf'] ->
+ Exec<| title == 'nova-db-sync' |> ->
+ Nova_floating[$name]
nova_floating { $name:
ensure => present,
diff --git a/deployment/puppet/nova/manifests/params.pp b/deployment/puppet/nova/manifests/params.pp
index e02e817de5..820b51be88 100644
--- a/deployment/puppet/nova/manifests/params.pp
+++ b/deployment/puppet/nova/manifests/params.pp
@@ -11,6 +11,7 @@ $libvirt_type_kvm = 'qemu-kvm'
$cert_package_name = 'openstack-nova-cert'
$common_package_name = 'openstack-nova-common'
$compute_package_name = 'openstack-nova-compute'
+ $conductor_package_name = 'openstack-nova-conductor'
$consoleauth_package_name = 'openstack-nova-console'
$doc_package_name = 'openstack-nova-doc'
$libvirt_package_name = 'libvirt'
@@ -27,6 +28,7 @@ $libvirt_type_kvm = 'qemu-kvm'
$api_service_name = 'openstack-nova-api'
$cert_service_name = 'openstack-nova-cert'
$compute_service_name = 'openstack-nova-compute'
+ $conductor_service_name = 'openstack-nova-conductor'
$consoleauth_service_name = 'openstack-nova-consoleauth'
$console_service_name = 'openstack-nova-console'
$libvirt_service_name = 'libvirtd'
@@ -51,6 +53,7 @@ $libvirt_type_kvm = 'qemu-kvm'
$cert_package_name = 'nova-cert'
$common_package_name = 'nova-common'
$compute_package_name = 'nova-compute'
+ $conductor_package_name = 'nova-conductor'
$doc_package_name = 'nova-doc'
$libvirt_package_name = 'libvirt-bin'
$network_package_name = 'nova-network'
@@ -66,6 +69,7 @@ $libvirt_type_kvm = 'qemu-kvm'
$api_service_name = 'nova-api'
$cert_service_name = 'nova-cert'
$compute_service_name = 'nova-compute'
+ $conductor_service_name = 'nova-conductor'
$consoleauth_service_name = 'nova-consoleauth'
$console_service_name = 'nova-console'
$libvirt_service_name = 'libvirt-bin'
diff --git a/deployment/puppet/nova/spec/unit/provider/nova_floating/nova_floating_spec.rb b/deployment/puppet/nova/spec/unit/provider/nova_floating/nova_floating_spec.rb
new file mode 100644
index 0000000000..cf4489ebd1
--- /dev/null
+++ b/deployment/puppet/nova/spec/unit/provider/nova_floating/nova_floating_spec.rb
@@ -0,0 +1,38 @@
+require 'spec_helper'
+
+describe Puppet::Type.type(:nova_floating).provider(:nova_manage) do
+
+ let(:resource) { Puppet::Type.type(:nova_floating).new(:name => '192.168.1.1' ) }
+ let(:provider) { resource.provider }
+
+ describe "#create_by_name" do
+ it "should create floating" do
+ provider.parse().should == ["192.168.1.1", nil]
+ end
+ end
+
+ for net in ['10.0.0.1', '10.0.0.0/16'] do
+ describe "#create #{net}" do
+ it "should create floating for #{net}" do
+ resource[:network]= net
+ provider.expects(:nova_manage).with("floating", "create", net)
+ provider.create()
+ end
+ end
+ describe "#destroy #{net}" do
+ it "should destroy floating for #{net}" do
+ resource[:network]= net
+ provider.expects(:nova_manage).with("floating", "delete", net)
+ provider.destroy()
+ end
+ end
+ describe "#check masklen #{net}" do
+ it "should returns right values for #{net}" do
+ resource[:network]= net
+ /([0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3})(\/([0-9]{1,2}))?/ =~ net
+ provider.parse().should == [Regexp.last_match(1), Regexp.last_match(3)]
+ end
+ end
+ end
+
+end
\ No newline at end of file
diff --git a/deployment/puppet/nova/spec/unit/type/nova_floating_spec.rb b/deployment/puppet/nova/spec/unit/type/nova_floating_spec.rb
new file mode 100644
index 0000000000..556e80e97b
--- /dev/null
+++ b/deployment/puppet/nova/spec/unit/type/nova_floating_spec.rb
@@ -0,0 +1,26 @@
+require 'puppet'
+require 'puppet/type/nova_floating'
+describe 'Puppet::Type.type(:nova_floating)' do
+ before :each do
+ @nova_floating = Puppet::Type.type(:nova_floating).new(:name => 'test_IP', :network => '192.168.1.2')
+ end
+
+ it 'should accept valid IP address' do
+ @nova_floating[:network] = '192.168.1.1'
+    @nova_floating[:network].should == '192.168.1.1'
+ end
+ it 'should accept valid CIDR subnet' do
+ @nova_floating[:network] = '192.168.1.0/24'
+    @nova_floating[:network].should == '192.168.1.0/24'
+ end
+  it 'should not accept masklen longer than 2 digits' do
+ expect {
+ @nova_floating[:network] = '192.168.1.0/245'
+ }.to raise_error(Puppet::Error, /Invalid value/)
+ end
+  it 'should not accept invalid network values' do
+ expect {
+ @nova_floating[:network] = 'qweqweqweqwe'
+ }.to raise_error(Puppet::Error, /Invalid value/)
+ end
+end
\ No newline at end of file
diff --git a/deployment/puppet/openstack/examples/site_openstack_compact_fordocs.pp b/deployment/puppet/openstack/examples/site_openstack_compact_fordocs.pp
index fef11e9aff..ef6364cdc8 100644
--- a/deployment/puppet/openstack/examples/site_openstack_compact_fordocs.pp
+++ b/deployment/puppet/openstack/examples/site_openstack_compact_fordocs.pp
@@ -472,6 +472,7 @@ Exec { logoutput => true }
# Globally apply an environment-based tag to all resources on each node.
tag("${::deployment_id}::${::environment}")
+
stage { 'openstack-custom-repo': before => Stage['netconfig'] }
class { 'openstack::mirantis_repos':
stage => 'openstack-custom-repo',
@@ -480,6 +481,15 @@ class { 'openstack::mirantis_repos':
repo_proxy=>$repo_proxy,
}
+if !defined(Class['selinux']) and ($::osfamily == 'RedHat') {
+ class { 'selinux':
+ mode=>"disabled",
+ stage=>"openstack-custom-repo"
+ }
+}
+
+
+
if $::operatingsystem == 'Ubuntu' {
class { 'openstack::apparmor::disable': stage => 'openstack-custom-repo' }
}
diff --git a/deployment/puppet/openstack/examples/site_openstack_ha_compact.pp b/deployment/puppet/openstack/examples/site_openstack_ha_compact.pp
index 99204e5986..bfd415b8bc 100644
--- a/deployment/puppet/openstack/examples/site_openstack_ha_compact.pp
+++ b/deployment/puppet/openstack/examples/site_openstack_ha_compact.pp
@@ -473,6 +473,7 @@ Exec { logoutput => true }
# Globally apply an environment-based tag to all resources on each node.
tag("${::deployment_id}::${::environment}")
+
stage { 'openstack-custom-repo': before => Stage['netconfig'] }
class { 'openstack::mirantis_repos':
stage => 'openstack-custom-repo',
@@ -484,7 +485,15 @@ class { 'openstack::mirantis_repos':
class { '::openstack::firewall':
stage => 'openstack-firewall'
}
-
+
+if !defined(Class['selinux']) and ($::osfamily == 'RedHat') {
+  class { 'selinux':
+    mode=>"disabled",
+    stage=>"openstack-custom-repo"
+  }
+}
+
+
if $::operatingsystem == 'Ubuntu' {
class { 'openstack::apparmor::disable': stage => 'openstack-custom-repo' }
}
@@ -548,9 +557,9 @@ class compact_controller (
quantum_external_ipinfo => $external_ipinfo,
tenant_network_type => $tenant_network_type,
segment_range => $segment_range,
- cinder => $is_cinder_node,
+ cinder => $cinder,
cinder_iscsi_bind_addr => $cinder_iscsi_bind_addr,
- manage_volumes => $manage_volumes,
+      manage_volumes          => $cinder ? { false => $manage_volumes, default => $is_cinder_node },
galera_nodes => $controller_hostnames,
nv_physical_volume => $nv_physical_volume,
use_syslog => $use_syslog,
@@ -604,9 +613,11 @@ node /fuel-controller-[\d+]/ {
swift_local_net_ip => $swift_local_net_ip,
master_swift_proxy_ip => $master_swift_proxy_ip,
sync_rings => ! $primary_proxy,
- cinder => $is_cinder_node,
+      # disable cinder on the storage node in order to avoid
+      # duplicate class declarations with different parameters
+ cinder => false,
cinder_iscsi_bind_addr => $cinder_iscsi_bind_addr,
- manage_volumes => $manage_volumes,
+ manage_volumes => false,
nv_physical_volume => $nv_physical_volume,
db_host => $internal_virtual_ip,
service_endpoint => $internal_virtual_ip,
@@ -689,7 +700,7 @@ node /fuel-compute-[\d+]/ {
tenant_network_type => $tenant_network_type,
segment_range => $segment_range,
cinder => $cinder,
- manage_volumes => $is_cinder_node ? { true => $manage_volumes, false => false},
+      manage_volumes          => $cinder ? { false => $manage_volumes, default => $is_cinder_node },
cinder_iscsi_bind_addr => $cinder_iscsi_bind_addr,
nv_physical_volume => $nv_physical_volume,
db_host => $internal_virtual_ip,
diff --git a/deployment/puppet/openstack/examples/site_openstack_ha_full.pp b/deployment/puppet/openstack/examples/site_openstack_ha_full.pp
index fa190e89e1..f0639172d2 100644
--- a/deployment/puppet/openstack/examples/site_openstack_ha_full.pp
+++ b/deployment/puppet/openstack/examples/site_openstack_ha_full.pp
@@ -494,6 +494,7 @@ Exec { logoutput => true }
# Globally apply an environment-based tag to all resources on each node.
tag("${::deployment_id}::${::environment}")
+
stage { 'openstack-custom-repo': before => Stage['netconfig'] }
class { 'openstack::mirantis_repos':
stage => 'openstack-custom-repo',
@@ -506,6 +507,14 @@ class { 'openstack::mirantis_repos':
stage => 'openstack-firewall'
}
+if !defined(Class['selinux']) and ($::osfamily == 'RedHat') {
+ class { 'selinux':
+ mode=>"disabled",
+ stage=>"openstack-custom-repo"
+ }
+}
+
+
if $::operatingsystem == 'Ubuntu' {
class { 'openstack::apparmor::disable': stage => 'openstack-custom-repo' }
}
@@ -588,9 +597,9 @@ class ha_controller (
quantum_external_ipinfo => $external_ipinfo,
tenant_network_type => $tenant_network_type,
segment_range => $segment_range,
- cinder => $is_cinder_node,
+ cinder => $cinder,
cinder_iscsi_bind_addr => $cinder_iscsi_bind_addr,
- manage_volumes => $manage_volumes,
+      manage_volumes          => $cinder ? { false => $manage_volumes, default => $is_cinder_node },
galera_nodes => $controller_hostnames,
nv_physical_volume => $nv_physical_volume,
use_syslog => $use_syslog,
@@ -671,7 +680,7 @@ node /fuel-compute-[\d+]/ {
tenant_network_type => $tenant_network_type,
segment_range => $segment_range,
cinder => $cinder,
- manage_volumes => $is_cinder_node ? { true => $manage_volumes, false => false},
+      manage_volumes          => $cinder ? { false => $manage_volumes, default => $is_cinder_node },
cinder_iscsi_bind_addr => $cinder_iscsi_bind_addr,
nv_physical_volume => $nv_physical_volume,
db_host => $internal_virtual_ip,
@@ -714,9 +723,9 @@ node /fuel-swift-[\d+]/ {
swift_zone => $swift_zone,
swift_local_net_ip => $swift_local_net_ip,
master_swift_proxy_ip => $master_swift_proxy_ip,
- cinder => $is_cindernode,
+ cinder => $cinder,
cinder_iscsi_bind_addr => $cinder_iscsi_bind_addr,
- manage_volumes => $manage_volumes,
+      manage_volumes          => $cinder ? { false => $manage_volumes, default => $is_cinder_node },
nv_physical_volume => $nv_physical_volume,
db_host => $internal_virtual_ip,
service_endpoint => $internal_virtual_ip,
diff --git a/deployment/puppet/openstack/examples/site_openstack_ha_minimal.pp b/deployment/puppet/openstack/examples/site_openstack_ha_minimal.pp
index 8c4f597212..016a68b415 100644
--- a/deployment/puppet/openstack/examples/site_openstack_ha_minimal.pp
+++ b/deployment/puppet/openstack/examples/site_openstack_ha_minimal.pp
@@ -172,7 +172,7 @@ $vlan_start = 300
# Segmentation type for isolating traffic between tenants
# Consult Openstack Quantum docs
-$tenant_network_type = 'gre'
+$tenant_network_type = 'vlan'
# Which IP address will be used for creating GRE tunnels.
$quantum_gre_bind_addr = $internal_address
@@ -192,7 +192,7 @@ $external_ipinfo = {}
# Quantum segmentation range.
# For VLAN networks: valid VLAN VIDs can be 1 through 4094.
# For GRE networks: Valid tunnel IDs can be any 32-bit unsigned integer.
-$segment_range = '900:999'
+$segment_range = '300:349'
# Set up OpenStack network manager. It is used ONLY in nova-network.
# Consult Openstack nova-network docs for possible values.
@@ -441,6 +441,7 @@ Exec<| title == 'clocksync' |>->Exec<| title == 'post-nova_config' |>
# Globally apply an environment-based tag to all resources on each node.
tag("${::deployment_id}::${::environment}")
+
stage { 'openstack-custom-repo': before => Stage['netconfig'] }
class { 'openstack::mirantis_repos':
stage => 'openstack-custom-repo',
@@ -453,6 +454,15 @@ class { 'openstack::mirantis_repos':
stage => 'openstack-firewall'
}
+if !defined(Class['selinux']) and ($::osfamily == 'RedHat') {
+ class { 'selinux':
+ mode=>"disabled",
+ stage=>"openstack-custom-repo"
+ }
+}
+
+
+
if $::operatingsystem == 'Ubuntu' {
class { 'openstack::apparmor::disable': stage => 'openstack-custom-repo' }
}
@@ -515,9 +525,9 @@ class compact_controller (
quantum_external_ipinfo => $external_ipinfo,
tenant_network_type => $tenant_network_type,
segment_range => $segment_range,
- cinder => $is_cinder_node,
+ cinder => $cinder,
cinder_iscsi_bind_addr => $cinder_iscsi_bind_addr,
- manage_volumes => $manage_volumes,
+      manage_volumes          => $cinder ? { false => $manage_volumes, default => $is_cinder_node },
galera_nodes => $controller_hostnames,
nv_physical_volume => $nv_physical_volume,
use_syslog => $use_syslog,
@@ -602,7 +612,7 @@ node /fuel-compute-[\d+]/ {
segment_range => $segment_range,
cinder => $cinder,
cinder_iscsi_bind_addr => $cinder_iscsi_bind_addr,
- manage_volumes => $is_cinder_node ? { true => $manage_volumes, false => false},
+      manage_volumes          => $cinder ? { false => $manage_volumes, default => $is_cinder_node },
nv_physical_volume => $nv_physical_volume,
db_host => $internal_virtual_ip,
ssh_private_key => 'puppet:///ssh_keys/openstack',
diff --git a/deployment/puppet/openstack/examples/site_openstack_simple.pp b/deployment/puppet/openstack/examples/site_openstack_simple.pp
index 8ddda6f461..aac8185cf5 100644
--- a/deployment/puppet/openstack/examples/site_openstack_simple.pp
+++ b/deployment/puppet/openstack/examples/site_openstack_simple.pp
@@ -140,7 +140,7 @@ $vlan_start = 300
# Segmentation type for isolating traffic between tenants
# Consult Openstack Quantum docs
-$tenant_network_type = 'gre'
+$tenant_network_type = 'vlan'
# Which IP address will be used for creating GRE tunnels.
$quantum_gre_bind_addr = $internal_address
@@ -160,7 +160,7 @@ $external_ipinfo = {}
# Quantum segmentation range.
# For VLAN networks: valid VLAN VIDs can be 1 through 4094.
# For GRE networks: Valid tunnel IDs can be any 32-bit unsigned integer.
-$segment_range = '900:999'
+$segment_range = '300:349'
# Set up OpenStack network manager. It is used ONLY in nova-network.
# Consult Openstack nova-network docs for possible values.
@@ -389,6 +389,7 @@ Exec<| title == 'clocksync' |>->Service<| title == 'cinder-volume' |>
Exec<| title == 'clocksync' |>->Service<| title == 'cinder-api' |>
Exec<| title == 'clocksync' |>->Service<| title == 'cinder-scheduler' |>
Exec<| title == 'clocksync' |>->Exec<| title == 'keystone-manage db_sync' |>
+Exec<| title == 'clocksync' |>->Exec<| title == 'keystone-manage pki_setup' |>
Exec<| title == 'clocksync' |>->Exec<| title == 'glance-manage db_sync' |>
Exec<| title == 'clocksync' |>->Exec<| title == 'nova-manage db sync' |>
Exec<| title == 'clocksync' |>->Exec<| title == 'initial-db-sync' |>
@@ -403,6 +404,7 @@ Exec<| title == 'clocksync' |>->Exec<| title == 'post-nova_config' |>
# Globally apply an environment-based tag to all resources on each node.
tag("${::deployment_id}::${::environment}")
+
stage { 'openstack-custom-repo': before => Stage['netconfig'] }
class { 'openstack::mirantis_repos':
stage => 'openstack-custom-repo',
@@ -416,6 +418,14 @@ class { 'openstack::mirantis_repos':
stage => 'openstack-firewall'
}
+if !defined(Class['selinux']) and ($::osfamily == 'RedHat') {
+ class { 'selinux':
+ mode=>"disabled",
+ stage=>"openstack-custom-repo"
+ }
+}
+
+
if $::operatingsystem == 'Ubuntu' {
class { 'openstack::apparmor::disable': stage => 'openstack-custom-repo' }
}
@@ -479,9 +489,9 @@ class simple_controller (
quantum_external_ipinfo => $external_ipinfo,
tenant_network_type => $tenant_network_type,
segment_range => $segment_range,
- cinder => $is_cinder_node,
+ cinder => $cinder,
cinder_iscsi_bind_addr => $cinder_iscsi_bind_addr,
- manage_volumes => $manage_volumes,
+      manage_volumes          => $cinder ? { false => $manage_volumes, default => $is_cinder_node },
nv_physical_volume => $nv_physical_volume,
use_syslog => $use_syslog,
nova_rate_limits => $nova_rate_limits,
@@ -597,8 +607,8 @@ node /fuel-compute-[\d+]/ {
verbose => $verbose,
segment_range => $segment_range,
cinder => $cinder,
- manage_volumes => $is_cinder_node ? { true => $manage_volumes, false => false},
- nv_physical_volume => $nv_physical_volume,
+      manage_volumes       => $cinder ? { false => $manage_volumes, default => $is_cinder_node },
+ nv_physical_volume => $nv_physical_volume,
cinder_iscsi_bind_addr => $cinder_iscsi_bind_addr,
use_syslog => $use_syslog,
nova_rate_limits => $nova_rate_limits,
diff --git a/deployment/puppet/openstack/examples/site_openstack_single.pp b/deployment/puppet/openstack/examples/site_openstack_single.pp
index 9f0b5a78a5..c551a5dd8d 100644
--- a/deployment/puppet/openstack/examples/site_openstack_single.pp
+++ b/deployment/puppet/openstack/examples/site_openstack_single.pp
@@ -1,92 +1,380 @@
#
-# Example of how to deploy basic single openstack environment.
+# Parameter values in this file should be changed, taking into consideration your
+# networking setup and desired OpenStack settings.
+#
+# Please consult with the latest Fuel User Guide before making edits.
#
+### GENERAL CONFIG ###
+# This section sets main parameters such as hostnames and IP addresses of different nodes
+
# deploy a script that can be used to test nova
class { 'openstack::test_file': }
-####### shared variables ##################
+# This is the name of the public interface. The public network provides address space for Floating IPs, as well as public IP accessibility to the API endpoints.
+$public_interface = 'eth1'
+
+# This is the name of the internal interface. It will be attached to the management network, where data exchange between components of the OpenStack cluster will happen.
+$internal_interface = 'eth0'
+
+# This is the name of the private interface. All traffic within OpenStack tenants' networks will go through this interface.
+$private_interface = 'eth2'
+
+$nodes_harr = [
+ {
+ 'name' => 'fuel-cobbler',
+ 'role' => 'cobbler',
+ 'internal_address' => '10.0.0.102',
+ 'public_address' => '10.0.204.102',
+ },
+ {
+ 'name' => 'fuel-controller-01',
+ 'role' => 'controller',
+ 'internal_address' => '10.0.0.103',
+ 'public_address' => '10.0.204.103',
+ },
+ {
+ 'name' => 'fuel-controller-01',
+ 'role' => 'compute',
+ 'internal_address' => '10.0.0.103',
+ 'public_address' => '10.0.204.103',
+ },
+]
+$nodes = $nodes_harr
+$default_gateway = '10.0.204.1'
+
+# Specify nameservers here.
+# These should point to the cobbler node IP, or to specially prepared nameservers if you know what you are doing.
+$dns_nameservers = ['10.0.204.1','8.8.8.8']
+
+# Specify netmasks for internal and external networks.
+$internal_netmask = '255.255.255.0'
+$public_netmask = '255.255.255.0'
-# this section is used to specify global variables that will
-# be used in the deployment of multi and single node openstack
-# environments
+$node = filter_nodes($nodes,'name',$::hostname)
+$internal_address = $node[0]['internal_address']
+$public_address = $node[0]['public_address']
-# assumes that eth0 is the public interface
-$public_interface = 'eth0'
-# assumes that eth1 is the interface that will be used for the vm network
-# this configuration assumes this interface is active but does not have an
-# ip address allocated to it.
-$private_interface = 'eth1'
-# credentials
-$admin_email = 'root@localhost'
+$controllers = merge_arrays(filter_nodes($nodes,'role','primary-controller'), filter_nodes($nodes,'role','controller'))
+$controller_internal_address = $controllers[0]['internal_address']
+$controller_public_address = $controllers[0]['public_address']
+
+$ha_provider = 'generic'
+
+# Set nagios master fqdn
+$nagios_master = 'nagios-server.localdomain'
+## proj_name is the name of the environment in the nagios configuration
+$proj_name = 'test'
+
+# Specify if your installation contains multiple Nova controllers. Set to false for a single-node deployment such as this one.
+$multi_host = false
+
+# Specify different DB credentials for various services
+$mysql_root_password = 'nova'
+$admin_email = 'openstack@openstack.org'
$admin_password = 'nova'
-$keystone_db_password = 'keystone_db_pass'
-$keystone_admin_token = 'keystone_admin_token'
-$nova_db_password = 'nova_pass'
-$nova_user_password = 'nova_pass'
-$glance_db_password = 'glance_pass'
-$glance_user_password = 'glance_pass'
-$horizon_secret_key = 'dummy_secret_key'
-$mysql_root_password = 'sql_pass'
-$rabbit_password = 'openstack_rabbit_password'
-$rabbit_user = 'openstack_rabbit_user'
-$fixed_range = '10.0.58.0/24'
-$floating_range = '10.0.75.128/27'
-$vlan_start = 300
-# switch this to true to have all service log at verbose
-$verbose = true
-# by default it does not enable atomatically adding floating IPs
+
+$keystone_db_password = 'nova'
+$keystone_admin_token = 'nova'
+
+$glance_db_password = 'nova'
+$glance_user_password = 'nova'
+
+$nova_db_password = 'nova'
+$nova_user_password = 'nova'
+
+$rabbit_password = 'nova'
+$rabbit_user = 'nova'
+
+# End DB credentials section
+
+### GENERAL CONFIG END ###
+
+### NETWORK/QUANTUM ###
+# Specify network/quantum specific settings
+
+# Should we use quantum or nova-network (deprecated).
+# Consult OpenStack documentation for differences between them.
+$quantum = false
+$quantum_netnode_on_cnt = true
+
+# Specify network creation criteria:
+# Should puppet automatically create networks?
+$create_networks = true
+# Fixed IP addresses are typically used for communication between VM instances.
+$fixed_range = '10.0.198.128/27'
+# Floating IP addresses are used for communication of VM instances with the outside world (e.g. Internet).
+$floating_range = '10.0.204.128/28'
+
+# These parameters are passed to the previously specified network manager, e.g. nova-manage network create.
+# Not used in Quantum.
+# Consult openstack docs for corresponding network manager.
+# https://fuel-dev.mirantis.com/docs/0.2/pages/0050-installation-instructions.html#network-setup
+$num_networks = 1
+$network_size = 31
+$vlan_start = 300
+
+# Quantum
+
+# Segmentation type for isolating traffic between tenants
+# Consult Openstack Quantum docs
+$tenant_network_type = 'gre'
+
+# Which IP address will be used for creating GRE tunnels.
+$quantum_gre_bind_addr = $internal_address
+
+# If $external_ipinfo option is not defined, the addresses will be allocated automatically from $floating_range:
+# the first address will be defined as an external default router,
+# the second address will be attached to an uplink bridge interface,
+# the remaining addresses will be utilized for the floating IP address pool.
+$external_ipinfo = {}
+## $external_ipinfo = {
+## 'public_net_router' => '10.0.74.129',
+## 'ext_bridge' => '10.0.74.130',
+## 'pool_start' => '10.0.74.131',
+## 'pool_end' => '10.0.74.142',
+## }
+
+# Quantum segmentation range.
+# For VLAN networks: valid VLAN IDs (VIDs) are 1 through 4094.
+# For GRE networks: valid tunnel IDs can be any 32-bit unsigned integer.
+$segment_range = '900:999'
+
+# Select the OpenStack network manager. It is used ONLY by nova-network.
+# Consult the OpenStack nova-network docs for possible values.
+$network_manager = 'nova.network.manager.FlatDHCPManager'
+
+# Assign floating IPs to VMs on startup automatically?
$auto_assign_floating_ip = false
-# Cinder service
-$cinder = false
-$quantum = false
-$swift = false
-$use_syslog = false
+# Database connections
+$sql_connection = "mysql://nova:${nova_db_password}@${controller_internal_address}/nova"
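+# With the values above this expands to e.g. "mysql://nova:nova@<controller internal address>/nova".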
+
+$public_int = $public_interface
+$internal_int = $internal_interface
+
+#Network configuration
stage {'netconfig':
before => Stage['main'],
}
-class {'l23network': stage=> 'netconfig'}
-$quantum_gre_bind_addr = $internal_address
-# Packages repo setup
+class {'l23network': use_ovs=>$quantum, stage=> 'netconfig'}
+class node_netconfig (
+ $mgmt_ipaddr,
+ $mgmt_netmask = '255.255.255.0',
+ $public_ipaddr = undef,
+ $public_netmask= '255.255.255.0',
+ $save_default_gateway=false,
+ $quantum = $quantum,
+) {
+ if $quantum {
+ l23network::l3::create_br_iface {'mgmt':
+      interface => $internal_interface, # NOTE: must be $internal_interface, NOT $internal_int
+ bridge => $internal_br,
+ ipaddr => $mgmt_ipaddr,
+ netmask => $mgmt_netmask,
+ dns_nameservers => $dns_nameservers,
+ save_default_gateway => $save_default_gateway,
+ } ->
+ l23network::l3::create_br_iface {'ex':
+      interface => $public_interface, # NOTE: must be $public_interface, NOT $public_int
+ bridge => $public_br,
+ ipaddr => $public_ipaddr,
+ netmask => $public_netmask,
+ gateway => $default_gateway,
+ }
+ } else {
+ # nova-network mode
+ l23network::l3::ifconfig {$public_int:
+ ipaddr => $public_ipaddr,
+ netmask => $public_netmask,
+ gateway => $default_gateway,
+ }
+ l23network::l3::ifconfig {$internal_int:
+ ipaddr => $mgmt_ipaddr,
+ netmask => $mgmt_netmask,
+ dns_nameservers => $dns_nameservers,
+ }
+ }
+ l23network::l3::ifconfig {$private_interface: ipaddr=>'none' }
+ class { cobbler::checksum_bootpc: }
+}
+### NETWORK/QUANTUM END ###
+
+
+# This parameter specifies the identifier of the current cluster. This is needed in case of
+# multiple environment installations. Each cluster requires a unique integer value.
+# Valid identifier range is 1 to 254.
+$deployment_id = '69'
+
+# Below you can enable or disable various services based on the chosen deployment topology:
+### CINDER/VOLUME ###
+
+# Should we use cinder or nova-volume (obsolete)?
+# Consult the OpenStack docs for the differences between them.
+$cinder = true
+
+# Choose which nodes to install cinder onto
+# 'compute' -> compute nodes will run cinder
+# 'controller' -> controller nodes will run cinder
+# 'storage' -> storage nodes will run cinder
+# 'fuel-controller-XX' -> specify particular host(s) by hostname
+# 'XXX.XXX.XXX.XXX' -> specify particular host(s) by IP address
+# 'all' -> compute, controller, and storage nodes will run cinder (excluding swift and proxy nodes)
+
+$cinder_nodes = ['controller']
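+## Example (hypothetical hostnames/addresses) of mixing the selectors above:
+## $cinder_nodes = ['fuel-controller-01', '10.20.0.5']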
+
+# Set to true if you want cinder-volume to be installed on the host.
+# Otherwise only the API and scheduler services will be installed.
+$manage_volumes = true
+
+# Set the network interface that Cinder uses to export iSCSI targets.
+$cinder_iscsi_bind_addr = $internal_address
+
+# Below you can add physical volumes to cinder. Please replace values with the actual names of devices.
+# This parameter defines which partitions to aggregate into cinder-volumes or nova-volumes LVM VG
+# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+# USE EXTREME CAUTION WITH THIS SETTING! IF THIS PARAMETER IS DEFINED,
+# IT WILL AGGREGATE THE VOLUMES INTO AN LVM VOLUME GROUP
+# AND ALL THE DATA THAT RESIDES ON THESE VOLUMES WILL BE LOST!
+# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+# Leave this parameter empty if you want to create [cinder|nova]-volumes VG by yourself
+$nv_physical_volume = ['/dev/sdz', '/dev/sdy', '/dev/sdx']
+
+#Evaluate cinder node selection
+if ($cinder) {
+ if (member($cinder_nodes,'all')) {
+ $is_cinder_node = true
+ } elsif (member($cinder_nodes,$::hostname)) {
+ $is_cinder_node = true
+ } elsif (member($cinder_nodes,$internal_address)) {
+ $is_cinder_node = true
+ } elsif ($node[0]['role'] =~ /controller/ ) {
+ $is_cinder_node = member($cinder_nodes,'controller')
+ } else {
+ $is_cinder_node = member($cinder_nodes,$node[0]['role'])
+ }
+} else {
+ $is_cinder_node = false
+}
+
+### CINDER/VOLUME END ###
+
+### GLANCE and SWIFT ###
+
+# Which backend to use for glance
+# Supported backends are "swift" and "file"
+$glance_backend = 'file'
+
+# Use loopback device for swift:
+# set 'loopback' or false
+# This parameter controls where swift partitions are located:
+# on physical partitions or inside loopback devices.
+$swift_loopback = false
+
+### Glance and swift END ###
+
+### Syslog ###
+# Enable reporting of error messages to rsyslog. Rsyslog must be installed for this to work.
+$use_syslog = false
+if $use_syslog {
+ class { "::rsyslog::client":
+ log_local => true,
+ log_auth_local => true,
+ server => '127.0.0.1',
+ port => '514'
+ }
+}
+
+### Syslog END ###
+
+
+case $::osfamily {
+ "Debian": {
+ $rabbitmq_version_string = '2.8.7-1'
+ }
+ "RedHat": {
+ $rabbitmq_version_string = '2.8.7-2.el6'
+ }
+}
+
+# OpenStack packages to be installed
+$openstack_version = {
+ 'keystone' => 'latest',
+ 'glance' => 'latest',
+ 'horizon' => 'latest',
+ 'nova' => 'latest',
+ 'novncproxy' => 'latest',
+ 'cinder' => 'latest',
+ 'rabbitmq_version' => $rabbitmq_version_string,
+}
+
+# Which package repo mirror to use. Currently "default".
+# "custom" is used by Mirantis for testing purposes.
+# Local puppet-managed repo option planned for future releases.
+# If you want to set up a local repository, you will need to manually adjust mirantis_repos.pp,
+# though it is NOT recommended.
$mirror_type = 'default'
$enable_test_repo = false
$repo_proxy = undef
$use_upstream_mysql = true
+# This parameter specifies the verbosity level of log messages
+# in the OpenStack components' configuration. Currently it enables or disables debug logging.
+$verbose = true
+
+# Rate limits for Cinder and Nova.
+# Cinder and Nova can rate-limit your requests to their API services.
+# These limits can be reduced for your installation or usage scenario.
+# Change the following variables if needed. They are measured in requests per minute.
+$nova_rate_limits = {
+ 'POST' => 1000,
+ 'POST_SERVERS' => 1000,
+ 'PUT' => 1000, 'GET' => 1000,
+ 'DELETE' => 1000
+}
+$cinder_rate_limits = {
+ 'POST' => 1000,
+ 'POST_SERVERS' => 1000,
+ 'PUT' => 1000, 'GET' => 1000,
+ 'DELETE' => 1000
+}
+
+
+Exec { logoutput => true }
#Specify desired NTP servers here.
#If you leave it undef, pool.ntp.org
#will be used.
+
$ntp_servers = ['pool.ntp.org']
-# This parameter specifies the the identifier of the current cluster. This is needed in case of multiple environments.
-# installation. Each cluster requires a unique integer value.
-# Valid identifier range is 1 to 254
-$deployment_id = '59'
-
-# Globally apply an environment-based tag to all resources on each node.
-tag("${::deployment_id}::${::environment}")
-
class {'openstack::clocksync': ntp_servers=>$ntp_servers}
#Exec clocksync from openstack::clocksync before services
#connecting to the AMQP server are started.
Exec<| title == 'clocksync' |>->Nova::Generic_service<| |>
-Exec<| title == 'clocksync' |>->Service<| title == 'quantum-l3' |>
-Exec<| title == 'clocksync' |>->Service<| title == 'quantum-dhcp-service' |>
-Exec<| title == 'clocksync' |>->Service<| title == 'quantum-ovs-plugin-service' |>
Exec<| title == 'clocksync' |>->Service<| title == 'cinder-volume' |>
Exec<| title == 'clocksync' |>->Service<| title == 'cinder-api' |>
Exec<| title == 'clocksync' |>->Service<| title == 'cinder-scheduler' |>
Exec<| title == 'clocksync' |>->Exec<| title == 'keystone-manage db_sync' |>
+Exec<| title == 'clocksync' |>->Exec<| title == 'keystone-manage pki_setup' |>
Exec<| title == 'clocksync' |>->Exec<| title == 'glance-manage db_sync' |>
Exec<| title == 'clocksync' |>->Exec<| title == 'nova-manage db sync' |>
Exec<| title == 'clocksync' |>->Exec<| title == 'initial-db-sync' |>
Exec<| title == 'clocksync' |>->Exec<| title == 'post-nova_config' |>
+
+
+
+### END OF PUBLIC CONFIGURATION PART ###
+# Normally, you do not need to change anything below this line.
+
+# Globally apply an environment-based tag to all resources on each node.
+tag("${::deployment_id}::${::environment}")
+
+
stage { 'openstack-custom-repo': before => Stage['netconfig'] }
class { 'openstack::mirantis_repos':
stage => 'openstack-custom-repo',
@@ -100,27 +388,29 @@ class { 'openstack::mirantis_repos':
stage => 'openstack-firewall'
}
-# OpenStack packages and customized component versions to be installed.
-# Use 'latest' to get the most recent ones or specify exact version if you need to install custom version.
-case $::osfamily {
- "Debian": {
- $rabbitmq_version_string = '2.8.7-1'
- }
- "RedHat": {
- $rabbitmq_version_string = '2.8.7-2.el6'
+if !defined(Class['selinux']) and ($::osfamily == 'RedHat') {
+ class { 'selinux':
+ mode=>"disabled",
+ stage=>"openstack-custom-repo"
}
}
-$openstack_version = {
- 'keystone' => 'latest',
- 'glance' => 'latest',
- 'horizon' => 'latest',
- 'nova' => 'latest',
- 'novncproxy' => 'latest',
- 'cinder' => 'latest',
- 'rabbitmq_version' => $rabbitmq_version_string,
+
+
+if $::operatingsystem == 'Ubuntu' {
+ class { 'openstack::apparmor::disable': stage => 'openstack-custom-repo' }
}
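+# rp_filter=0 disables kernel reverse-path filtering, which would otherwise drop traffic on multi-homed OpenStack nodes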
+sysctl::value { 'net.ipv4.conf.all.rp_filter': value => '0' }
+
+# Dashboard(horizon) https/ssl mode
+# false: normal mode with no encryption
+# 'default': uses keys supplied with the ssl module package
+# 'exist': assumes that the keys (domain name based certificate) are provisioned in advance
+# 'custom': require fileserver static mount point [ssl_certs] and hostname based certificate existence
+$horizon_use_ssl = false
+$horizon_secret_key = 'dummy_secret_key'
+
# Every node is deployed as an all-in-one OpenStack installation.
node default {
include stdlib
@@ -128,40 +418,69 @@ node default {
stage => 'setup'
}
+ class {'::node_netconfig':
+ mgmt_ipaddr => $::internal_address,
+ mgmt_netmask => $::internal_netmask,
+ public_ipaddr => $::public_address,
+ public_netmask => $::public_netmask,
+ stage => 'netconfig',
+ }
+
+ class {'nagios':
+ proj_name => $proj_name,
+ services => [
+ 'host-alive','nova-novncproxy','keystone', 'nova-scheduler',
+ 'nova-consoleauth', 'nova-cert', 'nova-api', 'glance-api',
+ 'glance-registry','horizon', 'rabbitmq', 'mysql',
+ ],
+ whitelist => ['127.0.0.1', $nagios_master],
+ hostgroup => 'controller',
+ }
+
class { 'openstack::all':
- public_address => $ipaddress_eth0,
- public_interface => $public_interface,
+ admin_address => $controller_internal_address,
+ service_endpoint => $controller_internal_address,
+ public_address => $controller_public_address,
+ public_interface => $public_int,
private_interface => $private_interface,
+ internal_address => $controller_internal_address,
+ floating_range => $floating_range,
+ fixed_range => $fixed_range,
+ network_manager => $network_manager,
+ num_networks => $num_networks,
+ network_size => $network_size,
+ network_config => { 'vlan_start' => $vlan_start },
+ verbose => $verbose,
+ auto_assign_floating_ip => $auto_assign_floating_ip,
+ mysql_root_password => $mysql_root_password,
admin_email => $admin_email,
admin_password => $admin_password,
keystone_db_password => $keystone_db_password,
keystone_admin_token => $keystone_admin_token,
- nova_db_password => $nova_db_password,
- nova_user_password => $nova_user_password,
glance_db_password => $glance_db_password,
glance_user_password => $glance_user_password,
+ nova_db_password => $nova_db_password,
+ nova_user_password => $nova_user_password,
secret_key => $horizon_secret_key,
- mysql_root_password => $mysql_root_password,
rabbit_password => $rabbit_password,
rabbit_user => $rabbit_user,
- libvirt_type => 'kvm',
- floating_range => $floating_range,
- fixed_range => $fixed_range,
- verbose => $verbose,
- auto_assign_floating_ip => $auto_assign_floating_ip,
- network_config => { 'vlan_start' => $vlan_start },
purge_nova_config => false,
cinder => $cinder,
+ cinder_iscsi_bind_addr => $cinder_iscsi_bind_addr,
+    manage_volumes       => $cinder ? { false => $manage_volumes, default => $is_cinder_node },
+ nv_physical_volume => $nv_physical_volume,
+ use_syslog => $use_syslog,
+ nova_rate_limits => $nova_rate_limits,
+ cinder_rate_limits => $cinder_rate_limits,
quantum => $quantum,
swift => $swift,
+ glance_backend => $glance_backend,
}
-
class { 'openstack::auth_file':
admin_password => $admin_password,
keystone_admin_token => $keystone_admin_token,
- controller_node => '127.0.0.1',
+ controller_node => $controller_internal_address,
}
}
-
diff --git a/deployment/puppet/openstack/files/filter_quantum_ports.py b/deployment/puppet/openstack/files/filter_quantum_ports.py
index 75e5ce27ad..6895ad9c89 100755
--- a/deployment/puppet/openstack/files/filter_quantum_ports.py
+++ b/deployment/puppet/openstack/files/filter_quantum_ports.py
@@ -109,7 +109,7 @@ class QuantumXxx(object):
return []
rv = []
for i in self.get_ports_by_owner(port_owner, activeonly=activeonly):
- rv.append("{0}{1}".format(port_name_prefix, i['id'][:port_id_part_len]))
+ rv.append("{0}{1} {2}".format(port_name_prefix, i['id'][:port_id_part_len], i['fixed_ips'][0]['ip_address']))
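+            # each entry is now "<ifname> <ip>", matching the "while read port ip" loops in the OCF agents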
return rv
@@ -129,4 +129,4 @@ if __name__ == '__main__':
Qu = QuantumXxx(get_authconfig(options.authconf), retries=options.retries)
for i in Qu.get_ifnames_for(args[0].strip(" \"\'"), activeonly=options.activeonly):
print(i)
-###
\ No newline at end of file
+###
diff --git a/deployment/puppet/openstack/files/quantum-agent-dhcp b/deployment/puppet/openstack/files/quantum-agent-dhcp
index 9f82659700..b419d54365 100644
--- a/deployment/puppet/openstack/files/quantum-agent-dhcp
+++ b/deployment/puppet/openstack/files/quantum-agent-dhcp
@@ -250,7 +250,7 @@ quantum_dhcp_agent_status() {
clean_up()
{
- filter_quantum_ports.py "network:dhcp" | while read port; do
+ filter_quantum_ports.py "network:dhcp" | while read port ip; do
ocf_log info "Cleaning up port ${port}"
ovs-vsctl -- --if-exists del-port ${port};
rc=$?
diff --git a/deployment/puppet/openstack/files/quantum-agent-l3 b/deployment/puppet/openstack/files/quantum-agent-l3
index 7936899338..d196e8cc81 100644
--- a/deployment/puppet/openstack/files/quantum-agent-l3
+++ b/deployment/puppet/openstack/files/quantum-agent-l3
@@ -41,6 +41,7 @@ OCF_RESKEY_os_auth_url_default="http://localhost:5000/v2.0"
OCF_RESKEY_username_default="quantum"
OCF_RESKEY_password_default="quantum_pass"
OCF_RESKEY_tenant_default="services"
+OCF_RESKEY_external_bridge_default="br-ex"
: ${OCF_RESKEY_os_auth_url=${OCF_RESKEY_os_auth_url_default}}
@@ -52,6 +53,7 @@ OCF_RESKEY_tenant_default="services"
: ${OCF_RESKEY_plugin_config=${OCF_RESKEY_plugin_config_default}}
: ${OCF_RESKEY_user=${OCF_RESKEY_user_default}}
: ${OCF_RESKEY_pid=${OCF_RESKEY_pid_default}}
+: ${OCF_RESKEY_external_bridge=${OCF_RESKEY_external_bridge_default}}
@@ -170,6 +172,15 @@ Admin tenant name
+<parameter name="external_bridge" unique="0" required="0">
+<longdesc lang="en">
+External bridge for l3-agent
+</longdesc>
+<shortdesc lang="en">External bridge</shortdesc>
+<content type="string" default="${OCF_RESKEY_external_bridge_default}" />
+</parameter>
+
+
@@ -260,8 +271,9 @@ quantum_l3_agent_status() {
clean_up()
{
- filter_quantum_ports.py "network:router_gateway" | while read port; do
+ filter_quantum_ports.py "network:router_gateway" | while read port ip; do
ocf_log info "Cleaning up port ${port}"
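+      # best-effort: remove the port's stale gateway IP from the external bridge (errors are ignored)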
+ ( ip address delete ${ip} dev $OCF_RESKEY_external_bridge || : )
ovs-vsctl -- --if-exists del-port ${port};
rc=$?
if [ $rc -ne 0 ]; then
@@ -269,7 +281,7 @@ clean_up()
exit $OCF_ERR_GENERIC
fi
done
- filter_quantum_ports.py "network:router_interface" | while read port; do
+ filter_quantum_ports.py "network:router_interface" | while read port ip; do
ocf_log info "Cleaning up port ${port}"
ovs-vsctl -- --if-exists del-port ${port};
rc=$?
diff --git a/deployment/puppet/openstack/lib/puppet/parser/functions/shellescape.rb b/deployment/puppet/openstack/lib/puppet/parser/functions/shellescape.rb
new file mode 100644
index 0000000000..1e9adc3b29
--- /dev/null
+++ b/deployment/puppet/openstack/lib/puppet/parser/functions/shellescape.rb
@@ -0,0 +1,11 @@
+require 'shellwords'
+module Puppet::Parser::Functions
+ newfunction(:shellescape, :type => :rvalue, :doc => <<-EOS
+    Escapes shell characters.
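+    Example (manifest usage): $escaped = shellescape($admin_password)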
+ EOS
+ ) do |arguments|
+ raise(Puppet::ParseError, "shellescape(): Wrong number of arguments " +
+ "given (#{arguments.size} for 1)") if arguments.size != 1
+ return Shellwords.escape(arguments[0])
+ end
+end
diff --git a/deployment/puppet/openstack/manifests/all.pp b/deployment/puppet/openstack/manifests/all.pp
index 914b099ce4..c38bd17874 100644
--- a/deployment/puppet/openstack/manifests/all.pp
+++ b/deployment/puppet/openstack/manifests/all.pp
@@ -75,6 +75,7 @@ class openstack::all (
$nova_user_password,
$secret_key,
$internal_address = '127.0.0.1',
+ $admin_address = '127.0.0.1',
# cinder and quantum password are not required b/c they are
# optional. Not sure what to do about this.
$cinder_user_password = 'cinder_pass',
@@ -103,11 +104,13 @@ class openstack::all (
$floating_range = false,
$create_networks = true,
$num_networks = 1,
+ $network_size = 255,
$auto_assign_floating_ip = false,
$network_config = {},
$quantum = false,
# Rabbit
$rabbit_user = 'nova',
+ $rabbit_nodes = ['127.0.0.1'],
# Horizon
$horizon = true,
$cache_server_ip = '127.0.0.1',
@@ -118,8 +121,11 @@ class openstack::all (
$cinder = false,
$cinder_db_user = 'cinder',
$cinder_db_dbname = 'cinder',
- $volume_group = 'cinder-volumes',
- $cinder_test = false,
+ $cinder_iscsi_bind_addr = false,
+ $cinder_volume_group = 'cinder-volumes',
+ $nv_physical_volume = undef,
+ $manage_volumes = false,
+ $cinder_rate_limits = undef,
#
$quantum_db_user = 'quantum',
$quantum_db_dbname = 'quantum',
@@ -129,14 +135,21 @@ class openstack::all (
$vnc_enabled = true,
# General
$enabled = true,
- $verbose = 'False'
+ $verbose = 'False',
+ $service_endpoint = '127.0.0.1',
+ $glance_backend = 'file',
+ $use_syslog = false,
+ $nova_rate_limits = undef,
) {
# Ensure things are run in order
Class['openstack::db::mysql'] -> Class['openstack::keystone']
Class['openstack::db::mysql'] -> Class['openstack::glance']
- # set up mysql server
+ if defined(Class['openstack::cinder']) {
+ Class['openstack::db::mysql'] -> Class['openstack::cinder']
+ }
+ # set up mysql server
if ($db_type == 'mysql') {
if ($enabled) {
Class['glance::db::mysql'] -> Class['glance::registry']
@@ -184,8 +197,8 @@ class openstack::all (
admin_email => $admin_email,
admin_password => $admin_password,
public_address => $public_address,
- internal_address => '127.0.0.1',
- admin_address => '127.0.0.1',
+ internal_address => $internal_address,
+ admin_address => $admin_address,
#region => $region,
glance_user_password => $glance_user_password,
nova_user_password => $nova_user_password,
@@ -193,6 +206,7 @@ class openstack::all (
cinder_user_password => $cinder_user_password,
quantum => $quantum,
quantum_user_password => $quantum_user_password,
+ use_syslog => $use_syslog,
}
######## GLANCE ##########
@@ -205,7 +219,12 @@ class openstack::all (
glance_db_dbname => $glance_db_dbname,
glance_db_password => $glance_db_password,
glance_user_password => $glance_user_password,
+ auth_uri => "http://${service_endpoint}:5000/",
+ keystone_host => $service_endpoint,
enabled => $enabled,
+ glance_backend => $glance_backend,
+ registry_host => $service_endpoint,
+ use_syslog => $use_syslog,
}
######## NOVA ###########
@@ -227,27 +246,26 @@ class openstack::all (
}
}
- ######### Cinder Controller Services ########
- $enabled_apis_ = 'ec2,osapi_compute,metadata'
-
if ($cinder) {
- class { "cinder::base":
- verbose => $verbose,
- sql_connection => "mysql://${cinder_db_user}:${cinder_db_password}@127.0.0.1/${cinder_db_dbname}?charset=utf8",
- rabbit_password => $rabbit_password,
- }
+ $enabled_apis = 'ec2,osapi_compute'
+ }
+ else {
+ $enabled_apis = 'ec2,osapi_compute,osapi_volume'
+ }
- class { 'cinder::api':
- keystone_password => $cinder_user_password,
+ ######### Cinder Controller Services ########
+ if !defined(Class['openstack::cinder']) {
+ class {'openstack::cinder':
+ sql_connection => "mysql://${cinder_db_user}:${cinder_db_password}@127.0.0.1/${cinder_db_dbname}?charset=utf8",
+ rabbit_password => $rabbit_password,
+ cinder_user_password => $cinder_user_password,
+ volume_group => $cinder_volume_group,
+ physical_volume => $nv_physical_volume,
+ manage_volumes => true,
+ enabled => true,
+ iscsi_bind_host => $cinder_iscsi_bind_addr,
+ cinder_rate_limits => $cinder_rate_limits,
}
-
- class { 'cinder::scheduler': }
- class { 'cinder::volume': }
- class { 'cinder::volume::iscsi':
- iscsi_ip_address => '127.0.0.1',
- }
-
- $enabled_apis = $enabled_apis_
} else {
# Set up nova-volume
class { 'lvm':
@@ -263,8 +281,6 @@ class openstack::all (
}
class { 'nova::volume::iscsi': }
-
- $enabled_apis = "${enabled_apis_},osapi_volume"
}
# Install / configure rabbitmq
@@ -280,7 +296,7 @@ class openstack::all (
rabbit_userid => $rabbit_user,
rabbit_password => $rabbit_password,
image_service => 'nova.image.glance.GlanceImageService',
- glance_api_servers => '127.0.0.1:9292',
+    glance_api_servers => "${internal_address}:9292",
verbose => $verbose,
rabbit_host => '127.0.0.1',
}
@@ -289,8 +305,16 @@ class openstack::all (
class { 'nova::api':
enabled => $enabled,
admin_password => $nova_user_password,
- auth_host => 'localhost',
+ auth_host => $service_endpoint,
enabled_apis => $enabled_apis,
+ nova_rate_limits => $nova_rate_limits,
+ cinder => $cinder,
+ }
+
+ # Configure nova-conductor
+ class {'nova::conductor':
+ enabled => $enabled,
+ ensure_package => $ensure_package,
}
# Configure nova-quota
@@ -313,6 +337,7 @@ class openstack::all (
config_overrides => $network_config,
create_networks => $really_create_networks,
num_networks => $num_networks,
+ network_size => $network_size,
enabled => $enabled,
}
} else {
@@ -355,12 +380,12 @@ class openstack::all (
quantum_admin_password => $quantum_user_password,
#$use_dhcp = 'True',
#$public_interface = undef,
- quantum_connection_host => 'localhost',
+ quantum_connection_host => $service_endpoint,
quantum_auth_strategy => 'keystone',
- quantum_url => "http://127.0.0.1:9696",
+      quantum_url => "http://${internal_address}:9696",
quantum_admin_tenant_name => 'services',
#quantum_admin_username => 'quantum',
- quantum_admin_auth_url => "http://127.0.0.1:35357/v2.0",
+ quantum_admin_auth_url => "http://${admin_address}:35357/v2.0",
public_interface => $public_interface,
}
}
@@ -402,7 +427,7 @@ class openstack::all (
######## Horizon ########
if ($horizon) {
class { 'memcached':
- listen_ip => '127.0.0.1',
+ listen_ip => '0.0.0.0',
}
class { 'openstack::horizon':
@@ -412,6 +437,7 @@ class openstack::all (
swift => $swift,
quantum => $quantum,
horizon_app_links => $horizon_app_links,
+ bind_address => $public_address,
}
}
diff --git a/deployment/puppet/openstack/manifests/auth_file.pp b/deployment/puppet/openstack/manifests/auth_file.pp
index 9cc60db2c6..eea8fe64c0 100644
--- a/deployment/puppet/openstack/manifests/auth_file.pp
+++ b/deployment/puppet/openstack/manifests/auth_file.pp
@@ -10,12 +10,15 @@ class openstack::auth_file(
$admin_user = 'admin',
$admin_tenant = 'admin'
) {
+ $escaped_tenant = shellescape($admin_tenant)
+ $escaped_user = shellescape($admin_user)
+ $escaped_password = shellescape($admin_password)
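+  # escaping keeps shell metacharacters in credentials from breaking "source /root/openrc"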
file { '/root/openrc':
content =>
"
- export OS_TENANT_NAME=${admin_tenant}
- export OS_USERNAME=${admin_user}
- export OS_PASSWORD=${admin_password}
+ export OS_TENANT_NAME=${escaped_tenant}
+ export OS_USERNAME=${escaped_user}
+ export OS_PASSWORD=${escaped_password}
export OS_AUTH_URL=\"http://${controller_node}:5000/v2.0/\"
export OS_AUTH_STRATEGY=keystone
export SERVICE_TOKEN=${keystone_admin_token}
diff --git a/deployment/puppet/openstack/manifests/cinder.pp b/deployment/puppet/openstack/manifests/cinder.pp
index eeaeea8412..e636c85070 100644
--- a/deployment/puppet/openstack/manifests/cinder.pp
+++ b/deployment/puppet/openstack/manifests/cinder.pp
@@ -22,6 +22,14 @@ class openstack::cinder(
# purge => true,
# }
#}
+  # There are two assumptions: everyone should use keystone auth,
+  # and glance_api_servers is set globally in every mode except
+  # single, where the service should authenticate itself against
+  # localhost anyway.
+
+ cinder_config { 'DEFAULT/auth_strategy': value => 'keystone' }
+ cinder_config { 'DEFAULT/glance_api_servers': value => $glance_api_servers }
+
if $rabbit_nodes and !$rabbit_ha_virtual_ip {
$rabbit_hosts = inline_template("<%= @rabbit_nodes.map {|x| x + ':5672'}.join ',' %>")
Cinder_config['DEFAULT/rabbit_ha_queues']->Service<| title == 'cinder-api'|>
diff --git a/deployment/puppet/openstack/manifests/compute.pp b/deployment/puppet/openstack/manifests/compute.pp
index 83a69c018f..db2450a5b9 100644
--- a/deployment/puppet/openstack/manifests/compute.pp
+++ b/deployment/puppet/openstack/manifests/compute.pp
@@ -90,13 +90,12 @@ class openstack::compute (
$ssh_private_key = undef,
$cache_server_ip = ['127.0.0.1'],
$cache_server_port = '11211',
- $nova_volume = 'nova-volumes',
$ssh_public_key = undef,
# if the cinder management components should be installed
$manage_volumes = false,
$nv_physical_volume = undef,
$cinder_volume_group = 'cinder-volumes',
- $cinder = false,
+ $cinder = true,
$cinder_user_password = 'cinder_user_pass',
$cinder_db_password = 'cinder_db_pass',
$cinder_db_user = 'cinder',
@@ -104,9 +103,9 @@ class openstack::compute (
$cinder_iscsi_bind_addr = false,
$db_host = '127.0.0.1',
$use_syslog = false,
- $nova_rate_limits = undef,
- $cinder_rate_limits = undef,
- $create_networks = false
+ $nova_rate_limits = undef,
+ $cinder_rate_limits = undef,
+ $create_networks = false
) {
#
@@ -159,45 +158,43 @@ class openstack::compute (
}
class { 'nova':
- ensure_package => $::openstack_version['nova'],
- sql_connection => $sql_connection,
- rabbit_nodes => $rabbit_nodes,
- rabbit_userid => $rabbit_user,
- rabbit_password => $rabbit_password,
- image_service => 'nova.image.glance.GlanceImageService',
- glance_api_servers => $glance_api_servers,
- verbose => $verbose,
- rabbit_host => $rabbit_host,
- use_syslog => $use_syslog,
- api_bind_address => $internal_address,
- rabbit_ha_virtual_ip => $rabbit_ha_virtual_ip,
+ ensure_package => $::openstack_version['nova'],
+ sql_connection => $sql_connection,
+ rabbit_nodes => $rabbit_nodes,
+ rabbit_userid => $rabbit_user,
+ rabbit_password => $rabbit_password,
+ image_service => 'nova.image.glance.GlanceImageService',
+ glance_api_servers => $glance_api_servers,
+ verbose => $verbose,
+ rabbit_host => $rabbit_host,
+ use_syslog => $use_syslog,
+ api_bind_address => $internal_address,
+ rabbit_ha_virtual_ip => $rabbit_ha_virtual_ip,
}
#Cinder setup
- if ($cinder) {
$enabled_apis = 'metadata'
package {'python-cinderclient': ensure => present}
+ if $cinder and $manage_volumes {
class {'openstack::cinder':
- sql_connection => "mysql://${cinder_db_user}:${cinder_db_password}@${db_host}/${cinder_db_dbname}?charset=utf8",
- rabbit_password => $rabbit_password,
- rabbit_host => false,
- rabbit_nodes => $rabbit_nodes,
- volume_group => $cinder_volume_group,
- physical_volume => $nv_physical_volume,
- manage_volumes => $manage_volumes,
- enabled => true,
- auth_host => $service_endpoint,
- bind_host => false,
- iscsi_bind_host => $cinder_iscsi_bind_addr,
- cinder_user_password => $cinder_user_password,
- use_syslog => $use_syslog,
- cinder_rate_limits => $cinder_rate_limits,
- rabbit_ha_virtual_ip => $rabbit_ha_virtual_ip,
+ sql_connection => "mysql://${cinder_db_user}:${cinder_db_password}@${db_host}/${cinder_db_dbname}?charset=utf8",
+ rabbit_password => $rabbit_password,
+ rabbit_host => false,
+ rabbit_nodes => $rabbit_nodes,
+ volume_group => $cinder_volume_group,
+ physical_volume => $nv_physical_volume,
+ manage_volumes => $manage_volumes,
+ enabled => true,
+ auth_host => $service_endpoint,
+ bind_host => false,
+ iscsi_bind_host => $cinder_iscsi_bind_addr,
+ cinder_user_password => $cinder_user_password,
+ use_syslog => $use_syslog,
+ cinder_rate_limits => $cinder_rate_limits,
+ rabbit_ha_virtual_ip => $rabbit_ha_virtual_ip,
+ }
}
- } else {
- $enabled_apis = 'metadata,osapi_volume'
- }
# Install / configure nova-compute
@@ -270,6 +267,7 @@ class openstack::compute (
admin_user => 'nova',
admin_password => $nova_user_password,
enabled_apis => $enabled_apis,
+ cinder => $cinder,
auth_host => $service_endpoint,
nova_rate_limits => $nova_rate_limits,
}
diff --git a/deployment/puppet/openstack/manifests/controller.pp b/deployment/puppet/openstack/manifests/controller.pp
index ce10f2d93c..63f23c6505 100644
--- a/deployment/puppet/openstack/manifests/controller.pp
+++ b/deployment/puppet/openstack/manifests/controller.pp
@@ -127,7 +127,7 @@ class openstack::controller (
$cache_server_ip = ['127.0.0.1'],
$cache_server_port = '11211',
$swift = false,
- $cinder = false,
+ $cinder = true,
$horizon_app_links = undef,
# General
$verbose = 'False',
@@ -175,7 +175,7 @@ class openstack::controller (
Class['openstack::db::mysql'] -> Class['openstack::glance']
Class['openstack::db::mysql'] -> Class['openstack::nova::controller']
if defined(Class['openstack::cinder']) {
- Class['openstack::db::mysql'] -> Class['openstack::cinder']
+ Class['openstack::db::mysql'] -> Class['openstack::cinder']
}
$rabbit_addresses = inline_template("<%= @rabbit_nodes.map {|x| x + ':5672'}.join ',' %>")
@@ -281,9 +281,10 @@ class openstack::controller (
}
if ($cinder) {
$enabled_apis = 'ec2,osapi_compute'
- } else {
- $enabled_apis = 'ec2,osapi_compute,osapi_volume'
}
+ else {
+ $enabled_apis = 'ec2,osapi_compute,osapi_volume'
+ }
class { 'openstack::nova::controller':
# Database
@@ -338,11 +339,13 @@ class openstack::controller (
ensure_package => $::openstack_version['nova'],
use_syslog => $use_syslog,
nova_rate_limits => $nova_rate_limits,
+ cinder => $cinder
}
######### Cinder Controller Services ########
- if ($cinder) {
- class {'openstack::cinder':
+ if $cinder {
+ if !defined(Class['openstack::cinder']) {
+ class {'openstack::cinder':
sql_connection => "mysql://${cinder_db_user}:${cinder_db_password}@${db_host}/${cinder_db_dbname}?charset=utf8",
rabbit_password => $rabbit_password,
rabbit_host => false,
@@ -359,21 +362,22 @@ class openstack::controller (
cinder_rate_limits => $cinder_rate_limits,
rabbit_ha_virtual_ip => $rabbit_ha_virtual_ip,
}
- } else {
- if $manage_volumes {
-
- class { 'nova::volume':
- ensure_package => $::openstack_version['nova'],
- enabled => true,
- }
-
- class { 'nova::volume::iscsi':
- iscsi_ip_address => $api_bind_address,
- physical_volume => $nv_physical_volume,
- }
}
- # Set up nova-volume
}
+ else {
+ if $manage_volumes {
+
+ class { 'nova::volume':
+ ensure_package => $::openstack_version['nova'],
+ enabled => true,
+ }
+ class { 'nova::volume::iscsi':
+ iscsi_ip_address => $api_bind_address,
+ physical_volume => $nv_physical_volume,
+ }
+ }
+ # Set up nova-volume
+ }
if !defined(Class['memcached']){
class { 'memcached':
diff --git a/deployment/puppet/openstack/manifests/firewall.pp b/deployment/puppet/openstack/manifests/firewall.pp
index 1e7e38f17b..3d131931e2 100644
--- a/deployment/puppet/openstack/manifests/firewall.pp
+++ b/deployment/puppet/openstack/manifests/firewall.pp
@@ -22,6 +22,7 @@ class openstack::firewall (
$nova_vncproxy_port = 6080,
$erlang_epmd_port = 4369,
$erlang_rabbitmq_port = 5672,
+ $erlang_rabbitmq_backend_port = 5673,
$erlang_inet_dist_port = 41055,
$memcached_port = 11211,
$rsync_port = 873,
@@ -116,7 +117,7 @@ class openstack::firewall (
}
firewall {'106 rabbitmq ':
- port => [$erlang_epmd_port, $erlang_rabbitmq_port, $erlang_inet_dist_port],
+ port => [$erlang_epmd_port, $erlang_rabbitmq_port, $erlang_rabbitmq_backend_port, $erlang_inet_dist_port],
proto => 'tcp',
action => 'accept',
}
@@ -199,6 +200,19 @@ class openstack::firewall (
action => 'accept',
}
+ firewall {'118 vnc ports':
+ port => "5900-6100",
+ proto => 'tcp',
+ action => 'accept',
+ }
+
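+  # GRE is connectionless; NOTRACK in the raw table bypasses connection tracking for tunnel traffic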
+ firewall { '333 accept gre':
+ chain => 'PREROUTING',
+ table => 'raw',
+ proto => 'gre',
+ action => 'notrack',
+ }
+
firewall { '999 drop all other requests':
action => 'drop',
}
diff --git a/deployment/puppet/openstack/manifests/mirantis_repos.pp b/deployment/puppet/openstack/manifests/mirantis_repos.pp
index 6720a68db8..d5ead63f4b 100644
--- a/deployment/puppet/openstack/manifests/mirantis_repos.pp
+++ b/deployment/puppet/openstack/manifests/mirantis_repos.pp
@@ -9,15 +9,18 @@ class openstack::mirantis_repos (
$deb_updates = 'http://172.18.67.168/ubuntu-repo/mirror.yandex.ru/ubuntu',
$deb_security = 'http://172.18.67.168/ubuntu-repo/mirror.yandex.ru/ubuntu',
$deb_fuel_folsom_repo = 'http://172.18.67.168/ubuntu-repo/precise-fuel-folsom',
+ $deb_fuel_grizzly_repo = 'http://osci-gbp.srt.mirantis.net/ubuntu/fuel/',
$deb_cloud_archive_repo = 'http://172.18.67.168/ubuntu-cloud.archive.canonical.com/ubuntu',
$deb_rabbit_repo = 'http://172.18.67.168/ubuntu-repo/precise-fuel-folsom',
- $enable_epel = false,
+ $enable_epel = false,
$fuel_mirrorlist = 'http://download.mirantis.com/epel-fuel-folsom-2.1/mirror.internal-stage.list',
$mirrorlist_base = 'http://172.18.67.168/centos-repo/mirror-6.3-os.list',
$mirrorlist_updates = 'http://172.18.67.168/centos-repo/mirror-6.3-updates.list',
+ $grizzly_baseurl = 'http://download.mirantis.com/epel-fuel-grizzly/',
$enable_test_repo = false,
$repo_proxy = undef,
- $use_upstream_mysql = false,) {
+ $use_upstream_mysql = false,
+) {
case $::osfamily {
'Debian' : {
class { 'apt::proxy':
@@ -25,11 +28,11 @@ class openstack::mirantis_repos (
stage => $::openstack::mirantis_repos::stage
}
- apt::pin { 'mirantis-releases':
- order => 20,
- priority => 1001,
- originator => $originator
- }
+# apt::pin { 'mirantis-releases':
+# order => 20,
+# priority => 1001,
+# originator => $originator
+# }
if $use_upstream_mysql {
apt::pin { 'upstream-mysql':
@@ -75,29 +78,20 @@ class openstack::mirantis_repos (
# Below we set our internal repos for testing purposes. Some of them may match with external ones.
if $type == 'custom' {
- if $enable_test_repo {
- apt::source { 'precise-fuel-folsom':
- location => $deb_fuel_folsom_repo,
- release => 'precise-2.1.0.1',
- repos => 'main',
- key => 'F8AF89DD',
- key_source => 'http://172.18.67.168/ubuntu-repo/precise-fuel-folsom/Mirantis.key',
- include_src => false,
- }
- } else {
- apt::source { 'precise-fuel-folsom':
- location => $deb_fuel_folsom_repo,
- release => 'precise-2.1.0.1',
- repos => 'main',
- key => 'F8AF89DD',
- key_source => 'http://172.18.67.168/ubuntu-repo/precise-fuel-folsom/Mirantis.key',
- include_src => false,
+
+ apt::pin { 'precise-fuel-grizzly':
+ order => 19,
+ priority => 1001,
}
+
+ apt::pin { 'cloud-archive':
+ order => 20,
+ priority => 1002,
}
apt::source { 'cloud-archive':
location => $deb_cloud_archive_repo,
- release => 'precise-updates/folsom',
+ release => 'precise-updates/grizzly',
repos => 'main',
key => '5EDB1B62EC4926EA',
key_source => 'http://172.18.67.168/ubuntu-repo/precise-fuel-folsom/cloud-archive.key',
@@ -105,6 +99,15 @@ class openstack::mirantis_repos (
include_src => false,
}
+ apt::source { 'precise-fuel-grizzly':
+ location => $deb_fuel_grizzly_repo,
+ release => 'precise-3.0',
+ repos => 'main',
+ key => 'F8AF89DD',
+ key_source => 'http://osci-gbp.srt.mirantis.net/ubuntu/key.gpg',
+ include_src => false,
+ }
+
apt::source { 'rabbit-3.0':
location => $deb_rabbit_repo,
release => 'precise-rabbitmq-3.0',
@@ -151,62 +154,72 @@ class openstack::mirantis_repos (
# ############### End of forced apt-get update block ###############
}
- 'RedHat' : {
+ 'RedHat': {
+
Yumrepo {
- proxy => $repo_proxy, }
+ proxy => $repo_proxy,
+ }
- yumrepo { 'centos-extras':
- descr => 'Local extras mirror repository',
- name => 'extras',
- enabled => 0,
- baseurl => "http://archive.kernel.org/centos/6.3/os/x86_64/",
- mirrorlist => absent
- }
-
-
- # added internal/external network mirror
+ # added internal (custom)/external (default) network mirror
if $type == 'default' {
- yumrepo { 'openstack-epel-fuel':
- descr => 'Mirantis OpenStack Custom Packages',
- mirrorlist => 'http://download.mirantis.com/epel-fuel-folsom-2.1/mirror.external.list',
- gpgcheck => '1',
- gpgkey => 'http://download.mirantis.com/epel-fuel-folsom-2.1/epel.key http://download.mirantis.com/epel-fuel-folsom-2.1/centos.key http://download.mirantis.com/epel-fuel-folsom-2.1/rabbit.key http://download.mirantis.com/epel-fuel-folsom-2.1/mirantis.key http://download.mirantis.com/epel-fuel-folsom-2.1/mysql.key http://download.mirantis.com/epel-fuel-folsom-2.1/nginx.key',
- }
+
yumrepo { 'centos-base':
- descr => 'Mirantis-CentOS',
+ descr => 'Mirantis-CentOS-Base',
name => 'base',
- baseurl => "http://download.mirantis.com/centos-6.4",
- mirrorlist => absent
- }
- yumrepo { 'vault6.3-base':
- descr => 'Vault 6.3 base mirror repository',
- name => 'v6.3-base',
- enabled => 0,
- baseurl => "http://vault.centos.org/6.3/os/x86_64/",
- mirrorlist => absent
+ baseurl => 'http://download.mirantis.com/centos-6.4',
+ gpgcheck => '1',
+ gpgkey => 'file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-6',
+ mirrorlist => absent,
}
+ yumrepo { 'openstack-epel-fuel-grizzly':
+ descr => 'Mirantis OpenStack grizzly Custom Packages',
+ baseurl => 'http://download.mirantis.com/epel-fuel-grizzly',
+ gpgcheck => '1',
+ gpgkey => 'http://download.mirantis.com/epel-fuel-grizzly/mirantis.key',
+ mirrorlist => absent,
+ }
+      # completely disable additional out-of-the-box repos
+ yumrepo { 'extras':
+ descr => 'CentOS-$releasever - Extras',
+ mirrorlist => 'http://mirrorlist.centos.org/?release=$releasever&arch=$basearch&repo=extras',
+ gpgcheck => '1',
+ baseurl => absent,
+ gpgkey => 'file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-6',
+ enabled => '0',
+ }
+
+ yumrepo { 'updates':
+ descr => 'CentOS-$releasever - Updates',
+ mirrorlist => 'http://mirrorlist.centos.org/?release=$releasever&arch=$basearch&repo=updates',
+ gpgcheck => '1',
+ baseurl => absent,
+ gpgkey => 'file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-6',
+ enabled => '0',
+ }
}
if $type == 'custom' {
- yumrepo { 'openstack-epel-fuel':
- descr => 'Mirantis OpenStack Custom Packages',
- mirrorlist => $fuel_mirrorlist,
- gpgcheck => '1',
- gpgkey => 'http://download.mirantis.com/epel-fuel-folsom-2.1/epel.key http://download.mirantis.com/epel-fuel-folsom-2.1/centos.key http://download.mirantis.com/epel-fuel-folsom-2.1/rabbit.key http://download.mirantis.com/epel-fuel-folsom-2.1/mirantis.key http://download.mirantis.com/epel-fuel-folsom-2.1/mysql.key http://download.mirantis.com/epel-fuel-folsom-2.1/nginx.key',
+
+ yumrepo { 'openstack-epel-fuel-grizzly':
+ descr => 'Mirantis OpenStack grizzly Custom Packages',
+ baseurl => 'http://download.mirantis.com/epel-fuel-grizzly/',
+ gpgcheck => '0',
}
if $upstream_mirror == true {
yumrepo { 'centos-base':
- descr => 'Local base mirror repository',
name => 'base',
+ gpgcheck => '1',
mirrorlist => $mirrorlist_base,
+ gpgkey => 'http://centos.srt.mirantis.net/RPM-GPG-KEY-CentOS-6',
}
yumrepo { 'centos-updates':
- descr => 'Local updates mirror repository',
name => 'updates',
+ gpgcheck => '1',
mirrorlist => $mirrorlist_updates,
+ gpgkey => 'http://centos.srt.mirantis.net/RPM-GPG-KEY-CentOS-6',
}
}
}
@@ -214,16 +227,16 @@ class openstack::mirantis_repos (
if $enable_test_repo {
yumrepo { 'openstack-osci-repo':
descr => 'Mirantis OpenStack OSCI Packages',
- baseurl => 'http://osci-koji.srt.mirantis.net/mash/fuel-folsom/x86_64/',
+ baseurl => 'http://osci-koji.srt.mirantis.net/mash/fuel-3.0/x86_64/',
gpgcheck => '1',
- gpgkey => 'http://download.mirantis.com/epel-fuel-folsom/epel.key http://download.mirantis.com/epel-fuel-folsom/centos.key http://download.mirantis.com/epel-fuel-folsom/rabbit.key http://download.mirantis.com/epel-fuel-folsom/mirantis.key http://download.mirantis.com/epel-fuel-folsom/mysql.key http://download.mirantis.com/epel-fuel-folsom/nginx.key',
+ gpgkey => 'http://download.mirantis.com/epel-fuel-grizzly/mirantis.key',
}
}
if $enable_epel {
Yumrepo {
failovermethod => 'priority',
- gpgkey => 'http://dl.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-6',
+ gpgkey => 'file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-6',
gpgcheck => 1,
enabled => 1,
}
@@ -231,6 +244,7 @@ class openstack::mirantis_repos (
yumrepo { 'epel-testing':
descr => 'Extra Packages for Enterprise Linux 6 - Testing - $basearch',
mirrorlist => 'http://mirrors.fedoraproject.org/metalink?repo=testing-epel6&arch=$basearch',
+ enabled => 1,
}
yumrepo { 'epel':
diff --git a/deployment/puppet/openstack/manifests/nova/controller.pp b/deployment/puppet/openstack/manifests/nova/controller.pp
index d10c77b4a4..1aef014674 100644
--- a/deployment/puppet/openstack/manifests/nova/controller.pp
+++ b/deployment/puppet/openstack/manifests/nova/controller.pp
@@ -82,6 +82,7 @@ class openstack::nova::controller (
$api_bind_address = '0.0.0.0',
$use_syslog = false,
$nova_rate_limits = undef,
+ $cinder = true
) {
# Configure the db string
@@ -257,10 +258,16 @@ class openstack::nova::controller (
auth_host => $keystone_host,
enabled_apis => $_enabled_apis,
ensure_package => $ensure_package,
- nova_rate_limits => $nova_rate_limits
+ nova_rate_limits => $nova_rate_limits,
+ cinder => $cinder
}
- if $auto_assign_floating_ip {
+ class {'nova::conductor':
+ enabled => $enabled,
+ ensure_package => $ensure_package,
+ }
+
+  if $auto_assign_floating_ip {
nova_config { 'DEFAULT/auto_assign_floating_ip': value => 'True' }
}
diff --git a/deployment/puppet/openstack/manifests/quantum_router.pp b/deployment/puppet/openstack/manifests/quantum_router.pp
index 86dc2a8f2e..7ab724d1f3 100644
--- a/deployment/puppet/openstack/manifests/quantum_router.pp
+++ b/deployment/puppet/openstack/manifests/quantum_router.pp
@@ -64,8 +64,8 @@ class openstack::quantum_router (
if $quantum_network_node {
class { 'quantum::agents::ovs':
- bridge_uplinks => ["br-ex:${public_interface}","br-prv:${private_interface}"],
- bridge_mappings => ['physnet1:br-ex', 'physnet2:br-prv'],
+ bridge_uplinks => ["br-prv:${private_interface}"],
+ bridge_mappings => ['physnet2:br-prv'],
enable_tunneling => $enable_tunneling,
local_ip => $internal_address,
service_provider => $service_provider
diff --git a/deployment/puppet/openstack/manifests/swift/storage_node.pp b/deployment/puppet/openstack/manifests/swift/storage_node.pp
index 05c6e365d2..44fb80bd69 100644
--- a/deployment/puppet/openstack/manifests/swift/storage_node.pp
+++ b/deployment/puppet/openstack/manifests/swift/storage_node.pp
@@ -1,39 +1,42 @@
class openstack::swift::storage_node (
$swift_zone,
- $swift_hash_suffix = 'swift_secret',
- $swift_local_net_ip = $::ipaddress_eth0,
- $storage_type = 'loopback',
- $storage_base_dir = '/srv/loopback-device',
- $storage_mnt_base_dir = '/srv/node',
- $storage_devices = ['1', '2'],
- $storage_weight = 1,
- $package_ensure = 'present',
- $loopback_size = '1048756',
+ $swift_hash_suffix = 'swift_secret',
+ $swift_local_net_ip = $::ipaddress_eth0,
+ $storage_type = 'loopback',
+ $storage_base_dir = '/srv/loopback-device',
+ $storage_mnt_base_dir = '/srv/node',
+ $storage_devices = [
+ '1',
+ '2'],
+ $storage_weight = 1,
+ $package_ensure = 'present',
+ $loopback_size = '1048756',
$master_swift_proxy_ip,
- $rings = ['account', 'object', 'container'],
- $sync_rings = true,
- $loopback_size = '1048756',
+ $rings = [
+ 'account',
+ 'object',
+ 'container'],
+ $sync_rings = true,
# if the cinder management components should be installed
- $cinder = false,
- $manage_volumes = false,
- $nv_physical_volume = undef,
- $cinder_volume_group = 'cinder-volumes',
- $cinder_user_password = 'cinder_user_pass',
- $cinder_db_password = 'cinder_db_pass',
- $cinder_db_user = 'cinder',
- $cinder_db_dbname = 'cinder',
- $cinder_iscsi_bind_addr = false,
- $cinder_rate_limits = false,
- $db_host = '127.0.0.1',
- $service_endpoint = '127.0.0.1',
- $use_syslog = false,
+ $cinder = true,
+ $manage_volumes = false,
+ $nv_physical_volume = undef,
+ $cinder_volume_group = 'cinder-volumes',
+ $cinder_user_password = 'cinder_user_pass',
+ $cinder_db_password = 'cinder_db_pass',
+ $cinder_db_user = 'cinder',
+ $cinder_db_dbname = 'cinder',
+ $cinder_iscsi_bind_addr = false,
+ $cinder_rate_limits = false,
+ $db_host = '127.0.0.1',
+ $service_endpoint = '127.0.0.1',
+ $use_syslog = false,
# Rabbit details necessary for cinder
- $rabbit_nodes = false,
- $rabbit_password = 'rabbit_pw',
- $rabbit_host = false,
- $rabbit_user = 'nova',
- $rabbit_ha_virtual_ip = false,
-) {
+ $rabbit_nodes = false,
+ $rabbit_password = 'rabbit_pw',
+ $rabbit_host = false,
+ $rabbit_user = 'nova',
+ $rabbit_ha_virtual_ip = false,) {
if !defined(Class['swift']) {
class { 'swift':
swift_hash_suffix => $swift_hash_suffix,
@@ -58,46 +61,43 @@ class openstack::swift::storage_node (
}
validate_string($master_swift_proxy_ip)
-
+
if $sync_rings {
- if member($rings, 'account') and ! defined(Swift::Ringsync['account']) {
+ if member($rings, 'account') and !defined(Swift::Ringsync['account']) {
swift::ringsync { 'account': ring_server => $master_swift_proxy_ip }
}
-
- if member($rings, 'object') and ! defined(Swift::Ringsync['object']) {
+
+ if member($rings, 'object') and !defined(Swift::Ringsync['object']) {
swift::ringsync { 'object': ring_server => $master_swift_proxy_ip }
}
-
- if member($rings, 'container') and ! defined(Swift::Ringsync['container']) {
+
+ if member($rings, 'container') and !defined(Swift::Ringsync['container']) {
swift::ringsync { 'container': ring_server => $master_swift_proxy_ip }
}
Swift::Ringsync <| |> ~> Class["swift::storage::all"]
}
- $enabled_apis = 'ec2,osapi_compute'
- if ($cinder) and !defined(Class['swift']) {
- package {'python-cinderclient': ensure => present}
- class {'openstack::cinder':
- sql_connection => "mysql://${cinder_db_user}:${cinder_db_password}@${db_host}/${cinder_db_dbname}?charset=utf8",
- rabbit_password => $rabbit_password,
- rabbit_host => false,
- rabbit_nodes => $rabbit_nodes,
- volume_group => $cinder_volume_group,
- physical_volume => $nv_physical_volume,
- manage_volumes => $manage_volumes,
- enabled => true,
- auth_host => $service_endpoint,
- bind_host => false,
- iscsi_bind_host => $cinder_iscsi_bind_addr,
- cinder_user_password => $cinder_user_password,
- use_syslog => $use_syslog,
- cinder_rate_limits => $cinder_rate_limits,
- rabbit_ha_virtual_ip => $rabbit_ha_virtual_ip,
+ if ($cinder and $manage_volumes) {
+ if !(defined(Class['openstack::cinder'])) {
+ class { 'openstack::cinder':
+ sql_connection => "mysql://${cinder_db_user}:${cinder_db_password}@${db_host}/${cinder_db_dbname}?charset=utf8",
+ rabbit_password => $rabbit_password,
+ rabbit_host => false,
+ rabbit_nodes => $rabbit_nodes,
+ volume_group => $cinder_volume_group,
+ physical_volume => $nv_physical_volume,
+ manage_volumes => $manage_volumes,
+ enabled => true,
+ auth_host => $service_endpoint,
+ bind_host => false,
+ iscsi_bind_host => $cinder_iscsi_bind_addr,
+ cinder_user_password => $cinder_user_password,
+ use_syslog => $use_syslog,
+ cinder_rate_limits => $cinder_rate_limits,
+ rabbit_ha_virtual_ip => $rabbit_ha_virtual_ip,
+ }
}
+ }
- }
-
-
-
}
diff --git a/deployment/puppet/quantum/lib/puppet/provider/quantum_net/quantum.rb b/deployment/puppet/quantum/lib/puppet/provider/quantum_net/quantum.rb
index 465373339e..6a6813dea5 100644
--- a/deployment/puppet/quantum/lib/puppet/provider/quantum_net/quantum.rb
+++ b/deployment/puppet/quantum/lib/puppet/provider/quantum_net/quantum.rb
@@ -36,7 +36,6 @@ Puppet::Type.type(:quantum_net).provide(
optional_opts = []
{
:router_ext => '--router:external',
- :shared => '--shared',
:network_type => '--provider:network_type',
:physnet => '--provider:physical_network',
:segment_id => '--provider:segmentation_id'
@@ -45,6 +44,9 @@ Puppet::Type.type(:quantum_net).provide(
optional_opts.push(opt).push(@resource[param])
end
end
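+    # --shared takes no value and is only passed when shared is 'True', so it is handled outside the option map above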
+ if @resource[:shared] == 'True'
+ optional_opts.push("--shared")
+ end
auth_quantum('net-create',
'--tenant_id', tenant_id[@resource[:tenant]],
diff --git a/deployment/puppet/quantum/manifests/agents/dhcp.pp b/deployment/puppet/quantum/manifests/agents/dhcp.pp
index 415d395e6f..946accf8cc 100644
--- a/deployment/puppet/quantum/manifests/agents/dhcp.pp
+++ b/deployment/puppet/quantum/manifests/agents/dhcp.pp
@@ -134,14 +134,14 @@ class quantum::agents::dhcp (
cs_colocation { 'dhcp-with-ovs':
ensure => present,
cib => 'dhcp',
- primitives => ["p_${::quantum::params::dhcp_agent_service}", "p_${::quantum::params::ovs_agent_service}"],
+ primitives => ["p_${::quantum::params::dhcp_agent_service}", "clone_p_${::quantum::params::ovs_agent_service}"],
score => 'INFINITY',
}
cs_order { 'dhcp-after-ovs':
ensure => present,
cib => 'dhcp',
- first => "p_${::quantum::params::ovs_agent_service}",
+ first => "clone_p_${::quantum::params::ovs_agent_service}",
second => "p_${::quantum::params::dhcp_agent_service}",
score => 'INFINITY',
}
diff --git a/deployment/puppet/quantum/manifests/agents/l3.pp b/deployment/puppet/quantum/manifests/agents/l3.pp
index dfca41d499..24fe123b6b 100644
--- a/deployment/puppet/quantum/manifests/agents/l3.pp
+++ b/deployment/puppet/quantum/manifests/agents/l3.pp
@@ -145,6 +145,7 @@ class quantum::agents::l3 (
subnet_gw => $external_gateway, # undef,
alloc_pool => $external_alloc_pool, # undef,
enable_dhcp => 'False', # 'True',
+ shared => 'True',
}
Quantum_l3_agent_config <| |> -> Quantum::Network::Setup['net04_ext']
@@ -273,18 +274,25 @@ class quantum::agents::l3 (
cs_colocation { 'l3-with-ovs':
ensure => present,
cib => 'l3',
- primitives => ["p_${::quantum::params::l3_agent_service}", "p_${::quantum::params::ovs_agent_service}"],
+ primitives => ["p_${::quantum::params::l3_agent_service}", "clone_p_${::quantum::params::ovs_agent_service}"],
score => 'INFINITY',
}
-
cs_order { 'l3-after-ovs':
ensure => present,
cib => 'l3',
- first => "p_${::quantum::params::ovs_agent_service}",
+ first => "clone_p_${::quantum::params::ovs_agent_service}",
second => "p_${::quantum::params::l3_agent_service}",
score => 'INFINITY',
}
+      # start the DHCP and L3 agents on different controllers if possible
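+      # (the finite negative score only discourages co-location; both agents may still share one controller)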
+ cs_colocation { 'dhcp-without-l3':
+ ensure => present,
+ cib => 'l3',
+ primitives => ["p_${::quantum::params::dhcp_agent_service}", "p_${::quantum::params::l3_agent_service}"],
+ score => '-100',
+ }
+
# Ensure service is stopped and disabled by upstart/init/etc.
Service['quantum-l3-init_stopped'] -> Cs_resource["p_${::quantum::params::l3_agent_service}"]
diff --git a/deployment/puppet/quantum/manifests/agents/ovs.pp b/deployment/puppet/quantum/manifests/agents/ovs.pp
index 19a41e0e0f..17f602577f 100644
--- a/deployment/puppet/quantum/manifests/agents/ovs.pp
+++ b/deployment/puppet/quantum/manifests/agents/ovs.pp
@@ -2,8 +2,7 @@ class quantum::agents::ovs (
$package_ensure = 'present',
$enabled = true,
$bridge_uplinks = ['br-ex:eth2'],
- $bridge_mappings = [
- 'physnet1:br-ex'],
+ $bridge_mappings = ['physnet1:br-ex'],
$integration_bridge = 'br-int',
$enable_tunneling = true,
$local_ip = undef,
@@ -46,14 +45,14 @@ class quantum::agents::ovs (
skip_existing => true,
# require => Service['quantum-plugin-ovs-service'],
}
-
quantum_plugin_ovs { 'OVS/local_ip': value => $local_ip; }
} else {
- quantum::plugins::ovs::bridge { $bridge_mappings: # require => Service['quantum-plugin-ovs-service'],
- }
-
- quantum::plugins::ovs::port { $bridge_uplinks: # require => Service['quantum-plugin-ovs-service'],
- }
+    quantum::plugins::ovs::bridge { $bridge_mappings: # Do not quote: may be an array!
+ #require => Service['quantum-plugin-ovs-service'],
+ }
+    quantum::plugins::ovs::port { $bridge_uplinks: # Do not quote: may be an array!
+ #require => Service['quantum-plugin-ovs-service'],
+ }
}
if $enabled {
@@ -91,7 +90,13 @@ class quantum::agents::ovs (
primitive_class => 'ocf',
provided_by => 'pacemaker',
primitive_type => 'quantum-agent-ovs',
- require => File['quantum-ovs-agent'] ,
+ require => File['quantum-ovs-agent'] ,
+ multistate_hash => {
+ 'type' => 'clone',
+ },
+ ms_metadata => {
+ 'interleave' => 'true',
+ },
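+      # an interleaved clone runs the OVS agent on every node; ordering/colocation rules therefore reference "clone_p_<service>"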
parameters => {
}
,
@@ -108,7 +113,6 @@ class quantum::agents::ovs (
'stop' => {
'timeout' => '480'
}
-
}
,
}
@@ -122,20 +126,21 @@ class quantum::agents::ovs (
}
default: { fail("The $::osfamily operating system is not supported.") }
}
- service { 'quantum-plugin-ovs-service_stopped':
+ service { 'quantum-ovs-agent-service_stopped':
name => $::quantum::params::ovs_agent_service,
enable => false,
hasstatus => false,
}
- exec { 'quantum-plugin-ovs-service_stopped':
+ exec { 'quantum-ovs-agent-service_stopped':
+    # TODO: rewrite as a script that either returns zero or waits until it can return zero
name => "bash -c \"service ${::quantum::params::ovs_agent_service} stop || ( kill `pgrep -f quantum-openvswitch-agent` || : )\"",
onlyif => "service ${::quantum::params::ovs_agent_service} status | grep \'${started_status}\'",
path => ['/usr/bin', '/usr/sbin', '/bin', '/sbin'],
returns => [0,""]
}
Package[$ovs_agent_package] ->
- Service['quantum-plugin-ovs-service_stopped'] ->
- Exec['quantum-plugin-ovs-service_stopped'] ->
+ Service['quantum-ovs-agent-service_stopped'] ->
+ Exec['quantum-ovs-agent-service_stopped'] ->
Cs_resource["p_${::quantum::params::ovs_agent_service}"]
service { 'quantum-plugin-ovs-service':
@@ -159,4 +164,14 @@ class quantum::agents::ovs (
}
Class[quantum::waistline] -> Service[quantum-plugin-ovs-service]
Package[$ovs_agent_package] -> Service[quantum-plugin-ovs-service]
+
+ service { 'quantum-ovs-agent-cleanup':
+ name => 'quantum-ovs-cleanup',
+ enable => $enabled,
+ ensure => false,
+ hasstatus => false,
+ hasrestart => false,
+ }
+ Service['quantum-plugin-ovs-service'] -> Service['quantum-ovs-agent-cleanup']
+
}
diff --git a/deployment/puppet/quantum/manifests/init.pp b/deployment/puppet/quantum/manifests/init.pp
index 0cee897cd6..0beb5cd971 100644
--- a/deployment/puppet/quantum/manifests/init.pp
+++ b/deployment/puppet/quantum/manifests/init.pp
@@ -101,8 +101,4 @@ class quantum (
}
# SELINUX=permissive
- if !defined(Class['selinux']) and ($::osfamily == 'RedHat') {
- class { 'selinux' : }
- }
-
}
diff --git a/deployment/puppet/quantum/manifests/network/setup.pp b/deployment/puppet/quantum/manifests/network/setup.pp
index eaf1a00830..33957c5b0b 100644
--- a/deployment/puppet/quantum/manifests/network/setup.pp
+++ b/deployment/puppet/quantum/manifests/network/setup.pp
@@ -13,6 +13,7 @@ define quantum::network::setup (
$alloc_pool = undef,
$enable_dhcp = 'True',
$nameservers = undef,
+ $shared = 'False',
) {
Quantum_l3_agent_config <||> ->Quantum_net <||>
@@ -28,6 +29,7 @@ define quantum::network::setup (
network_type => $network_type,
segment_id => $segment_id,
router_ext => $router_external,
+ shared => $shared,
}
# validate allocation pool
diff --git a/deployment/puppet/rsyslog/manifests/config.pp b/deployment/puppet/rsyslog/manifests/config.pp
index e24df2d8d8..3fdebf180f 100644
--- a/deployment/puppet/rsyslog/manifests/config.pp
+++ b/deployment/puppet/rsyslog/manifests/config.pp
@@ -17,6 +17,16 @@ class rsyslog::config {
require => Class["rsyslog::install"],
notify => Class["rsyslog::service"],
}
+
+ file { '/var/lib/rsyslog' :
+ owner => root,
+ group => $::rsyslog::params::run_group,
+ ensure => directory,
+ path => $::rsyslog::params::rsyslog_queues_dir,
+ require => Class["rsyslog::install"],
+ notify => Class["rsyslog::service"],
+ }
+
if $osfamily == "Debian"
{
file { $rsyslog::params::rsyslog_default:
diff --git a/deployment/puppet/rsyslog/manifests/params.pp b/deployment/puppet/rsyslog/manifests/params.pp
index a6ad4a8273..d18f9da216 100644
--- a/deployment/puppet/rsyslog/manifests/params.pp
+++ b/deployment/puppet/rsyslog/manifests/params.pp
@@ -6,6 +6,7 @@ class rsyslog::params {
$package_status = 'latest'
$rsyslog_d = '/etc/rsyslog.d/'
$rsyslog_conf = '/etc/rsyslog.conf'
+ $rsyslog_queues_dir = '/var/lib/rsyslog'
$rsyslog_default = '/etc/default/rsyslog'
$run_user = 'root'
$run_group = 'root'
@@ -22,6 +23,7 @@ class rsyslog::params {
$package_status = 'present'
$rsyslog_d = '/etc/syslog.d/'
$rsyslog_conf = '/etc/syslog.conf'
+ $rsyslog_queues_dir = '/var/lib/rsyslog'
$rsyslog_default = '/etc/defaults/syslogd'
$run_user = 'root'
$run_group = 'wheel'
diff --git a/deployment/puppet/rsyslog/templates/rsyslog.conf.erb b/deployment/puppet/rsyslog/templates/rsyslog.conf.erb
index ff828a3a37..fe8f7e4b9f 100644
--- a/deployment/puppet/rsyslog/templates/rsyslog.conf.erb
+++ b/deployment/puppet/rsyslog/templates/rsyslog.conf.erb
@@ -22,6 +22,25 @@ $DirCreateMode 0755
$PrivDropToUser <%= scope.lookupvar('rsyslog::params::run_user') %>
$PrivDropToGroup <%= scope.lookupvar('rsyslog::params::run_group') %>
+#
+# Disk-Assisted Memory Queues, async writes, no escape chars
+#
+$OMFileASyncWriting on
+#$EscapeControlCharactersOnReceive off
+$MainMsgQueueType LinkedList
+$WorkDirectory <%= scope.lookupvar('rsyslog::params::rsyslog_queues_dir') %>
+$MainMsgQueueFileName mainmsgqueue
+$MainMsgQueueSaveOnShutdown on
+$MainMsgQueueDequeueSlowdown 1000
+$MainMsgQueueWorkerThreads 2
+$MainMsgQueueDequeueBatchSize 128
+$ActionQueueType LinkedList
+$WorkDirectory <%= scope.lookupvar('rsyslog::params::rsyslog_queues_dir') %>
+$ActionQueueFileName acsdbq
+$ActionQueueDequeueSlowdown 1000
+$ActionQueueWorkerThreads 2
+$ActionQueueDequeueBatchSize 128
+
#
# Include all config files in <%= scope.lookupvar('rsyslog::params::rsyslog_d') %>
#
diff --git a/deployment/puppet/swift/lib/puppet/provider/swift_ring_builder.rb b/deployment/puppet/swift/lib/puppet/provider/swift_ring_builder.rb
index 3e5a9340a1..5e4312507c 100644
--- a/deployment/puppet/swift/lib/puppet/provider/swift_ring_builder.rb
+++ b/deployment/puppet/swift/lib/puppet/provider/swift_ring_builder.rb
@@ -14,13 +14,14 @@ class Puppet::Provider::SwiftRingBuilder < Puppet::Provider
if File.exists?(builder_file_path)
if rows = swift_ring_builder(builder_file_path).split("\n")[4..-1]
rows.each do |row|
- if row =~ /^\s+(\d+)\s+(\d+)\s+(\S+)\s+(\d+)\s+(\S+)\s+(\d+\.\d+)\s+(\d+)\s+(-?\d+\.\d+)\s+(\S*)$/
- object_hash["#{$3}:#{$4}"] = {
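+        # swift-ring-builder output now includes a region column; captured fields:
+        #   id region zone ip port device weight partitions balance meta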
+ if row =~ /^\s+(\d+)\s+(\d+)\s+(\d+)\s+(\S+)\s+(\d+)\s+(\S+)\s+(\d+\.\d+)\s+(\d+)\s+(-?\d+\.\d+)\s+(\S*)$/
+ object_hash["#{$4}:#{$5}"] = {
:id => $1,
- :zone => $2,
- :partitions => $7,
- :balance => $8,
- :meta => $9
+ :region => $2,
+ :zone => $3,
+ :partitions => $8,
+ :balance => $9,
+ :meta => $10,
}
else
Puppet.warning("Unexpected line: #{row}")
@@ -89,7 +90,7 @@ class Puppet::Provider::SwiftRingBuilder < Puppet::Provider
def used_devs
if devs = swift_ring_builder(builder_file_path).split("\n")[4..-1]
@used_devices = devs.collect do |line|
- line.strip.split(/\s+/)[4] if line.match(/#{resource[:name].split(':')[0]}/)
+ line.strip.split(/\s+/)[5] if line.match(/#{resource[:name].split(':')[0]}/)
end.compact.sort
else
[]
diff --git a/deployment/puppet/swift/manifests/proxy/authtoken.pp b/deployment/puppet/swift/manifests/proxy/authtoken.pp
index 06f4ed7fc0..761554fcf3 100644
--- a/deployment/puppet/swift/manifests/proxy/authtoken.pp
+++ b/deployment/puppet/swift/manifests/proxy/authtoken.pp
@@ -51,7 +51,7 @@ class swift::proxy::authtoken(
auth_host => $auth_host,
auth_port => $auth_port,
auth_protocol => $auth_protocol,
- signing_dir => '/tmp/keystone_signing_swift',
+ signing_dir => '/etc/swift',
}
}
diff --git a/deployment/puppet/swift/templates/proxy/keystone.conf.erb b/deployment/puppet/swift/templates/proxy/keystone.conf.erb
index e1442e9fff..490e4e0f5b 100644
--- a/deployment/puppet/swift/templates/proxy/keystone.conf.erb
+++ b/deployment/puppet/swift/templates/proxy/keystone.conf.erb
@@ -1,6 +1,6 @@
[filter:keystone]
-paste.filter_factory = keystone.middleware.swift_auth:filter_factory
+use = egg:swift#keystoneauth
operator_roles = <%= operator_roles.to_a.join(', ') %>
is_admin = <%= is_admin %>
cache = <%= cache %>
diff --git a/docs/conf.py b/docs/conf.py
index e6607faa81..5be3907ad3 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -48,10 +48,10 @@ copyright = u'2013, Mirantis'
# built documents.
#
# The short X.Y version.
-version = '2.2'
+version = '3.0'
# The full version, including alpha/beta/rc tags.
-release = '2.2'
+release = '3.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
@@ -185,7 +185,7 @@ latex_elements = {
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'fuel.tex', u'Fuel Documentation',
- u'Nick Bogdanov', 'manual'),
+ u'Mirantis', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
diff --git a/docs/pages/0040-reference-architecture.rst b/docs/pages/0040-reference-architecture.rst
index 6b636f9e8d..979c010e00 100644
--- a/docs/pages/0040-reference-architecture.rst
+++ b/docs/pages/0040-reference-architecture.rst
@@ -10,8 +10,8 @@ Reference Architecture
.. include:: /pages/reference-architecture/0020-logical-setup.rst
.. include:: /pages/reference-architecture/0030-cluster-sizing.rst
.. include:: /pages/reference-architecture/0040-network-setup.rst
-.. include:: /pages/reference-architecture/0010-technical-considerations-overview.rst
+.. include:: /pages/reference-architecture/0050-technical-considerations-overview.rst
.. include:: /pages/reference-architecture/0060-quantum-vs-nova-network.rst
-.. include:: /pages/reference-architecture/0050-cinder-vs-nova-volume.rst
-.. include:: /pages/reference-architecture/0070-swift-notes.rst
+.. include:: /pages/reference-architecture/0070-cinder-vs-nova-volume.rst
+.. include:: /pages/reference-architecture/0080-swift-notes.rst
diff --git a/docs/pages/0050-installation-instructions.rst b/docs/pages/0050-installation-instructions.rst
index 0aacb2ebaf..eaa29ee0e7 100644
--- a/docs/pages/0050-installation-instructions.rst
+++ b/docs/pages/0050-installation-instructions.rst
@@ -10,12 +10,9 @@ Create a multi-node OpenStack cluster using Fuel
.. include:: /pages/installation-instructions/0015-before-you-start.rst
.. include:: /pages/installation-instructions/0020-machines.rst
.. include:: /pages/installation-instructions/0040-installing-configuring-puppet-master.rst
-.. include:: /pages/installation-instructions/0042-installing-the-iso.rst
-.. include:: /pages/installation-instructions/0045-configuring-the-iso.rst
.. include:: /pages/installation-instructions/0050-configuring-cobbler.rst
-.. include:: /pages/installation-instructions/0055-installing-os-using-cobbler.rst
.. include:: /pages/installation-instructions/0057-prepare-for-deployment.rst
-.. include:: /pages/installation-instructions/0060-deploying-openstack.rst
-.. include:: /pages/installation-instructions/0062-orchestration.rst
-.. include:: /pages/installation-instructions/0065-testing-openstack.rst
+.. include:: /pages/installation-instructions/0060-understand-the-manifest.rst
+.. include:: /pages/installation-instructions/0070-orchestration.rst
+.. include:: /pages/installation-instructions/0080-testing-openstack.rst
diff --git a/docs/pages/0058-advanced-configuration.rst b/docs/pages/0058-advanced-configuration.rst
new file mode 100644
index 0000000000..d05d394fa0
--- /dev/null
+++ b/docs/pages/0058-advanced-configuration.rst
@@ -0,0 +1,11 @@
+.. _Production:
+
+Advanced Configuration Topics
+=============================
+
+.. contents:: :local:
+
+.. include:: /pages/advanced-topics/0010-introduction.rst
+.. include:: /pages/advanced-topics/0020-custom-plug-ins.rst
+.. include:: /pages/advanced-topics/0030-quantum-HA.rst
+.. include:: /pages/advanced-topics/0040-bonding.rst
diff --git a/docs/pages/0060-frequently-asked-questions.rst b/docs/pages/0060-frequently-asked-questions.rst
index 291e1fb52b..bae38d7571 100644
--- a/docs/pages/0060-frequently-asked-questions.rst
+++ b/docs/pages/0060-frequently-asked-questions.rst
@@ -11,5 +11,5 @@ Known Issues and Workarounds
.. include:: /pages/frequently-asked-questions/0010-rabbitmq.rst
.. include:: /pages/frequently-asked-questions/0020-galera.rst
.. include:: /pages/frequently-asked-questions/0070-common-technical-issues.rst
-.. include:: /pages/frequently-asked-questions/0020-other-questions.rst
+.. include:: /pages/frequently-asked-questions/0080-other-questions.rst
diff --git a/docs/pages/advanced-topics/0010-introduction.rst b/docs/pages/advanced-topics/0010-introduction.rst
new file mode 100644
index 0000000000..263220873c
--- /dev/null
+++ b/docs/pages/advanced-topics/0010-introduction.rst
@@ -0,0 +1 @@
+This section explains how to perform tasks that go beyond a simple OpenStack cluster, from configuring OpenStack Networking for high-availability to adding your own custom components to your cluster using Fuel.
\ No newline at end of file
diff --git a/docs/pages/advanced-topics/0020-custom-plug-ins.rst b/docs/pages/advanced-topics/0020-custom-plug-ins.rst
new file mode 100644
index 0000000000..b1fe397314
--- /dev/null
+++ b/docs/pages/advanced-topics/0020-custom-plug-ins.rst
@@ -0,0 +1,322 @@
+Adding and configuring custom services
+--------------------------------------
+
+Fuel is designed to help you easily install a standard OpenStack cluster, but what if your cluster is not standard? What if you need services or components that are not included with the standard Fuel distribution? This document is designed to give you all of the information you need in order to add custom services and packages to a Fuel-deployed cluster.
+
+Fuel usage scenarios and how they affect installation
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Two basic Fuel usage scenarios exist.
+
+In the first scenario, a deployment engineer takes the Fuel ISO image, deploys the master node, makes necessary changes to configuration files, and then deploys OpenStack. In this scenario, each node gets a clean OpenStack installation.
+
+In the second scenario, the master node and other nodes in the cluster have already been installed, and the deployment engineer has to deploy OpenStack to an existing configuration.
+
+For the purposes of this discussion, the main difference between these two scenarios is that services in the second scenario may be running on an operating system that has already been customized; for the clean install of the first scenario, any customizations have to be performed on-the-fly, as part of the deployment.
+
+In most cases, best practices dictate that you deploy and test OpenStack first, and then add any custom services. Fuel works using Puppet manifests, so the simplest way to install a new service is to edit the current site.pp file on the Puppet master machine and start an additional deployment pass on the target node.
+
+While that is the ideal means for installing a new service or component, it's not an option in situations in which OpenStack actually requires the new service or component. For example, hardware drivers and management software often must be installed before OpenStack itself. You still, however, have the option to create a separate customized site.pp file and run a deployment pass before installing OpenStack. One advantage to this method is that any version mismatches between the component and OpenStack dependencies should be easy to isolate.
+
+Finally, if this is not an option, you can inject a custom component installation into the existing fuel manifests. If you elect to go this route, you'll need to be aware of software source compatibility issues, as well as installation stages, component versions, incompatible dependencies, and declared resource names.
+
+In short, simple custom component installation may be accomplished by editing the site.pp file, but more complex components should be added as new Fuel components.
+
+Let's look at what you need to know.
+
+Installing the new service along with Fuel
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+When it comes to installing your new service or component alongside Fuel, you have several options. How you go about it depends on where in the process the component needs to be available. Let's look at each step and how it can impact your installation.
+
+**Boot the master node**
+
+In most cases, you will be installing the master node from the Fuel ISO. This is a semiautomatic step, and doesn't allow for any custom components. If for some reason you need to customize the node at this level, you will need to use the manual Fuel installation procedure.
+
+**Cobbler configuration**
+
+If your customizations need to take place before the install of the operating system, or even as part of the operating system install, you can do them at this step. This is also where you would make customizations to other services. At this level, you are making changes to the operating system kickstart/pre-seed files, and may include any custom software source and components required to install the operating system for a node. Anything that needs to be installed before OpenStack should be configured during this step.
+
+**OpenStack installation**
+
+It is during this step that you perform any Puppet, Astute, or mCollective configuration. In most cases, this means customizing the Puppet site.pp file to add any custom components during the actual OpenStack installation.
+
+This step actually includes several different stages. (In fact, Puppet STDLib defines several additional default stages that Fuel does not use.) These stages include:
+
+0. ``Puppetlabs-repo``. mCollective uses this stage to add the Puppetlabs repositories during operating system and Puppet deployment.
+
+1. ``Openstack-custom-repo``. Additional repositories required by OpenStack are configured at this stage. Additionally, to avoid compatibility issues, the Puppetlabs repositories are switched off at this stage. As a general rule, it is a good idea to turn off any unnecessary software repositories defined for operating system installation.
+
+2. ``FUEL``. During this stage, Fuel performs any actions defined for the current operating system.
+
+3. ``Netconfig``. During this stage, Fuel performs all network configuration actions. This means that you should include any custom components that are related to the network in this stage.
+
+4. ``Main``. The actual OpenStack installation process happens during this stage. Install any non-network-related components during this stage or after it.
+
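+For example, a component that must be configured together with the network could be assigned to the network configuration stage (a sketch; the class name here is hypothetical, and the stage name must match the one declared in your manifests)::
+
+   class { 'my_custom_network_component' :
+     stage => 'netconfig',
+   }
+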
+**Post-OpenStack install**
+
+At this point, OpenStack is installed. You may add any components you like at this point, as long as they don't break OpenStack itself.
+
+Defining a new component
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+In general, we recommend you follow these steps to define a new component:
+
+#. **Custom stages. Optional.**
+
+ Declare a custom stage or stages to help Puppet understand the required installation sequence.
+   Stages are special markers indicating the sequence of actions. Best practice is to use the ``before`` parameter for every stage, to help define the correct sequence. The default built-in stage is "main"; every Puppet action is automatically assigned to the main stage if no stage is explicitly specified for the action. However, because Fuel installs almost all of OpenStack during the main stage, custom stages may not help, so future plans include breaking the OpenStack installation into several stages.
+
+   Don't forget to take into account other existing stages; creating several parallel sequences of stages increases the chances that Puppet will order them incorrectly if you do not explicitly specify the order.
+
+ *Example*::
+
+ stage {'Custom stage 1':
+ before => Stage['Custom stage 2'],
+ }
+ stage {'Custom stage 2':
+ before => Stage['main'],
+ }
+
+ Note that there are several limitations to stages, and they should be used with caution and only with the simplest of classes. You can find more information here: http://docs.puppetlabs.com/puppet/2.7/reference/lang_run_stages.html.
+
+#. **Custom repositories. Optional.**
+
+ If the custom component requires a custom software source, you may declare a new repository and add it during one of the early stages of the installation.
+
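+   A minimal sketch for a RedHat-family node (the repository name and URL here are hypothetical)::
+
+     yumrepo { 'custom-component-repo' :
+       baseurl  => 'http://download.example.com/custom/el6/x86_64/',
+       enabled  => '1',
+       gpgcheck => '0',
+     }
+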
+#. **Common variable definition**
+
+ It is a good idea to have all common variables defined in a single place. Unlike variables in many other languages, Puppet variables are actually constants, and may be assigned only once inside a given scope.
+
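+   *Example* (the values here are hypothetical)::
+
+     # Assigned once, in one place; a second assignment to either
+     # variable in the same scope would cause a compilation error.
+     $custom_package_download_url = 'http://download.example.com/custom'
+     $custom_service_port         = '35357'
+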
+#. **OS and condition-dependent variable definition**
+
+ It is also a good idea to assign all common operating system or condition-dependent variables to a single location, preferably near the other common variables. Also, be sure to always use a default section when defining conditional operators.
+
+*Example*::
+
+ case $::osfamily {
+ # RedHat in most cases should work for CentOS and Fedora as well
+ 'RedHat': {
+ # List of packages to get from URL/path.
+ # Separate list should be defined for each separate URL!
+ $custom_package_list_from_url = ['qpid-cpp-server-0.14-16.el6.x86_64.rpm']
+ }
+ 'Debian': {
+ # List of packages to get from URL/path.
+ # Separate list should be defined for each separate URL!
+ $custom_package_list_from_url = [ "qpidd_0.14-2_amd64.deb" ]
+ }
+ default: {
+ fail("Module install_custom_package does not support ${::operatingsystem}")
+ }
+ }
+
+#. **Define installation procedures for independent custom components as classes**
+
+ You can think of public classes as singleton collections, or simply as a named block of code with its own namespace. Each class should be defined only once, but every class may be used with different input variable sets. The best practice is to define a separate class for every component, define required sub-classes for sub-components, and include class-dependent required resources within the actual class/subclass.
+
+*Example*::
+
+ class add_custom_service (
+ # Input parameter definitions:
+ # Name of the service to place behind HAProxy. Mandatory.
+ # This name appears as a new HAProxy configuration block in /etc/haproxy/haproxy.cfg.
+ $service_name_in_haproxy_config,
+ $custom_package_download_url,
+ $custom_package_list_from_url,
+ #The list of remaining input parameters
+ ...
+ ) {
+ # HAProxy::params is a container class holding default parameters for the haproxy class. It adds and populates the Global and Default sections in /etc/haproxy/haproxy.cfg.
+ # If you install a custom service over the already deployed HAProxy configuration, it is probably better to comment out the following string:
+ include haproxy::params
+ #Class resources definitions:
+ # Define the list of package names to be installed
+ define install_custom_package_from_url (
+ $custom_package_download_url,
+ $package_provider = undef
+ ) {
+ exec { "download-${name}" :
+ command => "/usr/bin/wget -P/tmp ${custom_package_download_url}/${name}",
+ creates => "/tmp/${name}",
+ } ->
+ install_custom_package { "${name}" :
+ provider => $package_provider,
+ source => "/tmp/${name}",
+ }
+ }
+ define install_custom_package (
+ $package_provider = undef,
+ $package_source = undef
+ ) {
+ package { "custom-${name}" :
+ ensure => present,
+ provider => $package_provider,
+ source => $package_source
+ }
+ }
+
+ #Here we actually install all the packages from a single URL.
+ if is_array($custom_package_list_from_url) {
+ install_custom_package_from_url { $custom_package_list_from_url :
+ provider => $package_provider,
+ custom_package_download_url => $custom_package_download_url,
+ }
+ }
+ }
+
+#. **Target nodes**
+
+ Every component should be explicitly assigned to a particular target node or nodes.
+   To do that, declare the node or nodes within site.pp. When Puppet runs the manifest for each node, it compares each node definition with the current hostname and applies only the classes assigned to that node. Node definitions may include regular expressions. For example, you can apply the class 'add_custom_service' to all controller nodes with hostnames fuel-controller-00 through fuel-controller-xxx, where xxx is any integer value, using the following definition:
+
+*Example*::
+
+   node /fuel-controller-\d+/ {
+ include stdlib
+ class { 'add_custom_service':
+ stage => 'Custom stage 1',
+ service_name_in_haproxy_config => $service_name_in_haproxy_config,
+ custom_package_download_url => $custom_package_download_url,
+ custom_package_list_from_url => $custom_package_list_from_url,
+ }
+ }
+
+Fuel API Reference
+^^^^^^^^^^^^^^^^^^
+
+**add_haproxy_service**
+Location: Top level
+
+As the name suggests, this function enables you to create a new HAProxy service. The service is defined in the ``/etc/haproxy/haproxy.cfg`` file, and generally looks something like this::
+
+ listen keystone-2
+ bind 10.0.74.253:35357
+ bind 10.0.0.110:35357
+ balance roundrobin
+ option httplog
+ server fuel-controller-01.example.com 10.0.0.101:35357 check
+ server fuel-controller-02.example.com 10.0.0.102:35357 check
+
+To accomplish this, you might create a Fuel statement such as::
+
+ add_haproxy_service { 'keystone-2' :
+ order => 30,
+ balancers => {'fuel-controller-01.example.com' => '10.0.0.101',
+ 'fuel-controller-02.example.com' => '10.0.0.102'},
+    virtual_ips => ['10.0.74.253', '10.0.0.110'],
+ port => '35357',
+ haproxy_config_options => { 'option' => ['httplog'], 'balance' => 'roundrobin' },
+ balancer_port => '35357',
+ balancermember_options => 'check',
+ mode => 'tcp',
+ define_cookies => false,
+ define_backend => false,
+ collect_exported => false
+ }
+
+Let's look at how the command works.
+
+**Usage:** ::
+
+  add_haproxy_service { <'Service name'> :
+ order => $order,
+ balancers => $balancers,
+ virtual_ips => $virtual_ips,
+ port => $port,
+ haproxy_config_options => $haproxy_config_options,
+ balancer_port => $balancer_port,
+ balancermember_options => $balancermember_options,
+ mode => $mode, #Optional. Default is 'tcp'.
+ define_cookies => $define_cookies, #Optional. Default false.
+ define_backend => $define_backend,#Optional. Default false.
+ collect_exported => $collect_exported, #Optional. Default false.
+ }
+
+**Parameters:**
+
+``<'Service name'>``
+
+The name of the new HAProxy listener section. In our example it was ``keystone-2``. If you want to include an IP address or port in the listener name, you have the option to use a name such as::
+
+ 'stats 0.0.0.0:9000 #Listen on all IPs on port 9000'
+
+``order``
+
+This parameter determines the order of the file fragments. It is optional, but we strongly recommend setting it manually.
+Fuel already has several different order values from 1 to 100 hardcoded for HAProxy configuration. So if your HAProxy configuration fragments appear in the wrong places in ``/etc/haproxy/haproxy.cfg``, it is probably because of an incorrect order value. It is safe to set order values greater than 100 in order to place your custom configuration block at the end of ``haproxy.cfg``.
+
+Puppet assembles configuration files from fragments. First it creates several configuration fragments and temporarily stores all of them as separate files. Every fragment has a name such as ``${order}-${fragment_name}``, so the order determines the number of the current fragment in the fragment sequence.
+After all the fragments are created, Puppet reads the fragment names and sorts them in ascending order, concatenating all the fragments in that order. So a fragment with a smaller order value always goes before all fragments with a greater order value.
+
+The ``keystone-2`` fragment from the example above has ``order = 30`` so it's placed after the ``keystone-1`` section (``order = 20``) and before the ``nova-api-1`` section (``order = 40``).
+
+``balancers``
+
+Balancers (or **Backends** in HAProxy terms) are a hash of ``{ "$::hostname" => $::ipaddress }`` values.
+The default is ``{ "" => }``, but that value is set for compatibility only, and may not work correctly in HA mode. Instead, the default for HA mode is to explicitly set the balancers as ::
+
+ Haproxy_service {
+ balancers => $controller_internal_addresses
+ }
+
+with ``$controller_internal_addresses`` representing a hash of all the controllers with their corresponding internal IP addresses; this value is set in ``site.pp``.
+
+So the ``balancers`` parameter is a list of HAProxy listener balance members (hostnames) with corresponding IP addresses. The following lines from the ``keystone-2`` listener example represent balancers::
+
+ server fuel-controller-01.example.com 10.0.0.101:35357 check
+ server fuel-controller-02.example.com 10.0.0.102:35357 check
+
+Every key pair in the ``balancers`` hash adds a new line to the list of listener section balancers. Different options may be set for every line.
+
+``virtual_ips``
+
+This parameter represents an array of IP addresses (or **Frontends** in HAProxy terms) for the current listener. Every IP address in this array adds a new line to the bind section of the current listener. The following lines from the ``keystone-2`` listener example represent virtual IPs::
+
+ bind 10.0.74.253:35357
+ bind 10.0.0.110:35357
+
+``port``
+
+This parameter specifies the frontend port for the listener. Currently you must set the same port for all frontends.
+The following lines from the ``keystone-2`` listener example represent the frontend port, where the port is 35357::
+
+ bind 10.0.74.253:35357
+ bind 10.0.0.110:35357
+
+``haproxy_config_options``
+
+This parameter represents a hash of key pairs of HAProxy listener options in the form ``{ 'option name' => 'option value' }``. Every key pair from this hash adds a new line to the listener options.
+Please note: Every HAProxy option may require a different input value type, such as strings or a list of multiple options per single string.
+
+The ``keystone-2`` listener example has the ``{ 'option' => ['httplog'], 'balance' => 'roundrobin' }`` option hash, which appears as follows in the resulting ``/etc/haproxy/haproxy.cfg``::
+
+ balance roundrobin
+ option httplog
+
+``balancer_port``
+
+This parameter represents the balancer (backend) port. By default, the balancer_port is the same as the frontend ``port``. The following lines from the ``keystone-2`` listener example represent ``balancer_port``, where the port is ``35357``::
+
+ server fuel-controller-01.example.com 10.0.0.101:35357 check
+ server fuel-controller-02.example.com 10.0.0.102:35357 check
+
+``balancermember_options``
+
+This is a string of options added to each balancer (backend) member. The ``keystone-2`` listener example has the single ``check`` option::
+
+ server fuel-controller-01.example.com 10.0.0.101:35357 check
+ server fuel-controller-02.example.com 10.0.0.102:35357 check
+
+``mode``
+
+This optional parameter represents the HAProxy listener mode. The default value is ``tcp``, but Fuel writes ``mode http`` to the defaults section of ``/etc/haproxy/haproxy.cfg``. You can set the same option via ``haproxy_config_options``; a separate ``mode`` parameter exists so that a particular mode can be applied by default to every new listener. The ``keystone-2`` listener example has no ``mode`` option, so it works in the default Fuel-configured HTTP mode.
+
+``define_cookies``
+
+This optional boolean parameter is a Fuel-only feature. The default is ``false``, but if set to ``true``, Fuel directly adds ``cookie ${hostname}`` to every balance member (backend).
+
+The ``keystone-2`` listener example has no ``define_cookies`` option. Typically, frontend cookies are added with ``haproxy_config_options`` and backend cookies with ``balancermember_options``.
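+
+If it were set to ``true``, each balance member line would gain a cookie and look something like this (a sketch)::
+
+ server fuel-controller-01.example.com 10.0.0.101:35357 check cookie fuel-controller-01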
+
+``collect_exported``
+
+This optional boolean parameter has a default value of ``false``. True means 'collect exported @@balancermember resources' (when every balancermember node exports itself), while false means 'rely on the existing declared balancermember resources' (for when you know the full set of balancermembers in advance and use ``haproxy::balancermember`` with array arguments, which allows you to deploy everything in one run).
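+
+As a sketch of the exported-resource pattern (assuming the stock ``haproxy::balancermember`` define; parameter names may differ between versions of the haproxy module)::
+
+ # On each balancer member node: export the resource (note the @@).
+ @@haproxy::balancermember { "${::hostname}-keystone-2" :
+   listening_service => 'keystone-2',
+   ports             => '35357',
+   server_names      => $::hostname,
+   ipaddresses       => $::ipaddress,
+   options           => 'check',
+ }
+
+ # On the proxy node: collect everything exported for this listener.
+ Haproxy::Balancermember <<| listening_service == 'keystone-2' |>>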
diff --git a/docs/pages/advanced-topics/0030-quantum-HA.rst b/docs/pages/advanced-topics/0030-quantum-HA.rst
new file mode 100644
index 0000000000..8351f0fdf5
--- /dev/null
+++ b/docs/pages/advanced-topics/0030-quantum-HA.rst
@@ -0,0 +1,7 @@
+OpenStack Networking HA
+-----------------------
+
+Fuel 2.1 introduced support for OpenStack Networking (formerly known as Quantum) in a high-availability configuration. To accomplish this, Fuel uses a combination of Pacemaker and Corosync to ensure that if the networking service goes down, it will be restarted, either on the existing node or on a separate node.
+
+This document explains how to configure these options in your own installation.
+
diff --git a/docs/pages/advanced-topics/0040-bonding.rst b/docs/pages/advanced-topics/0040-bonding.rst
new file mode 100644
index 0000000000..acd30c1668
--- /dev/null
+++ b/docs/pages/advanced-topics/0040-bonding.rst
@@ -0,0 +1,283 @@
+L23network
+----------
+
+NOTE: THIS DOCUMENT HAS NOT BEEN EDITED AND IS NOT READY FOR PUBLIC CONSUMPTION.
+
+Puppet module for configuring network interfaces at the second and third network layers (802.1q vlans, access ports, NIC bonding, IP address assignment, DHCP, and interfaces without IP addresses).
+
+It can work together with Open vSwitch or with the standard Linux network stack.
+
+At the moment we support CentOS 6.3 (RHEL 6) and Ubuntu 12.04 or above.
+
+
+Usage
+^^^^^
+
+Place this module in /etc/puppet/modules or in another path that contains your Puppet modules.
+
+Include the L23network module and initialize it. We recommend doing this at an early stage::
+
+ #Network configuration
+ stage {'netconfig':
+ before => Stage['main'],
+ }
+ class {'l23network': stage=> 'netconfig'}
+
+If you do not plan to use Open vSwitch, you can disable it::
+
+ class {'l23network': use_ovs=>false, stage=> 'netconfig'}
+
+
+
+
+L2 network configuration (Open vSwitch only)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The current layout is:
+* *bridges* -- A "Bridge" is a virtual ethernet L2 switch. You can plug ports into it.
+* *ports* -- A port is a virtual interface that you plug into the bridge (switch).
+* *interfaces* -- Physical implementations of ports.
+
+Then in your manifest you can use these as parameterized classes::
+
+ class {"l23network": }
+
+ l23network::l2::bridge{"br-mgmt": }
+ l23network::l2::port{"eth0": bridge => "br-mgmt"}
+ l23network::l2::port{"mmm0": bridge => "br-mgmt"}
+ l23network::l2::port{"mmm1": bridge => "br-mgmt"}
+
+ l23network::l2::bridge{"br-ex": }
+ l23network::l2::port{"eth0": bridge => "br-ex"}
+ l23network::l2::port{"eth1": bridge => "br-ex", ifname_order_prefix='ovs'}
+ l23network::l2::port{"eee0": bridge => "br-ex", skip_existing => true}
+ l23network::l2::port{"eee1": bridge => "br-ex", type=>'internal'}
+
+You can define a type for the port. The port type can be
+'system', 'internal', 'tap', 'gre', 'ipsec_gre', 'capwap', 'patch', or 'null'.
+If you do not define a type for the port (or define ''), ovs-vsctl will use its default behavior
+(see http://openvswitch.org/cgi-bin/ovsman.cgi?page=utilities%2Fovs-vsctl.8).
+
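+For example, a tunnel port just sets a different type (a sketch -- tunnel options such as the remote IP would still need to be supplied through the module's properties parameters)::
+
+  l23network::l2::port{"gre0":
+    bridge => "br-tun",
+    type   => 'gre',
+  }
+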
+You can use the *skip_existing* option if you do not want the configuration to be interrupted when adding an already existing port or bridge.
+
+
+
+L3 network configuration
+^^^^^^^^^^^^^^^^^^^^^^^^
+ ::
+
+ ### Simple IP address definition, DHCP or address-less interfaces
+ l23network::l3::ifconfig {"eth0": ipaddr=>'192.168.1.1/24'}
+ l23network::l3::ifconfig {"xXxXxXx":
+ interface => 'eth1',
+ ipaddr => '192.168.2.1',
+ netmask => '255.255.255.0'
+ }
+ l23network::l3::ifconfig {"eth2": ipaddr=>'dhcp'}
+ l23network::l3::ifconfig {"eth3": ipaddr=>'none'}
+
+The *ipaddr* option can contain an IP address, 'dhcp', or 'none'. In this example we describe the configuration of 4 network interfaces:
+* Interface *eth0* uses the short CIDR-notated form of IP address definition.
+* Interface *eth1* uses the classic *ipaddr* plus *netmask* definition.
+* Interface *eth2* will be configured to use the DHCP protocol.
+* Interface *eth3* will be configured as an interface without an IP address. This is often needed for the "master" interface for 802.1q vlans (in the native Linux implementation) or for a slave interface for bonding.
+
+The CIDR-notated form of the IP address takes priority over the classic *ipaddr* and *netmask* definition.
+If you omit *netmask* and do not use the CIDR-notated form, the default *netmask* value of '255.255.255.0' will be used. ::
+
+ ### Multiple IP addresses for one interface (aliases)
+
+ l23network::l3::ifconfig {"eth0":
+ ipaddr => ['192.168.0.1/24', '192.168.1.1/24', '192.168.2.1/24']
+ }
+
+You can pass a list of CIDR-notated IP addresses to the *ipaddr* parameter to assign many IP addresses to one interface. This will create aliases (not subinterfaces). The array can contain one or more elements. ::
+
+ ### UP and DOWN interface order
+
+ l23network::l3::ifconfig {"eth1":
+ ipaddr=>'192.168.1.1/24'
+ }
+ l23network::l3::ifconfig {"br-ex":
+ ipaddr=>'192.168.10.1/24',
+   ifname_order_prefix => 'ovs'
+ }
+ l23network::l3::ifconfig {"aaa0":
+ ipaddr=>'192.168.20.1/24',
+   ifname_order_prefix => 'zzz'
+ }
+
+At OS startup, CentOS and Ubuntu start and configure network interfaces in alphabetical order
+of their configuration file names. In the example above we change the configuration order with the *ifname_order_prefix* keyword. We will have this order::
+
+ ifcfg-eth1
+ ifcfg-ovs-br-ex
+ ifcfg-zzz-aaa0
+
+And the OS will configure interfaces br-ex and aaa0 after eth1::
+
+ ### Default gateway
+
+ l23network::l3::ifconfig {"eth1":
+ ipaddr => '192.168.2.5/24',
+ gateway => '192.168.2.1',
+ check_by_ping => '8.8.8.8',
+ check_by_ping_timeout => '30'
+ }
+
+In this example we define the default *gateway* and options for waiting until the network is up.
+The *check_by_ping* parameter defines an IP address that will be pinged; Puppet will block and wait up to *check_by_ping_timeout* seconds for a response.
+The *check_by_ping* parameter can be an IP address, 'gateway', or 'none' to disable the check.
+By default, the gateway will be pinged. ::
+
+ ### DNS-specific options
+
+ l23network::l3::ifconfig {"eth1":
+ ipaddr => '192.168.2.5/24',
+ dns_nameservers => ['8.8.8.8','8.8.4.4'],
+ dns_search => ['aaa.com','bbb.com'],
+ dns_domain => 'qqq.com'
+ }
+
+We can also specify DNS nameservers and a search list that will be inserted (by the resolvconf library) into /etc/resolv.conf.
+The *dns_domain* option is implemented only on Ubuntu. ::
+
+ ### DHCP-specific options
+
+ l23network::l3::ifconfig {"eth2":
+ ipaddr => 'dhcp',
+ dhcp_hostname => 'compute312',
+ dhcp_nowait => false,
+ }
+
+
+
+Bonding
+^^^^^^^
+
+### Using standard linux bond (ifenslave)
+For bonding two interfaces you need to:
+* Specify these interfaces as interfaces without IP addresses
+* Specify that the interfaces depend on the master-bond-interface
+* Assign an IP address to the master-bond-interface.
+* Specify bond-specific properties for the master-bond-interface (if the defaults are not suitable for you)
+
+For example (defaults included)::
+
+ l23network::l3::ifconfig {'eth1': ipaddr=>'none', bond_master=>'bond0'} ->
+ l23network::l3::ifconfig {'eth2': ipaddr=>'none', bond_master=>'bond0'} ->
+ l23network::l3::ifconfig {'bond0':
+ ipaddr => '192.168.232.1',
+ netmask => '255.255.255.0',
+ bond_mode => 0,
+ bond_miimon => 100,
+ bond_lacp_rate => 1,
+ }
+
+
+You can find more information about bonding network interfaces in the manuals for your operating system:
+* https://help.ubuntu.com/community/UbuntuBonding
+* http://wiki.centos.org/TipsAndTricks/BondingInterfaces
+
+### Using Open vSwitch
+For bonding two interfaces you need to:
+* Specify an OVS bridge
+* Specify the special resource "bond" and add it to the bridge, specifying any bond-specific parameters.
+* Assign an IP address to the newly-created network interface (if needed).
+
+In this example we add "eth1" and "eth2" interfaces to bridge "bridge0" as bond "bond1". ::
+
+ l23network::l2::bridge{'bridge0': } ->
+ l23network::l2::bond{'bond1':
+ bridge => 'bridge0',
+ ports => ['eth1', 'eth2'],
+ properties => [
+ 'lacp=active',
+ 'other_config:lacp-time=fast'
+ ],
+ } ->
+ l23network::l3::ifconfig {'bond1':
+ ipaddr => '192.168.232.1',
+ netmask => '255.255.255.0',
+ }
+
+Open vSwitch provides a lot of parameters for different configurations.
+We can specify them in the "properties" option as a list of parameter=value
+(or parameter:key=value) strings.
+You can see most of them on the [Open vSwitch documentation page](http://openvswitch.org/support/).
+
+
+
+802.1q vlan access ports
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+### Using standard linux way
+We can use tagged vlans over ordinary network interfaces (or over bonds).
+L23network supports two variants of naming vlan interfaces:
+* *vlanXXX* -- the 802.1q tag is taken from the vlan interface name, but you need to specify the
+parent interface name in the **vlandev** parameter.
+* *eth0.101* -- the 802.1q tag and the parent interface name are taken from the vlan interface name.
+
+If you need to use 802.1q vlans over bonds, you can use only the first variant.
+
+In this example we can see both variants: ::
+
+ l23network::l3::ifconfig {'vlan6':
+ ipaddr => '192.168.6.1',
+ netmask => '255.255.255.0',
+ vlandev => 'bond0',
+ }
+ l23network::l3::ifconfig {'vlan5':
+ ipaddr => 'none',
+ vlandev => 'bond0',
+ }
+ L23network::L3::Ifconfig['bond0'] -> L23network::L3::Ifconfig['vlan6'] -> L23network::L3::Ifconfig['vlan5']
+
+ l23network::l3::ifconfig {'eth0':
+ ipaddr => '192.168.0.5',
+ netmask => '255.255.255.0',
+ gateway => '192.168.0.1',
+ } ->
+ l23network::l3::ifconfig {'eth0.101':
+ ipaddr => '192.168.101.1',
+ netmask => '255.255.255.0',
+ } ->
+ l23network::l3::ifconfig {'eth0.102':
+ ipaddr => 'none',
+ }
+
+### Using Open vSwitch
+In Open vSwitch, all internal traffic is virtually tagged.
+To create an 802.1q tagged access port, you need to specify the vlan tag when adding a port to a bridge.
+In this example we create two ports with tags 10 and 20, and assign an IP address to the interface with tag 10::
+
+ l23network::l2::bridge{'bridge0': } ->
+ l23network::l2::port{'vl10':
+ bridge => 'bridge0',
+ type => 'internal',
+ port_properties => [
+ 'tag=10'
+ ],
+ } ->
+ l23network::l2::port{'vl20':
+ bridge => 'bridge0',
+ type => 'internal',
+ port_properties => [
+ 'tag=20'
+ ],
+ } ->
+ l23network::l3::ifconfig {'vl10':
+ ipaddr => '192.168.101.1/24',
+ } ->
+ l23network::l3::ifconfig {'vl20':
+ ipaddr => 'none',
+ }
+
+You can find information about vlans in Open vSwitch on the [Open vSwitch documentation page](http://openvswitch.org/support/config-cookbooks/vlan-configuration-cookbook/).
+
+**IMPORTANT:** You can't use vlan interface names like vlanXXX if you do not want double-tagging of your network traffic.
+
+---
+When I began to write this module, I checked https://github.com/ekarlso/puppet-vswitch. Ekarlso, big thanks...
+
+
diff --git a/docs/pages/creating-fuel-pm/0010-creating-fuel-pm-from-scratch.rst b/docs/pages/creating-fuel-pm/0010-creating-fuel-pm-from-scratch.rst
index 4806ffeefb..356dcce222 100644
--- a/docs/pages/creating-fuel-pm/0010-creating-fuel-pm-from-scratch.rst
+++ b/docs/pages/creating-fuel-pm/0010-creating-fuel-pm-from-scratch.rst
@@ -1,8 +1,4 @@
-If you already have Puppet Master installed, you can skip this
-installation step and go directly to :ref:`Installing the OS Using Fuel `.
-
-
Installing Puppet Master is a one-time procedure for the entire
infrastructure. Once done, Puppet Master will act as a single point of
@@ -13,7 +9,41 @@ these installation steps again.
Initial Setup
-------------
-For VirtualBox, follow these steps to create the virtual hardware:
+On VirtualBox (https://www.virtualbox.org/wiki/Downloads), please create or make sure the following
+host-only adapters exist and are configured correctly:
+
+* VirtualBox -> File -> Preferences...
+
+ * Network -> Add HostOnly Adapter (vboxnet0)
+
+ * IPv4 Address: 10.0.0.1
+ * IPv4 Network Mask: 255.255.255.0
+ * DHCP server: disabled
+
+ * Network -> Add HostOnly Adapter (vboxnet1)
+
+ * IPv4 Address: 10.0.1.1
+ * IPv4 Network Mask: 255.255.255.0
+ * DHCP server: disabled
+
+ * Network -> Add HostOnly Adapter (vboxnet2)
+
+ * IPv4 Address: 0.0.0.0
+ * IPv4 Network Mask: 255.255.255.0
+ * DHCP server: disabled
+
+In this example, only the first two adapters will be used, but you can choose to use the third to handle your storage network traffic.
+
+After creating these interfaces, reboot the host machine to make sure that
+DHCP isn't running in the background.
+
+Installing on Windows isn't recommended, but if you're attempting it,
+you will also need to set up the IP address & network mask under
+Control Panel > Network and Internet > Network and Sharing Center for the
+Virtual HostOnly Network adapter.
+
+
+Next, follow these steps to create the virtual hardware:
* Machine -> New
@@ -29,16 +59,13 @@ For VirtualBox, follow these steps to create the virtual hardware:
* Machine -> Settings -> Network
+ * Adapter 1
-
- * Adapter 1
* Enable Network Adapter
* Attached to: Host-only Adapter
* Name: vboxnet0
-
-
- * Adapter 2
+ * Adapter 2
* Enable Network Adapter
* Attached to: Bridged Adapter
* Name: eth0 (or whichever physical network has your internet connection)
@@ -50,13 +77,12 @@ OS Installation
---------------
- * Pick and download an operating system image. This image will be used as the base OS for the Puppet master node. These insructions assume that you are using CentOS 6.3, but you can also use Ubuntu 12.04 or RHEL 6.3.
+ * Pick and download an operating system image. This image will be used as the base OS for the Puppet master node. These instructions assume that you are using CentOS 6.4, but you can also use Ubuntu 12.04.
- **PLEASE NOTE**: These are the only operating systems on which Fuel has been certified. Using other operating systems can, and in many cases will, produce unpredictable results.
+ **PLEASE NOTE**: These are the only operating systems on which Fuel 3.0 has been certified. Using other operating systems can, and in many cases will, produce unpredictable results.
-
-
- * `CentOS 6.3 `_: download CentOS-6.3-x86_64-minimal.iso
+ * `CentOS 6.4 `_: download CentOS-6.4-x86_64-minimal.iso
+ * `Ubuntu 12.04 Precise Pangolin `_: download the Ubuntu Minimal CD
* Mount the downloaded ISO to the machine's CD/DVD drive. In case of VirtualBox, mount it to the fuel-pm virtual machine:
@@ -72,16 +98,13 @@ OS Installation
* Boot the server (or VM) from the CD/DVD drive and install the chosen OS. Be sure to choose the root password carefully.
-
-
-
* Set up the eth0 interface. This interface will be used for communication between the Puppet Master and Puppet clients, as well as for Cobbler.
- ``vi/etc/sysconfig/network-scripts/ifcfg-eth0``::
+ ``vi /etc/sysconfig/network-scripts/ifcfg-eth0``::
DEVICE="eth0"
BOOTPROTO="static"
- IPADDR="10.20.0.100"
+ IPADDR="10.0.0.100"
NETMASK="255.255.255.0"
ONBOOT="yes"
TYPE="Ethernet"
@@ -116,7 +139,7 @@ OS Installation
* Add DNS for Internet hostnames resolution::
- vi/etc/resolv.conf
+ vi /etc/resolv.conf
@@ -130,7 +153,7 @@ OS Installation
* Check that a ping to your host machine works. This means that the management segment is available::
- ping 10.20.0.1
+ ping 10.0.0.1
@@ -147,30 +170,34 @@ OS Installation
* Next, set up the packages repository:
+ ``vi /etc/yum.repos.d/puppet.repo``::
-
-
- ``vi/etc/yum.repos.d/puppet.repo``::
+ [puppetlabs-dependencies]
+ name=Puppet Labs Dependencies
+ baseurl=http://yum.puppetlabs.com/el/$releasever/dependencies/$basearch/
+ enabled=1
+ gpgcheck=0
[puppetlabs]
name=Puppet Labs Packages
baseurl=http://yum.puppetlabs.com/el/$releasever/products/$basearch/
- enabled=1 gpgcheck=1 gpgkey=http://yum.puppetlabs.com/RPM-GPG-KEY-puppetlabs
-
-
-
+ enabled=1
+ gpgcheck=0
* Install Puppet Master::
-
- rpm -Uvh http://download.fedoraproject.org/pub/epel/6/x86_64/epel-release-6-8.noarch.rpm
+ rpm -Uvh http://dl.fedoraproject.org/pub/epel/6/x86_64/epel-release-6-8.noarch.rpm
yum upgrade
yum install puppet-server-2.7.19
- service puppetmaster
- start chkconfig puppetmaster on
+ service puppetmaster start
+ chkconfig puppetmaster on
service iptables stop
chkconfig iptables off
+ * Install PuppetDB::
+
+ yum install puppetdb puppetdb-terminus
+ chkconfig puppetdb on
diff --git a/docs/pages/creating-fuel-pm/0045-configuring-fuel-pm.rst b/docs/pages/creating-fuel-pm/0045-configuring-fuel-pm.rst
index 582c6b7d3c..bb50e0c109 100644
--- a/docs/pages/creating-fuel-pm/0045-configuring-fuel-pm.rst
+++ b/docs/pages/creating-fuel-pm/0045-configuring-fuel-pm.rst
@@ -106,7 +106,7 @@ Puppet to use a technique called stored configuration.
-* Finally, set up SSL for PuppetDB and restart the puppetmaster and puppetdb services::
+* Set up SSL for PuppetDB and restart the puppetmaster and puppetdb services::
service puppetmaster restart
@@ -116,18 +116,31 @@ Puppet to use a technique called stored configuration.
+* Finally, if you are planning to install Cobbler on the Puppet Master node as well (as we are in this example), make configuration changes on the Puppet Master so that it actually knows how to provision software onto itself:
+
+ ``vi /etc/puppet/puppet.conf``::
+
+
+ [main]
+ # server
+ server = fuel-pm.localdomain
+
+ # enable plugin sync
+ pluginsync = true
+
* **IMPORTANT**: Note that while these operations appear to finish quickly, it can actually take several minutes for puppetdb to complete its startup process. You'll know it has finished starting up when you can successfully telnet to port 8081::
- telnet pm.localdomain 8081
+ yum install telnet
+ telnet fuel-pm.localdomain 8081
Testing Puppet
^^^^^^^^^^^^^^
-Put a simple configuration into Puppet -- replace localdomain
-with your domain name -- so that when you run puppet on various nodes,
-it will display the appropriate Hello world message:
+
+Add a simple configuration to Puppet so that when you run puppet on various nodes,
+it will display a "Hello world" message:
``vi /etc/puppet/manifests/site.pp``::
@@ -135,38 +148,9 @@ it will display the appropriate Hello world message:
node /fuel-pm.localdomain/ {
notify{"Hello world from fuel-pm": }
}
- node /fuel-controller-01.localdomain/ {
- notify{"Hello world from fuel-controller-01": }
- }
- node /fuel-controller-02.localdomain/ {
- notify{"Hello world from fuel-controller-02": }
- }
- node /fuel-controller-03.localdomain/ {
- notify{"Hello world from fuel-controller-03": }
- }
- node /fuel-compute-01.localdomain/ {
- notify{"Hello world from fuel-compute-01": }
- }
-If you are planning to install Cobbler on the Puppet Master node as
-well (as we are in this example), make configuration changes on the
-Puppet Master so that it actually knows how to provision software onto
-itself (replace your-domain-name. com with your domain name):
-
-
-
-``vi /etc/puppet/puppet.conf``::
-
-
- [main]
- # server
- server = fuel-pm.localdomain
-
- # enable plugin sync
- pluginsync = true
-
Finally, to make sure everything is working properly, run puppet agent
and to see the ``Hello World from fuel-pm`` output::
diff --git a/docs/pages/creating-fuel-pm/0050-installing-configuring-cobbler.rst b/docs/pages/creating-fuel-pm/0050-installing-configuring-cobbler.rst
index ac9b4e00b4..11561b348b 100644
--- a/docs/pages/creating-fuel-pm/0050-installing-configuring-cobbler.rst
+++ b/docs/pages/creating-fuel-pm/0050-installing-configuring-cobbler.rst
@@ -1,4 +1,4 @@
-Installing & Configuring Cobbler
+Installing Fuel and Cobbler
--------------------------------
Cobbler performs bare metal provisioning and initial installation of
diff --git a/docs/pages/creating-fuel-pm/0090-creating-fuel-pm-from-scratch-DISPLAYSTUB.rst b/docs/pages/creating-fuel-pm/0090-creating-fuel-pm-from-scratch-DISPLAYSTUB.rst
new file mode 100644
index 0000000000..51247c8574
--- /dev/null
+++ b/docs/pages/creating-fuel-pm/0090-creating-fuel-pm-from-scratch-DISPLAYSTUB.rst
@@ -0,0 +1,4 @@
+.. include:: /pages/creating-fuel-pm/0010-creating-fuel-pm-from-scratch.rst
+.. include:: /pages/creating-fuel-pm/0045-configuring-fuel-pm.rst
+.. include:: /pages/creating-fuel-pm/0050-installing-configuring-cobbler.rst
+.. include:: /pages/creating-fuel-pm/0060-register-with-fuel.rst
diff --git a/docs/pages/frequently-asked-questions/0000-technical-DISPLAYSTUB.rst b/docs/pages/frequently-asked-questions/0000-technical-DISPLAYSTUB.rst
new file mode 100644
index 0000000000..e67a483311
--- /dev/null
+++ b/docs/pages/frequently-asked-questions/0000-technical-DISPLAYSTUB.rst
@@ -0,0 +1,6 @@
+Known Issues and Workarounds
+----------------------------
+
+.. include:: /pages/frequently-asked-questions/0010-rabbitmq.rst
+.. include:: /pages/frequently-asked-questions/0020-galera.rst
+
diff --git a/docs/pages/frequently-asked-questions/0070-common-technical-issues.rst b/docs/pages/frequently-asked-questions/0070-common-technical-issues.rst
index b57f5298e6..e5f3429e1d 100644
--- a/docs/pages/frequently-asked-questions/0070-common-technical-issues.rst
+++ b/docs/pages/frequently-asked-questions/0070-common-technical-issues.rst
@@ -125,3 +125,12 @@ In most casts, Fuel creates the XFS partition for you. If for some reason you n
noatime,nodiratime,nobarrier,logbufs=8 0 0" >> /etc/fstab
mount -a
+
+Redeploying a node from scratch
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Compute and Cinder nodes in an HA configuration, and controllers in any configuration, cannot be redeployed without completely redeploying the cluster. However, in a non-HA situation you can redeploy a compute or Cinder node. Simply follow these steps:
+
+#. Remove the certificate for the node by executing the command ``puppet cert clean `` on fuel-pm.
+#. Reboot the node over the network so it can be picked up by Cobbler.
+#. Run the puppet agent on the target node using ``puppet agent --test``.
diff --git a/docs/pages/frequently-asked-questions/0020-other-questions.rst b/docs/pages/frequently-asked-questions/0080-other-questions.rst
similarity index 100%
rename from docs/pages/frequently-asked-questions/0020-other-questions.rst
rename to docs/pages/frequently-asked-questions/0080-other-questions.rst
diff --git a/docs/pages/installation-instructions/0010-introduction.rst b/docs/pages/installation-instructions/0010-introduction.rst
index 7420c0a501..0e8c284a1d 100644
--- a/docs/pages/installation-instructions/0010-introduction.rst
+++ b/docs/pages/installation-instructions/0010-introduction.rst
@@ -1,13 +1,13 @@
How installation works
----------------------
-While version 2.0 of Fuel provided the ability to simplify installation of OpenStack, version 2.1 includes orchestration capabilities that simplify deployment an OpenStack cluster. The deployment process follows this general procedure:
+While version 2.0 of Fuel provided the ability to simplify installation of OpenStack, versions 2.1 and above include orchestration capabilities that simplify the deployment of an OpenStack cluster. The deployment process follows this general procedure:
#. Design your architecture.
#. Install Fuel onto the fuel-pm machine.
#. Configure Fuel.
#. Create the basic configuration and load it into Cobbler.
-#. PXE-boot the servers so Cobbler can install the operating system.
+#. PXE-boot the servers so Cobbler can install the operating system and prepare them for orchestration.
#. Use Fuel's included templates and the configuration to populate Puppet's site.pp file.
#. Customize the site.pp file if necessary.
#. Use the orchestrator to coordinate the installation of the appropriate OpenStack components on each node.
diff --git a/docs/pages/installation-instructions/0015-before-you-start.rst b/docs/pages/installation-instructions/0015-before-you-start.rst
index 6107002fd5..d2d61bb738 100644
--- a/docs/pages/installation-instructions/0015-before-you-start.rst
+++ b/docs/pages/installation-instructions/0015-before-you-start.rst
@@ -5,18 +5,19 @@ Before you start
Before you begin your installation, you will need to make a number of important
decisions:
-* **OpenStack features.** You must choose which of the optional OpenStack features you want. For example, you must decide whether you want to install Swift, whether you want Glance to use Swift for image storage, whether you want Cinder for block storage, and whether you want nova-network or Quantum to handle your network connectivity. In the case of this example, we will be installing Swift, and Glance will be using it. We'll also be using Cinder for block storage. Because it can be easily installed using orchestration, we will also be using Quantum.
+* **OpenStack features.** Your first decision is which of the optional OpenStack features you want. For example, you must decide whether you want to install Swift, whether you want Glance to use Swift for image storage, whether you want Cinder for block storage, and whether you want nova-network or Quantum to handle your network connectivity. In the case of this example, we will be installing Swift, and Glance will be using it. We'll also be using Cinder for block storage. Because it can be easily installed using orchestration, we will also be using Quantum.
-* **Deployment configuration.** The first decision is whether your deployment requires high availability. If you do choose to do an HA deployment, you have a choice regarding the number of controllers you want to have. Following the recommendations in the previous section for a typical HA deployment configuration, we will use 3 OpenStack controllers.
+* **Deployment configuration.** Next you need to decide whether your deployment requires high availability. If you do choose to do an HA deployment, you have a choice regarding the number of controllers you want to include. Following the recommendations in the previous section for a typical HA deployment configuration, we will use 3 OpenStack controllers.
* **Cobbler server and Puppet Master.** The heart of a Fuel install is the combination of Puppet Master and Cobbler used to create your resources. Although Cobbler and Puppet Master can be installed on separate machines, it is common practice to install both on a single machine for small to medium size clouds, and that's what we'll be doing in this example. (By default, the Fuel ISO creates a single server with both services.)
-* **Domain name.** Puppet clients generate a Certificate Signing Request (CSR), which is then signed by Puppet Master. The signed certificate can then be used to authenticate the client during provisioning. Certificate generation requires a fully qualified hostname, so you must choose a domain name to be used in your installation. We'll leave this up to you.
-* **Network addresses.** OpenStack requires a minimum of three networks. If you are deploying on physical hardware two of them -- the public network and the internal, or management network -- must be routable in your networking infrastructure. Also, if you intend for your cluster to be accessible from the Internet, you'll want the public network to be on the proper network segment. For simplicity in this case, this example assumes an Ineternet router at 192.168.0.1. Additionally, a set of private network addresses should be selected for automatic assignment to guest VMs. (These are fixed IPs for the private network). In our case, we are allocating network addresses as follows:
+* **Domain name.** Puppet clients generate a Certificate Signing Request (CSR), which is then signed by Puppet Master. The signed certificate can then be used to authenticate the client during provisioning. Certificate generation requires a fully qualified hostname, so you must choose a domain name to be used in your installation. Future versions of Fuel will enable you to choose this domain name on your own; by default, Fuel 3.0 uses ``localdomain``.
+
+* **Network addresses.** OpenStack requires a minimum of three networks. If you are deploying on physical hardware, two of them -- the public network and the internal, or management network -- must be routable in your networking infrastructure. Also, if you intend for your cluster to be accessible from the Internet, you'll want the public network to be on the proper network segment. For simplicity in this case, this example assumes an Internet router at 192.168.0.1. Additionally, a set of private network addresses should be selected for automatic assignment to guest VMs. (These are fixed IPs for the private network). In our case, we are allocating network addresses as follows:
* Public network: 192.168.0.0/24
- * Internal network: 10.20.0.0/24
- * Private network: 10.20.1.0/24
+ * Internal network: 10.0.0.0/24
+ * Private network: 10.0.1.0/24
* **Network interfaces.** All of those networks need to be assigned to the available NIC cards on the allocated machines. Additionally, if a fourth NIC is available, Cinder or block storage traffic can also be separated and delegated to the fourth NIC. In our case, we're assigning networks as follows:
diff --git a/docs/pages/installation-instructions/0020-machines.rst b/docs/pages/installation-instructions/0020-machines.rst
index 8d5779e487..9cbc725e98 100644
--- a/docs/pages/installation-instructions/0020-machines.rst
+++ b/docs/pages/installation-instructions/0020-machines.rst
@@ -1,5 +1,5 @@
-Infrastructure allocation
--------------------------
+Infrastructure allocation and installation
+------------------------------------------
The next step is to make sure that you have all of the required
hardware and software in place.
@@ -10,54 +10,69 @@ Software
You can download the latest release of the Fuel ISO from http://fuel.mirantis.com/your-downloads/.
-Alternatively, if you can't use the pre-built ISO, Mirantis also offers the Fuel Library as a tar.gz file downloadable from `Downloads `_ section of the Fuel portal.
+Alternatively, if you can't use the pre-built ISO, Mirantis also offers the Fuel Library as a tar.gz file downloadable from the `Downloads `_ section of the Fuel portal. Using this file requires a bit more manual effort, but will yield the same results as using the ISO.
-Hardware for a virtual installation
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Network setup
+^^^^^^^^^^^^^
-For a virtual installation, you need only a single machine. You can get
-by on 8GB of RAM, but 16GB will be better.
-
-To actually perform the
-installation, you need a way to create Virtual Machines. This guide
-assumes that you are using version 4.2.6 of VirtualBox, which you can download from
-
-https://www.virtualbox.org/wiki/Downloads
-
-Make sure to also install the Extension Pack.
-
-You'll need to run VirtualBox on a stable host system. Mac OS 10.7.x,
-CentOS 6.3, or Ubuntu 12.04 are preferred; results in other operating
-systems are unpredictable.
-
-You will need to allocate the following resources:
-
-* 1 server to host both Puppet Master and Cobbler. The minimum configuration for this server is:
-
- * 32-bit or 64-bit architecture
- * 1+ CPU or vCPU
- * 1024+ MB of RAM
- * 16+ GB of HDD for OS, and Linux distro storage
-
-* 3 servers to act as OpenStack controllers (called fuel-controller-01, fuel-controller-02, and fuel-controller-03). The minimum configuration for a controller in Compact mode is:
-
- * 64-bit architecture
- * 1+ CPU 1024+ MB of RAM
- * 8+ GB of HDD for base OS
- * 10+ GB of HDD for Swift
-
-* 1 server to act as the OpenStack compute node (called fuel-compute-01). The minimum configuration for a compute node with Cinder deployed on it is:
- * 64-bit architecture
- * 2048+ MB of RAM
- * 50+ GB of HDD for OS, instances, and ephemeral storage
- * 50+ GB of HDD for Cinder
-
-Instructions for creating these resources will be provided in :ref:`Installing the OS using Fuel `.
+OpenStack requires a minimum of three distinct networks: internal (or
+management), public, and private. The simplest and best mapping is to
+assign each network to a different physical interface. However, not
+all machines have three NICs, and OpenStack can be configured and
+deployed with only two physical NICs, collapsing the internal and
+public traffic onto a single NIC.
-Hardware for a physical infrastructure installation
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+If you are deploying to a simulation environment, however, it makes
+sense to just allocate three NICs to each VM in your OpenStack
+infrastructure, one each for the internal, public, and private networks respectively.
+
+
+
+Finally, we must assign network ranges to the internal, public, and private
+networks, and IP addresses to the fuel-pm, fuel-controller, and fuel-compute nodes. For a real deployment using physical
+infrastructure, you must work with your IT department to determine which IPs to use, but
+for the purposes of this exercise we will assume the following network and
+IP assignments:
+
+
+#. 10.0.0.0/24: management or internal network, for communication between Puppet master and Puppet clients, as well as PXE/TFTP/DHCP for Cobbler.
+#. 192.168.0.0/24: public network, for the High Availability (HA) Virtual IP (VIP), as well as floating IPs assigned to OpenStack guest VMs
+#. 10.0.1.0/24: private network, fixed IPs automatically assigned to guest VMs by OpenStack upon their creation
+
+
+
+
+Next we need to allocate a static IP address from the internal network
+to eth0 for fuel-pm and for the controller, compute, and (if necessary) quantum
+nodes. For High Availability (HA) we must choose and assign an IP
+address from the public network to HAProxy running on the controllers.
+You can configure the network addresses and network masks according to your
+needs, but our instructions will assume the following network settings
+on the interfaces:
+
+
+
+#. eth0: internal management network, where each machine will have a static IP address
+
+ * 10.0.0.100 for Puppet Master
+ * 10.0.0.101-10.0.0.103 for the controller nodes
+ * 10.0.0.110-10.0.0.126 for the compute nodes
+ * 10.0.0.10 internal Virtual IP for component access
+ * 255.255.255.0 network mask
+
+#. eth1: public network
+
+ * 192.168.0.10 public Virtual IP for access to the Horizon GUI (OpenStack management interface)
+
+#. eth2: private network, for communication between OpenStack VMs; this interface has no IP address and runs with promiscuous mode enabled.
+
+
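+For example, the resulting eth0 configuration on fuel-controller-01 might look like this (a sketch of a CentOS interface file; the names and addresses come from the assignments above, so adjust them to your environment)::
+
+    # /etc/sysconfig/network-scripts/ifcfg-eth0
+    DEVICE=eth0
+    BOOTPROTO=static
+    IPADDR=10.0.0.101
+    NETMASK=255.255.255.0
+    ONBOOT=yes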
+
+
+Physical installation infrastructure
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The amount of hardware necessary for an installation depends on the
choices you have made above. This sample installation requires the
@@ -74,7 +89,7 @@ following hardware:
* 64-bit architecture
* 1+ CPU
- * 1024+ MB of RAM
+ * 1024+ MB of RAM (2048+ MB preferred)
* 400+ GB of HDD
* 1 server to act as the OpenStack compute node (called fuel-compute-01). The minimum configuration for a compute node with Cinder deployed on it is:
@@ -88,14 +103,121 @@ following hardware:
additional server with specifications comparable to the controller
nodes.)
+Make sure your hardware is capable of PXE booting over the network from Cobbler. You'll also need each server's MAC address.
+
+
For a list of certified hardware configurations, please `contact the
Mirantis Services team `_.
-Providing the OpenStack nodes
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Virtual installation infrastructure
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+For a virtual installation, you need only a single machine. You can get
+by on 8GB of RAM, but 16GB will be better.
+
+To actually perform the
+installation, you need a way to create Virtual Machines. This guide
+assumes that you are using version 4.2.12 of VirtualBox, which you can download from
+
+https://www.virtualbox.org/wiki/Downloads
+
+Make sure to also install the Extension Pack.
+
+You'll need to run VirtualBox on a stable host system. Mac OS 10.7.x,
+CentOS 6.3+, or Ubuntu 12.04 are preferred; results in other operating
+systems are unpredictable.
+
+
+Configuring VirtualBox
+++++++++++++++++++++++
+
+If you are on VirtualBox, create the following hostonly adapters, or
+make sure that they already exist and are configured correctly:
+
+* VirtualBox -> File -> Preferences...
+
+ * Network -> Add HostOnly Adapter (vboxnet0)
+
+ * IPv4 Address: 10.0.0.1
+ * IPv4 Network Mask: 255.255.255.0
+ * DHCP server: disabled
+
+ * Network -> Add HostOnly Adapter (vboxnet1)
+
+ * IPv4 Address: 10.0.1.1
+ * IPv4 Network Mask: 255.255.255.0
+ * DHCP server: disabled
+
+ * Network -> Add HostOnly Adapter (vboxnet2)
+
+ * IPv4 Address: 0.0.0.0
+ * IPv4 Network Mask: 255.255.255.0
+ * DHCP server: disabled
+
+In this example, only the first two adapters will be used, but you can choose to use the third to handle your storage network traffic.
+
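+If you prefer the command line, the same adapters can be created with ``VBoxManage`` (a sketch; VirtualBox assigns the vboxnetX names in creation order, and the ``dhcpserver remove`` calls are only needed if a DHCP server was created for the interface)::
+
+    VBoxManage hostonlyif create                   # creates vboxnet0
+    VBoxManage hostonlyif ipconfig vboxnet0 --ip 10.0.0.1 --netmask 255.255.255.0
+    VBoxManage dhcpserver remove --ifname vboxnet0
+    VBoxManage hostonlyif create                   # creates vboxnet1
+    VBoxManage hostonlyif ipconfig vboxnet1 --ip 10.0.1.1 --netmask 255.255.255.0
+    VBoxManage dhcpserver remove --ifname vboxnet1
+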
+After creating these interfaces, reboot the host machine to make sure that
+DHCP isn't running in the background.
+
+Installing on Windows isn't recommended, but if you're attempting it,
+you will also need to set up the IP address & network mask under
+Control Panel > Network and Internet > Network and Sharing Center for the
+Virtual HostOnly Network adapter.
+
+
+
+Creating fuel-pm
+++++++++++++++++
+
+The process of creating a virtual machine to host Fuel in VirtualBox depends on
+whether your deployment is purely virtual or consists of a physical or virtual
+fuel-pm controlling physical hardware. If your deployment is purely
+virtual then Adapter 1 may be a Hostonly adapter attached to
+vboxnet0, but if your deployment infrastructure consists of a virtual
+fuel-pm controlling physical machines, Adapter 1 must be a Bridged
+Adapter, connected to whatever network interface of the host machine
+is connected to your physical machines.
+
+To create fuel-pm, start up VirtualBox and create a new machine as follows:
+
+* Machine -> New...
+
+ * Name: fuel-pm
+ * Type: Linux
+ * Version: Red Hat (64 Bit)
+ * Memory: 2048 MB
+ * Drive space: 16 GB HDD
+
+* Machine -> Settings... -> Network
+
+ * Adapter 1
+
+ * Physical network
+ * Enable Network Adapter
+ * Attached to: Bridged Adapter
+ * Name: The host machine's network with access to the network on which the physical machines reside
+ * VirtualBox installation
+ * Enable Network Adapter
+ * Attached to: Hostonly Adapter
+ * Name: vboxnet0
+
+ * Adapter 2
+
+ * Enable Network Adapter
+ * Attached to: Bridged Adapter
+ * Name: eth0 (or whichever physical network is attached to the Internet)
+
+* Machine -> Storage
+
+ * Attach the downloaded ISO as a drive
+
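+If you'd rather script the VM creation, the GUI steps above correspond roughly to the following ``VBoxManage`` calls (a sketch for the purely virtual case; the disk file name and ISO path are placeholders)::
+
+    VBoxManage createvm --name fuel-pm --ostype RedHat_64 --register
+    VBoxManage modifyvm fuel-pm --memory 2048 \
+        --nic1 hostonly --hostonlyadapter1 vboxnet0 \
+        --nic2 bridged --bridgeadapter2 eth0
+    VBoxManage createhd --filename fuel-pm.vdi --size 16384
+    VBoxManage storagectl fuel-pm --name SATA --add sata
+    VBoxManage storageattach fuel-pm --storagectl SATA \
+        --port 0 --device 0 --type hdd --medium fuel-pm.vdi
+    VBoxManage storageattach fuel-pm --storagectl SATA \
+        --port 1 --device 0 --type dvddrive --medium fuel-3.0.iso
+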
+If you can't (or would rather not) install from the ISO, you can find instructions for installing from the Fuel Library in :ref:`Appendix A `.
+
+
+
+Creating the OpenStack nodes
+++++++++++++++++++++++++++++
-If you are using hardware, make sure it is capable of PXE booting over
-the network from Cobbler. You'll also need each server's mac address.
@@ -119,11 +241,11 @@ record the corresponding mac address.
* Name: fuel-controller-01 (you will need to repeat these steps for fuel-controller-02, fuel-controller-03, and fuel-compute-01)
* Type: Linux
* Version: Red Hat (64 Bit)
- * Memory: 1024MB
+ * Memory: 2048MB
+ * Drive space: 8GB
-
-* Machine -> System -> Motherboard...
+* Machine -> Settings -> System
* Check Network in Boot sequence
@@ -131,38 +253,28 @@ record the corresponding mac address.
* Controller: SATA
- * Click the Add icon at the bottom of the Storage Tree pane
+ * Click the Add icon at the bottom of the Storage Tree pane and choose Add Disk
* Add a second VDI disk of 10GB for storage
-* Machine -> Settings... -> Network
-
-
+* Machine -> Settings -> Network
* Adapter 1
-
-
* Enable Network Adapter
* Attached to: Hostonly Adapter
* Name: vboxnet0
-
-
* Adapter 2
-
-
* Enable Network Adapter
* Attached to: Bridged Adapter
       * Name: eth0 (the physical network attached to the Internet; you can also use a gateway)
-
-
* Adapter 3
* Enable Network Adapter
* Attached to: Hostonly Adapter
- * Name: vboxnet2
+ * Name: vboxnet1
* Advanced -> Promiscuous mode: Allow All
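+
+If you scripted the creation of fuel-pm as shown earlier, the per-node differences are mainly the memory and disks, the network boot order, and promiscuous mode on the third adapter; for example (a sketch)::
+
+    VBoxManage modifyvm fuel-controller-01 --boot1 net --boot2 disk
+    VBoxManage modifyvm fuel-controller-01 --nicpromisc3 allow-all
+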
diff --git a/docs/pages/installation-instructions/0040-installing-configuring-puppet-master.rst b/docs/pages/installation-instructions/0040-installing-configuring-puppet-master.rst
index ded1c94335..e4713fa221 100644
--- a/docs/pages/installation-instructions/0040-installing-configuring-puppet-master.rst
+++ b/docs/pages/installation-instructions/0040-installing-configuring-puppet-master.rst
@@ -1,6 +1,6 @@
-Installing & Configuring Puppet Master
---------------------------------------
+Installing & Configuring Fuel
+-----------------------------
Now that you know what you're going to install and where you're going to
install it, it's time to begin putting the pieces together. To do that,
you'll need to create the Puppet master and Cobbler servers, which will
@@ -18,160 +18,54 @@ network presence on the same network the physical machines will
ultimately PXE boot from. In a simulation environment fuel-pm only
needs virtual network (hostonlyif) connectivity.
-The easiest way to create an instance of fuel-pm is to download the
-Mirantis ISO from http://fuel.mirantis.com/your-downloads/
+At this point, you should have either a physical or virtual machine that
+can be booted from the Mirantis ISO, downloaded from http://fuel.mirantis.com/your-downloads/.
This ISO can be used to create fuel-pm on a physical or virtual
-machine based on CentOS6.3x86_64minimal.iso. If for some reason you
+machine based on CentOS 6.4. If for some reason you
can't use this ISO, follow the instructions in :ref:`Creating the Puppet master ` to create
your own fuel-pm, then skip ahead to :ref:`Configuring fuel-pm `.
+Installing Fuel from the ISO
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-Network setup
-^^^^^^^^^^^^^
+Start the new machine to install the ISO. The only real installation decision you will need to make is to specify the interface through which the installer can access the Internet. Choose eth1, as that's the interface connected to the Internet.
-OpenStack requires a minimum of three distinct networks: internal (or
-management), public, and private. The simplest and best mapping is to
-assign each network to a different physical interface. However, not
-all machines have three NICs, and OpenStack can be configured and
-deployed with only two physical NICs, collapsing the internal and
-public traffic onto a single NIC.
+Configuring fuel-pm from the ISO installation
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Once fuel-pm finishes installing, you'll be presented with a basic menu. You can use this menu to set the basic information Fuel will need to configure your installation. You can customize these steps for your own situation, of course, but here are the steps to take for the example installation:
+#. Future versions of Fuel will enable you to change the hostname and domain name for your admin node and cluster, respectively. For now, your admin node must be called ``fuel-pm``, and your domain name must be ``localdomain``.
+#. To configure the management interface, choose 2.
-If you are deploying to a simulation environment, however, it makes
-sense to just allocate three NICs to each VM in your OpenStack
-infrastructure. For VirtualBox, this means creating three Host Only
-interfaces, vboxnet0, vboxnet1, and vboxnet2, for the internal,
-public, and private networks respectively.
+ * The example specifies eth0 as the internal, or management, interface, so enter that.
+ * The management network in the example is using static IP addresses, so specify no when asked about using DHCP.
+ * Enter the IP address of 10.0.0.100 for the Puppet Master, and the netmask of 255.255.255.0. Future versions of Fuel will enable you to choose a different IP range for your management interface.
+ * Set the gateway and DNS servers if desired. In this example, we'll use the router at 192.168.0.1 as the gateway.
+#. To configure the external interface, which VMs will use to send traffic to and from the Internet, choose 3. Set the interface to eth1. By default, this interface uses DHCP, which is what the example calls for.
+#. To choose the start and end addresses to be used during PXE boot, choose 4. In the case of this example, the start address is 10.0.0.201 and the end address is 10.0.0.254. Later, these nodes will receive IP addresses from Cobbler.
-Finally, we must assign network ranges to the internal, public, and private
-networks, and ip addresses to fuel-pm, fuel-controllers, and fuel-compute nodes. For a real deployment using physical infrastructure you must work with your IT department to determine which IPs to use, but
-for the purposes of this exercise we will assume the below network and
-ip assignments:
+#. Future versions of Fuel will enable you to choose a custom set of repositories.
+#. If you need to specify a proxy through which fuel-pm will access the Internet, choose 6.
-#. 10.20.0.0/24: management or internal network, for communication between Puppet master and Puppet clients, as well as PXE/TFTP/DHCP for Cobbler
-#. 192.168.0.0/24: public network, for the High Availability (HA) Virtual IP (VIP), as well as floating IPs assigned to OpenStack guest VMs
-#. 10.20.1.0/24: private network, fixed IPs automatically assigned to guest VMs by OpenStack upon their creation
+#. Once you've finished editing, choose 9 to save your changes and exit the menu.
+
+Please note: Even though defaults are shown, you must set actual values; if you simply press "enter" you will wind up with empty values.
+
+To re-enter the menu at any time, type::
+
+ bootstrap_admin_node.sh
-Next we need to allocate a static IP address from the internal network
-to eth0 for fuel-pm, and eth1 for the controller, compute, and (if necessary) quantum
-nodes. For High Availability (HA) we must choose and assign an IP
-address from the public network to HAProxy running on the controllers.
-You can configure network addresses/network mask according to your
-needs, but our instructions will assume the following network settings
-on the interfaces:
-
-
-
-#. eth0: internal management network, where each machine will have a static IP address
-
- * 10.20.0.100 for Puppet Master
- * 10.20.0.101-10.0.0.103 for the controller nodes
- * 10.20.0.110-10.0.0.126 for the compute nodes
- * 10.20.0.10 internal Virtual IP for component access
- * 255.255.255.0 network mask
-
-#. eth1: public network
-
- * 192.168.0.10 public Virtual IP for access to the Horizon GUI (OpenStack management interface)
-
-#. eth2: for communication between OpenStack VMs without IP address with promiscuous mode enabled.
-
-
-
-If you are on VirtualBox, please create or make sure the following
-hostonly adapters exist and are configured correctly:
-If you are on VirtualBox, create the following adapters:
-
-* VirtualBox -> Preferences...
- * Network -> Add HostOnly Adapter (vboxnet0)
- * IPv4 Address: 10.20.0.1
- * IPv4 Network Mask: 255.255.255.0
- * DHCP server: disabled
- * Network -> Add HostOnly Adapter (vboxnet1)
- * IPv4 Address: 10.20.1.1
- * IPv4 Network Mask: 255.255.255.0
- * DHCP server: disabled
- * Network -> Add HostOnly Adapter (vboxnet2)
- * IPv4 Address: 0.0.0.0
- * IPv4 Network Mask: 255.255.255.0
- * DHCP server: disabled
-
-After creating this interface, reboot the host machine to make sure that
-DHCP isn't running in the background.
-
-Installing on Windows isn't recommended, but if you're attempting it,
-you will also need to set up the IP address & network mask under
-Control Panel > Network and Internet > Network and Sharing Center for the
-Virtual HostOnly Network adapter.
-
-
-Creating fuel-pm on a Physical Machine
---------------------------------------
-
-If you plan to provision the Puppet master on hardware, you need to
-create a bootable DVD or USB disk from the downloaded ISO, then make
-sure that you can boot your server from the DVD or USB drive.
-
-
-Creating fuel-pm on a Virtual Machine
--------------------------------------
-
-The process of creating a virtual machine to host Fuel in VirtualBox depends on
-whether your deployment is purely virtual or consists of a virtual
-fuel-pm controlling physical hardware. If your deployment is purely
-virtual then Adapter 1 may be a Hostonly adapter attached to
-vboxnet0, but if your deployment infrastructure consists of a virtual
-fuel-pm controlling physical machines Adapter 1 must be a Bridged
-Adapter, connected to whatever network interface of the host machine
-is connected to your physical machines.
-
-Start up VirtualBox and create a new machine as follows:
-
-* Machine -> New...
-
- * Name: fuel-pm
- * Type: Linux
- * Version: Red Hat (32 or 64 Bit)
- * Memory: 2048 MB
- * Drive space: 16 GB HDD
-
-* Machine -> Settings... -> Network
-
- * Adapter 1
-
- * Physical network
- * Enable Network Adapter
- * Attached to: Bridged Adapter
- * Name: The host machine's network with access to the network on which the physical machines reside
- * VirtualBox installation
- * Enable Network Adapter
- * Attached to: Hostonly Adapter
- * Name: vboxnet0
-
- * Adapter 2
-
- * Enable Network Adapter
- * Attached to: Bridged Adapter
- * Name: eth0 (or whichever physical network is attached to the Internet)
-
-* Machine -> Storage
-
- * Attach the downloaded ISO as a drive
-
-
-If you can't (or would rather not) install from the ISO, you can find instructions for installing from the Fuel Library in :ref:`Appendix A `.
-
diff --git a/docs/pages/installation-instructions/0042-installing-the-iso.rst b/docs/pages/installation-instructions/0042-installing-the-iso.rst
deleted file mode 100644
index 08ff2dae44..0000000000
--- a/docs/pages/installation-instructions/0042-installing-the-iso.rst
+++ /dev/null
@@ -1,5 +0,0 @@
-Installing Fuel from the ISO
-----------------------------
-
-Start the new machine to install the ISO. The only real installation decision you will need to make is to specify the interface through which the installer can access the Internet. Choose eth1, as it's connected to the Internet-connected interface.
-
diff --git a/docs/pages/installation-instructions/0045-configuring-the-iso.rst b/docs/pages/installation-instructions/0045-configuring-the-iso.rst
deleted file mode 100644
index 7219d65078..0000000000
--- a/docs/pages/installation-instructions/0045-configuring-the-iso.rst
+++ /dev/null
@@ -1,36 +0,0 @@
-Configuring fuel-pm from the ISO installation
----------------------------------------------
-
-Once fuel-pm finishes installing, you'll be presented with a basic menu. You can use this menu to set the basic information Fuel will need to configure your installation. You can customize these steps for your own situation, of course, but here are the steps to take for the example installation:
-
-#. To set the fully-qualified domain name for the master node and cloud domain, choose 1.
-
- * Type ``fuel-pm`` for the hostname.
- * Set your own domain name.
-
-#. To configure the management interface, choose 2.
-
- * The example specifies eth0 as the internal, or management interface, so enter that.
- * The management network in the example is using static IP addresses, so specify no for for using DHCP.
- * Enter the IP address of 10.20.0.100 for the Puppet Master, and the netmask of 255.255.255.0.
- * Set the gateway and DNS servers if desired.
-
-#. To configure the external interface, which will be used to send traffic to and from the internet, choose 3. Set the interface to eth1. By default, this interface uses DHCP, which is what the example calls for.
-
-#. To choose the start and end addresses to be used during PXE boot, choose 4. In the case of this example, the start address is 10.20.0.110 and the end address is 10.20.0.126. Later, these notes will receive IP addresses from Cobbler.
-
-Future versions of Fuel will enable you to choose a custom set of repositories.
-
-Please note: You must set actual values; if you simply press "enter" you will wind up with empty values.
-
-5. Once you've finished editing, choose 6 to save your changes and exit the menu.
-
-To re-enter the menu at any time, type::
-
- bootstrap_admin_node.sh
-
-
-
-
-
-
diff --git a/docs/pages/installation-instructions/0050-configuring-cobbler.rst b/docs/pages/installation-instructions/0050-configuring-cobbler.rst
index 449e777838..a8b5971f4a 100644
--- a/docs/pages/installation-instructions/0050-configuring-cobbler.rst
+++ b/docs/pages/installation-instructions/0050-configuring-cobbler.rst
@@ -1,7 +1,15 @@
+.. _Install-OS-Using-Fuel:
+
+Installing the OS using Fuel
+----------------------------
+
+The first step in creating the actual OpenStack nodes is to let Fuel's Cobbler kickstart and preseed files assist in the installation of operating systems on the target servers.
+
+
.. _Configuring-Cobbler:
-Configuring Cobbler
--------------------
+Configuring Cobbler with config.yaml
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Fuel uses a single file, ``config.yaml``, to both configure Cobbler and assist in the configuration of the ``site.pp`` file. This file appears in the ``/root`` directory when the master node (fuel-pm) is provisioned and configured.
@@ -10,16 +18,16 @@ You'll want to configure this example for your own situation, but the example lo
common:
orchestrator_common:
attributes:
- deployment_mode: ha_compute
+ deployment_mode: ha_compact
deployment_engine: simplepuppet
task_uuid: deployment_task
-Possible values for ``deployment_mode`` are ``singlenode_compute``, ``multinode_compute``, ``ha_compute``, ``ha_compact``, ``ha_full``, and ``ha_minimal``. Change the ``deployment_mode`` to ``ha_compute`` to tell Fuel to use HA architecture. The ``simplepuppet`` ``deployment_engine`` means that the orchestrator will be calling Puppet on each of the nodes.
+Possible values for ``deployment_mode`` are ``singlenode_compute``, ``multinode_compute``, ``ha_compute``, ``ha_compact``, ``ha_full``, and ``ha_minimal``. Change the ``deployment_mode`` to ``ha_compact`` to tell Fuel to use HA architecture. Specifying the ``simplepuppet`` deployment engine means that the orchestrator will be calling Puppet on each of the nodes.
Next you'll need to set OpenStack's networking information::
openstack_common:
- internal_virtual_ip: 10.20.0.10
+ internal_virtual_ip: 10.0.0.10
public_virtual_ip: 192.168.0.10
create_networks: true
fixed_range: 172.16.0.0/16
@@ -31,7 +39,7 @@ Change the virtual IPs to match the target networks, and set the fixed and float
nv_physical_volumes:
- /dev/sdb
-By setting the ``nv_physical_volumes`` value, you are not only telling OpenStack to use this value for Cinder (you'll see more about that in the ``site.pp`` file), you're also telling Fuel where Cinder should be storing data.
+By setting the ``nv_physical_volumes`` value, you are not only telling OpenStack to use this value for Cinder (you'll see more about that in the ``site.pp`` file), but also where Cinder should store its data.
Later, we'll set up a new partition for Cinder, so tell Cobbler to create it here. ::
@@ -41,9 +49,15 @@ Later, we'll set up a new partition for Cinder, so tell Cobbler to create it her
pool_start: 192.168.0.110
pool_end: 192.168.0.126
-Set the ``public_net_router`` to point to the real router at the public network. The ``ext_bridge`` is the ip of the Quantum bridge. It should assigned to any available free IP on the public network that's outside the floating range. You also have the option to simply set it to ``0.0.0.0``. The ``pool_start`` and ``pool_end`` values represent the public addresses of your nodes, and should be within the ``floating_range``. ::
+Set the ``public_net_router`` to point to the real router on the public network. The ``ext_bridge`` is the IP of the Quantum bridge. It should be assigned any available free IP on the public network that's outside the floating range. You also have the option to simply set it to ``0.0.0.0``. The ``pool_start`` and ``pool_end`` values represent the public addresses of your nodes, and should be within the ``floating_range``. ::
segment_range: 900:999
+ network_manager: nova.network.manager.FlatDHCPManager
+ auto_assign_floating_ip: true
+ quantum_netnode_on_cnt: true
+
+Fuel provides two choices for your network manager: FlatDHCPManager and VlanManager. By default, the system uses FlatDHCPManager. Here you can see that we're also telling OpenStack to automatically assign a floating IP to an instance when it's created, and to put the Quantum services on the controllers rather than a separate node. ::
+
use_syslog: false
syslog_server: 127.0.0.1
mirror_type: default
@@ -66,79 +80,82 @@ Depending on how you've set up your network, you can either set the ``default_ga
nagios_master: fuel-controller-01.localdomain
loopback: loopback
cinder: true
- cinder_nodes: [ 'controller' ]
+ cinder_nodes:
+ - controller
swift: true
The loopback setting determines how Swift stores data. If you set the value to ``loopback``, Swift will use 1 GB files as storage devices. If you tuned Cobbler to create a partition for Swift and mounted it to ``/srv/nodes/``, then you should set ``loopback`` to ``false``.
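+
+To make "loopback storage" concrete: a loopback device is just a large file exposed as a block device, roughly like this (an illustration only, not a deployment step; the path is hypothetical)::
+
+    dd if=/dev/zero of=/srv/loop/swift1 bs=1M count=1024   # create a 1 GB backing file
+    losetup /dev/loop0 /srv/loop/swift1                    # expose it as a block device
+    mkfs.xfs /dev/loop0                                    # format it like any physical drive
+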
In this example, you're using Cinder and including it on the controller nodes, so note that appropriately. Also, you're using Swift, so turn that on here. ::
- repo_proxy: http://10.20.0.100:3128
+ repo_proxy: http://10.0.0.100:3128
-One improvement in Fuel 2.1 was the ability for the master node to cache downloads in order to speed up installs; by default the ``repo_proxy`` is set to point to fuel-pm in order to let that happen. ::
+One improvement in Fuel 2.1 was the ability for the master node to cache downloads in order to speed up installs; by default the ``repo_proxy`` is set to point to fuel-pm in order to let that happen. One consequence of that is that your deployment will actually go faster if you let one install complete, then do all the others, rather than running all of them concurrently. ::
deployment_id: '53'
Fuel enables you to manage multiple clusters; setting the ``deployment_id`` will let Fuel know which deployment you're working with. ::
dns_nameservers:
- - 10.20.0.100
+ - 10.0.0.100
- 8.8.8.8
The slave nodes should first look to the master node for DNS, so mark that as your first nameserver.
-The next step is to define the nodes themselves. To do that, you'll list each node once for each role that needs to be installed. ::
+The next step is to define the nodes themselves. To do that, you'll list each node once for each role that needs to be installed. Note that by default the first node is called ``fuel-cobbler``; change it to ``fuel-pm``. ::
nodes:
- name: fuel-pm
role: cobbler
- internal_address: 10.20.0.100
+ internal_address: 10.0.0.100
public_address: 192.168.0.100
- name: fuel-controller-01
role: controller
- internal_address: 10.20.0.101
+ internal_address: 10.0.0.101
public_address: 192.168.0.101
swift_zone: 1
- name: fuel-controller-02
role: controller
- internal_address: 10.20.0.102
+ internal_address: 10.0.0.102
public_address: 192.168.0.102
swift_zone: 2
- name: fuel-controller-03
role: controller
- internal_address: 10.20.0.103
+ internal_address: 10.0.0.103
public_address: 192.168.0.103
swift_zone: 3
- name: fuel-controller-01
role: quantum
- internal_address: 10.20.0.101
+ internal_address: 10.0.0.101
public_address: 192.168.0.101
- name: fuel-compute-01
role: compute
- internal_address: 10.20.0.110
+ internal_address: 10.0.0.110
public_address: 192.168.0.110
-Notice that each node is listed multiple times; this is because each node fulfills multiple roles.
+Notice that each node can be listed multiple times; this is because each node fulfills multiple roles. Note also that the IP address for fuel-compute-01 is *.110, not *.105.
The ``cobbler_common`` section applies to all machines::
cobbler_common:
# for Centos
- profile: "centos63_x86_64"
+ profile: "centos64_x86_64"
# for Ubuntu
# profile: "ubuntu_1204_x86_64"
-Fuel can install CentOS or Ubuntu on your servers, or you can add a profile of your own. By default, ``config.yaml`` uses Ubuntu, but for our example we'll use CentOS. ::
+Fuel can install CentOS or Ubuntu on your servers, or you can add a profile of your own. By default, ``config.yaml`` uses CentOS. ::
netboot-enabled: "1"
# for Ubuntu
# ksmeta: "puppet_version=2.7.19-1puppetlabs2 \
# for Centos
- name-servers: "10.20.0.100"
+ name-servers: "10.0.0.100"
name-servers-search: "localdomain"
- gateway: 10.20.0.100
+ gateway: 192.168.0.1
-Set the default nameserver to be fuel-pm, and change the domain name to your own domain name. Set the ``gateway`` to the public network's default gateway. Alternatively, if you don't plan to use your public networks actual gateway, you can set this value to be the IP address of the master node. **Please note:** You must specify a working gateway (or proxy) in order to install OpenStack, because the system will need to communicate with public repositories. ::
+Set the default nameserver to be fuel-pm, and change the domain name to your own domain name. Set the ``gateway`` to the public network's default gateway. Alternatively, if you don't plan to use your public network's actual gateway, you can set this value to be the IP address of the master node.
+
+**Please note:** You must specify a working gateway (or proxy) in order to install OpenStack, because the system will need to communicate with public repositories. ::
ksmeta: "puppet_version=2.7.19-1puppetlabs2 \
puppet_auto_setup=1 \
@@ -150,7 +167,7 @@ Change the fully-qualified domain name for the Puppet Master to reflect your own
ntp_enable=1 \
mco_auto_setup=1 \
mco_pskey=un0aez2ei9eiGaequaey4loocohjuch4Ievu3shaeweeg5Uthi \
- mco_stomphost=10.20.0.100 \
+ mco_stomphost=10.0.0.100 \
Make sure the ``mco_stomphost`` is set for the master node so that the orchestrator can find the nodes. ::
@@ -170,7 +187,7 @@ Next you'll define the actual servers. ::
eth0:
mac: "08:00:27:BD:3A:7D"
static: "1"
- ip-address: "10.20.0.101"
+ ip-address: "10.0.0.101"
netmask: "255.255.255.0"
dns-name: "fuel-controller-01.localdomain"
management: "1"
@@ -198,10 +215,10 @@ Also, make sure the ``ip-address`` is correct, and that the ``dns-name`` has you
In this example, IP addresses should be assigned as follows::
- fuel-controller-01: 10.20.0.101
- fuel-controller-02: 10.20.0.102
- fuel-controller-03: 10.20.0.103
- fuel-compute-01: 10.20.0.110
+ fuel-controller-01: 10.0.0.101
+ fuel-controller-02: 10.0.0.102
+ fuel-controller-03: 10.0.0.103
+ fuel-compute-01: 10.0.0.110
Repeat this step for each of the other controllers, and for the compute node. Note that the compute node has its own role::
@@ -212,7 +229,7 @@ Repeat this step for each of the other controllers, and for the compute node. N
eth0:
mac: "08:00:27:AE:A9:6E"
static: "1"
- ip-address: "10.20.0.110"
+ ip-address: "10.0.0.110"
netmask: "255.255.255.0"
dns-name: "fuel-compute-01.localdomain"
management: "1"
@@ -233,8 +250,8 @@ Repeat this step for each of the other controllers, and for the compute node. N
peerdns: "no"
-Load the configuration
-^^^^^^^^^^^^^^^^^^^^^^
+Loading the configuration
+^^^^^^^^^^^^^^^^^^^^^^^^^
Once you've completed the changes to ``config.yaml``, you need to load the information into Cobbler. To do that, use the ``cobbler_system`` script::
@@ -242,4 +259,39 @@ Once you've completed the changes to ``config.yaml``, you need to load the infor
Now you're ready to start spinning up the controllers and compute nodes.
+Installing the operating system
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Now that Cobbler has the correct configuration, the only thing you
+need to do is to PXE-boot your nodes. This means that they will boot over the network, with
+DHCP/TFTP provided by Cobbler, and will be provisioned accordingly,
+with the specified operating system and configuration.
+
+If you installed Fuel from the ISO, start fuel-controller-01 first and let the installation finish before starting the other nodes; Fuel will cache the downloads so subsequent installs will go faster.
+
+The process for each node looks like this:
+
+
+#. Start the VM.
+#. Press F12 immediately and select l (LAN) as the boot device.
+#. Wait for the installation to complete.
+#. Log into the new machine using root/r00tme.
+#. **Change the root password.**
+#. Check that networking is set up correctly and the machine can reach the Internet::
+
+ ping fuel-pm.localdomain
+ ping www.mirantis.com
+
+If you're unable to ping outside addresses, add the fuel-pm server as a default gateway::
+
+ route add default gw 10.0.0.100
+
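+Note that ``route add`` does not persist across reboots; on CentOS you can also record the gateway permanently (a sketch)::
+
+    echo "GATEWAY=10.0.0.100" >> /etc/sysconfig/network
+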
+**It is important to note** that if you use VLANs in your network
+configuration, PXE booting does not work on tagged interfaces.
+Therefore, all your nodes, including the one where the Cobbler service
+resides, must share one untagged VLAN (also called a native VLAN). If
+necessary, you can use the ``dhcp_interface`` parameter of the
+``cobbler::server`` class to bind the DHCP service to the appropriate
+interface.
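+
+For example, a minimal sketch (the parameter name comes from the note above; binding to eth0 is an assumption, based on eth0 carrying the internal PXE network in this guide)::
+
+    class { 'cobbler::server':
+      dhcp_interface => 'eth0',  # assumption: eth0 is the untagged internal interface
+    }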
+
diff --git a/docs/pages/installation-instructions/0055-installing-os-using-cobbler.rst b/docs/pages/installation-instructions/0055-installing-os-using-cobbler.rst
deleted file mode 100644
index ecd6fb8c7d..0000000000
--- a/docs/pages/installation-instructions/0055-installing-os-using-cobbler.rst
+++ /dev/null
@@ -1,40 +0,0 @@
-.. _Install-OS-Using-Fuel:
-
-Installing the OS using Fuel
-----------------------------
-
-The first step in creating the actual OpenStack nodes is to let Fuel's Cobbler kickstart and preseed files assist in the installation of operating systems on the target servers.
-
-Now that Cobbler has the correct configuration, the only thing you
-need to do is to PXE-boot your nodes. This means that they will boot over the network, with
-DHCP/TFTP provided by Cobbler, and will be provisioned accordingly,
-with the specified operating system and configuration.
-
-If you installed Fuel from the ISO, start fuel-controller-01 first and let the installation finish before starting the other nodes; Fuel will cache the downloads so subsequent installs will go faster.
-
-The process for each node looks like this:
-
-
-#. Start the VM.
-#. Press F12 immediately and select l (LAN) as a bootable media.
-#. Wait for the installation to complete.
-#. Log into the new machine using root/r00tme.
-#. **Change the root password.**
-#. Check that networking is set up correctly and the machine can reach the Internet::
-
- ping fuel-pm.localdomain
- ping www.mirantis.com
-
-If you're unable to ping outside addresses, add the fuel-pm server as a default gateway::
-
- route add default gw 10.20.0.100
-
-**It is important to note** that if you use VLANs in your network
-configuration, you always have to keep in mind the fact that PXE
-booting does not work on tagged interfaces. Therefore, all your nodes,
-including the one where the Cobbler service resides, must share one
-untagged VLAN (also called native VLAN). If necessary, you can use the
-``dhcp_interface`` parameter of the ``cobbler::server`` class to bind the DHCP
-service to the appropriate interface.
-
-
diff --git a/docs/pages/installation-instructions/0057-prepare-for-deployment.rst b/docs/pages/installation-instructions/0057-prepare-for-deployment.rst
index 4a1f7ee108..fda579c09b 100644
--- a/docs/pages/installation-instructions/0057-prepare-for-deployment.rst
+++ b/docs/pages/installation-instructions/0057-prepare-for-deployment.rst
@@ -1,5 +1,5 @@
-Preparing for deployment
-------------------------
+Generating the Puppet manifest
+------------------------------
Before you can deploy OpenStack, you will need to configure the site.pp file. While previous versions of Fuel required you to manually configure ``site.pp``, version 2.1 includes the ``openstack_system`` script, which uses both the ``config.yaml`` and template files for the various reference architectures to create the appropriate Puppet manifest. To create ``site.pp``, execute this command::
diff --git a/docs/pages/installation-instructions/0060-deploying-openstack.rst b/docs/pages/installation-instructions/0060-understand-the-manifest.rst
similarity index 67%
rename from docs/pages/installation-instructions/0060-deploying-openstack.rst
rename to docs/pages/installation-instructions/0060-understand-the-manifest.rst
index bcf8753f34..bfc905bce0 100644
--- a/docs/pages/installation-instructions/0060-deploying-openstack.rst
+++ b/docs/pages/installation-instructions/0060-understand-the-manifest.rst
@@ -37,7 +37,7 @@ In this case, we don't need to make any changes to the interface
settings, because they match what we've already set up. ::
# Public and Internal VIPs. These virtual addresses are required by HA topology and will be managed by keepalived.
- $internal_virtual_ip = '10.20.0.10'
+ $internal_virtual_ip = '10.0.0.10'
# Change this IP to IP routable from your 'public' network,
# e. g. Internet or your office LAN, in which your public
@@ -54,26 +54,26 @@ The next section sets up the servers themselves. If you are setting up Fuel man
{
'name' => 'fuel-pm',
'role' => 'cobbler',
- 'internal_address' => '10.20.0.100',
+ 'internal_address' => '10.0.0.100',
'public_address' => '192.168.0.100',
'mountpoints'=> "1 1\n2 1",
- 'storage_local_net_ip' => '10.20.0.100',
+ 'storage_local_net_ip' => '10.0.0.100',
},
{
'name' => 'fuel-controller-01',
'role' => 'primary-controller',
- 'internal_address' => '10.20.0.101',
+ 'internal_address' => '10.0.0.101',
'public_address' => '192.168.0.101',
'mountpoints'=> "1 1\n2 1",
- 'storage_local_net_ip' => '10.20.0.101',
+ 'storage_local_net_ip' => '10.0.0.101',
},
{
'name' => 'fuel-controller-02',
'role' => 'controller',
- 'internal_address' => '10.20.0.102',
+ 'internal_address' => '10.0.0.102',
'public_address' => '192.168.0.102',
'mountpoints'=> "1 1\n2 1",
- 'storage_local_net_ip' => '10.20.0.102',
+ 'storage_local_net_ip' => '10.0.0.102',
},
{
'name' => 'fuel-controller-03',
@@ -81,7 +81,7 @@ The next section sets up the servers themselves. If you are setting up Fuel man
'internal_address' => '10.0.0.105',
'public_address' => '192.168.0.105',
'mountpoints'=> "1 1\n2 1",
- 'storage_local_net_ip' => '10.20.0.105',
+ 'storage_local_net_ip' => '10.0.0.105',
},
{
'name' => 'fuel-compute-01',
@@ -89,7 +89,7 @@ The next section sets up the servers themselves. If you are setting up Fuel man
'internal_address' => '10.0.0.106',
'public_address' => '192.168.0.106',
'mountpoints'=> "1 1\n2 1",
- 'storage_local_net_ip' => '10.20.0.106',
+ 'storage_local_net_ip' => '10.0.0.106',
}
]
@@ -97,36 +97,32 @@ Because this section comes from a template, it will likely include a number of s
Next the ``site.pp`` file lists all of the nodes and roles you defined in the ``config.yaml`` file::
- $nodes = [{'public_address' => '10.20.1.101','name' => 'fuel-controller-01','role' =>
- 'controller','internal_address' => '10.20.0.101'},
- {'public_address' => '10.20.1.102','name' => 'fuel-controller-02','role' =>
- 'controller','internal_address' => '10.20.0.102'},
- {'public_address' => '10.20.1.101','name' => 'fuel-controller-01','role' =>
- 'compute','internal_address' => '10.20.0.101'},
- {'public_address' => '10.20.1.102','name' => 'fuel-controller-02','role' =>
- 'compute','internal_address' => '10.20.0.102'},
- {'public_address' => '10.20.1.101','name' => 'fuel-controller-01','role' =>
- 'storage','internal_address' => '10.20.0.101'},
- {'public_address' => '10.20.1.102','name' => 'fuel-controller-02','role' =>
- 'storage','internal_address' => '10.20.0.102'},
- {'public_address' => '10.20.1.101','name' => 'fuel-controller-01','role' =>
- 'swift-proxy','internal_address' => '10.20.0.101'},
- {'public_address' => '10.20.1.102','name' => 'fuel-controller-02','role' =>
- 'swift-proxy','internal_address' => '10.20.0.102'},
- {'public_address' => '10.20.1.101','name' => 'fuel-controller-01','role' =>
- 'quantum','internal_address' => '10.20.0.101'}]
+ $nodes = [{'public_address' => '192.168.0.101','name' => 'fuel-controller-01','role' =>
+ 'primary-controller','internal_address' => '10.0.0.101',
+ 'storage_local_net_ip' => '10.0.0.101', 'mountpoints' => '1 2\n2 1',
+ 'swift-zone' => 1 },
+ {'public_address' => '192.168.0.102','name' => 'fuel-controller-02','role' =>
+ 'controller','internal_address' => '10.0.0.102',
+ 'storage_local_net_ip' => '10.0.0.102', 'mountpoints' => '1 2\n2 1',
+ 'swift-zone' => 2},
+ {'public_address' => '192.168.0.103','name' => 'fuel-controller-03','role' =>
+ 'storage','internal_address' => '10.0.0.103',
+ 'storage_local_net_ip' => '10.0.0.103', 'mountpoints' => '1 2\n2 1',
+ 'swift-zone' => 3},
+ {'public_address' => '192.168.0.110','name' => 'fuel-compute-01','role' =>
+ 'compute','internal_address' => '10.0.0.110'}]
-Possible roles include ‘compute’, ‘controller’, ‘storage’, ‘swift-proxy’, ‘quantum’, ‘master’, and ‘cobbler’. Compute nodes cannot be described because it is required for them to disable network configuration. Alternatively, you can force DHCP configuration to ensure proper configuration of IP addresses, default gateways, and DNS servers. IMPORTANT: DNS servers must contain information about all nodes of the cluster. At the time of deployment of the cluster in a standard scenario, the cobbler node contains this information.
+Possible roles include ‘compute’, ‘controller’, ‘primary-controller’, ‘storage’, ‘swift-proxy’, ‘quantum’, ‘master’, and ‘cobbler’. Check the IP addresses for each node and make sure that they match what's in this array.
The file also specifies the default gateway for the nodes, in this case the public network's router::
- $default_gateway = '10.20.0.10'
+ $default_gateway = '192.168.0.1'
Next ``site.pp`` defines DNS servers and provides netmasks::
# Specify nameservers here.
    # This needs to point to the cobbler node IP, or to specially prepared nameservers if you know what you are doing.
- $dns_nameservers = ['10.20.0.10','8.8.8.8']
+ $dns_nameservers = ['10.0.0.100','8.8.8.8']
# Specify netmasks for internal and external networks.
$internal_netmask = '255.255.255.0'
@@ -138,7 +134,7 @@ Next ``site.pp`` defines DNS servers and provides netmasks::
$ha_provider = 'pacemaker'
$use_unicast_corosync = false
-Next specify the main controller. ::
+Next specify the main controller as the Nagios master. ::
# Set nagios master fqdn
$nagios_master = 'fuel-controller-01.localdomain'
@@ -231,17 +227,18 @@ These values don't actually relate to Quantum; they are used by nova-network. I
# Which IP address will be used for creating GRE tunnels.
$quantum_gre_bind_addr = $internal_address
- #Which IP have Quantum network node?
- $quantum_net_node_hostname = 'fuel-controller-03'
- $quantum_net_node_address = $controller_internal_addresses[$quantum_net_node_hostname]
-
If you are installing Quantum in non-HA mode, you will need to specify which single controller controls Quantum. ::
# If $external_ipinfo option is not defined, the addresses will be allocated automatically from $floating_range:
# the first address will be defined as an external default router,
# the second address will be attached to an uplink bridge interface,
# the remaining addresses will be utilized for the floating IP address pool.
- $external_ipinfo = {'pool_start' => '192.168.56.30','public_net_router' => '192.168.0.1', 'pool_end' => '192.168.56.60','ext_bridge' => '192.168.0.1'}
+ $external_ipinfo = {
+ 'pool_start' => '192.168.0.115',
+ 'public_net_router' => '192.168.0.1',
+ 'pool_end' => '192.168.0.126',
+ 'ext_bridge' => '0.0.0.0'
+ }
# Quantum segmentation range.
# For VLAN networks: valid VLAN VIDs can be 1 through 4094.
@@ -274,7 +271,7 @@ The remaining configuration is used to define classes that will be added to each
stage {'netconfig':
before => Stage['main'],
}
- class {'l23network': stage=> 'netconfig'}
+ class {'l23network': use_ovs => $quantum, stage=> 'netconfig'}
class node_netconfig (
$mgmt_ipaddr,
$mgmt_netmask = '255.255.255.0',
@@ -359,9 +356,7 @@ We want Cinder to be on the controller nodes, so set this value to ``['controlle
$manage_volumes = true
# Setup network interface, which Cinder uses to export iSCSI targets.
- # This interface defines which IP to use to listen on iscsi port for
- # incoming connections of initiators
- $cinder_iscsi_bind_iface = $internal_int
+ $cinder_iscsi_bind_addr = $internal_address
@@ -380,6 +375,23 @@ and specify that rather than ``$internal_int``. ::
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# Leave this parameter empty if you want to create [cinder|nova]-volumes VG by yourself
$nv_physical_volume = ['/dev/sdb']
+
+ #Evaluate cinder node selection
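+  # (A node counts as a cinder node if $cinder_nodes lists 'all', this
+  # node's hostname, its internal address, or its role; 'controller'
+  # matches both the primary and the ordinary controller roles.)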
+  if ($cinder) {
+    if (member($cinder_nodes,'all')) {
+      $is_cinder_node = true
+    } elsif (member($cinder_nodes,$::hostname)) {
+      $is_cinder_node = true
+    } elsif (member($cinder_nodes,$internal_address)) {
+      $is_cinder_node = true
+    } elsif ($node[0]['role'] =~ /controller/) {
+      $is_cinder_node = member($cinder_nodes, 'controller')
+    } else {
+      $is_cinder_node = member($cinder_nodes, $node[0]['role'])
+    }
+  } else {
+    $is_cinder_node = false
+  }
### CINDER/VOLUME END ###
@@ -405,8 +417,7 @@ Enabling Glance and Swift
There aren't many changes that you will need to make to the default
configuration in order to enable Swift to work properly in Swift
-Compact mode, but you will need to adjust for the fact that we are
-running Swift on physical partitions ::
+Compact mode, but you will need to adjust it if you want to run Swift on physical partitions. ::
...
@@ -420,7 +431,7 @@ running Swift on physical partitions ::
# set 'loopback' or false
# This parameter controls where swift partitions are located:
# on physical partitions or inside loopback devices.
- $swift_loopback = false
+      $swift_loopback = 'loopback'
The default value is ``loopback``, which tells Swift to use a loopback storage device, which is basically a file that acts like a drive, rather than an actual physical drive. You can also set this value to ``false``, which tells Swift to use a physical partition instead. ::
@@ -509,7 +520,7 @@ To tell Fuel to download packages from external repos provided by Mirantis and y
# though it is NOT recommended.
$mirror_type = 'default'
$enable_test_repo = false
- $repo_proxy = 'http://10.20.0.100:3128'
+ $repo_proxy = 'http://10.0.0.100:3128'
Once again, the ``$mirror_type`` **must** be set to ``default``. If you set it correctly in ``config.yaml`` and ran ``openstack_system`` this will already be taken care of. Otherwise, **make sure** to set this value yourself.
@@ -548,6 +559,7 @@ There are two hashes describing these limits: ``$nova_rate_limits`` and ``$cinde
'PUT' => 1000, 'GET' => 1000,
'DELETE' => 1000
}
+ ...
Enabling Horizon HTTPS/SSL mode
@@ -555,6 +567,7 @@ Enabling Horizon HTTPS/SSL mode
Using the ``$horizon_use_ssl`` variable, you have the option to decide whether the OpenStack dashboard (Horizon) uses HTTP or HTTPS::
+ ...
# 'custom': require fileserver static mount point [ssl_certs] and hostname based certificate existence
$horizon_use_ssl = false
@@ -592,9 +605,8 @@ Defining the node configurations
Now that we've set all of the global values, its time to make sure that
the actual node definitions are correct. For example, by default all
-nodes will enable Cinder on ``/dev/sdb``, but we don't want that for the
-controllers, so set ``nv_physical_volume`` to ``null``, and ``manage_volumes`` to ``false``. ::
-
+nodes will enable Cinder on ``/dev/sdb``. If you don't want that on a
+given node or nodes, you can set ``nv_physical_volume`` to ``null`` for them. ::
...
@@ -608,13 +620,6 @@ controllers, so set ``nv_physical_volume`` to ``null``, and ``manage_volumes`` t
public_interface => $public_int,
internal_interface => $internal_int,
...
- manage_volumes => false,
- galera_nodes => $controller_hostnames,
- nv_physical_volume => null,
- use_syslog => $use_syslog,
- nova_rate_limits => $nova_rate_limits,
- cinder_rate_limits => $cinder_rate_limits,
- horizon_use_ssl => $horizon_use_ssl,
use_unicast_corosync => $use_unicast_corosync,
ha_provider => $ha_provider
}
@@ -629,16 +634,8 @@ controllers, so set ``nv_physical_volume`` to ``null``, and ``manage_volumes`` t
-Fortunately, Fuel includes a class for the controllers, so you don't
-have to make these changes for each individual controller. As you can
-see, the controllers generally use the global values, but in this case
-you're telling the controllers not to manage_volumes, and not to use
-``/dev/sdb`` for Cinder.
-
-
-
-If you look down a little further, this class then goes on to help
-specify the individual controllers and compute nodes::
+Fortunately, as you can see here, Fuel includes a class for the controllers, so you don't
+have to repeat these settings for each individual controller. If you look down a little further, this class then goes on to help specify the individual controllers and compute nodes::
...
@@ -680,15 +677,16 @@ specify the individual controllers and compute nodes::
class { 'openstack::swift::proxy':
swift_user_password => $swift_user_password,
swift_proxies => $swift_proxies,
- primary_proxy => $primary_proxy,
- controller_node_address => $internal_virtual_ip,
- swift_local_net_ip => $internal_address,
+ ...
+ rabbit_ha_virtual_ip => $internal_virtual_ip,
}
}
Notice also that each controller has the swift_zone specified, so each
of the three controllers can represent each of the three Swift zones.
+Similarly, ``site.pp`` defines a class for the compute nodes.
+
Installing Nagios Monitoring using Puppet
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -696,14 +694,14 @@ Fuel provides a way to deploy Nagios for monitoring your OpenStack cluster. It w
Nagios Agent
-~~~~~~~~~~~~
+++++++++++++
In order to install Nagios NRPE on a compute or controller node, a node should have the following settings: ::
class {'nagios':
proj_name => 'test',
services => ['nova-compute','nova-network','libvirt'],
- whitelist => ['127.0.0.1','10.0.97.5'],
+ whitelist => ['127.0.0.1', $nagios_master],
hostgroup => 'compute',
}
@@ -713,7 +711,7 @@ In order to install Nagios NRPE on a compute or controller node, a node should h
* ``hostgroup``: The group to be used in the nagios master (do not forget to create the group in the nagios master).
Nagios Server
-~~~~~~~~~~~~~
++++++++++++++
In order to install Nagios Master on any convenient node, a node should have the following applied: ::
@@ -737,9 +735,9 @@ In order to install Nagios Master on any convenient node, a node should have the
Health Checks
-~~~~~~~~~~~~~
++++++++++++++
-The complete definition of the available services to monitor and their health checks can be viewed at ``deployment/puppet/nagios/manifests/params.pp``.
+You can see the complete definition of the available services to monitor and their health checks at ``deployment/puppet/nagios/manifests/params.pp``.
Here is the list: ::
@@ -779,76 +777,10 @@ Here is the list: ::
'host-alive' => 'check-host-alive',
}
-Finally, back in ``site.pp``, you define the compute nodes::
+Node definitions
+^^^^^^^^^^^^^^^^
- # Definition of OpenStack compute nodes.
- node /fuel-compute-[\d+]/ {
- ## Uncomment lines bellow if You want
- ## configure network of this nodes
- ## by puppet.
- class {'::node_netconfig':
- mgmt_ipaddr => $::internal_address,
- mgmt_netmask => $::internal_netmask,
- public_ipaddr => $::public_address,
- public_netmask => $::public_netmask,
- stage => 'netconfig',
- }
- include stdlib
- class { 'operatingsystem::checksupported':
- stage => 'setup'
- }
-
- class {'nagios':
- proj_name => $proj_name,
- services => [
- 'host-alive', 'nova-compute','nova-network','libvirt'
- ],
- whitelist => ['127.0.0.1', $nagios_master],
- hostgroup => 'compute',
- }
-
- class { 'openstack::compute':
- public_interface => $public_int,
- private_interface => $private_interface,
- internal_address => $internal_address,
- libvirt_type => 'kvm',
- fixed_range => $fixed_range,
- network_manager => $network_manager,
- network_config => { 'vlan_start' => $vlan_start },
- multi_host => $multi_host,
- sql_connection => "mysql://nova:${nova_db_password}@${internal_virtual_ip}/nova",
- rabbit_nodes => $controller_hostnames,
- rabbit_password => $rabbit_password,
- rabbit_user => $rabbit_user,
- rabbit_ha_virtual_ip => $internal_virtual_ip,
- glance_api_servers => "${internal_virtual_ip}:9292",
- vncproxy_host => $public_virtual_ip,
- verbose => $verbose,
- vnc_enabled => true,
- nova_user_password => $nova_user_password,
- cache_server_ip => $controller_hostnames,
- service_endpoint => $internal_virtual_ip,
- quantum => $quantum,
- quantum_sql_connection => $quantum_sql_connection,
- quantum_user_password => $quantum_user_password,
- quantum_host => $quantum_net_node_address,
- tenant_network_type => $tenant_network_type,
- segment_range => $segment_range,
- cinder => $cinder,
- manage_volumes => $is_cinder_node ? { true => $manage_volumes, false => false},
- cinder_iscsi_bind_iface=> $cinder_iscsi_bind_iface,
- nv_physical_volume => $nv_physical_volume,
- db_host => $internal_virtual_ip,
- ssh_private_key => 'puppet:///ssh_keys/openstack',
- ssh_public_key => 'puppet:///ssh_keys/openstack.pub',
- use_syslog => $use_syslog,
- nova_rate_limits => $nova_rate_limits,
- cinder_rate_limits => $cinder_rate_limits
- }
- }
-
-
-In the ``openstack/examples/site_openstack_full.pp`` example, the following nodes are specified:
+These are the node definitions generated for a Compact HA deployment. Other deployment configurations generate other definitions. For example, the ``openstack/examples/site_openstack_full.pp`` template specifies the following nodes:
* fuel-controller-01
* fuel-controller-02
@@ -865,97 +797,3 @@ Using this architecture, the system includes three stand-alone swift-storage ser
With ``site.pp`` prepared, you're ready to perform the actual installation.
-Installing OpenStack using Puppet directly
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-Now that you've set all of your configurations, all that's left to stand
-up your OpenStack cluster is to run Puppet on each of your nodes; the
-Puppet Master knows what to do for each of them.
-
-You have two options for performing this step. The first, and by far the easiest, is to use the orchestrator. If you're going to take that option, skip ahead to :ref:`Deploying OpenStack via Orchestration `. If you choose not to use orchestration, or if for some reason you want to reload only one or two nodes, you can run Puppet manually on a the target nodes.
-
-If you're starting from scratch, start by logging in to fuel-controller-01 and running the Puppet
-agent.
-
-One optional step would be to use the script command to log all
-of your output so you can check for errors if necessary::
-
-
-
- script agent-01.log
- puppet agent --test
-
-You will to see a great number of messages scroll by, and the
-installation will take a significant amount of time. When the process
-has completed, press CTRL-D to stop logging and grep for errors::
-
-
-
- grep err: agent-01.log
-
-
-
-If you find any errors relating to other nodes, ignore them for now.
-
-
-
-Now you can run the same installation procedure on fuel-controller-01
-and fuel-controller-02, as well as fuel-compute-01.
-
-
-
-Note that the controllers must be installed sequentially due to the
-nature of assembling a MySQL cluster based on Galera, which means that
-one must complete its installation before the next begins, but that
-compute nodes can be installed concurrently once the controllers are
-in place.
-
-
-
-In some cases, you may find errors related to resources that are not
-yet available when the installation takes place. To solve that
-problem, simply re-run the puppet agent on the affected node after running the other controllers, and
-again grep for error messages.
-
-
-
-When you see no errors on any of your nodes, your OpenStack cluster is
-ready to go.
-
-
-Examples of OpenStack installation sequences
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-When running Puppet manually, the exact sequence depends on what it is you're trying to achieve. In most cases, you'll need to run Puppet more than once; with every deployment pass Puppet collects and adds necessary absent information to the OpenStack configuration, stores it to PuppedDB and applies necessary changes.
-
- **Note:** *Sequentially run* means you don't start the next node deployment until previous one is finished.
-
- **Example 1:** **Full OpenStack deployment with standalone storage nodes**
-
- * Create necessary volumes on storage nodes as described in :ref:`create-the-XFS-partition`.
- * Sequentially run a deployment pass on every SwiftProxy node (``fuel-swiftproxy-01 ... fuel-swiftproxy-xx``), starting with the ``primary-swift-proxy node``. Node names are set by the ``$swift_proxies`` variable in ``site.pp``. There are 2 Swift Proxies by default.
- * Sequentially run a deployment pass on every storage node (``fuel-swift-01`` ... ``fuel-swift-xx``).
- * Sequentially run a deployment pass on the controller nodes (``fuel-controller-01 ... fuel-controller-xx``). starting with the ``primary-controller`` node.
- * Run a deployment pass on the Quantum node (``fuel-quantum``) to install the Quantum router.
- * Run a deployment pass on every compute node (``fuel-compute-01 ... fuel-compute-xx``) - unlike the controllers, these nodes may be deployed in parallel.
- * Run an additional deployment pass on Controller 1 only (``fuel-controller-01``) to finalize the Galera cluster configuration.
-
- **Example 2:** **Compact OpenStack deployment with storage and swift-proxy combined with nova-controller on the same nodes**
-
- * Create the necessary volumes on controller nodes as described in :ref:`create-the-XFS-partition`
- * Sequentially run a deployment pass on the controller nodes (``fuel-controller-01 ... fuel-controller-xx``), starting with the ``primary-controller node``. Errors in Swift storage such as */Stage[main]/Swift::Storage::Container/Ring_container_device[]: Could not evaluate: Device not found check device on * are expected during the deployment passes until the very final pass.
- * Run an additional deployment pass on Controller 1 only (``fuel-controller-01``) to finalize the Galera cluster configuration.
- * Run a deployment pass on the Quantum node (``fuel-quantum``) to install the Quantum router.
- * Run a deployment pass on every compute node (``fuel-compute-01 ... fuel-compute-xx``) - unlike the controllers these nodes may be deployed in parallel.
-
- **Example 3:** **OpenStack HA installation without Swift**
-
- * Sequentially run a deployment pass on the controller nodes (``fuel-controller-01 ... fuel-controller-xx``), starting with the primary controller. No errors should appear during this deployment pass.
- * Run an additional deployment pass on the primary controller only (``fuel-controller-01``) to finalize the Galera cluster configuration.
- * Run a deployment pass on the Quantum node (``fuel-quantum``) to install the Quantum router.
- * Run a deployment pass on every compute node (``fuel-compute-01 ... fuel-compute-xx``) - unlike the controllers these nodes may be deployed in parallel.
-
- **Example 4:** **The most simple OpenStack installation: Controller + Compute on the same node**
-
- * Set the ``node /fuel-controller-[\d+]/`` variable in ``site.pp`` to match the hostname of the node on which you are going to deploy OpenStack. Set the ``node /fuel-compute-[\d+]/`` variable to **mismatch** the node name. Run a deployment pass on this node. No errors should appear during this deployment pass.
- * Set the ``node /fuel-compute-[\d+]/`` variable in ``site.pp`` to match the hostname of the node on which you are going to deploy OpenStack. Set the ``node /fuel-controller-[\d+]/`` variable to **mismatch** the node name. Run a deployment pass on this node. No errors should appear during this deployment pass.
diff --git a/docs/pages/installation-instructions/0062-orchestration.rst b/docs/pages/installation-instructions/0062-orchestration.rst
deleted file mode 100644
index cea32653d5..0000000000
--- a/docs/pages/installation-instructions/0062-orchestration.rst
+++ /dev/null
@@ -1,24 +0,0 @@
-.. _orchestration:
-
-Deploying via orchestration
-----------------------------
-
-Manually installing a handful of servers might be managable, but repeatable installations, or those that involve a large number of servers, require automated orchestration. Now you can use orchestration with Fuel through the ``astute`` script. This script is configured using the ``astute.yaml`` file you created when you ran ``openstack_system``.
-
-To run the orchestrator, log in to ``fuel-pm`` and execute::
-
- astute -f astute.yaml
-
-You will see a message on ``fuel-pm`` stating that the installation has started on fuel-controller-01. To see what's going on on the target node, type::
-
- tail -f /var/log/syslog
-
-for Ubuntu, or::
-
- tail -f /var/log/messages
-
-for CentOS/Red Hat.
-
-Note that Puppet will require several runs to install all the different roles, so the first time it runs, the orchestrator will show an error, but it just means that the installation isn't complete. Also, after the first run on each server, the orchestrator doesn't output messages on fuel-pm; when it's finished running, it will return you to the command prompt. In the meantime, you can see what's going on by watching the logs on each individual machine.
-
-
diff --git a/docs/pages/installation-instructions/0070-orchestration.rst b/docs/pages/installation-instructions/0070-orchestration.rst
new file mode 100644
index 0000000000..4a036b0bfd
--- /dev/null
+++ b/docs/pages/installation-instructions/0070-orchestration.rst
@@ -0,0 +1,119 @@
+Deploying OpenStack
+-------------------
+
+You have two options for deploying OpenStack. The easier method is to use orchestration, but you can also deploy your nodes manually.
+
+.. _orchestration:
+
+Deploying via orchestration
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Manually installing a handful of servers might be manageable, but repeatable installations, or those that involve a large number of servers, require automated orchestration. You can use orchestration with Fuel through the ``astute`` script. This script is configured using the ``astute.yaml`` file you created when you ran ``openstack_system``.
+
+To confirm that your servers are ready for orchestration, execute the command::
+
+ mco ping
+
+You should see all three controllers, plus the compute node, answer the call::
+
+ fuel-compute-01 time=107.26 ms
+ fuel-controller-01 time=120.14 ms
+ fuel-controller-02 time=135.94 ms
+ fuel-controller-03 time=139.33 ms
+
+To run the orchestrator, log in to ``fuel-pm`` and execute::
+
+ astute -f astute.yaml
+
+You will see a message on ``fuel-pm`` stating that the installation has started on fuel-controller-01. To see what's going on on the target node, type::
+
+ tail -f /var/log/syslog
+
+for Ubuntu, or::
+
+ tail -f /var/log/messages
+
+for CentOS/Red Hat.
+
+Note that Puppet requires several runs to install all the different roles, so the orchestrator will show an error after the first run; this simply means that the installation isn't complete yet. Also, after the first run on each server, the orchestrator stops printing messages on fuel-pm; when it has finished, it returns you to the command prompt. In the meantime, you can follow progress by watching the logs on each individual machine.
+
+
+Installing OpenStack using Puppet directly
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+If for some reason you don't wish to use orchestration (for example, if you are adding a new node to an existing non-HA cluster), you have the option to install on an individual node or nodes using Puppet directly.
+
+Start by logging in to the target server (fuel-controller-01, if you're starting from scratch) and running the Puppet agent.
+
+Optionally, use the ``script`` command to log all of your
+output so you can check for errors later::
+
+ script agent-01.log
+ puppet agent --test
+
+You will see a great number of messages scroll by, and the
+installation will take a significant amount of time. When the process
+has completed, press CTRL-D to stop logging and grep for errors::
+
+ grep err: agent-01.log
+
+If you find any errors relating to other nodes, ignore them for now.
+
+Now you can run the same installation procedure on fuel-controller-02
+and fuel-controller-03, as well as fuel-compute-01.
+
+Note that the controllers must be installed sequentially: assembling a
+MySQL cluster based on Galera requires that each controller complete
+its installation before the next begins. Compute nodes, however, can
+be installed concurrently once the controllers are in place.
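+
+A minimal sketch of that ordering, assuming password-less SSH from
+``fuel-pm`` and the default host names (adjust both to match your
+environment)::
+
+    # controllers one at a time: Galera needs each to finish before the next starts
+    for node in fuel-controller-01 fuel-controller-02 fuel-controller-03; do
+        ssh root@${node} 'puppet agent --test'
+    done
+
+    # compute nodes may run in parallel once the controllers are done
+    for node in fuel-compute-01 fuel-compute-02; do
+        ssh root@${node} 'puppet agent --test' &
+    done
+    wait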
+
+
+
+In some cases, you may find errors related to resources that are not
+yet available when the installation takes place. To solve that
+problem, simply re-run the Puppet agent on the affected node after the
+other controllers have completed, and grep again for error messages.
+
+
+
+When you see no errors on any of your nodes, your OpenStack cluster is
+ready to go.
+
+
+Examples of OpenStack installation sequences
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+When running Puppet manually, the exact sequence depends on what you're trying to achieve. In most cases you'll need to run Puppet more than once: with every deployment pass, Puppet collects any missing information, adds it to the OpenStack configuration, stores it in PuppetDB, and applies the necessary changes.
+
+ **Note:** *Sequentially run* means you don't start the next node deployment until the previous one is finished.
+
+ **Example 1:** **Full OpenStack deployment with standalone storage nodes**
+
+ * Create necessary volumes on storage nodes as described in :ref:`create-the-XFS-partition`.
+ * Sequentially run a deployment pass on every SwiftProxy node (``fuel-swiftproxy-01 ... fuel-swiftproxy-xx``), starting with the ``primary-swift-proxy`` node. Node names are set by the ``$swift_proxies`` variable in ``site.pp``. There are two Swift proxies by default.
+ * Sequentially run a deployment pass on every storage node (``fuel-swift-01`` ... ``fuel-swift-xx``).
+ * Sequentially run a deployment pass on the controller nodes (``fuel-controller-01 ... fuel-controller-xx``), starting with the ``primary-controller`` node.
+ * Run a deployment pass on the Quantum node (``fuel-quantum``) to install the Quantum router.
+ * Run a deployment pass on every compute node (``fuel-compute-01 ... fuel-compute-xx``) - unlike the controllers, these nodes may be deployed in parallel.
+ * Run an additional deployment pass on Controller 1 only (``fuel-controller-01``) to finalize the Galera cluster configuration.
+
+ **Example 2:** **Compact OpenStack deployment with storage and swift-proxy combined with nova-controller on the same nodes**
+
+ * Create the necessary volumes on controller nodes as described in :ref:`create-the-XFS-partition`.
+ * Sequentially run a deployment pass on the controller nodes (``fuel-controller-01 ... fuel-controller-xx``), starting with the ``primary-controller`` node. Errors in Swift storage such as */Stage[main]/Swift::Storage::Container/Ring_container_device[]: Could not evaluate: Device not found check device on * are expected during the deployment passes until the very final pass.
+ * Run an additional deployment pass on Controller 1 only (``fuel-controller-01``) to finalize the Galera cluster configuration.
+ * Run a deployment pass on the Quantum node (``fuel-quantum``) to install the Quantum router.
+ * Run a deployment pass on every compute node (``fuel-compute-01 ... fuel-compute-xx``) - unlike the controllers, these nodes may be deployed in parallel.
+
+ **Example 3:** **OpenStack HA installation without Swift**
+
+ * Sequentially run a deployment pass on the controller nodes (``fuel-controller-01 ... fuel-controller-xx``), starting with the primary controller. No errors should appear during this deployment pass.
+ * Run an additional deployment pass on the primary controller only (``fuel-controller-01``) to finalize the Galera cluster configuration.
+ * Run a deployment pass on the Quantum node (``fuel-quantum``) to install the Quantum router.
+ * Run a deployment pass on every compute node (``fuel-compute-01 ... fuel-compute-xx``) - unlike the controllers, these nodes may be deployed in parallel.
+
+ **Example 4:** **The simplest OpenStack installation: Controller + Compute on the same node**
+
+ * Adjust the ``node /fuel-controller-[\d+]/`` definition in ``site.pp`` so that it matches the hostname of the node on which you are going to deploy OpenStack, and the ``node /fuel-compute-[\d+]/`` definition so that it does **not** match that hostname (see the sketch below). Run a deployment pass on this node. No errors should appear during this deployment pass.
+ * Then swap the two: adjust the ``node /fuel-compute-[\d+]/`` definition to match the hostname, and the ``node /fuel-controller-[\d+]/`` definition to **mismatch** it, and run a second deployment pass on the same node. Again, no errors should appear during this deployment pass.
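+
+ A hypothetical check for the first step above, assuming the single target host is named ``allinone`` (any regex that cannot match a real host name works for the mismatch)::
+
+     # after editing /etc/puppet/manifests/site.pp by hand so that:
+     #   node /allinone/      (was: node /fuel-controller-[\d+]/)
+     #   node /no-such-host/  (was: node /fuel-compute-[\d+]/)
+     # verify the definitions, then run the deployment pass:
+     grep '^node' /etc/puppet/manifests/site.pp
+     puppet agent --test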
diff --git a/docs/pages/installation-instructions/0065-testing-openstack.rst b/docs/pages/installation-instructions/0080-testing-openstack.rst
similarity index 100%
rename from docs/pages/installation-instructions/0065-testing-openstack.rst
rename to docs/pages/installation-instructions/0080-testing-openstack.rst
diff --git a/docs/pages/introduction/0050-supported-software.rst b/docs/pages/introduction/0050-supported-software.rst
index 078dbafc59..c1b0d1a706 100644
--- a/docs/pages/introduction/0050-supported-software.rst
+++ b/docs/pages/introduction/0050-supported-software.rst
@@ -4,30 +4,45 @@ Supported Software
Fuel has been tested and is guaranteed to work with the following software components:
* Operating Systems
- * CentOS 6.3 (x86_64 architecture only)
- * RHEL 6.3 (x86_64 architecture only)
- * Ubuntu 12.04 (x86_64 architecture only)
+ * CentOS 6.4 (x86_64 architecture only)
+ * RHEL 6.4 (x86_64 architecture only)
* Puppet (IT automation tool)
* 2.7.19
- * 3.0.0
+
+* MCollective
+ * 2.2.4
* Cobbler (bare-metal provisioning tool)
* 2.2.3
* OpenStack
- * Folsom release
+ * Grizzly release 2013.1
* Hypervisor
* KVM
+* Open vSwitch
+ * 1.10.0
+
* HA Proxy
* 1.4.19
* Galera
- * 23.2.1
+ * 23.2.2
+
+* RabbitMQ
+ * 2.8.7
+
+* Pacemaker
+ * 1.1.8
+
+* Corosync
+ * 1.4.3
* Keepalived
* 1.2.4
+* Nagios
+ * 3.4.4
diff --git a/docs/pages/introduction/0060-download-fuel.rst b/docs/pages/introduction/0060-download-fuel.rst
index a08265c311..44b8333e8e 100644
--- a/docs/pages/introduction/0060-download-fuel.rst
+++ b/docs/pages/introduction/0060-download-fuel.rst
@@ -4,8 +4,7 @@ Download Fuel
The first step in installing Fuel is to download the version
appropriate for your environment.
-Fuel is available for both Essex and Folsom OpenStack installations, and will be available for Grizzly
-shortly after Grizzly's release.
+Fuel is available for Essex, Folsom and Grizzly OpenStack installations, and will be available for Havana shortly after Havana's release.
To make your installation easier, we also offer a pre-built ISO for installing the master node with Puppet Master and Cobbler. You can mount this ISO in a physical or VirtualBox machine in order to
easily create your master node. (Instructions for performing this step
diff --git a/docs/pages/introduction/0070-release-notes.rst b/docs/pages/introduction/0070-release-notes.rst
index 2e4340ee60..08f98b6fb2 100644
--- a/docs/pages/introduction/0070-release-notes.rst
+++ b/docs/pages/introduction/0070-release-notes.rst
@@ -1,7 +1,7 @@
Release Notes
-------------
-.. include:: /pages/introduction/release-notes/v2-2-folsom.rst
+.. include:: /pages/introduction/release-notes/v3-0-grizzly.rst
.. include:: /pages/introduction/release-notes/v2-1-folsom.rst
.. include:: /pages/introduction/release-notes/v2-0-folsom.rst
.. include:: /pages/introduction/release-notes/v1-0-essex.rst
diff --git a/docs/pages/introduction/release-notes/v3-0-grizzly.rst b/docs/pages/introduction/release-notes/v3-0-grizzly.rst
new file mode 100644
index 0000000000..b36ccb8dba
--- /dev/null
+++ b/docs/pages/introduction/release-notes/v3-0-grizzly.rst
@@ -0,0 +1,34 @@
+v3.0-grizzly
+^^^^^^^^^^^^
+
+**New Features in Fuel and Fuel Web 3.0**
+
+* Support for OpenStack Grizzly
+* Support for CentOS 6.4
+* Deployment improvements
+
+ * Deployment of Cinder as a standalone node
+ * Users may now choose where to store Cinder volumes
+ * User defined disk space allocation for the base OS, Cinder and Virtual Machines
+ * Ability to add new compute nodes without redeployment of the whole environment
+ * Swift installation occurs in a single pass instead of multiple passes
+
+* Network configuration enhancements
+
+ * Support for NIC bonding
+ * Ability to map logical networks to physical interfaces
+ * Improved firewall module
+
+**Support for OpenStack Grizzly**
+
+OpenStack Grizzly is the seventh release of the open source software for building public, private, and hybrid clouds. Fuel now supports deploying the Grizzly version of OpenStack in a variety of configurations including High Availability (HA). For a list of known limitations, please refer to the Known Issues section below.
+
+**Support for CentOS 6.4**
+
+CentOS 6.4 is now the base operating system for the Fuel master node, as well as the deployed slave nodes.
+
+**Deployment Improvements**
+
+* Deployment of Cinder as a standalone node / User choice
+
+ Previously, Cinder could only be deployed onto a compute node. Now you may deploy Cinder either together with a compute node or as a standalone node; both options are available.
diff --git a/docs/pages/production-considerations/0010-introduction-DISPLAYSTUB.rst b/docs/pages/production-considerations/0010-introduction-DISPLAYSTUB.rst
new file mode 100644
index 0000000000..347e714dcd
--- /dev/null
+++ b/docs/pages/production-considerations/0010-introduction-DISPLAYSTUB.rst
@@ -0,0 +1,2 @@
+.. include:: /pages/production-considerations/0010-introduction.rst
+.. include:: /pages/production-considerations/0015-sizing-hardware.rst
diff --git a/docs/pages/reference-architecture/0020-logical-setup.rst b/docs/pages/reference-architecture/0020-logical-setup.rst
index c3760aa488..0cbd6f563e 100644
--- a/docs/pages/reference-architecture/0020-logical-setup.rst
+++ b/docs/pages/reference-architecture/0020-logical-setup.rst
@@ -1,12 +1,14 @@
Logical Setup
--------------
+^^^^^^^^^^^^^
An OpenStack HA cluster involves, at a minimum, three types of nodes:
controller nodes, compute nodes, and storage nodes.
Controller Nodes
-^^^^^^^^^^^^^^^^
+++++++++++++++++
+
+
The first order of business in achieving high availability (HA) is
redundancy, so the first step is to provide multiple controller nodes.
You must keep in mind, however, that the database uses Galera to
@@ -45,7 +47,7 @@ mechanism for achieving HA:
Compute Nodes
-^^^^^^^^^^^^^
++++++++++++++
OpenStack compute nodes are, in many ways, the foundation of your
cluster; they are the servers on which your users will create their
@@ -60,7 +62,7 @@ controller nodes using the VIP and going through HAProxy.
Storage Nodes
-^^^^^^^^^^^^^
++++++++++++++
In this OpenStack cluster reference architecture, shared storage acts
diff --git a/docs/pages/reference-architecture/0030-cluster-sizing.rst b/docs/pages/reference-architecture/0030-cluster-sizing.rst
index 39a486d706..c22403cece 100644
--- a/docs/pages/reference-architecture/0030-cluster-sizing.rst
+++ b/docs/pages/reference-architecture/0030-cluster-sizing.rst
@@ -1,6 +1,6 @@
Cluster Sizing
---------------
+^^^^^^^^^^^^^^
This reference architecture is well suited for production-grade
OpenStack deployments on a medium and large scale when you can afford
diff --git a/docs/pages/reference-architecture/0040-network-setup.rst b/docs/pages/reference-architecture/0040-network-setup.rst
index 21422b5cdc..be93eefdb8 100644
--- a/docs/pages/reference-architecture/0040-network-setup.rst
+++ b/docs/pages/reference-architecture/0040-network-setup.rst
@@ -1,6 +1,6 @@
Network Architecture
---------------------
+^^^^^^^^^^^^^^^^^^^^
The current architecture assumes the presence of 3 NIC cards in
@@ -26,7 +26,8 @@ cluster.
Public Network
-^^^^^^^^^^^^^^
+++++++++++++++
+
This network allows inbound connections to VMs from the outside world
(allowing users to connect to VMs from the Internet). It also allows
outbound connections from VMs to the outside world.
@@ -61,7 +62,7 @@ The public network also provides VIPs for Endpoint nodes, which are
used to connect to OpenStack services APIs.
Internal (Management) Network
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
++++++++++++++++++++++++++++++
The internal network connects all OpenStack nodes in the cluster. All
components of an OpenStack cluster communicate with each other using
@@ -80,7 +81,7 @@ non-globally routed IP address range.
Private Network
-^^^^^^^^^^^^^^^
++++++++++++++++
The private network facilitates communication between each tenant's
VMs. Private network address spaces are part of the enterprise network
diff --git a/docs/pages/reference-architecture/0050-technical-considerations-DISPLAYSTUB.rst b/docs/pages/reference-architecture/0050-technical-considerations-DISPLAYSTUB.rst
new file mode 100644
index 0000000000..1a4d41d460
--- /dev/null
+++ b/docs/pages/reference-architecture/0050-technical-considerations-DISPLAYSTUB.rst
@@ -0,0 +1,3 @@
+.. include:: /pages/reference-architecture/0050-technical-considerations-overview.rst
+.. include:: /pages/reference-architecture/0060-quantum-vs-nova-network.rst
+
diff --git a/docs/pages/reference-architecture/0010-technical-considerations-overview.rst b/docs/pages/reference-architecture/0050-technical-considerations-overview.rst
similarity index 100%
rename from docs/pages/reference-architecture/0010-technical-considerations-overview.rst
rename to docs/pages/reference-architecture/0050-technical-considerations-overview.rst
diff --git a/docs/pages/reference-architecture/0050-cinder-vs-nova-volume.rst b/docs/pages/reference-architecture/0070-cinder-vs-nova-volume.rst
similarity index 100%
rename from docs/pages/reference-architecture/0050-cinder-vs-nova-volume.rst
rename to docs/pages/reference-architecture/0070-cinder-vs-nova-volume.rst
diff --git a/docs/pages/reference-architecture/0070-swift-notes.rst b/docs/pages/reference-architecture/0080-swift-notes.rst
similarity index 100%
rename from docs/pages/reference-architecture/0070-swift-notes.rst
rename to docs/pages/reference-architecture/0080-swift-notes.rst
diff --git a/fuel_test/README.rst b/fuel_test/README.rst
new file mode 100644
index 0000000000..3dcf2d1244
--- /dev/null
+++ b/fuel_test/README.rst
@@ -0,0 +1,114 @@
+Grizzly CI TEMPEST parameterized job example
+==============================================
+
+This is an example of a Grizzly TEMPEST job for the CI cycle, i.e. commit & verify.
+
+Quickstart
+----------
+
+- Copy the job from the nearest best-fitting one and edit the job name to match its environment
+- Set up SCM for the required repos and provide their local directory names (``fuel`` and ``tempest`` are the minimum required)
+- Add parameters for the job, e.g. ``test_name`` with values
+
+ - fuel_test.cobbler.test_simple:SimpleTestCase.test_simple
+ - fuel_test.cobbler.test_simple
+ - fuel_test.cobbler.test_single
+ - fuel_test.cobbler.test_full:FullTestCase.test_full
+ - fuel_test.cobbler.test_compact:CompactTestCase.test_deploy_compact_quantum
+ - fuel_test.cobbler.test_compact:CompactTestCase.test_deploy_compact_wo_quantum
+ - fuel_test.cobbler.test_quantum_standalone_no_swift:QstTestCase.test_quantum_standalone_no_swift
+ - fuel_test.cobbler.test_minimal:MinimalTestCase.test_minimal
+ - fuel_test.cobbler.test_orchestration:CobblerCase.test_orchestrating_minimal
+ - TEMPEST
+ - tempest/tempest/tests/network/test_network_basic_ops.py
+ - tempest/tempest/tests/compute/servers/test_create_server.py:ServersTestJSON.test_can_log_into_created_server
+ - tempest/tempest/tests/compute/floating_ips
+
+ and ``erase`` with values
+
+ - false
+ - true
+
+- Configure shell command to execute
+- Run the job
+
+Shell environment variables used for the job
+---------------------------------------------
+
+Accepted values for ``test_name`` parameter are
+
+- TEMPEST = full Tempest run against a previously deployed lab
+- tempest/tempest/tests/.../ModuleName.py:ClassName.MethodName = run only the specified single Tempest test, e.g. tempest/tempest/tests/compute/servers/test_create_server.py:ServersTestJSON.test_can_log_into_created_server
+- Any other value = redeploy the lab from the 'nodes-deployed' snapshots taken after bare-metal provisioning by Cobbler has finished (an uncommented dos.py call causes a full erase and redeploy, including bare-metal provisioning and recreation of the VM networks)
+
+Accepted values for ``erase`` parameter are
+
+- false = do not erase existing virtual nodes and networks before nosetests execution
+- true = run the dos.py script to erase and recreate the lab's virtual networks and nodes, then run nosetests (note: always use ``true`` if ``public_pool`` has changed)
+
+Other shell script keys
+
+- DOMAIN_NAME = domain name to use for nodes (default ``.your-domain-name.com``), note: this option is broken
+- OS_FAMILY = OS type for nodes, ``centos`` or ``ubuntu`` (default ``centos``)
+- CURRENT_PROFILE = ``centos64_x86_64`` or ``ubuntu_1204_x86_64`` - cobbler ks profile to use (default depends on OS_FAMILY)
+- CONTROLLERS,COMPUTES,STORAGES,PROXIES = number of nodes of corresponding role type to deploy (defaults ``3,3,3,2``)
+- PARENT_PROXY = parent-proxy server for squid at master node (``172.18.67.168`` Saratov, ``172.18.3.14`` Moscow) (default none)
+- CIRROS_IMAGE = cirros url (default ``http://srv08-srt.srt.mirantis.net/cirros-0.3.0-x86_64-disk.img``)
+- ISO_IMAGE = Fuel iso image to use for master node (default ``~/fuel-centos-6.4-x86_64.iso``)
+- USE_ISO = use ISO for deployment (default ``True``), note: this option is broken
+- ASTUTE_USE = use astute addon for mcollective to deploy nodes (default ``True``)
+- PUPPET_GEN = puppet generation ``(2,3)`` to use, i.e. ``2 => v2.x.x``, ``3 => v3.x.x`` (default ``2``)
+- PUPPET_AGENT_COMMAND = command to run puppet agents (default ``puppet agent -tvd --evaltrace 2>&1``)
+- CLEAN = clean existing dirty state; will revert nodes to the ``nodes-deployed`` snapshot, if any (default ``True``)
+- CREATE_SNAPSHOTS = make ``openstack`` snapshots after the lab has deployed, or ``openstack-upgraded`` in case of an upgrade (default ``False``)
+- UPGRADE = tell Jenkins to revert nodes to ``openstack`` snapshots while cleaning (default ``False``)
+- PUBLIC_POOL = use a custom IP allocation pool for public & ext networking (use with dos.py only); see also fuel_test/settings.py; note: this option is broken
+- PUBLIC_FORWARD = ``nat`` or ``route`` forwarding mode for public pool, use ``route`` for custom forwarded pools (default ``nat``)
+
+Shell script example
+--------------------
+
+.. code:: bash
+
+ . ~/work/venv/bin/activate
+ export ENV_NAME=$JOB_NAME
+ export PUPPET_AGENT_COMMAND='puppet agent -t 2>&1'
+ export CREATE_SNAPSHOTS=true
+ export UPGRADE=false
+ export CLEAN=true
+ export PUPPET_GEN=2
+ export ASTUTE_USE=false
+
+ export PARENT_PROXY=172.18.67.168
+ export CIRROS_IMAGE=http://download.cirros-cloud.net/0.3.1/cirros-0.3.1-x86_64-disk.img
    export ISO_IMAGE=/var/lib/libvirt/images/fuel-centos-6.3-x86_64.iso
+
+ export CONTROLLERS=1
+ export COMPUTES=3
+ export STORAGES=0
+ export PROXIES=0
+
+ export OS_FAMILY=centos
+ export CURRENT_PROFILE=centos64_x86_64
+
+ if [ "$test_name" == "TEMPEST" ] || [ "$(echo $test_name | cut -d"/" -f1)" == "tempest" ]; then
+ export run_tests=tempest/tempest/tests
+ [ "$test_name" != "TEMPEST" ] && export run_tests="-v $test_name"
+ pushd fuel
+ pip install -r fuel_test/pip-requires
+ PYTHONPATH=. python fuel_test/prepare.py || true
+ popd
+ deactivate
+ cp tempest.conf $WORKSPACE/tempest/etc/
+ virtualenv venv --no-site-packages
+ . venv/bin/activate
+ pip install -r tempest/tools/pip-requires
+ nosetests $run_tests --with-xunit -d -l DEBUG || echo ignore error code
+ deactivate
+ else
+ [ "$erase" == "true" ] && dos.py erase $ENV_NAME
+ nosetests -w $fuel_release $test_name --with-xunit -s -d -l DEBUG || echo ignore exit code
+ fi
+
diff --git a/fuel_test/ci/ci_base.py b/fuel_test/ci/ci_base.py
index e8f03382c1..884ab21135 100644
--- a/fuel_test/ci/ci_base.py
+++ b/fuel_test/ci/ci_base.py
@@ -141,12 +141,14 @@ class CiBase(object):
self.environment().network_by_name('internal').ip_network)[-2])
def floating_network(self):
+ prefix = IPNetwork(self.environment().network_by_name('public').ip_network).prefixlen
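+        # new_prefix = prefix + 2 splits the public network into four equal subnets;
+        # the last quarter becomes the floating range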
return str(
- IPNetwork(self.environment().network_by_name('public').ip_network).subnet(new_prefix=29)[-1])
+ IPNetwork(self.environment().network_by_name('public').ip_network).subnet(new_prefix=prefix + 2)[-1])
def public_virtual_ip(self):
- return str(
- IPNetwork(self.environment().network_by_name('public').ip_network).subnet(new_prefix=29)[-2][
+ prefix = IPNetwork(self.environment().network_by_name('public').ip_network).prefixlen
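+        # the public VIP is the last address of the next-to-last quarter subnet,
+        # just below the floating range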
+ return str(
+ IPNetwork(self.environment().network_by_name('public').ip_network).subnet(new_prefix=prefix + 2)[-2][
-1])
def public_router(self):
diff --git a/fuel_test/ci/ci_vm.py b/fuel_test/ci/ci_vm.py
index d00da0ce0a..0e56d4e6f4 100644
--- a/fuel_test/ci/ci_vm.py
+++ b/fuel_test/ci/ci_vm.py
@@ -7,7 +7,8 @@ from fuel_test.helpers import add_nmap
from fuel_test.node_roles import NodeRoles
from fuel_test.settings import CONTROLLERS, COMPUTES, \
STORAGES, PROXIES, \
- EMPTY_SNAPSHOT, POOLS, INTERFACE_ORDER, FORWARDING, DHCP, ISO_IMAGE
+ EMPTY_SNAPSHOT, POOLS, INTERFACE_ORDER, FORWARDING, DHCP, ISO_IMAGE, \
+ SETUP_TIMEOUT
class CiVM(CiBase):
@@ -66,7 +67,7 @@ class CiVM(CiBase):
start_nodes = self.get_startup_nodes()
self.environment().start(start_nodes)
for node in start_nodes:
- node.await('public', timeout=600)
+ node.await('public', timeout=SETUP_TIMEOUT)
master_remote = master_node.remote('public', login='root', password='r00tme')
add_nmap(master_remote)
self.environment().snapshot(EMPTY_SNAPSHOT)
diff --git a/fuel_test/cobbler/test_minimal.py b/fuel_test/cobbler/test_minimal.py
index c029034b0f..74e827734b 100644
--- a/fuel_test/cobbler/test_minimal.py
+++ b/fuel_test/cobbler/test_minimal.py
@@ -3,7 +3,7 @@ from fuel_test.cobbler.vm_test_case import CobblerTestCase
from fuel_test.config import Config
from fuel_test.helpers import write_config
from fuel_test.manifest import Template, Manifest
-from fuel_test.settings import CREATE_SNAPSHOTS, ASTUTE_USE
+from fuel_test.settings import CREATE_SNAPSHOTS, ASTUTE_USE, PUPPET_AGENT_COMMAND
class MinimalTestCase(CobblerTestCase):
@@ -20,14 +20,14 @@ class MinimalTestCase(CobblerTestCase):
ci=self.ci(),
controllers=self.nodes().controllers,
quantums=self.nodes().quantums,
- quantum=True
- )
+ quantum=True)
+
Manifest().write_manifest(remote=self.remote(), manifest=manifest)
-
- self.validate(self.nodes().controllers[:1], 'puppet agent --test')
- self.validate(self.nodes().controllers[1:], 'puppet agent --test')
- self.validate(self.nodes().controllers[:1], 'puppet agent --test')
- self.validate(self.nodes().computes, 'puppet agent --test')
+
+ self.validate(self.nodes().controllers[:1], PUPPET_AGENT_COMMAND)
+ self.validate(self.nodes().controllers[1:], PUPPET_AGENT_COMMAND)
+ self.validate(self.nodes().controllers[:1], PUPPET_AGENT_COMMAND)
+ self.validate(self.nodes().computes, PUPPET_AGENT_COMMAND)
def deploy_by_astute(self):
self.remote().check_stderr("astute -f /root/astute.yaml -v")
@@ -54,4 +54,4 @@ class MinimalTestCase(CobblerTestCase):
self.environment().snapshot('minimal', force=True)
if __name__ == '__main__':
- unittest.main()
\ No newline at end of file
+ unittest.main()
diff --git a/fuel_test/cobbler/test_simple.py b/fuel_test/cobbler/test_simple.py
index 44f7a33b2d..78784b221c 100644
--- a/fuel_test/cobbler/test_simple.py
+++ b/fuel_test/cobbler/test_simple.py
@@ -26,10 +26,10 @@ class SimpleTestCase(CobblerTestCase):
template=Template.simple(),
ci=self.ci(),
controllers=self.nodes().controllers,
- use_syslog=False,
+ use_syslog=True,
quantum=True, quantums=self.nodes().controllers,
ha=False, ha_provider='generic',
- cinder=False, swift=False
+ cinder=True, cinder_nodes=['all'], swift=False
)
Manifest().write_manifest(remote=self.remote(), manifest=manifest)
diff --git a/fuel_test/cobbler/test_single.py b/fuel_test/cobbler/test_single.py
index b26b3b4056..ca33f4b875 100644
--- a/fuel_test/cobbler/test_single.py
+++ b/fuel_test/cobbler/test_single.py
@@ -1,23 +1,69 @@
import unittest
from fuel_test.cobbler.vm_test_case import CobblerTestCase
-from fuel_test.manifest import Manifest
-from fuel_test.settings import OPENSTACK_SNAPSHOT, CREATE_SNAPSHOTS, PUPPET_AGENT_COMMAND
+from fuel_test.config import Config
+from fuel_test.helpers import write_config
+from fuel_test.manifest import Manifest, Template
+from fuel_test.settings import CREATE_SNAPSHOTS, PUPPET_AGENT_COMMAND, ASTUTE_USE
class SingleTestCase(CobblerTestCase):
- def test_single(self):
- Manifest.write_manifest(
- self.remote(),
- Manifest().generate_openstack_single_manifest(
+ def deploy(self):
+ if ASTUTE_USE:
+ self.prepare_astute()
+ self.deploy_by_astute()
+ else:
+ self.prepare_only_site_pp()
+ self.deploy_one_by_one()
+
+ def deploy_one_by_one(self):
+ self.validate(self.nodes().controllers[:1], PUPPET_AGENT_COMMAND)
+
+ def deploy_by_astute(self):
+ self.remote().check_stderr("astute -f /root/astute.yaml -v", True)
+
+ def prepare_only_site_pp(self):
+
+# Which one is right?
+
+# manifest = Manifest().generate_openstack_manifest(
+# template=Template.single(),
+# ci=self.ci(),
+# controllers=self.nodes().controllers,
+# quantum=False,
+# quantums=[]
+# )
+
+ manifest = Manifest().generate_openstack_manifest(
+ template=Template.single(),
ci=self.ci(),
- quantum=False,
+ controllers=self.nodes().controllers,
+ use_syslog=True,
+ quantum=False, quantums=[],
+ ha=False, ha_provider='generic',
+ cinder=True, cinder_nodes=['all'], swift=False,
)
+
+ Manifest().write_manifest(remote=self.remote(), manifest=manifest)
+
+# TODO fix astute deploy
+ def prepare_astute(self):
+ config = Config().generate(
+ template=Template.single(),
+ ci=self.ci(),
+ nodes=[self.ci().nodes().controllers[:1]],
+ quantum=False
)
- self.validate(
- self.nodes().controllers[:1],
- PUPPET_AGENT_COMMAND)
+ print "Generated config.yaml:", config
+ config_path = "/root/config.yaml"
+ write_config(self.remote(), config_path, str(config))
+ self.remote().check_call("cobbler_system -f %s" % config_path)
+ self.remote().check_stderr("openstack_system -c %s -o /etc/puppet/manifests/site.pp -a /root/astute.yaml" % config_path, True)
+
+ def test_single(self):
+ self.deploy()
+
if CREATE_SNAPSHOTS:
- self.environment().snapshot(OPENSTACK_SNAPSHOT, force=True)
+ self.environment().snapshot("single", force=True)
if __name__ == '__main__':
unittest.main()
diff --git a/fuel_test/config/tempest.conf.grizzly.sample b/fuel_test/config/tempest.conf.grizzly.sample
new file mode 100644
index 0000000000..d2fccebb9c
--- /dev/null
+++ b/fuel_test/config/tempest.conf.grizzly.sample
@@ -0,0 +1,261 @@
+[identity]
+# This section contains configuration options that a variety of Tempest
+# test clients use when authenticating with different user/tenant
+# combinations
+
+# The type of endpoint for a Identity service. Unless you have a
+# custom Keystone service catalog implementation, you probably want to leave
+# this value as "identity"
+catalog_type = %(IDENTITY_CATALOG_TYPE)s
+# Ignore SSL certificate validation failures? Use when in testing
+# environments that have self-signed SSL certs.
+disable_ssl_certificate_validation = %(IDENTITY_DISABLE_SSL_CHECK)s
+# URL for where to find the OpenStack Identity API endpoint (Keystone)
+uri = %(IDENTITY_URI)s
+# Should typically be left as keystone unless you have a non-Keystone
+# authentication API service
+
+# Set to True if your test environment's Keystone authentication service should
+# be accessed over HTTPS
+use_ssl = %(IDENTITY_USE_SSL)s
+# This is the main host address of the authentication service API
+host = %(IDENTITY_HOST)s
+# Port that the authentication service API is running on
+port = %(IDENTITY_PORT)s
+# Version of the authentication service API (a string)
+api_version = %(IDENTITY_API_VERSION)s
+# Path to the authentication service tokens resource (do not modify unless you
+# have a custom authentication API and are not using Keystone)
+path = %(IDENTITY_PATH)s
+# Should typically be left as keystone unless you have a non-Keystone
+# authentication API service
+strategy = %(IDENTITY_STRATEGY)s
+# The identity region
+region = %(IDENTITY_REGION)s
+# This should be the username of a user WITHOUT administrative privileges
+username = %(USERNAME)s
+# The above non-administrative user's password
+password = %(PASSWORD)s
+# The above non-administrative user's tenant name
+tenant_name = %(TENANT_NAME)s
+# This should be the username of an alternate user WITHOUT
+# administrative privileges
+alt_username = %(ALT_USERNAME)s
+# The above non-administrative user's password
+alt_password = %(ALT_PASSWORD)s
+# The above non-administrative user's tenant name
+alt_tenant_name = %(ALT_TENANT_NAME)s
+
+# This should be the username of a user WITH administrative privileges
+admin_username = %(ADMIN_USER_NAME)s
+# The above non-administrative user's password
+admin_password = %(ADMIN_PASSWORD)s
+# The above non-administrative user's tenant name
+admin_tenant_name = %(ADMIN_TENANT_NAME)s
+
+[compute]
+# This section contains configuration options used when executing tests
+# against the OpenStack Compute API.
+
+# Allows test cases to create/destroy tenants and users. This option
+# enables isolated test cases and better parallel execution,
+# but also requires that OpenStack Identity API admin credentials
+# are known.
+allow_tenant_isolation = %(COMPUTE_ALLOW_TENANT_ISOLATION)s
+
+# Allows test cases to create/destroy tenants and users. This option
+# enables isolated test cases and better parallel execution,
+# but also requires that OpenStack Identity API admin credentials
+# are known.
+allow_tenant_reuse = %(COMPUTE_ALLOW_TENANT_REUSE)s
+
+# This should be the username of a user WITHOUT administrative privileges
+username = %(USERNAME)s
+# The above non-administrative user's password
+password = %(PASSWORD)s
+# The above non-administrative user's tenant name
+tenant_name = %(TENANT_NAME)s
+
+# This should be the username of an alternate user WITHOUT
+# administrative privileges
+alt_username = %(ALT_USERNAME)s
+# The above non-administrative user's password
+alt_password = %(ALT_PASSWORD)s
+# The above non-administrative user's tenant name
+alt_tenant_name = %(ALT_TENANT_NAME)s
+
+# Reference data for tests. The ref and ref_alt should be
+# distinct images/flavors.
+image_ref = %(IMAGE_ID)s
+image_ref_alt = %(IMAGE_ID_ALT)s
+flavor_ref = %(FLAVOR_REF)s
+flavor_ref_alt = %(FLAVOR_REF_ALT)s
+
+# Number of seconds to wait while looping to check the status of an
+# instance that is building.
+build_interval = %(COMPUTE_BUILD_INTERVAL)s
+
+# Number of seconds to time out on waiting for an instance
+# to build or reach an expected status
+build_timeout = %(COMPUTE_BUILD_TIMEOUT)s
+
+# Run additional tests that use SSH for instance validation?
+# This requires the instances be routable from the host
+# executing the tests
+run_ssh = %(RUN_SSH)s
+network_for_ssh = %(NETWORK_FOR_SSH)s
+ssh_user = %(SSH_USER)s
+
+# IP version of the address used for SSH
+ip_version_for_ssh = 4
+
+# Number of seconds to wait to authenticate to an instance
+ssh_timeout = 300
+
+# Number of seconds to wait for output from ssh channel
+ssh_channel_timeout = 60
+
+# The type of endpoint for a Compute API service. Unless you have a
+# custom Keystone service catalog implementation, you probably want to leave
+# this value as "compute"
+catalog_type = %(COMPUTE_CATALOG_TYPE)s
+
+# Does the Compute API support creation of images?
+create_image_enabled = %(COMPUTE_CREATE_IMAGE_ENABLED)s
+
+# For resize to work with libvirt/kvm, one of the following must be true:
+# Single node: allow_resize_to_same_host=True must be set in nova.conf
+# Cluster: the 'nova' user must have scp access between cluster nodes
+resize_available = %(COMPUTE_RESIZE_AVAILABLE)s
+
+# Does the compute API support changing the admin password?
+change_password_available = %(COMPUTE_CHANGE_PASSWORD_AVAILABLE)s
+
+# Level to log Compute API request/response details.
+log_level = %(COMPUTE_LOG_LEVEL)s
+
+# Run live migration tests (requires 2 hosts)
+live_migration_available = %(LIVE_MIGRATION)s
+
+# Use block live migration (Otherwise, non-block migration will be
+# performed, which requires XenServer pools in case of using XS)
+use_block_migration_for_live_migration = %(USE_BLOCKMIG_FOR_LIVEMIG)s
+
+# By default, rely on the status of the diskConfig extension to
+# decide if to execute disk config tests. When set to false, tests
+# are forced to skip, regardless of the extension status
+disk_config_enabled_override = true
+
+
+[whitebox]
+# Whitebox options for compute. Whitebox options enable the
+# whitebox test cases, which look at internal Nova database state,
+# SSH into VMs to check instance state, etc.
+
+# Should we run whitebox tests for Compute?
+whitebox_enabled = %(COMPUTE_WHITEBOX_ENABLED)s
+
+# Path of nova source directory
+source_dir = %(COMPUTE_SOURCE_DIR)s
+
+# Path of nova configuration file
+config_path = %(COMPUTE_CONFIG_PATH)s
+
+# Directory containing nova binaries such as nova-manage
+bin_dir = %(COMPUTE_BIN_DIR)s
+
+# Path to a private key file for SSH access to remote hosts
+path_to_private_key = %(COMPUTE_PATH_TO_PRIVATE_KEY)s
+
+# Connection string to the database of Compute service
+db_uri = %(COMPUTE_DB_URI)s
+
+[image]
+# This section contains configuration options used when executing tests
+# against the OpenStack Images API
+
+# The type of endpoint for an Image API service. Unless you have a
+# custom Keystone service catalog implementation, you probably want to leave
+# this value as "image"
+catalog_type = %(IMAGE_CATALOG_TYPE)s
+
+# The version of the OpenStack Images API to use
+api_version = %(IMAGE_API_VERSION)s
+
+# This is the main host address of the Image API
+host = %(IMAGE_HOST)s
+
+# Port that the Image API is running on
+port = %(IMAGE_PORT)s
+
+# This should be the username of a user WITHOUT administrative privileges
+username = %(IMAGE_USERNAME)s
+# The above non-administrative user's password
+password = %(IMAGE_PASSWORD)s
+# The above non-administrative user's tenant name
+tenant_name = %(IMAGE_TENANT_NAME)s
+
+[compute-admin]
+# This section contains configuration options for an administrative
+# user of the Compute API. These options are used in tests that stress
+# the admin-only parts of the Compute API
+
+# This should be the username of a user WITH administrative privileges
+username = %(COMPUTE_ADMIN_USERNAME)s
+# The above administrative user's password
+password = %(COMPUTE_ADMIN_PASSWORD)s
+# The above administrative user's tenant name
+tenant_name = %(COMPUTE_ADMIN_TENANT_NAME)s
+
+[identity-admin]
+# This section contains configuration options for an administrative
+# user of the Compute API. These options are used in tests that stress
+# the admin-only parts of the Compute API
+
+# This should be the username of a user WITH administrative privileges
+username = %(IDENTITY_ADMIN_USERNAME)s
+# The above administrative user's password
+password = %(IDENTITY_ADMIN_PASSWORD)s
+# The above administrative user's tenant name
+tenant_name = %(IDENTITY_ADMIN_TENANT_NAME)s
+
+[volume]
+# This section contains the configuration options used when executing tests
+# against the OpenStack Block Storage API service
+
+# The type of endpoint for a Cinder or Block Storage API service.
+# Unless you have a custom Keystone service catalog implementation, you
+# probably want to leave this value as "volume"
+catalog_type = %(VOLUME_CATALOG_TYPE)s
+# Number of seconds to wait while looping to check the status of a
+# volume that is being made available
+build_interval = %(VOLUME_BUILD_INTERVAL)s
+# Number of seconds to time out on waiting for a volume
+# to be available or reach an expected status
+build_timeout = %(VOLUME_BUILD_TIMEOUT)s
+
+[network]
+catalog_type = %(NETWORK_CATALOG_TYPE)s
+api_version = %(NETWORK_API_VERSION)s
+# A large private cidr block from which to allocate smaller blocks for
+# tenant networks.
+tenant_network_cidr = %(TENANT_NETWORK_CIDR)s
+
+# The mask bits used to partition the tenant block.
+tenant_network_mask_bits = %(TENANT_NETWORK_MASK_BITS)s
+
+# If tenant networks are reachable, connectivity checks will be
+# performed directly against addresses on those networks.
+tenant_networks_reachable = %(TENANT_NETS_REACHABLE)s
+
+# Id of the public network that provides external connectivity.
+public_network_id = %(PUBLIC_NETWORK_ID)s
+
+# Id of a shared public router that provides external connectivity.
+# A shared public router would commonly be used where IP namespaces
+# were disabled. If namespaces are enabled, it would be preferable
+# for each tenant to have their own router.
+public_router_id = %(PUBLIC_ROUTER_ID)s
+
+# Whether or not quantum is expected to be available
+quantum_available = %(QUANTUM)s
diff --git a/fuel_test/manifest.py b/fuel_test/manifest.py
index df16c2682e..8682ef471e 100644
--- a/fuel_test/manifest.py
+++ b/fuel_test/manifest.py
@@ -123,11 +123,11 @@ class Manifest(object):
return dict(map(
lambda x:
(str(x.name),
- {
- 'internal_address': x.get_ip_address_by_network_name('internal'),
- 'public_address': x.get_ip_address_by_network_name('public'),
- },
- ),
+ {
+ 'internal_address': x.get_ip_address_by_network_name('internal'),
+ 'public_address': x.get_ip_address_by_network_name('public'),
+ },
+ ),
nodes)
)
@@ -211,23 +211,6 @@ class Manifest(object):
else:
return ci.fixed_network()
- def generate_openstack_single_manifest(self, ci,
- use_syslog=True,
- quantum=True,
- cinder=True):
- return Template.single().replace(
- floating_range=self.floating_network(ci, quantum),
- fixed_range=self.fixed_network(ci, quantum),
- public_interface=self.public_interface(),
- private_interface=self.private_interface(),
- mirror_type=self.mirror_type(),
- use_syslog=use_syslog,
- cinder=cinder,
- ntp_servers=['pool.ntp.org', ci.internal_router()],
- quantum=quantum,
- enable_test_repo=TEST_REPO,
- )
-
def generate_openstack_manifest(self, template,
ci,
controllers,
@@ -237,10 +220,12 @@ class Manifest(object):
quantum=True,
loopback=True,
cinder=True,
- cinder_nodes=None,
- quantum_netnode_on_cnt=True,
+ cinder_nodes=None,
+ quantum_netnode_on_cnt=True,
swift=True,
ha_provider='pacemaker', ha=True):
+ if not cinder_nodes:
+ cinder_nodes = []
if ha:
template.replace(
internal_virtual_ip=ci.internal_virtual_ip(),
@@ -267,24 +252,24 @@ class Manifest(object):
deployment_id=self.deployment_id(ci),
public_netmask=ci.public_net_mask(),
internal_netmask=ci.internal_net_mask(),
- quantum=quantum,
- quantum_netnode_on_cnt=quantum_netnode_on_cnt,
+ quantum=quantum,
+ quantum_netnode_on_cnt=quantum_netnode_on_cnt,
ha_provider=ha_provider
- )
+ )
if swift:
template.replace(swift_loopback=self.loopback(loopback))
return template
def generate_swift_manifest(self, controllers,
- proxies=None):
+ proxies=None):
template = Template(
root('deployment', 'puppet', 'swift', 'examples',
- 'site.pp'))
+ 'site.pp'))
template.replace(
swift_proxy_address=proxies[0].get_ip_address_by_network_name(
'internal'),
controller_node_public=controllers[
- 0].get_ip_address_by_network_name(
+ 0].get_ip_address_by_network_name(
'public'),
)
return template
@@ -295,14 +280,14 @@ class Manifest(object):
cobbler_address = cobbler.get_ip_address_by_network_name('internal')
network = IPNetwork(ci.environment().network_by_name('internal').ip_network)
self.replace = site_pp.replace(server=cobbler_address,
- name_server=cobbler_address,
- next_server=cobbler_address,
- dhcp_start_address=network[5],
- dhcp_end_address=network[-1],
- dhcp_netmask=network.netmask,
- dhcp_gateway=network[1],
- pxetimeout='3000',
- mirror_type=self.mirror_type(),
+ name_server=cobbler_address,
+ next_server=cobbler_address,
+ dhcp_start_address=network[5],
+ dhcp_end_address=network[-1],
+ dhcp_netmask=network.netmask,
+ dhcp_gateway=network[1],
+ pxetimeout='3000',
+ mirror_type=self.mirror_type(),
)
def generate_stomp_manifest(self):
diff --git a/fuel_test/pip-requires b/fuel_test/pip-requires
index 2902e2bdbf..7d14a15ace 100644
--- a/fuel_test/pip-requires
+++ b/fuel_test/pip-requires
@@ -1,4 +1,5 @@
nose
python-glanceclient
python-keystoneclient
+python-quantumclient
git+ssh://git@github.com/Mirantis/devops.git@stable
diff --git a/fuel_test/prepare.py b/fuel_test/prepare.py
index 9408923923..3bf003f47e 100644
--- a/fuel_test/prepare.py
+++ b/fuel_test/prepare.py
@@ -3,6 +3,8 @@ from time import sleep
from devops.helpers.helpers import ssh
import glanceclient
import keystoneclient.v2_0
+#from quantumclient.quantum import client as q_client
+from quantumclient.v2_0 import client as q_client
import os
from fuel_test.ci.ci_vm import CiVM
from fuel_test.helpers import load, retry, install_packages, switch_off_ip_tables
@@ -12,10 +14,14 @@ from fuel_test.settings import ADMIN_USERNAME, ADMIN_PASSWORD, ADMIN_TENANT_ESSE
class Prepare(object):
def __init__(self):
- self.public_ip = self.ci().public_virtual_ip()
- print "public", self.public_ip
- self.internal_ip = self.ci().public_virtual_ip()
self.controllers = self.ci().nodes().controllers
+ if len(self.controllers) == 1:
+ self.public_ip = self.controllers[0].get_ip_address_by_network_name('public')
+ self.internal_ip = self.controllers[0].get_ip_address_by_network_name('internal')
+ else:
+ self.public_ip = self.ci().public_virtual_ip()
+ print "public", self.public_ip
+ self.internal_ip = self.ci().public_virtual_ip()
def remote(self):
return ssh(self.public_ip,
@@ -34,7 +40,7 @@ class Prepare(object):
return ADMIN_PASSWORD
def tenant(self):
- return ADMIN_TENANT_FOLSOM
+ return ADMIN_TENANT_FOLSOM
def get_auth_url(self):
return 'http://%s:5000/v2.0/' % self.public_ip
@@ -70,6 +76,98 @@ class Prepare(object):
compute_db_uri='mysql://nova:nova@%s/nova' % self.ci().internal_virtual_ip()
))
+ def prepare_tempest_grizzly_simple(self):
+ image_ref, image_ref_alt, net_id, router_id = self.make_tempest_objects()
+ self.tempest_write_config(
+ self.tempest_config_grizzly(
+ image_ref=image_ref,
+ image_ref_alt=image_ref_alt,
+ public_network_id=net_id,
+ public_router_id=router_id,
+ path_to_private_key=root('fuel_test', 'config', 'ssh_keys',
+ 'openstack'),
+ compute_db_uri='mysql://nova:nova@%s/nova' % self.internal_ip
+ ))
+
+ def tempest_config_grizzly(self, image_ref, image_ref_alt, public_network_id, public_router_id,
+ path_to_private_key,
+ compute_db_uri='mysql://user:pass@localhost/nova'):
+ sample = load(
+ root('fuel_test', 'config', 'tempest.conf.grizzly.sample'))
+ config = sample % {
+ 'IDENTITY_CATALOG_TYPE': 'identity',
+ 'IDENTITY_DISABLE_SSL_CHECK': 'true',
+ 'IDENTITY_USE_SSL': 'false',
+ 'IDENTITY_URI': 'http://%s:5000/v2.0/' % self.public_ip,
+ 'IDENTITY_REGION': 'RegionOne',
+ 'IDENTITY_HOST': self.public_ip,
+ 'IDENTITY_PORT': '5000',
+ 'IDENTITY_API_VERSION': 'v2.0',
+ 'IDENTITY_PATH': 'tokens',
+ 'IDENTITY_STRATEGY': 'keystone',
+ 'COMPUTE_ALLOW_TENANT_ISOLATION': 'true',
+ 'COMPUTE_ALLOW_TENANT_REUSE': 'true',
+ 'USERNAME': 'tempest1',
+ 'PASSWORD': 'secret',
+ 'TENANT_NAME': 'tenant1',
+ 'ALT_USERNAME': 'tempest2',
+ 'ALT_PASSWORD': 'secret',
+ 'ALT_TENANT_NAME': 'tenant2',
+ 'IMAGE_ID': image_ref,
+ 'IMAGE_ID_ALT': image_ref_alt,
+ 'FLAVOR_REF': '1',
+ 'FLAVOR_REF_ALT': '1', # skip flavor '2' which provides 20Gb ephemerals and lots of RAM...
+ 'COMPUTE_BUILD_INTERVAL': '10',
+ 'COMPUTE_BUILD_TIMEOUT': '600',
+ 'RUN_SSH': 'true',
+ 'NETWORK_FOR_SSH': 'net04', # todo use private instead of floating?
+ 'SSH_USER': 'cirros',
+ 'LIVE_MIGRATION': 'true',
+ 'USE_BLOCKMIG_FOR_LIVEMIG' : 'true',
+ 'COMPUTE_CATALOG_TYPE': 'compute',
+ 'COMPUTE_CREATE_IMAGE_ENABLED': 'true',
+ 'COMPUTE_RESIZE_AVAILABLE': 'false', # not supported with QEMU...
+ 'COMPUTE_CHANGE_PASSWORD_AVAILABLE': 'false',
+ 'COMPUTE_LOG_LEVEL': 'ERROR',
+ 'COMPUTE_WHITEBOX_ENABLED': 'true',
+ 'COMPUTE_SOURCE_DIR': '/opt/stack/nova',
+ 'COMPUTE_CONFIG_PATH': '/etc/nova/nova.conf',
+ 'COMPUTE_BIN_DIR': '/usr/local/bin',
+ 'COMPUTE_PATH_TO_PRIVATE_KEY': path_to_private_key,
+ 'COMPUTE_DB_URI': compute_db_uri,
+ 'IMAGE_CATALOG_TYPE': 'image',
+ 'IMAGE_API_VERSION': '1',
+ 'IMAGE_HOST': self.public_ip,
+ 'IMAGE_PORT': '9292',
+ 'IMAGE_USERNAME': 'tempest1',
+ 'IMAGE_PASSWORD': 'secret',
+ 'IMAGE_TENANT_NAME': 'tenant1',
+ 'ADMIN_USER_NAME': ADMIN_USERNAME,
+ 'ADMIN_PASSWORD': ADMIN_PASSWORD,
+ 'ADMIN_TENANT_NAME': ADMIN_TENANT_FOLSOM,
+ 'IDENTITY_ADMIN_USERNAME': ADMIN_USERNAME,
+ 'IDENTITY_ADMIN_PASSWORD': ADMIN_PASSWORD,
+ 'IDENTITY_ADMIN_TENANT_NAME': ADMIN_TENANT_FOLSOM,
+ 'COMPUTE_ADMIN_USERNAME': ADMIN_USERNAME,
+ 'COMPUTE_ADMIN_PASSWORD': ADMIN_PASSWORD,
+ 'COMPUTE_ADMIN_TENANT_NAME': ADMIN_TENANT_FOLSOM,
+ 'VOLUME_CATALOG_TYPE': 'volume',
+ 'VOLUME_BUILD_INTERVAL': '15',
+ 'VOLUME_BUILD_TIMEOUT': '400',
+ 'NETWORK_CATALOG_TYPE': 'network',
+ 'NETWORK_API_VERSION': 'v2.0',
+ 'QUANTUM': 'true',
+ 'TENANT_NETS_REACHABLE': 'false',
+            'TENANT_NETWORK_CIDR': '192.168.112.0/24',  # chosen so it does not overlap with 'net04'
+            'TENANT_NETWORK_MASK_BITS': '28',  # /29 is too small to test quantum quotas (at least 50 IPs needed)
+ 'PUBLIC_NETWORK_ID': public_network_id,
+ 'PUBLIC_ROUTER_ID': public_router_id,
+ }
+ return config
+
def tempest_config_folsom(self, image_ref, image_ref_alt,
path_to_private_key,
compute_db_uri='mysql://user:pass@localhost/nova'):
@@ -153,14 +251,41 @@ class Prepare(object):
with open(root('..', 'tempest.conf'), 'w') as f:
f.write(config)
+ def _get_images(self, glance, name):
+ """ Retrieve all images with a certain name """
+ images = [x for x in glance.images.list() if x.name == name]
+ return images
+
+ def _get_tenants(self, keystone, name1, name2):
+ """ Retrieve all tenants with a certain names """
+ tenants = [x for x in keystone.tenants.list() if x.name == name1 or x.name == name2]
+ return tenants
+
+ def _get_users(self, keystone, name1, name2):
+ """ Retrieve all users with a certain names """
+ users = [x for x in keystone.users.list() if x.name == name1 or x.name == name2]
+ return users
+
def make_tempest_objects(self, ):
keystone = self._get_identity_client()
- tenant1 = retry(10, keystone.tenants.create, tenant_name='tenant1')
- tenant2 = retry(10, keystone.tenants.create, tenant_name='tenant2')
- retry(10, keystone.users.create, name='tempest1', password='secret', email='tempest1@example.com', tenant_id=tenant1.id)
- retry(10, keystone.users.create, name='tempest2', password='secret', email='tempest2@example.com', tenant_id=tenant2.id)
+ tenants = self._get_tenants(keystone, 'tenant1', 'tenant2')
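+        # reuse tenants left over from a previous run, if any, so repeated CI passes stay idempotent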
+ if len(tenants) > 1:
+            tenant1 = tenants[0]
+            tenant2 = tenants[1]
+ else:
+ tenant1 = retry(10, keystone.tenants.create, tenant_name='tenant1')
+ tenant2 = retry(10, keystone.tenants.create, tenant_name='tenant2')
+
+ users = self._get_users(keystone, 'tempest1', 'tempest2')
+ if len(users) == 0:
+ retry(10, keystone.users.create, name='tempest1', password='secret',
+ email='tempest1@example.com', tenant_id=tenant1.id)
+ retry(10, keystone.users.create, name='tempest2', password='secret',
+ email='tempest2@example.com', tenant_id=tenant2.id)
+
image_ref, image_ref_alt = self.tempest_add_images()
- return image_ref, image_ref_alt
+ net_id, router_id = self.tempest_get_netid_routerid()
+ return image_ref, image_ref_alt, net_id, router_id
def _get_identity_client(self):
keystone = retry(10, keystoneclient.v2_0.client.Client,
@@ -176,6 +301,13 @@ class Prepare(object):
return glanceclient.Client('1', endpoint=endpoint,
token=keystone.auth_token)
+ def _get_networking_client(self):
+ quantum = retry(10, q_client.Client,
+ username=self.username(), password=self.password(),
+ tenant_name=self.tenant(),
+ auth_url=self.get_auth_url())
+ return quantum
+
def upload(self, glance, name, path):
image = glance.images.create(
name=name,
@@ -189,11 +321,23 @@ class Prepare(object):
if not os.path.isfile('cirros-0.3.0-x86_64-disk.img'):
subprocess.check_call(['wget', CIRROS_IMAGE])
glance = self._get_image_client()
- return self.upload(glance, 'cirros_0.3.0',
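+ # Reuse cirros images uploaded by an earlier run; otherwise upload fresh copies.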
+ images = self._get_images(glance, 'cirros_0.3.0')
+ if len(images) > 1:
+ return images[0].id, images[1].id
+ else:
+ return self.upload(glance, 'cirros_0.3.0',
'cirros-0.3.0-x86_64-disk.img'), \
self.upload(glance, 'cirros_0.3.0',
'cirros-0.3.0-x86_64-disk.img')
+ def tempest_get_netid_routerid(self):
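+ """Return the ids of the external network and its router."""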
+ networking = self._get_networking_client()
+ params = {'router:external': True}
+ # Assumes exactly one external network and one external router exist
+ network = networking.list_networks(**params)['networks'][0]['id']
+ router = networking.list_routers()['routers'][0]['id']
+ return network, router
+
def tempest_share_glance_images(self, network):
if OS_FAMILY == "centos":
self.remote().check_stderr('chkconfig rpcbind on')
@@ -233,5 +377,4 @@ class Prepare(object):
if __name__ == '__main__':
- Prepare().prepare_tempest_folsom()
-
+ Prepare().prepare_tempest_grizzly_simple()
diff --git a/fuel_test/settings.py b/fuel_test/settings.py
index 594f2261ff..d482dbc520 100644
--- a/fuel_test/settings.py
+++ b/fuel_test/settings.py
@@ -1,7 +1,7 @@
import os
OS_FAMILY = os.environ.get('OS_FAMILY', "centos")
-PUPPET_GEN = os.environ.get('PUPPET_GEN', "3")
+PUPPET_GEN = os.environ.get('PUPPET_GEN', "2")
DEFAULT_IMAGES = {
'centos': '/var/lib/libvirt/images/centos63-cobbler-base.qcow2',
@@ -14,7 +14,7 @@ PUPPET_VERSIONS = {
'centos': {
"2": '2.7.19-1.el6',
"3": '3.0.1-1.el6',
- },
+ },
'ubuntu': {
"2": '2.7.19-1puppetlabs2',
"3": '3.0.1-1puppetlabs1'
@@ -28,7 +28,7 @@ PUPPET_CLIENT_PACKAGES = {
'centos': {
"2": 'puppet-2.7.19-1.el6',
"3": 'puppet-3.0.1-1.el6',
- },
+ },
'ubuntu': {
"2": 'puppet=2.7.19-1puppetlabs2 puppet-common=2.7.19-1puppetlabs2',
"3": 'puppet=3.0.1-1puppetlabs1 puppet-common=3.0.1-1puppetlabs1'
@@ -58,7 +58,7 @@ ADMIN_PASSWORD = 'nova'
ADMIN_TENANT_ESSEX = 'openstack'
ADMIN_TENANT_FOLSOM = 'admin'
-CIRROS_IMAGE = 'http://srv08-srt.srt.mirantis.net/cirros-0.3.0-x86_64-disk.img'
+CIRROS_IMAGE = os.environ.get('CIRROS_IMAGE', 'http://srv08-srt.srt.mirantis.net/cirros-0.3.0-x86_64-disk.img')
CONTROLLERS = int(os.environ.get('CONTROLLERS', 3))
COMPUTES = int(os.environ.get('COMPUTES', 3))
STORAGES = int(os.environ.get('STORAGES', 3))
@@ -92,12 +92,12 @@ DEFAULT_POOLS = {
'public': '10.108.0.0/16:24',
'private': '10.108.0.0/16:24',
'internal': '10.108.0.0/16:24',
- },
+ },
'ubuntu': {
'public': '10.107.0.0/16:24',
'private': '10.107.0.0/16:24',
'internal': '10.107.0.0/16:24',
- },
+ },
}
POOLS = {
@@ -115,7 +115,7 @@ CREATE_SNAPSHOTS = os.environ.get('CREATE_SNAPSHOTS', 'true') == 'true'
CLEAN = os.environ.get('CLEAN', 'true') == 'true'
ISO_IMAGE = os.environ.get('ISO_IMAGE', '~/fuel-centos-6.3-x86_64.iso')
USE_ISO = os.environ.get('USE_ISO', 'true') == 'true'
-PARENT_PROXY = os.environ.get('PARENT_PROXY', "172.18.3.14")
+PARENT_PROXY = os.environ.get('PARENT_PROXY', '')
PROFILES_COBBLER_COMMON = {
'centos': 'centos64_x86_64',
'ubuntu': 'ubuntu_1204_x86_64'
@@ -125,4 +125,6 @@ CURRENT_PROFILE = PROFILES_COBBLER_COMMON.get(OS_FAMILY)
ASTUTE_USE = os.environ.get('ASTUTE_USE', 'true') == 'true'
DOMAIN_NAME = os.environ.get('DOMAIN_NAME', '.localdomain')
-PUPPET_AGENT_COMMAND = 'puppet agent --test 2>&1'
+PUPPET_AGENT_COMMAND = 'puppet agent -tvd --evaltrace 2>&1'
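+# Timeout for node setup (presumably seconds); override via the SETUP_TIMEOUT env var.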
+SETUP_TIMEOUT = int(os.environ.get('SETUP_TIMEOUT', 600))
+
diff --git a/iso/Makefile b/iso/Makefile
index 2814fd14ca..89176b4611 100644
--- a/iso/Makefile
+++ b/iso/Makefile
@@ -1,5 +1,5 @@
COMMIT_SHA:=$(shell git rev-parse --verify HEAD)
-FUEL_VERSION:=2.1-folsom
+FUEL_VERSION:=3.0
CENTOS_MAJOR:=6
CENTOS_MINOR:=4
diff --git a/iso/bootstrap_admin_node.sh b/iso/bootstrap_admin_node.sh
index 9c663b7429..78a02ac991 100644
--- a/iso/bootstrap_admin_node.sh
+++ b/iso/bootstrap_admin_node.sh
@@ -40,6 +40,9 @@ puppet apply -e "
class {puppetdb::master::config: puppet_service_name=>'thin'} "
service thin restart
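+# Pin the installed puppet packages so a later 'yum update' cannot replace them
+# (requires the yum-plugin-versionlock package installed via ks.cfg).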
+yum versionlock puppet
+yum versionlock puppet-server
+
# Working around nginx's default server config
rm -f /etc/nginx/conf.d/default.conf
service nginx restart
diff --git a/iso/functions.sh b/iso/functions.sh
index e452742e86..6b5e90f770 100644
--- a/iso/functions.sh
+++ b/iso/functions.sh
@@ -171,17 +171,13 @@ function show_top {
else
echo $parent_proxy
fi
- column -t -s% <(
- echo "Management interface: $mgmt_if%External interface: $ext_if"
- echo "IP address: ${mgmt_ip:-"DHCP"}%IP address: ${ext_ip:-"DHCP"}"
- echo "Netmask: $mgmt_mask%Netmask: $ext_mask"
- echo "Gateway: $mgmt_gw%Gateway:$ext_gw"
- echo "DNS Server 1: $mgmt_dns1%DNS Server 1: $ext_dns1"
- echo "DNS Server 2: $mgmt_dns2%DNS Server 2: $ext_dns2"
- echo
- )
- echo
-}
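+ # Build the two-column status table; '%' is the field separator consumed by column -t -s%.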
+ echo -e "Management interface: $mgmt_if%External interface: $ext_if\n\
+IP address: ${mgmt_ip:-"DHCP"}%IP address: ${ext_ip:-"DHCP"}\n\
+Netmask: $mgmt_mask%Netmask: $ext_mask\n\
+Gateway: $mgmt_gw%Gateway: $ext_gw\n\
+DNS Server 1: $mgmt_dns1%DNS Server 1: $ext_dns1\n\
+DNS Server 2: $mgmt_dns2%DNS Server 2: $ext_dns2\n" | column -t -s%
+}
function show_msg {
echo "Menu:"
diff --git a/iso/isolinux/isolinux.cfg b/iso/isolinux/isolinux.cfg
index b3de560b90..2e0beb58be 100644
--- a/iso/isolinux/isolinux.cfg
+++ b/iso/isolinux/isolinux.cfg
@@ -5,7 +5,7 @@ timeout 300
display boot.msg
menu background splash.jpg
-menu title Welcome to Fuel-2.1 on CentOS 6.4 Installer !
+menu title Welcome to Fuel-3.0 on CentOS 6.4 Installer!
menu color border 0 #ffffffff #00000000
menu color sel 7 #ffffffff #ff000000
menu color title 0 #ffffffff #00000000
@@ -16,6 +16,6 @@ menu color hotkey 7 #ffffffff #ff000000
menu color scrollbar 0 #ffffffff #00000000
label fuel
- menu label Install Fuel-2.1 Master Node
+ menu label Install Fuel-3.0 Master Node
kernel vmlinuz
append initrd=initrd.img ks=cdrom:/ks.cfg edd=off
diff --git a/iso/ks.cfg b/iso/ks.cfg
index 7cb2f14080..fa68713265 100644
--- a/iso/ks.cfg
+++ b/iso/ks.cfg
@@ -4,14 +4,9 @@ text
reboot --eject
lang en_US.UTF-8
selinux --disabled
-# #url --url http://172.18.67.168/centos-repo/centos-6.3
-# #url --url http://mirror.stanford.edu/yum/pub/centos/6.3/os/x86_64/
-# #url --url http://mirror.centos.org/centos/6.3/os/x86_64/
url --url http://download.mirantis.com/centos-6.4
network --bootproto=dhcp
-# #repo --name=Base --mirrorlist=http://mirrorlist.centos.org/?release=6.3&arch=x86_64&repo=os
-# #repo --name=Updates --mirrorlist=http://mirrorlist.centos.org/?release=6.3&arch=x86_64&repo=updates
-repo --name=Mirantis --mirrorlist=http://download.mirantis.com/epel-fuel-folsom-2.1/mirror.external.list
+repo --name=Mirantis --mirrorlist=http://download.mirantis.com/epel-fuel-grizzly/mirantis.mirror
keyboard us
rootpw r00tme
timezone --utc UTC
@@ -85,6 +80,7 @@ python-argparse
mcollective
mcollective-client
rubygem-astute
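+# required by the 'yum versionlock' calls in bootstrap_admin_node.sh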
+yum-plugin-versionlock
man
yum
openssh-clients