Initial import of TripleO docs

Co-Authored-By: Ana Krivokapic <akrivoka@redhat.com>
Co-Authored-By: Ben Nemec <bnemec@redhat.com>
Co-Authored-By: Ben Nemec <cybertron@nemebean.com>
Co-Authored-By: Brad P. Crochet <brad@redhat.com>
Co-Authored-By: Crag Wolfe <cwolfe@redhat.com>
Co-Authored-By: Dan Sneddon <dsneddon@redhat.com>
Co-Authored-By: David Kranz <dkranz@redhat.com>
Co-Authored-By: Derek Higgins <derekh@redhat.com>
Co-Authored-By: Dimitri Savineau <dsavinea@redhat.com>
Co-Authored-By: Dmitry Tantsur <divius.inside@gmail.com>
Co-Authored-By: Dmitry Tantsur <dtantsur@redhat.com>
Co-Authored-By: Dougal Matthews <dougal@redhat.com>
Co-Authored-By: François Charlier <francois.charlier@redhat.com>
Co-Authored-By: Giulio Fidente <gfidente@redhat.com>
Co-Authored-By: Imre Farkas <ifarkas@redhat.com>
Co-Authored-By: James Slagle <jslagle@redhat.com>
Co-Authored-By: Jan Provaznik <jprovazn@redhat.com>
Co-Authored-By: Jaromir Coufal <jcoufal@redhat.com>
Co-Authored-By: Jay Dobies <jason.dobies@redhat.com>
Co-Authored-By: Jeff Peeler <jpeeler@redhat.com>
Co-Authored-By: Jiri Stransky <jistr@redhat.com>
Co-Authored-By: Jiri Tomasek <jtomasek@redhat.com>
Co-Authored-By: John Trowbridge <trown@redhat.com>
Co-Authored-By: Lennart Regebro <regebro@gmail.com>
Co-Authored-By: Lucas Alvares Gomes <lucasagomes@gmail.com>
Co-Authored-By: Marek Aufart <maufart@redhat.com>
Co-Authored-By: Ronelle Landy <rlandy@redhat.com>
Co-Authored-By: Sasha Chuzhoy <sasha@redhat.com>
Co-Authored-By: Sasha Chuzhoy <sashac88@hotmail.com>
Co-Authored-By: Steven Hardy <shardy@redhat.com>
Co-Authored-By: Zane Bitter <zbitter@redhat.com>
Co-Authored-By: marios <marios@redhat.com>
James Slagle 2015-09-01 15:13:26 -04:00
parent 29658f5f37
commit 8ec4cae920
62 changed files with 10836 additions and 59 deletions

.gitignore (new file)

@@ -0,0 +1,13 @@
*.swp
*~
*.qcow2
.DS_Store
*.egg
*.egg-info
*.pyc
.tox
doc/build
build

README.md (new file)

@@ -0,0 +1,4 @@
TripleO Documentation
=====================
TripleO documentation.


@@ -0,0 +1,58 @@
/*
This function will search for all classes matching all IDs which are under
#admonition_selector element and display/hide their content.
State is saved in cookies so the user doesn't lose their settings after a
page reload or when changing pages.
To make this feature work, you need to:
- add checkbox to _templates/layout.html file with proper ID
- in admonitions use proper class which matches above mentioned ID
*/
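// Illustrative pairing (the RST side is an assumption about how the docs
// mark up admonitions; the checkbox side matches _templates/layout.html in
// this commit):
//   layout.html:  <input type="checkbox" id="ceph" checked="checked">
//   RST source:   .. admonition:: Ceph
//                    :class: ceph
// Toggling the #ceph checkbox shows/hides every ".admonition.ceph" element.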
// after document is loaded
$(document).ready(function() {
// for each checkbox in #admonition_selector do
$('#admonition_selector :checkbox').each(function() {
// check value of cookies and set state to the related element
if ($.cookie($(this).attr("id")) == "true") {
$(this).prop("checked", true);
} else if (($.cookie($(this).attr("id")) == "false")) {
$(this).prop("checked", false);
}
// show/hide elements after page loaded
toggle_admonition($(this).attr("id"));
});
// when user clicks on the checkbox, react
$('#admonition_selector :checkbox').change(function() {
// show/hide related elements
toggle_admonition($(this).attr("id"));
// save the state in the cookies
$.cookie($(this).attr("id"), $(this).is(':checked'), { path: '/' });
});
});
// function to show/hide elements based on checkbox state
// checkbox has ID and it toggles elements having class named same way as the ID
function toggle_admonition(admonition) {
// for each element having class as the checkbox's ID
$(".admonition." + admonition).each(function() {
// set show/hide
if($("#" + admonition).is(':checked')) {
$(this).show();
} else {
$(this).hide();
}
});
}


@@ -0,0 +1,117 @@
/*!
* jQuery Cookie Plugin v1.4.1
* https://github.com/carhartl/jquery-cookie
*
* Copyright 2013 Klaus Hartl
* Released under the MIT license
*/
(function (factory) {
if (typeof define === 'function' && define.amd) {
// AMD
define(['jquery'], factory);
} else if (typeof exports === 'object') {
// CommonJS
factory(require('jquery'));
} else {
// Browser globals
factory(jQuery);
}
}(function ($) {
var pluses = /\+/g;
function encode(s) {
return config.raw ? s : encodeURIComponent(s);
}
function decode(s) {
return config.raw ? s : decodeURIComponent(s);
}
function stringifyCookieValue(value) {
return encode(config.json ? JSON.stringify(value) : String(value));
}
function parseCookieValue(s) {
if (s.indexOf('"') === 0) {
// This is a quoted cookie as according to RFC2068, unescape...
s = s.slice(1, -1).replace(/\\"/g, '"').replace(/\\\\/g, '\\');
}
try {
// Replace server-side written pluses with spaces.
// If we can't decode the cookie, ignore it, it's unusable.
// If we can't parse the cookie, ignore it, it's unusable.
s = decodeURIComponent(s.replace(pluses, ' '));
return config.json ? JSON.parse(s) : s;
} catch(e) {}
}
function read(s, converter) {
var value = config.raw ? s : parseCookieValue(s);
return $.isFunction(converter) ? converter(value) : value;
}
var config = $.cookie = function (key, value, options) {
// Write
if (value !== undefined && !$.isFunction(value)) {
options = $.extend({}, config.defaults, options);
if (typeof options.expires === 'number') {
var days = options.expires, t = options.expires = new Date();
t.setTime(+t + days * 864e+5);
}
return (document.cookie = [
encode(key), '=', stringifyCookieValue(value),
options.expires ? '; expires=' + options.expires.toUTCString() : '', // use expires attribute, max-age is not supported by IE
options.path ? '; path=' + options.path : '',
options.domain ? '; domain=' + options.domain : '',
options.secure ? '; secure' : ''
].join(''));
}
// Read
var result = key ? undefined : {};
// To prevent the for loop in the first place assign an empty array
// in case there are no cookies at all. Also prevents odd result when
// calling $.cookie().
var cookies = document.cookie ? document.cookie.split('; ') : [];
for (var i = 0, l = cookies.length; i < l; i++) {
var parts = cookies[i].split('=');
var name = decode(parts.shift());
var cookie = parts.join('=');
if (key && key === name) {
// If second argument (value) is a function it's a converter...
result = read(cookie, value);
break;
}
// Prevent storing a cookie that we couldn't decode.
if (!key && (cookie = read(cookie)) !== undefined) {
result[name] = cookie;
}
}
return result;
};
config.defaults = {};
$.removeCookie = function (key, options) {
if ($.cookie(key) === undefined) {
return false;
}
// Must not alter options, thus extending a fresh object...
$.cookie(key, '', $.extend({}, options, { expires: -1 }));
return !$.cookie(key);
};
}));
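// Usage sketch (illustrative; this mirrors how admonition_selector.js and
// expandable.js in this commit call the plugin):
//   $.cookie('centos', 'true', { path: '/' });  // write a cookie
//   $.cookie('centos');                         // read it back -> "true"
//   $.removeCookie('centos', { path: '/' });    // delete it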


@@ -0,0 +1,129 @@
/* CUSTOM CSS OVERRIDES GO HERE */
/* ============================ */
/* remove backgrounds */
#admonition_selector {
background: none !important;
color: black !important;
}
/* admonition selector */
#admonition_selector {
border-top: 0 none !important;
}
#admonition_selector .title {
color: rgba(0, 0, 0, 0.6) !important;
}
.trigger {
color: rgba(0, 0, 0, 0.7) !important;
border-top: 1px solid rgba(0, 0, 0, 0.2);
border-bottom: 1px solid rgba(0, 0, 0, 0.2);
background: rgba(0, 0, 0, 0.05);
}
.trigger:hover {
color: rgba(0, 0, 0, 0.9) !important;
}
/* NOTES, ADMONITIONS AND TAGS */
.admonition {
font-size: 85%; /* match code size */
background: rgb(240, 240, 240);
color: rgba(0, 0, 0, 0.55);
border: 1px solid rgba(0, 0, 0, 0.1);
padding: 0.5em 1em 0.75em 1em;
margin-bottom: 24px;
}
.admonition p {
font-size: inherit;
}
.admonition p.last {
margin-bottom: 0;
}
.admonition p.first.admonition-title {
display: inline;
background: none;
font-weight: bold;
color: rgba(0, 0, 0, 0.75);
}
/* notes */
.rst-content .note {
background: rgb(240, 240, 240);
}
/* tags */
.rhel {background: #fee;}
.portal {background-color: #ded;}
.satellite {background-color: #dee;}
.centos {background: #fef;}
.baremetal {background: #eef;}
.virtual {background: #efe;}
.ceph {background: #eff;}
/* admonition selector */
#admonition_selector {
color: white;
font-size: 85%;
line-height: 1.4;
background: #2980b9;
border-top: 1px solid rgba(255, 255, 255, 0.4);
}
.trigger {
display: block;
font-size: 110%;
color: rgba(255, 255, 255, 0.75);
line-height: 2.5;
position: relative;
cursor: pointer;
padding: 0 1.618em;
}
.trigger:after {
content: '▾';
display: block;
font-family: FontAwesome;
font-size: 70%;
position: absolute;
right: 1.618em;
top: 6px;
}
.trigger:hover {
color: white;
}
.content {
display: none;
border-top: 1px solid rgba(255, 255, 255, 0.1);
background: rgba(255, 255, 255, 0.1);
padding: 0.5em 1.618em;
}
.displayed .trigger:after {
content: '▴';
}
#admonition_selector .title {
color: rgba(255, 255, 255, 0.45);
}
#admonition_selector ul {
margin-bottom: 0.75em;
}
#admonition_selector ul li {
display: block;
}
#admonition_selector label {
display: inline;
color: inherit;
text-decoration: underline dotted;
}


@@ -0,0 +1,32 @@
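// Expected markup (illustrative; this is the shape used by
// _templates/layout.html in this commit): a container element with an id,
// holding a ".trigger" element followed by a sibling ".content" element, e.g.
//   <div id="admonition_selector">
//     <span class="trigger">Limit Environment Specific Content</span>
//     <div class="content"> ... </div>
//   </div>
// The open/closed state is persisted per container id via $.cookie.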
$(document).ready(function() {
// for each trigger
$('.trigger').each(function() {
// check if cookie has value on true
if ($.cookie($(this).parent().prop('id')) == "true") {
// add displayed class and show the content
$(this).parent().addClass("displayed");
$(this).next('.content').show();
} else {
// remove displayed class and hide the content
$(this).parent().removeClass("displayed");
$(this).next('.content').hide();
}
});
// if user clicked trigger element
$('.trigger').click(function() {
// toggle parent's class and animate the content
$(this).parent().toggleClass('displayed');
$(this).next('.content').slideToggle("fast");
// save the state to cookies
$.cookie($(this).parent().prop('id'),
         $(this).parent().hasClass('displayed'),
         { path: '/' });
});
});


@@ -0,0 +1,223 @@
/*
* jQuery One Page Nav Plugin
* http://github.com/davist11/jQuery-One-Page-Nav
*
* Copyright (c) 2010 Trevor Davis (http://trevordavis.net)
* Dual licensed under the MIT and GPL licenses.
* Uses the same license as jQuery, see:
* http://jquery.org/license
*
* @version 3.0.0
*
* Example usage:
* $('#nav').onePageNav({
* currentClass: 'current',
* changeHash: false,
* scrollSpeed: 750
* });
*/
;(function($, window, document, undefined){
// our plugin constructor
var OnePageNav = function(elem, options){
this.elem = elem;
this.$elem = $(elem);
this.options = options;
this.metadata = this.$elem.data('plugin-options');
this.$win = $(window);
this.sections = {};
this.didScroll = false;
this.$doc = $(document);
this.docHeight = this.$doc.height();
};
// the plugin prototype
OnePageNav.prototype = {
defaults: {
navItems: 'a',
currentClass: 'active',
changeHash: false,
easing: 'swing',
filter: '',
scrollSpeed: 750,
scrollThreshold: 0.2,
begin: false,
end: false,
scrollChange: false
},
init: function() {
// Introduce defaults that can be extended either
// globally or using an object literal.
this.config = $.extend({}, this.defaults, this.options, this.metadata);
this.$nav = this.$elem.find(this.config.navItems);
//Filter any links out of the nav
if(this.config.filter !== '') {
this.$nav = this.$nav.filter(this.config.filter);
}
//Handle clicks on the nav
this.$nav.on('click.onePageNav', $.proxy(this.handleClick, this));
//Get the section positions
this.getPositions();
//Handle scroll changes
this.bindInterval();
//Update the positions on resize too
this.$win.on('resize.onePageNav', $.proxy(this.getPositions, this));
return this;
},
adjustNav: function(self, $parent) {
self.$elem.find('.' + self.config.currentClass).removeClass(self.config.currentClass);
$parent.addClass(self.config.currentClass);
},
bindInterval: function() {
var self = this;
var docHeight;
self.$win.on('scroll.onePageNav', function() {
self.didScroll = true;
});
self.t = setInterval(function() {
docHeight = self.$doc.height();
//If it was scrolled
if(self.didScroll) {
self.didScroll = false;
self.scrollChange();
}
//If the document height changes
if(docHeight !== self.docHeight) {
self.docHeight = docHeight;
self.getPositions();
}
}, 250);
},
getHash: function($link) {
return $link.attr('href').split('#')[1];
},
getPositions: function() {
var self = this;
var linkHref;
var topPos;
var $target;
self.$nav.each(function() {
linkHref = self.getHash($(this));
$target = $('#' + linkHref);
if($target.length) {
topPos = $target.offset().top;
self.sections[linkHref] = Math.round(topPos);
}
});
},
getSection: function(windowPos) {
var returnValue = null;
var windowHeight = Math.round(this.$win.height() * this.config.scrollThreshold);
for(var section in this.sections) {
if((this.sections[section] - windowHeight) < windowPos) {
returnValue = section;
}
}
return returnValue;
},
handleClick: function(e) {
var self = this;
var $link = $(e.currentTarget);
var $parent = $link.parent();
var newLoc = '#' + self.getHash($link);
if(!$parent.hasClass(self.config.currentClass)) {
//Start callback
if(self.config.begin) {
self.config.begin();
}
//Change the highlighted nav item
self.adjustNav(self, $parent);
//Removing the auto-adjust on scroll
self.unbindInterval();
//Scroll to the correct position
self.scrollTo(newLoc, function() {
//Do we need to change the hash?
if(self.config.changeHash) {
window.location.hash = newLoc;
}
//Add the auto-adjust on scroll back in
self.bindInterval();
//End callback
if(self.config.end) {
self.config.end();
}
});
}
e.preventDefault();
},
scrollChange: function() {
var windowTop = this.$win.scrollTop();
var position = this.getSection(windowTop);
var $parent;
//If the position is set
if(position !== null) {
$parent = this.$elem.find('a[href$="#' + position + '"]').parent();
//If it's not already the current section
if(!$parent.hasClass(this.config.currentClass)) {
//Change the highlighted nav item
this.adjustNav(this, $parent);
//If there is a scrollChange callback
if(this.config.scrollChange) {
this.config.scrollChange($parent);
}
}
}
},
scrollTo: function(target, callback) {
var offset = $(target).offset().top;
$('html, body').animate({
scrollTop: offset
}, this.config.scrollSpeed, this.config.easing, callback);
},
unbindInterval: function() {
clearInterval(this.t);
this.$win.unbind('scroll.onePageNav');
}
};
OnePageNav.defaults = OnePageNav.prototype.defaults;
$.fn.onePageNav = function(options) {
return this.each(function() {
new OnePageNav(this, options).init();
});
};
})( jQuery, window , document );


@@ -0,0 +1,208 @@
/*!
* jQuery.scrollTo
* Copyright (c) 2007-2015 Ariel Flesler - aflesler<a>gmail<d>com | http://flesler.blogspot.com
* Licensed under MIT
* http://flesler.blogspot.com/2007/10/jqueryscrollto.html
* @projectDescription Easy element scrolling using jQuery.
* @author Ariel Flesler
* @version 2.1.0
*/
;(function(define) {
'use strict';
define(['jquery'], function($) {
var $scrollTo = $.scrollTo = function(target, duration, settings) {
return $(window).scrollTo(target, duration, settings);
};
$scrollTo.defaults = {
axis:'xy',
duration: 0,
limit:true
};
function isWin(elem) {
return !elem.nodeName ||
$.inArray(elem.nodeName.toLowerCase(), ['iframe','#document','html','body']) !== -1;
}
$.fn.scrollTo = function(target, duration, settings) {
if (typeof duration === 'object') {
settings = duration;
duration = 0;
}
if (typeof settings === 'function') {
settings = { onAfter:settings };
}
if (target === 'max') {
target = 9e9;
}
settings = $.extend({}, $scrollTo.defaults, settings);
// Speed is still recognized for backwards compatibility
duration = duration || settings.duration;
// Make sure the settings are given right
var queue = settings.queue && settings.axis.length > 1;
if (queue) {
// Let's keep the overall duration
duration /= 2;
}
settings.offset = both(settings.offset);
settings.over = both(settings.over);
return this.each(function() {
// Null target yields nothing, just like jQuery does
if (target === null) return;
var win = isWin(this),
elem = win ? this.contentWindow || window : this,
$elem = $(elem),
targ = target,
attr = {},
toff;
switch (typeof targ) {
// A number will pass the regex
case 'number':
case 'string':
if (/^([+-]=?)?\d+(\.\d+)?(px|%)?$/.test(targ)) {
targ = both(targ);
// We are done
break;
}
// Relative/Absolute selector
targ = win ? $(targ) : $(targ, elem);
if (!targ.length) return;
/* falls through */
case 'object':
// DOMElement / jQuery
if (targ.is || targ.style) {
// Get the real position of the target
toff = (targ = $(targ)).offset();
}
}
var offset = $.isFunction(settings.offset) && settings.offset(elem, targ) || settings.offset;
$.each(settings.axis.split(''), function(i, axis) {
var Pos = axis === 'x' ? 'Left' : 'Top',
pos = Pos.toLowerCase(),
key = 'scroll' + Pos,
prev = $elem[key](),
max = $scrollTo.max(elem, axis);
if (toff) {// jQuery / DOMElement
attr[key] = toff[pos] + (win ? 0 : prev - $elem.offset()[pos]);
// If it's a dom element, reduce the margin
if (settings.margin) {
attr[key] -= parseInt(targ.css('margin'+Pos), 10) || 0;
attr[key] -= parseInt(targ.css('border'+Pos+'Width'), 10) || 0;
}
attr[key] += offset[pos] || 0;
if (settings.over[pos]) {
// Scroll to a fraction of its width/height
attr[key] += targ[axis === 'x'?'width':'height']() * settings.over[pos];
}
} else {
var val = targ[pos];
// Handle percentage values
attr[key] = val.slice && val.slice(-1) === '%' ?
parseFloat(val) / 100 * max
: val;
}
// Number or 'number'
if (settings.limit && /^\d+$/.test(attr[key])) {
// Check the limits
attr[key] = attr[key] <= 0 ? 0 : Math.min(attr[key], max);
}
// Don't waste time animating, if there's no need.
if (!i && settings.axis.length > 1) {
if (prev === attr[key]) {
// No animation needed
attr = {};
} else if (queue) {
// Intermediate animation
animate(settings.onAfterFirst);
// Don't animate this axis again in the next iteration.
attr = {};
}
}
});
animate(settings.onAfter);
function animate(callback) {
var opts = $.extend({}, settings, {
// The queue setting conflicts with animate()
// Force it to always be true
queue: true,
duration: duration,
complete: callback && function() {
callback.call(elem, targ, settings);
}
});
$elem.animate(attr, opts);
}
});
};
// Max scrolling position, works on quirks mode
// It only fails (not too badly) on IE, quirks mode.
$scrollTo.max = function(elem, axis) {
var Dim = axis === 'x' ? 'Width' : 'Height',
scroll = 'scroll'+Dim;
if (!isWin(elem))
return elem[scroll] - $(elem)[Dim.toLowerCase()]();
var size = 'client' + Dim,
doc = elem.ownerDocument || elem.document,
html = doc.documentElement,
body = doc.body;
return Math.max(html[scroll], body[scroll]) - Math.min(html[size], body[size]);
};
function both(val) {
return $.isFunction(val) || $.isPlainObject(val) ? val : { top:val, left:val };
}
// Add special hooks so that window scroll properties can be animated
$.Tween.propHooks.scrollLeft =
$.Tween.propHooks.scrollTop = {
get: function(t) {
return $(t.elem)[t.prop]();
},
set: function(t) {
var curr = this.get(t);
// If interrupt is true and user scrolled, stop animating
if (t.options.interrupt && t._last && t._last !== curr) {
return $(t.elem).stop();
}
var next = Math.round(t.now);
// Don't waste CPU
// Browsers don't render floating point scroll
if (curr !== next) {
$(t.elem)[t.prop](next);
t._last = this.get(t);
}
}
};
// AMD requirement
return $scrollTo;
});
}(typeof define === 'function' && define.amd ? define : function(deps, factory) {
'use strict';
if (typeof module !== 'undefined' && module.exports) {
// Node
module.exports = factory(require('jquery'));
} else {
factory(jQuery);
}
}));
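// Usage sketch (illustrative, based on the API defined above):
//   $(window).scrollTo('#some-section', 500);              // window scroll, 500ms
//   $('#wrapper').scrollTo('li:eq(3)', { duration: 300 }); // container scroll
//   $.scrollTo('max');                                     // shorthand: scroll window to bottom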


@@ -0,0 +1,3 @@
$(document).ready(function() {
$('.wy-menu').onePageNav();
});


@@ -0,0 +1,192 @@
<?xml version="1.0" encoding="utf-8"?>
<!-- Generator: Adobe Illustrator 16.0.4, SVG Export Plug-In . SVG Version: 6.00 Build 0) -->
<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
<svg version="1.1" id="Layer_1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px"
width="1075.511px" height="793.5px" viewBox="1020 0 1075.511 793.5" enable-background="new 1020 0 1075.511 793.5"
xml:space="preserve">
<g>
<rect x="1046.5" y="42.271" fill="#FFFFFF" width="230.863" height="82.729"/>
<g>
<polyline fill="none" stroke="#000000" stroke-miterlimit="10" points="1277.363,122 1277.363,125 1274.363,125 "/>
<line fill="none" stroke="#000000" stroke-miterlimit="10" stroke-dasharray="6.0774,6.0774" x1="1268.286" y1="125" x2="1052.539" y2="125"/>
<polyline fill="none" stroke="#000000" stroke-miterlimit="10" points="1049.5,125 1046.5,125 1046.5,122 "/>
<line fill="none" stroke="#000000" stroke-miterlimit="10" stroke-dasharray="5.9022,5.9022" x1="1046.5" y1="116.098" x2="1046.5" y2="48.222"/>
<polyline fill="none" stroke="#000000" stroke-miterlimit="10" points="1046.5,45.271 1046.5,42.271 1049.5,42.271 "/>
<line fill="none" stroke="#000000" stroke-miterlimit="10" stroke-dasharray="6.0774,6.0774" x1="1055.577" y1="42.271" x2="1271.324" y2="42.271"/>
<polyline fill="none" stroke="#000000" stroke-miterlimit="10" points="1274.363,42.271 1277.363,42.271 1277.363,45.271 "/>
<line fill="none" stroke="#000000" stroke-miterlimit="10" stroke-dasharray="5.9022,5.9022" x1="1277.363" y1="51.173" x2="1277.363" y2="119.049"/>
</g>
</g>
<rect x="1048.5" y="73.745" fill="none" width="228.863" height="21.781"/>
<text transform="matrix(1 0 0 1 1138.9512 87.4243)" font-family="'OpenSans'" font-size="18">Client</text>
<g>
<rect x="1310.675" y="42.271" fill="#FFFFFF" width="230.863" height="82.729"/>
<g>
<polyline fill="none" stroke="#000000" stroke-miterlimit="10" points="1541.538,122 1541.538,125 1538.538,125 "/>
<line fill="none" stroke="#000000" stroke-miterlimit="10" stroke-dasharray="6.0774,6.0774" x1="1532.461" y1="125" x2="1316.714" y2="125"/>
<polyline fill="none" stroke="#000000" stroke-miterlimit="10" points="1313.675,125 1310.675,125 1310.675,122 "/>
<line fill="none" stroke="#000000" stroke-miterlimit="10" stroke-dasharray="5.9022,5.9022" x1="1310.675" y1="116.097" x2="1310.675" y2="48.222"/>
<polyline fill="none" stroke="#000000" stroke-miterlimit="10" points="1310.675,45.271 1310.675,42.271 1313.675,42.271 "/>
<line fill="none" stroke="#000000" stroke-miterlimit="10" stroke-dasharray="6.0774,6.0774" x1="1319.752" y1="42.271" x2="1535.499" y2="42.271"/>
<polyline fill="none" stroke="#000000" stroke-miterlimit="10" points="1538.538,42.271 1541.538,42.271 1541.538,45.271 "/>
<line fill="none" stroke="#000000" stroke-miterlimit="10" stroke-dasharray="5.9022,5.9022" x1="1541.538" y1="51.173" x2="1541.538" y2="119.048"/>
</g>
</g>
<rect x="1310.675" y="73.745" fill="none" width="230.863" height="21.781"/>
<text transform="matrix(1 0 0 1 1402.4023 87.4243)" font-family="'OpenSans'" font-size="18">Ironic</text>
<g>
<rect x="1577.004" y="43.271" fill="#FFFFFF" width="230.863" height="82.729"/>
<g>
<polyline fill="none" stroke="#000000" stroke-miterlimit="10" points="1807.867,123 1807.867,126 1804.867,126 "/>
<line fill="none" stroke="#000000" stroke-miterlimit="10" stroke-dasharray="6.0774,6.0774" x1="1798.79" y1="126" x2="1583.043" y2="126"/>
<polyline fill="none" stroke="#000000" stroke-miterlimit="10" points="1580.004,126 1577.004,126 1577.004,123 "/>
<line fill="none" stroke="#000000" stroke-miterlimit="10" stroke-dasharray="5.9022,5.9022" x1="1577.004" y1="117.097" x2="1577.004" y2="49.222"/>
<polyline fill="none" stroke="#000000" stroke-miterlimit="10" points="1577.004,46.271 1577.004,43.271 1580.004,43.271 "/>
<line fill="none" stroke="#000000" stroke-miterlimit="10" stroke-dasharray="6.0774,6.0774" x1="1586.081" y1="43.271" x2="1801.828" y2="43.271"/>
<polyline fill="none" stroke="#000000" stroke-miterlimit="10" points="1804.867,43.271 1807.867,43.271 1807.867,46.271 "/>
<line fill="none" stroke="#000000" stroke-miterlimit="10" stroke-dasharray="5.9022,5.9022" x1="1807.867" y1="52.173" x2="1807.867" y2="120.048"/>
</g>
</g>
<rect x="1578.582" y="74.745" fill="none" width="230.863" height="21.781"/>
<text transform="matrix(1 0 0 1 1615.3076 88.4243)" font-family="'OpenSans'" font-size="18">Discovery Ramdisk</text>
<g>
<rect x="1837.637" y="42.271" fill="#FFFFFF" width="230.863" height="82.729"/>
<g>
<polyline fill="none" stroke="#000000" stroke-miterlimit="10" points="2068.5,122 2068.5,125 2065.5,125 "/>
<line fill="none" stroke="#000000" stroke-miterlimit="10" stroke-dasharray="6.0774,6.0774" x1="2059.423" y1="125" x2="1843.676" y2="125"/>
<polyline fill="none" stroke="#000000" stroke-miterlimit="10" points="1840.637,125 1837.637,125 1837.637,122 "/>
<line fill="none" stroke="#000000" stroke-miterlimit="10" stroke-dasharray="5.9022,5.9022" x1="1837.637" y1="116.097" x2="1837.637" y2="48.222"/>
<polyline fill="none" stroke="#000000" stroke-miterlimit="10" points="1837.637,45.271 1837.637,42.271 1840.637,42.271 "/>
<line fill="none" stroke="#000000" stroke-miterlimit="10" stroke-dasharray="6.0774,6.0774" x1="1846.714" y1="42.271" x2="2062.461" y2="42.271"/>
<polyline fill="none" stroke="#000000" stroke-miterlimit="10" points="2065.5,42.271 2068.5,42.271 2068.5,45.271 "/>
<line fill="none" stroke="#000000" stroke-miterlimit="10" stroke-dasharray="5.9022,5.9022" x1="2068.5" y1="51.173" x2="2068.5" y2="119.048"/>
</g>
</g>
<rect x="1835.637" y="73.745" fill="none" width="232.863" height="21.781"/>
<text transform="matrix(1 0 0 1 1910.4697 87.4243)" font-family="'OpenSans'" font-size="18">Discoverd</text>
<g>
<g>
<line fill="none" stroke="#000000" stroke-width="2" stroke-miterlimit="10" x1="1161.932" y1="126" x2="1161.932" y2="132"/>
<line fill="none" stroke="#000000" stroke-width="2" stroke-miterlimit="10" stroke-dasharray="12.1939,12.1939" x1="1161.932" y1="144.193" x2="1161.932" y2="723.403"/>
<line fill="none" stroke="#000000" stroke-width="2" stroke-miterlimit="10" x1="1161.932" y1="729.5" x2="1161.932" y2="735.5"/>
</g>
</g>
<g>
<g>
<line fill="none" stroke="#000000" stroke-width="2" stroke-miterlimit="10" x1="1426.106" y1="126" x2="1426.106" y2="132"/>
<line fill="none" stroke="#000000" stroke-width="2" stroke-miterlimit="10" stroke-dasharray="12.1939,12.1939" x1="1426.106" y1="144.193" x2="1426.106" y2="723.403"/>
<line fill="none" stroke="#000000" stroke-width="2" stroke-miterlimit="10" x1="1426.106" y1="729.5" x2="1426.106" y2="735.5"/>
</g>
</g>
<g>
<g>
<line fill="none" stroke="#000000" stroke-width="2" stroke-miterlimit="10" x1="1692.436" y1="126" x2="1692.436" y2="132"/>
<line fill="none" stroke="#000000" stroke-width="2" stroke-miterlimit="10" stroke-dasharray="12.1939,12.1939" x1="1692.436" y1="144.193" x2="1692.436" y2="723.403"/>
<line fill="none" stroke="#000000" stroke-width="2" stroke-miterlimit="10" x1="1692.436" y1="729.5" x2="1692.436" y2="735.5"/>
</g>
</g>
<g>
<g>
<line fill="none" stroke="#000000" stroke-width="2" stroke-miterlimit="10" x1="1953.068" y1="126" x2="1953.068" y2="132"/>
<line fill="none" stroke="#000000" stroke-width="2" stroke-miterlimit="10" stroke-dasharray="12.1939,12.1939" x1="1953.068" y1="144.193" x2="1953.068" y2="723.403"/>
<line fill="none" stroke="#000000" stroke-width="2" stroke-miterlimit="10" x1="1953.068" y1="729.5" x2="1953.068" y2="735.5"/>
</g>
</g>
<rect x="1156.932" y="169.729" fill="#820E0A" stroke="#000000" stroke-width="2" stroke-linejoin="round" stroke-miterlimit="10" width="12" height="537.771"/>
<rect x="1420.106" y="169.729" fill="#820E0A" stroke="#000000" stroke-width="2" stroke-linejoin="round" stroke-miterlimit="10" width="12" height="158.771"/>
<rect x="1420.106" y="386" fill="#820E0A" stroke="#000000" stroke-width="2" stroke-linejoin="round" stroke-miterlimit="10" width="12" height="319.635"/>
<rect x="1946.068" y="612.5" fill="#820E0A" stroke="#000000" stroke-width="2" stroke-linejoin="round" stroke-miterlimit="10" width="12" height="92.089"/>
<g>
<g>
<line fill="none" stroke="#000000" stroke-width="2" stroke-linejoin="round" x1="1182.5" y1="169.729" x2="1394.43" y2="169.729"/>
<g>
<path d="M1406.5,169.729c-5.68,2.107-12.727,5.703-17.095,9.512l3.44-9.512l-3.44-9.51
C1393.773,164.028,1400.82,167.624,1406.5,169.729z"/>
</g>
</g>
</g>
<g>
<g>
<line fill="none" stroke="#000000" stroke-width="2" stroke-linejoin="round" x1="1182.5" y1="383.729" x2="1394.43" y2="383.729"/>
<g>
<path d="M1406.5,383.729c-5.68,2.107-12.727,5.703-17.095,9.512l3.44-9.512l-3.44-9.509
C1393.773,378.027,1400.82,381.623,1406.5,383.729z"/>
</g>
</g>
</g>
<g>
<g>
<line fill="none" stroke="#000000" stroke-width="2" stroke-linejoin="round" x1="1706.5" y1="612.5" x2="1918.43" y2="612.5"/>
<g>
<path d="M1930.5,612.5c-5.68,2.107-12.727,5.703-17.095,9.512l3.44-9.512l-3.44-9.51
C1917.773,606.799,1924.82,610.395,1930.5,612.5z"/>
</g>
</g>
</g>
<rect x="1686.436" y="386" fill="#820E0A" stroke="#000000" stroke-width="2" stroke-linejoin="round" stroke-miterlimit="10" width="12" height="226.5"/>
<g>
<g>
<line fill="none" stroke="#000000" stroke-width="2" stroke-linejoin="round" x1="1450.436" y1="386" x2="1662.365" y2="386"/>
<g>
<path d="M1674.436,386c-5.68,2.107-12.727,5.703-17.095,9.512l3.44-9.512l-3.44-9.51
C1661.709,380.299,1668.756,383.895,1674.436,386z"/>
</g>
</g>
</g>
<g>
<g>
<line fill="none" stroke="#000000" stroke-width="2" stroke-linejoin="round" x1="1460.506" y1="704.589" x2="1930.5" y2="704.589"/>
<g>
<path d="M1448.436,704.589c5.68,2.107,12.727,5.703,17.095,9.512l-3.44-9.512l3.44-9.51
C1461.162,698.888,1454.115,702.483,1448.436,704.589z"/>
</g>
</g>
</g>
<rect x="1180.068" y="134.706" fill="none" width="228.863" height="33.317"/>
<text transform="matrix(1 0 0 1 1180.0684 145.3457)"><tspan x="0" y="0" font-family="'OpenSans'" font-size="14">Register nodes</tspan><tspan x="0" y="16.8" font-family="'OpenSans'" font-size="14">power management details</tspan></text>
<rect x="1182.5" y="348.047" fill="none" width="228.863" height="33.316"/>
<text transform="matrix(1 0 0 1 1182.5 375.4868)" font-family="'OpenSans'" font-size="14">Send nodes for introspection</text>
<rect x="1448.004" y="350.047" fill="none" width="228.863" height="33.316"/>
<text transform="matrix(1 0 0 1 1448.0039 360.687)"><tspan x="0" y="0" font-family="'OpenSans'" font-size="14">Reboot nodes -&gt; PXE boot generic </tspan><tspan x="0" y="16.799" font-family="'OpenSans'" font-size="14">discovery ramdisk image</tspan></text>
<rect x="1584.004" y="669.545" fill="none" width="346.496" height="33.316"/>
<text transform="matrix(1 0 0 1 1599.0186 696.9844)" font-family="'OpenSans'" font-size="14">Facts checking and registration of hardware details</text>
<rect x="1705.068" y="579.184" fill="none" width="228.863" height="33.316"/>
<text transform="matrix(1 0 0 1 1705.0684 606.623)" font-family="'OpenSans'" font-size="14">Post hardware metrics</text>
<g>
<g>
<line fill="none" stroke="#000000" stroke-width="2" stroke-linejoin="round" x1="1192.139" y1="321.729" x2="1404.068" y2="321.729"/>
<g>
<path d="M1180.068,321.729c5.68,2.107,12.727,5.703,17.095,9.512l-3.44-9.512l3.44-9.51
C1192.795,316.027,1185.748,319.623,1180.068,321.729z"/>
</g>
</g>
</g>
<rect x="1179.068" y="286.047" fill="none" width="228.863" height="33.316"/>
<text transform="matrix(1 0 0 1 1295.7676 313.4868)" font-family="'OpenSans'" font-size="14">Nodes registered</text>
<rect x="1258.858" y="748.158" fill="none" width="346.496" height="33.316"/>
<text transform="matrix(1 0 0 1 1341.6709 758.7988)"><tspan x="0" y="0" font-family="'OpenSans-Bold'" font-size="14">Nodes are fully registered</tspan><tspan x="-38.877" y="16.799" font-family="'OpenSans-Bold'" font-size="14">wth full stack of hardware attributes</tspan></text>
</svg>


@@ -0,0 +1,32 @@
<?xml version="1.0" encoding="utf-8"?>
<!-- Generator: Adobe Illustrator 16.0.4, SVG Export Plug-In . SVG Version: 6.00 Build 0) -->
<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
<svg version="1.1" id="Layer_1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px"
width="644.5px" height="294px" viewBox="0 0 644.5 294" enable-background="new 0 0 644.5 294" xml:space="preserve">
<path fill="#820A0E" d="M10.775,93.754L10.775,93.754c0-10.828,8.667-19.605,19.357-19.605h110.615
c5.134,0,10.058,2.066,13.688,5.743s5.669,8.664,5.669,13.864v78.422c0,10.828-8.667,19.605-19.357,19.605H30.133l0,0
c-10.691,0-19.357-8.777-19.357-19.605L10.775,93.754L10.775,93.754z"/>
<path fill="none" stroke="#000000" stroke-width="2" stroke-linejoin="round" stroke-miterlimit="10" d="M10.775,93.754
L10.775,93.754c0-10.828,8.667-19.605,19.357-19.605h110.615c5.134,0,10.058,2.066,13.688,5.743s5.669,8.664,5.669,13.864v78.422
c0,10.828-8.667,19.605-19.357,19.605H30.133l0,0c-10.691,0-19.357-8.777-19.357-19.605L10.775,93.754L10.775,93.754z"/>
<path fill-opacity="0" d="M299.077,53.848L299.077,53.848c0-21.85,17.485-39.562,39.058-39.562h255.822
c10.358,0,20.293,4.167,27.618,11.587c7.325,7.417,11.438,17.481,11.438,27.973v158.235c0,21.85-17.485,39.562-39.058,39.562
H338.135c-21.57,0-39.058-17.712-39.058-39.562V53.848L299.077,53.848z"/>
<path fill="#DDDDDD" stroke="#000000" stroke-width="2" stroke-linejoin="round" stroke-miterlimit="10" d="M299.077,53.848
L299.077,53.848c0-21.85,17.485-39.562,39.058-39.562h255.822c10.358,0,20.293,4.167,27.618,11.587
c7.325,7.417,11.438,17.481,11.438,27.973v158.235c0,21.85-17.485,39.562-39.058,39.562H338.135
c-21.57,0-39.058-17.712-39.058-39.562V53.848L299.077,53.848z"/>
<path fill-opacity="0" d="M160.105,132.965h138.972"/>
<path fill="none" stroke="#000000" stroke-width="2" stroke-linejoin="round" stroke-miterlimit="10" d="M160.105,132.965h127.253"
/>
<path stroke="#000000" stroke-width="2" stroke-miterlimit="10" d="M287.358,136.233l8.864-3.269l-8.864-3.268V136.233z"/>
<path fill-opacity="0" d="M180.854,79.397h97.472v92.433h-97.472V79.397z"/>
<rect x="17.44" y="125.359" fill="none" width="139" height="16.712"/>
<text transform="matrix(0.9873 0 0 1 26.0718 139.2148)" fill="#FFFFFF" font-family="'OpenSans-Semibold'" font-size="18.2314">Undercloud</text>
<rect x="329.94" y="125.359" fill="none" width="261" height="16.712"/>
<text transform="matrix(0.9873 0 0 1 335.8867 139.2148)" font-family="'OpenSans-Semibold'" font-size="18.2314">Production OpenStack Cloud (Overcloud)</text>
<rect x="160.105" y="80.287" fill="none" width="138.972" height="106.856"/>
<text transform="matrix(0.9873 0 0 1 202.6709 91.0645)" font-family="'OpenSans-Semibold'" font-size="14.18">Deploys</text>
<text transform="matrix(0.9873 0 0 1 201.4199 108.0801)" font-family="'OpenSans-Semibold'" font-size="14.18">Updates</text>
<text transform="matrix(0.9873 0 0 1 199.0068 125.0957)" font-family="'OpenSans-Semibold'" font-size="14.18">Monitors</text>
</svg>


@@ -0,0 +1,43 @@
{% extends "!layout.html" %}
{% set script_files = script_files + ["_static/cookies.js"] %}
{% set script_files = script_files + ["_static/expandable.js"] %}
{% set script_files = script_files + ["_static/admonition_selector.js"] %}
{% set script_files = script_files + ["_static/jquery.scrollTo.js"] %}
{% set script_files = script_files + ["_static/jquery.nav.js"] %}
{% set script_files = script_files + ["_static/menu.js"] %}
{% set css_files = css_files + ['_static/custom.css'] %}
{% block sidebarsearch %}
<div id="admonition_selector">
<span class="trigger">Limit Environment Specific Content</span>
<div class="content">
<span class="title">Operating Systems</span>
<ul>
<li><input type="checkbox" id="centos" checked="checked"><label for="centos" title="Step that should only be run when using CentOS.">CentOS</label></li>
<li><input type="checkbox" id="rhel" checked="checked"><label for="rhel" title="Step that should only be run when using RHEL.">RHEL</label></li>
</ul>
<span class="title">RHEL Registration Types</span>
<ul>
<li><input type="checkbox" id="portal" checked="checked"><label for="portal" title="Step that should only be run when registering to the Red Hat Portal.">Portal</label></li>
<li><input type="checkbox" id="satellite" checked="checked"><label for="satellite" title="Step that should only be run when registering to Red Hat Satellite.">Satellite</label></li>
</ul>
<span class="title">Environments</span>
<ul>
<li><input type="checkbox" id="baremetal" checked="checked"><label for="baremetal" title="Step that should only be run when deploying to baremetal.">Baremetal</label></li>
<li><input type="checkbox" id="virtual" checked="checked"><label for="virtual" title="Step that should only be run when deploying to virtual machines.">Virtual</label></li>
</ul>
<span class="title">Additional Overcloud Roles</span>
<ul>
<li><input type="checkbox" id="ceph" checked="checked"><label for="ceph" title="Step that should only be run when deploying Ceph for use by the Overcloud.">Ceph</label></li>
</ul>
</div>
</div>
{{ super() }}
{% endblock %}


@@ -0,0 +1,24 @@
Advanced Deployment
===================
In this chapter you will find advanced deployment documentation for various
|project| areas.

.. toctree::

   Advanced Profile Matching <profile_matching>
   Ready-State (BIOS, RAID) <ready_state>
   Automated Health Check <automated_health_check>
   Additional node configuration <extra_config>
   Deploying with Heat Templates <template_deploy>
   Network Isolation <network_isolation>
   Managing Tuskar Plans and Roles <managing_plans_and_roles>

.. <MOVE THESE UNDER TOCTREE WHEN READY, KEEP LOGICAL WORKFLOW ORDER>
   Images <images>
   Nodes <nodes>
   Flavors <flavors>
   Deployment <deployment>
   Connection to Overcloud <overcloud>
   Updates <updates>


@@ -0,0 +1,290 @@
Automated Health Check (AHC)
============================
Start with matching the nodes to profiles as described in
:doc:`profile_matching`.
Enable running benchmarks during discovery
------------------------------------------
By default, the benchmark tests do not run during the discovery process.
You can enable this feature by setting *discovery_runbench = true* in the
**undercloud.conf** file prior to installing the undercloud.
If you want to enable this feature after installing the undercloud, set
*discovery_runbench = true* in **undercloud.conf** and re-run
``openstack undercloud install``.
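For example, assuming **undercloud.conf** is in the current directory (the
``sed`` expression below is only an illustration; editing the file by hand
works equally well)::

    $ sed -i 's/^#\?\s*discovery_runbench\s*=.*/discovery_runbench = true/' undercloud.conf
    $ openstack undercloud install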
Analyze the collected benchmark data
------------------------------------
After discovery has completed, we can analyze the collected benchmark data.
* Run the ``ahc-report`` script to see a general overview of the hardware
::
$ source stackrc
$ ahc-report --categories
##### HPA Controller #####
3 identical systems :
[u'7F8831F1-0D81-464E-A767-7577DF49AAA5', u'B9FE637A-5B97-4A52-BFDA-9244CEA65E23', u'7884BC95-6EF8-4447-BDE5-D19561718B29']
[]
########################
##### Megaraid Controller #####
3 identical systems :
[u'7F8831F1-0D81-464E-A767-7577DF49AAA5', u'B9FE637A-5B97-4A52-BFDA-9244CEA65E23', u'7884BC95-6EF8-4447-BDE5-D19561718B29']
[]
#############################
##### AHCI Controller #####
3 identical systems :
[u'7F8831F1-0D81-464E-A767-7577DF49AAA5', u'B9FE637A-5B97-4A52-BFDA-9244CEA65E23', u'7884BC95-6EF8-4447-BDE5-D19561718B29']
[]
#########################
##### IPMI SDR #####
3 identical systems :
[u'7F8831F1-0D81-464E-A767-7577DF49AAA5', u'B9FE637A-5B97-4A52-BFDA-9244CEA65E23', u'7884BC95-6EF8-4447-BDE5-D19561718B29']
[]
##################
##### Firmware #####
3 identical systems :
[u'7F8831F1-0D81-464E-A767-7577DF49AAA5', u'B9FE637A-5B97-4A52-BFDA-9244CEA65E23', u'7884BC95-6EF8-4447-BDE5-D19561718B29']
[(u'firmware', u'bios', u'date', u'01/01/2011'),
(u'firmware', u'bios', u'vendor', u'Seabios'),
(u'firmware', u'bios', u'version', u'0.5.1')]
##################
##### Memory Timing(RAM) #####
3 identical systems :
[u'7F8831F1-0D81-464E-A767-7577DF49AAA5', u'B9FE637A-5B97-4A52-BFDA-9244CEA65E23', u'7884BC95-6EF8-4447-BDE5-D19561718B29']
[]
############################
##### Network Interfaces #####
3 identical systems :
[u'7F8831F1-0D81-464E-A767-7577DF49AAA5', u'B9FE637A-5B97-4A52-BFDA-9244CEA65E23', u'7884BC95-6EF8-4447-BDE5-D19561718B29']
[(u'network', u'eth0', u'businfo', u'pci@0000:00:04.0'),
(u'network', u'eth0', u'busy-poll', u'off [fixed]'),
(u'network', u'eth0', u'driver', u'virtio_net'),
(u'network', u'eth0', u'fcoe-mtu', u'off [fixed]'),
(u'network', u'eth0', u'generic-receive-offload', u'on'),
(u'network', u'eth0', u'generic-segmentation-offload', u'on'),
(u'network', u'eth0', u'highdma', u'on [fixed]'),
(u'network', u'eth0', u'large-receive-offload', u'off [fixed]'),
(u'network', u'eth0', u'latency', u'0'),
(u'network', u'eth0', u'link', u'yes'),
(u'network', u'eth0', u'loopback', u'off [fixed]'),
(u'network', u'eth0', u'netns-local', u'off [fixed]'),
(u'network', u'eth0', u'ntuple-filters', u'off [fixed]'),
(u'network', u'eth0', u'receive-hashing', u'off [fixed]'),
(u'network', u'eth0', u'rx-all', u'off [fixed]'),
(u'network', u'eth0', u'rx-checksumming', u'on [fixed]'),
(u'network', u'eth0', u'rx-fcs', u'off [fixed]'),
(u'network', u'eth0', u'rx-vlan-filter', u'on [fixed]'),
(u'network', u'eth0', u'rx-vlan-offload', u'off [fixed]'),
(u'network', u'eth0', u'rx-vlan-stag-filter', u'off [fixed]'),
(u'network', u'eth0', u'rx-vlan-stag-hw-parse', u'off [fixed]'),
(u'network', u'eth0', u'scatter-gather', u'on'),
(u'network', u'eth0', u'scatter-gather/tx-scatter-gather', u'on'),
(u'network', u'eth0', u'scatter-gather/tx-scatter-gather-fraglist', u'on'),
(u'network', u'eth0', u'tcp-segmentation-offload', u'on'),
(u'network',
u'eth0',
u'tcp-segmentation-offload/tx-tcp-ecn-segmentation',
u'on'),
(u'network', u'eth0', u'tcp-segmentation-offload/tx-tcp-segmentation', u'on'),
(u'network',
u'eth0',
u'tcp-segmentation-offload/tx-tcp6-segmentation',
u'on'),
(u'network', u'eth0', u'tx-checksumming', u'on'),
(u'network',
u'eth0',
u'tx-checksumming/tx-checksum-fcoe-crc',
u'off [fixed]'),
(u'network', u'eth0', u'tx-checksumming/tx-checksum-ip-generic', u'on'),
(u'network', u'eth0', u'tx-checksumming/tx-checksum-ipv6', u'off [fixed]'),
(u'network', u'eth0', u'tx-checksumming/tx-checksum-sctp', u'off [fixed]'),
(u'network', u'eth0', u'tx-fcoe-segmentation', u'off [fixed]'),
(u'network', u'eth0', u'tx-gre-segmentation', u'off [fixed]'),
(u'network', u'eth0', u'tx-gso-robust', u'off [fixed]'),
(u'network', u'eth0', u'tx-ipip-segmentation', u'off [fixed]'),
(u'network', u'eth0', u'tx-lockless', u'off [fixed]'),
(u'network', u'eth0', u'tx-mpls-segmentation', u'off [fixed]'),
(u'network', u'eth0', u'tx-nocache-copy', u'on'),
(u'network', u'eth0', u'tx-sit-segmentation', u'off [fixed]'),
(u'network', u'eth0', u'tx-udp_tnl-segmentation', u'off [fixed]'),
(u'network', u'eth0', u'tx-vlan-offload', u'off [fixed]'),
(u'network', u'eth0', u'tx-vlan-stag-hw-insert', u'off [fixed]'),
(u'network', u'eth0', u'udp-fragmentation-offload', u'on'),
(u'network', u'eth0', u'vlan-challenged', u'off [fixed]')]
############################
##### Processors #####
1 identical systems :
[u'B9FE637A-5B97-4A52-BFDA-9244CEA65E23']
[(u'cpu', u'logical', u'number', u'2'),
(u'cpu', u'physical', u'number', u'2'),
(u'cpu',
u'physical_0',
u'flags',
u'fpu fpu_exception wp de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pse36 clflush mmx fxsr sse sse2 syscall nx x86-64 rep_good nopl pni cx16 hypervisor lahf_lm'),
(u'cpu', u'physical_0', u'frequency', u'2000000000'),
(u'cpu', u'physical_0', u'physid', u'401'),
(u'cpu', u'physical_0', u'product', u'QEMU Virtual CPU version 1.5.3'),
(u'cpu', u'physical_0', u'vendor', u'Intel Corp.'),
(u'cpu',
u'physical_1',
u'flags',
u'fpu fpu_exception wp de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pse36 clflush mmx fxsr sse sse2 syscall nx x86-64 rep_good nopl pni cx16 hypervisor lahf_lm'),
(u'cpu', u'physical_1', u'frequency', u'2000000000'),
(u'cpu', u'physical_1', u'physid', u'402'),
(u'cpu', u'physical_1', u'product', u'QEMU Virtual CPU version 1.5.3'),
(u'cpu', u'physical_1', u'vendor', u'Intel Corp.')]
2 identical systems :
[u'7F8831F1-0D81-464E-A767-7577DF49AAA5', u'7884BC95-6EF8-4447-BDE5-D19561718B29']
[(u'cpu', u'logical', u'number', u'1'),
(u'cpu', u'physical', u'number', u'1'),
(u'cpu',
u'physical_0',
u'flags',
u'fpu fpu_exception wp de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pse36 clflush mmx fxsr sse sse2 syscall nx x86-64 rep_good nopl pni cx16 hypervisor lahf_lm'),
(u'cpu', u'physical_0', u'frequency', u'2000000000'),
(u'cpu', u'physical_0', u'physid', u'401'),
(u'cpu', u'physical_0', u'product', u'QEMU Virtual CPU version 1.5.3'),
(u'cpu', u'physical_0', u'vendor', u'Intel Corp.')]
In the example above we have two nodes with a single CPU, and one with two CPUs.
* We can also look for performance outliers
::
$ ahc-report --outliers
Group 0 : Checking logical disks perf
standalone_randread_4k_KBps : INFO : sda : Group performance : min=45296.00, mean=53604.67, max=67923.00, stddev=12453.21
standalone_randread_4k_KBps : ERROR : sda : Group's variance is too important : 23.23% of 53604.67 whereas limit is set to 15.00%
standalone_randread_4k_KBps : ERROR : sda : Group performance : UNSTABLE
standalone_read_1M_IOps : INFO : sda : Group performance : min= 1199.00, mean= 1259.00, max= 1357.00, stddev= 85.58
standalone_read_1M_IOps : INFO : sda : Group performance = 1259.00 : CONSISTENT
standalone_randread_4k_IOps : INFO : sda : Group performance : min=11320.00, mean=13397.33, max=16977.00, stddev= 3113.39
standalone_randread_4k_IOps : ERROR : sda : Group's variance is too important : 23.24% of 13397.33 whereas limit is set to 15.00%
standalone_randread_4k_IOps : ERROR : sda : Group performance : UNSTABLE
standalone_read_1M_KBps : INFO : sda : Group performance : min=1231155.00, mean=1292799.67, max=1393152.00, stddev=87661.11
standalone_read_1M_KBps : INFO : sda : Group performance = 1292799.67 : CONSISTENT
Group 0 : Checking CPU perf
bogomips : INFO : logical_0 : Group performance : min= 4199.99, mean= 4199.99, max= 4199.99, stddev= 0.00
bogomips : INFO : logical_0 : Group performance = 4199.99 : CONSISTENT
bogomips : INFO : logical_1 : Group performance : min= 4199.99, mean= 4199.99, max= 4199.99, stddev= nan
bogomips : INFO : logical_1 : Group performance = 4199.99 : CONSISTENT
loops_per_sec : INFO : logical_0 : Group performance : min= 379.00, mean= 398.67, max= 418.00, stddev= 19.50
loops_per_sec : INFO : logical_0 : Group performance = 398.67 : CONSISTENT
loops_per_sec : INFO : logical_1 : Group performance : min= 423.00, mean= 423.00, max= 423.00, stddev= nan
loops_per_sec : INFO : logical_1 : Group performance = 423.00 : CONSISTENT
loops_per_sec : INFO : CPU Effi. : Group performance : min= 99.28, mean= inf, max= inf, stddev= nan
loops_per_sec : INFO : CPU Effi. : Group performance = inf % : CONSISTENT
Group 0 : Checking Memory perf
Memory benchmark 1K : INFO : logical_0 : Group performance : min= 1677.00, mean= 1698.33, max= 1739.00, stddev= 35.23
Memory benchmark 1K : INFO : logical_0 : Group performance = 1698.33 : CONSISTENT
Memory benchmark 1K : INFO : logical_1 : Group performance : min= 1666.00, mean= 1666.00, max= 1666.00, stddev= nan
Memory benchmark 1K : INFO : logical_1 : Group performance = 1666.00 : CONSISTENT
Memory benchmark 1K : INFO : Thread effi. : Group performance : min= 71.54, mean= 71.54, max= 71.54, stddev= nan
Memory benchmark 1K : INFO : Thread effi. : Group performance = 71.54 : CONSISTENT
Memory benchmark 1K : INFO : Forked Effi. : Group performance : min= 101.97, mean= 101.97, max= 101.97, stddev= nan
Memory benchmark 1K : INFO : Forked Effi. : Group performance = 101.97 % : CONSISTENT
Memory benchmark 4K : INFO : logical_0 : Group performance : min= 4262.00, mean= 4318.00, max= 4384.00, stddev= 61.61
Memory benchmark 4K : INFO : logical_0 : Group performance = 4318.00 : CONSISTENT
Memory benchmark 4K : INFO : logical_1 : Group performance : min= 4363.00, mean= 4363.00, max= 4363.00, stddev= nan
Memory benchmark 4K : INFO : logical_1 : Group performance = 4363.00 : CONSISTENT
Memory benchmark 4K : INFO : Thread effi. : Group performance : min= 77.75, mean= 77.75, max= 77.75, stddev= nan
Memory benchmark 4K : INFO : Thread effi. : Group performance = 77.75 : CONSISTENT
Memory benchmark 4K : INFO : Forked Effi. : Group performance : min= 95.98, mean= 95.98, max= 95.98, stddev= nan
Memory benchmark 4K : INFO : Forked Effi. : Group performance = 95.98 % : CONSISTENT
Memory benchmark 1M : INFO : logical_0 : Group performance : min= 7734.00, mean= 7779.00, max= 7833.00, stddev= 50.11
Memory benchmark 1M : INFO : logical_0 : Group performance = 7779.00 : CONSISTENT
Memory benchmark 1M : INFO : logical_1 : Group performance : min= 7811.00, mean= 7811.00, max= 7811.00, stddev= nan
Memory benchmark 1M : INFO : logical_1 : Group performance = 7811.00 : CONSISTENT
Memory benchmark 1M : INFO : Thread effi. : Group performance : min= 101.20, mean= 101.20, max= 101.20, stddev= nan
Memory benchmark 1M : INFO : Thread effi. : Group performance = 101.20 : CONSISTENT
Memory benchmark 1M : INFO : Forked Effi. : Group performance : min= 99.26, mean= 99.26, max= 99.26, stddev= nan
Memory benchmark 1M : INFO : Forked Effi. : Group performance = 99.26 % : CONSISTENT
Memory benchmark 16M : INFO : logical_0 : Group performance : min= 5986.00, mean= 6702.33, max= 7569.00, stddev= 802.14
Memory benchmark 16M : ERROR : logical_0 : Group's variance is too important : 11.97% of 6702.33 whereas limit is set to 7.00%
Memory benchmark 16M : ERROR : logical_0 : Group performance : UNSTABLE
Memory benchmark 16M : INFO : logical_1 : Group performance : min= 7030.00, mean= 7030.00, max= 7030.00, stddev= nan
Memory benchmark 16M : INFO : logical_1 : Group performance = 7030.00 : CONSISTENT
Memory benchmark 16M : INFO : Thread effi. : Group performance : min= 109.94, mean= 109.94, max= 109.94, stddev= nan
Memory benchmark 16M : INFO : Thread effi. : Group performance = 109.94 : CONSISTENT
Memory benchmark 16M : INFO : Forked Effi. : Group performance : min= 93.14, mean= 93.14, max= 93.14, stddev= nan
Memory benchmark 16M : INFO : Forked Effi. : Group performance = 93.14 % : CONSISTENT
Memory benchmark 128M : INFO : logical_0 : Group performance : min= 6021.00, mean= 6387.00, max= 7084.00, stddev= 603.87
Memory benchmark 128M : ERROR : logical_0 : Group's variance is too important : 9.45% of 6387.00 whereas limit is set to 7.00%
Memory benchmark 128M : ERROR : logical_0 : Group performance : UNSTABLE
Memory benchmark 128M : INFO : logical_1 : Group performance : min= 7089.00, mean= 7089.00, max= 7089.00, stddev= nan
Memory benchmark 128M : INFO : logical_1 : Group performance = 7089.00 : CONSISTENT
Memory benchmark 128M : INFO : Thread effi. : Group performance : min= 107.11, mean= 107.11, max= 107.11, stddev= nan
Memory benchmark 128M : INFO : Thread effi. : Group performance = 107.11 : CONSISTENT
Memory benchmark 128M : INFO : Forked Effi. : Group performance : min= 95.55, mean= 95.55, max= 95.55, stddev= nan
Memory benchmark 128M : INFO : Forked Effi. : Group performance = 95.55 % : CONSISTENT
Memory benchmark 256M : WARNING : Thread effi. : Benchmark not run on this group
Memory benchmark 256M : WARNING : Forked Effi. : Benchmark not run on this group
Memory benchmark 1G : INFO : logical_0 : Group performance : min= 6115.00, mean= 6519.67, max= 7155.00, stddev= 557.05
Memory benchmark 1G : ERROR : logical_0 : Group's variance is too important : 8.54% of 6519.67 whereas limit is set to 7.00%
Memory benchmark 1G : ERROR : logical_0 : Group performance : UNSTABLE
Memory benchmark 1G : INFO : logical_1 : Group performance : min= 7136.00, mean= 7136.00, max= 7136.00, stddev= nan
Memory benchmark 1G : INFO : logical_1 : Group performance = 7136.00 : CONSISTENT
Memory benchmark 1G : INFO : Thread effi. : Group performance : min= 104.29, mean= 104.29, max= 104.29, stddev= nan
Memory benchmark 1G : INFO : Thread effi. : Group performance = 104.29 : CONSISTENT
Memory benchmark 1G : INFO : Forked Effi. : Group performance : min= 98.98, mean= 98.98, max= 98.98, stddev= nan
Memory benchmark 1G : INFO : Forked Effi. : Group performance = 98.98 % : CONSISTENT
Memory benchmark 2G : INFO : logical_0 : Group performance : min= 6402.00, mean= 6724.33, max= 7021.00, stddev= 310.30
Memory benchmark 2G : INFO : logical_0 : Group performance = 6724.33 : CONSISTENT
Memory benchmark 2G : INFO : logical_1 : Group performance : min= 7167.00, mean= 7167.00, max= 7167.00, stddev= nan
Memory benchmark 2G : INFO : logical_1 : Group performance = 7167.00 : CONSISTENT
Memory benchmark 2G : WARNING : Thread effi. : Benchmark not run on this group
Memory benchmark 2G : WARNING : Forked Effi. : Benchmark not run on this group
The output above is from a virtual setup, so the benchmarks are not accurate.
However, we can see that the variance of the "standalone_randread_4k_KBps"
metric was above the threshold, so the group is marked as unstable.
Exclude outliers from deployment
--------------------------------
We will use the sample reports above to construct some matching rules
for our deployment. Refer to :doc:`profile_matching` for details.
* Add a rule to the **control.specs** file to match the system with two CPUs
::
[
('cpu', 'logical', 'number', 'ge(2)'),
('disk', '$disk', 'size', 'gt(4)'),
('network', '$eth', 'ipv4', 'network(192.0.2.0/24)'),
('memory', 'total', 'size', 'ge(4294967296)'),
]
* Add a rule to the **control.specs** file to exclude systems with
below-average disk performance from the control role (the ``gt(13397)``
threshold below comes from the mean ``standalone_randread_4k_IOps``
reported above)
::
[
('disk', '$disk', 'standalone_randread_4k_IOps', 'gt(13397)'),
('cpu', 'logical', 'number', 'ge(2)'),
('disk', '$disk', 'size', 'gt(4)'),
('network', '$eth', 'ipv4', 'network(192.0.2.0/24)'),
('memory', 'total', 'size', 'ge(4294967296)'),
]
* Now rerun the matching and proceed with remaining steps from
:doc:`profile_matching`.


@@ -0,0 +1,231 @@
Additional node configuration
=============================
It is possible to enable additional configuration during one of the
following deployment phases:
* firstboot - run once config (performed by cloud-init)
* post-deploy - run after the services have been deployed and configured
.. note::
This documentation assumes some knowledge of heat HOT_ template
syntax, and makes use of heat environment_ files. See the upstream
heat documentation_ for further information.
.. _HOT: http://docs.openstack.org/developer/heat/template_guide/hot_guide.html
.. _environment: http://docs.openstack.org/developer/heat/template_guide/environment.html
.. _documentation: http://docs.openstack.org/developer/heat/template_guide/index.html
Firstboot extra configuration
-----------------------------
Firstboot configuration is optional, and is performed on *all* nodes on initial
deployment.
Any configuration possible via cloud-init may be performed at this point,
either by applying cloud-config yaml or running arbitrary additional
scripts.
The heat templates used for deployment provide the `OS::TripleO::NodeUserData`
resource as the interface to enable this configuration. A basic example of its
usage is provided below, followed by some notes related to real-world usage.
The script snippet below shows how to create a simple example containing two
scripts, combined via the MultipartMime_ resource::
mkdir firstboot
cat > firstboot/one_two.yaml << EOF
heat_template_version: 2014-10-16
resources:
userdata:
type: OS::Heat::MultipartMime
properties:
parts:
- config: {get_resource: one_config}
- config: {get_resource: two_config}
one_config:
type: OS::Heat::SoftwareConfig
properties:
config: |
#!/bin/bash
echo "one" > /tmp/one
two_config:
type: OS::Heat::SoftwareConfig
properties:
config: |
#!/bin/bash
echo "two" > /tmp/two
outputs:
OS::stack_id:
value: {get_resource: userdata}
EOF
.. _MultipartMime: http://docs.openstack.org/developer/heat/template_guide/openstack.html#OS::Heat::MultipartMime
.. note::
The stack must expose an `OS::stack_id` output which references an
OS::Heat::MultipartMime resource.
This template is then mapped to the `OS::TripleO::NodeUserData` resource type
via a heat environment file::
cat > userdata_env.yaml << EOF
resource_registry:
OS::TripleO::NodeUserData: firstboot/one_two.yaml
EOF
You may then deploy your overcloud referencing the additional environment file::
openstack overcloud deploy --templates -e userdata_env.yaml
.. note::
The userdata is applied to *all* nodes in the deployment. If you need
role-specific logic, the userdata scripts can contain conditionals which use,
e.g., the node hostname to determine the role; see the sketch below.
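For example, a firstboot script that applies extra configuration only on
controller nodes might look like the following sketch (the ``controller``
hostname substring is an assumption about your node naming)::

    #!/bin/bash
    if hostname | grep -qi controller; then
        echo "controller-only configuration" > /tmp/controller_only
    fi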
.. note::
OS::TripleO::NodeUserData is only applied on initial node deployment,
not on any subsequent stack update, because cloud-init only processes the
nova user-data once, on first boot.
For a more complete example, which creates an additional user and configures
SSH keys by accessing the nova metadata server, see
/usr/share/openstack-tripleo-heat-templates/firstboot/userdata_example.yaml
on the undercloud node or the tripleo-heat-templates_ repo.
.. _tripleo-heat-templates: https://git.openstack.org/openstack/tripleo-heat-templates/blob/mgt-master/firstboot/userdata_example.yaml
Post-Deploy extra configuration
-------------------------------
Post-deploy additional configuration is possible via the
`OS::TripleO::NodeExtraConfigPost` interface - this allows a heat template
to be specified which performs additional configuration using standard
heat SoftwareConfig_ resources.
.. _SoftwareConfig: http://docs.openstack.org/developer/heat/template_guide/software_deployment.html
.. note::
The `OS::TripleO::NodeExtraConfigPost` interface applies configuration to
*all* nodes; there is currently no per-role NodeExtraConfigPost interface.
Below is an example of a post-deployment configuration template::
mkdir -p extraconfig/post-deploy/
cat > extraconfig/post-deploy/example.yaml << EOF
heat_template_version: 2014-10-16
parameters:
servers:
type: json
# Optional implementation specific parameters
some_extraparam:
type: string
resources:
ExtraConfig:
type: OS::Heat::SoftwareConfig
properties:
group: script
config:
str_replace:
template: |
#!/bin/sh
echo "extra _APARAM_" > /root/extra
parameters:
_APARAM_: {get_param: some_extraparam}
ExtraDeployments:
type: OS::Heat::SoftwareDeployments
properties:
servers: {get_param: servers}
config: {get_resource: ExtraConfig}
actions: ['CREATE'] # Only do this on CREATE
EOF
The "servers" parameter must be specified in all NodeExtraConfigPost
templates, this is the server list to apply the configuration to,
and is provided by the parent template.
Optionally, you may define additional parameters which are consumed by the
implementation. These may then be provided via parameter_defaults in the
environment which enables the configuration.
.. note::
If the parameter_defaults approach is used, care must be taken to avoid
unintended reuse of parameter names between multiple templates, because
parameter_defaults is applied globally.
The "actions" property of the `OS::Heat::SoftwareDeployments` resource may be
used to specify when the configuration should be applied, e.g only on CREATE,
only on DELETE etc. If this is ommitted, the heat default is to apply the
config on CREATE and UPDATE, e.g on initial deployment and every subsequent
update.
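For example, to re-apply the configuration on every stack update as well as
on initial creation, the deployment resource from the example above could be
written like this (a sketch; omitting the "actions" property entirely has the
same effect)::

    ExtraDeployments:
      type: OS::Heat::SoftwareDeployments
      properties:
        servers: {get_param: servers}
        config: {get_resource: ExtraConfig}
        actions: ['CREATE', 'UPDATE'] # Apply on create and on every update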
The extra config may be enabled via an environment file::
cat > post_config_env.yaml << EOF
resource_registry:
OS::TripleO::NodeExtraConfigPost: extraconfig/post-deploy/example.yaml
parameter_defaults:
some_extraparam: avalue123
EOF
You may then deploy your overcloud referencing the additional environment file::
openstack overcloud deploy --templates -e post_config_env.yaml
Making configuration changes
----------------------------
If you want to make a configuration change, either prior to initial deployment,
or subsequently via an update, you can update hiera data files which
are used for passing configuration values to Puppet. [#]_ Hiera data files
are part of the heat templates and are located in the `puppet/hieradata`
directory. There is a file for each role (controller, compute, ...) in this
directory, so different configuration can be set for different roles. Put
your configuration changes into the file which corresponds to the role of
the nodes you want to change. For example, to set the reserved host memory
on all compute nodes::
echo "nova::compute::reserved_host_memory: some_value" >> puppet/hieradata/compute.yaml
And then update your overcloud::
openstack overcloud deploy --templates "custom templates dir"
.. note::
If you set a configuration value for a puppet class which is not being
included yet, make sure you include it in one of the `puppet/manifests`
files. For example, if you want to change the CPU allocation ratio, update
the controller hieradata::
echo "nova::scheduler::filter::cpu_allocation_ratio: '11.0'" >> puppet/hieradata/controller.yaml
Then include the `nova::scheduler::filter` class in
`puppet/manifests/overcloud_controller_pacemaker.pp`, as sketched below.
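A minimal sketch of that include (the placement within the manifest is
illustrative; adapt it to the structure of the existing file)::

    # puppet/manifests/overcloud_controller_pacemaker.pp
    # Pull in the class whose parameters are now set via hieradata.
    include ::nova::scheduler::filter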
.. note::
It's best to copy default heat templates to a custom location before making
any changes. See :ref:`custom-template-location`.
.. rubric:: Footnotes
.. [#] Note that this is a temporary workaround, and a `future version`_ is
expected to provide interfaces that enable specifying extra hieradata
without modifying any templates.
.. _future version: https://bugzilla.redhat.com/show_bug.cgi?id=1243971


@ -0,0 +1,189 @@
Managing Plans and Roles
========================
This section provides a description of the Plan and Role concepts and the
operations available to each from the command line.
To learn how to delete and reload all plans and roles, skip to the
`Reload the deployment plan and all deployment roles`_ section below.
Understanding Roles & Plans
---------------------------
Roles represent the functionality that will be served by a node, for example a
Compute or Storage node. Plans define the full deployment and consist of one
or more roles and their related Parameters.
Roles
-----
The Roles included in the Tuskar API can be viewed with the following command::
$ openstack management role list
+--------------------------------------+----------------+---------+---------------------------------------------------+
| uuid | name | version | description |
+--------------------------------------+----------------+---------+---------------------------------------------------+
| 6830e747-6d43-44bf-99b6-97a7145c58c6 | Swift-Storage | 1 | OpenStack swift storage node configured by Puppet |
| 96faca30-8c05-48b2-b9c8-d01cd2d0dc47 | Compute | 1 | OpenStack hypervisor node configured via Puppet. |
| b0c87438-871d-46e2-9fee-4b42c65f3c45 | Ceph-Storage | 1 | OpenStack ceph storage node configured by Puppet |
| d5ffd638-8df3-4ed6-8925-32e8891dae25 | Cinder-Storage | 1 | OpenStack cinder storage configured by Puppet |
| e3e09fa8-00c8-4870-8f2b-de67a9faa5ab | Controller | 1 | OpenStack controller node configured by Puppet. |
+--------------------------------------+----------------+---------+---------------------------------------------------+
Plans
-----
By default Tuskar comes with one Plan, named overcloud, with all of the above
Roles added to it. However, only the Controller and Compute roles have their
scale value set and will be deployed by default.
The Plan can be viewed with the following command::
$ openstack management plan list
+--------------------------------------+-----------+-------------+------------------------------------------------------------------+
| uuid | name | description | roles |
+--------------------------------------+-----------+-------------+------------------------------------------------------------------+
| eac9c4cc-9d85-4c6a-85bb-e1f38afcff7e | overcloud | None | Compute, Ceph-Storage, Cinder-Storage, Controller, Swift-Storage |
+--------------------------------------+-----------+-------------+------------------------------------------------------------------+
Once you have the Plan UUID you can view more details about the plan::
$ openstack management plan show eac9c4cc-9d85-4c6a-85bb-e1f38afcff7e
+-------------+------------------------------------------------------------------+
| Field | Value |
+-------------+------------------------------------------------------------------+
| created_at | 2015-07-13T10:09:14 |
| description | None |
| name | overcloud |
| parameters | Parameter output suppressed. Use --long to display them. |
| roles | Compute, Ceph-Storage, Cinder-Storage, Controller, Swift-Storage |
| updated_at | None |
| uuid | eac9c4cc-9d85-4c6a-85bb-e1f38afcff7e |
+-------------+------------------------------------------------------------------+
The command can be repeated with ``--long`` appended to the end for a
detailed output of all the available Plan parameters.
Adding and Removing Roles
~~~~~~~~~~~~~~~~~~~~~~~~~
Roles can be removed from the plan like this::
$ openstack management plan remove role "[plan-uuid]" "[role-uuid]"
And similarly they can be added back to the plan::
$ openstack management plan add role "[plan-uuid]" "[role-uuid]"
The output of both of these commands is a summary of the plan, reflecting
the role being added or removed; see the example below.
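For example, using the UUIDs from the listings above to remove and then
re-add the Ceph-Storage role::

    $ openstack management plan remove role "eac9c4cc-9d85-4c6a-85bb-e1f38afcff7e" "b0c87438-871d-46e2-9fee-4b42c65f3c45"
    $ openstack management plan add role "eac9c4cc-9d85-4c6a-85bb-e1f38afcff7e" "b0c87438-871d-46e2-9fee-4b42c65f3c45"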
Assigning Flavors to Roles
~~~~~~~~~~~~~~~~~~~~~~~~~~
Roles can have a flavor assigned with the following command::
$ openstack management plan set "[plan-uuid]" -F Compute-1=baremetal
In this example we are assigning the ``baremetal`` flavor to the Compute role.
The role version must be included when doing this, making the syntax
``-F [role-name]-[role-version]=[flavor-name]``.
Setting Scale values
~~~~~~~~~~~~~~~~~~~~
Similar to assigning Flavors, scaling an individual role can be done like
this::
$ openstack management plan set "[plan-uuid]" -S Compute-1=3
In this example we are scaling the Compute role to three nodes.
Setting other parameters
~~~~~~~~~~~~~~~~~~~~~~~~
Arbitrary parameters can be set for Roles. To do this, use the following
syntax::
$ openstack management plan set "[plan-uuid]" -P Compute-1::Image=compute
The syntax is ``-P [role-name]-[role-version]::[parameter-name]=[value]``.
The above example assigns the compute image to the Compute role; for this to
work, a compute image needs to be uploaded to glance.
Downloading a Plan
------------------
Plans can be downloaded from Tuskar. The result is a set of Heat templates
that can then be manipulated or passed manually to the Heat client::
$ openstack management plan download "[plan-uuid]" -O path/to/output
Once you have downloaded the templates from Tuskar, they can be sent directly
to Heat with this command::
$ heat stack-create overcloud \
-f path/to/output/plan.yaml \
-e path/to/output/environment.yaml \
-t 240;
Reload the deployment plan and all deployment roles
---------------------------------------------------
You may wish to recreate the overcloud deployment plan and deployment roles
from scratch, for example to work with a newer version of the tripleo heat
templates from which the deployment roles are created.
.. note::
The steps documented below will completely remove the current
overcloud deployment plan and deployment roles including any overridden
and saved deployment parameters.
Delete the overcloud deployment plan and any roles
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Get the current deployment plan uuid and specify it for deletion::
openstack management plan list
openstack management plan delete PLAN_UUID
Now you can safely delete all deployment roles::
roles=`tuskar role-list | grep OpenStack | awk '{print $2}'`
tuskar-delete-roles --config-file /etc/tuskar/tuskar.conf --uuids $roles
Recreate the deployment roles
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Roles are reloaded by running the tuskar-db-sync script from the tuskar
tripleo image element::
/usr/share/tripleo-image-elements/tuskar/os-refresh-config/configure.d/90-tuskar-db-sync
This script defaults to using `/usr/share/openstack-tripleo-heat-templates/`
as the path to the local tripleo heat templates from which to define the roles.
This can be overridden by setting the TUSKAR_ROLE_DIRECTORY environment
variable::
TUSKAR_ROLE_DIRECTORY=/foo/ /usr/share/tripleo-image-elements/tuskar/os-refresh-config/configure.d/90-tuskar-db-sync
Recreate the deployment plan and associate the new roles
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Create a new deployment plan and associate the newly created roles to it. This
is achieved by running the plan-add-roles script from the tuskar tripleo image
element::
/usr/share/tripleo-image-elements/tuskar/os-refresh-config/post-configure.d/101-plan-add-roles


@ -0,0 +1,680 @@
Configuring Network Isolation
=============================
Introduction
------------
|project| provides configuration of isolated overcloud networks. Using
this approach it is possible to host specific types of network traffic
(tenant, storage, API/RPC, etc.) on isolated networks. This allows network
traffic to be assigned to specific network interfaces or bonds. Using
bonds provides fault tolerance, and may provide load sharing, depending on the
bonding protocols used. When isolated networks are configured, the OpenStack
services will be configured to use the isolated networks. If no isolated
networks are configured, all services run on the provisioning network.
There are two parts to the network configuration: the parameters that apply
to the network as a whole, and the templates which configure the network
interfaces on the deployed hosts.
Architecture
------------
The following VLANs will be used in the final deployment:
* IPMI* (IPMI System controller, iLO, DRAC)
* Provisioning* (Undercloud control plane for deployment and management)
* Internal API (OpenStack internal API, RPC, and DB)
* Tenant (Tenant tunneling network for GRE/VXLAN networks)
* Storage (Access to storage resources from Compute and Controller nodes)
* Storage Management (Replication, Ceph back-end services)
* External (Public OpenStack APIs, Horizon dashboard, optionally floating IPs)
.. note::
Networks marked with '*' are usually native VLANs, others may be trunked.
Additionally, if floating IPs will be hosted on a separate VLAN, that VLAN will
need to be trunked to the controller hosts. It is not included in the
network configuration steps in this document; the VLAN will be added as a
post-configuration step.
The External network should have a gateway router address. This will be used
in the subnet configuration of the network environment.
The Provisioning network will usually be delivered on a dedicated interface.
By default, PXE boot must occur on the native VLAN, although some system
controllers will allow booting from a VLAN. The Provisioning interface is
also used by the Compute and Storage nodes as their default gateway, in order
to reach DNS and NTP servers and for system maintenance. The Undercloud can be used
as a default gateway, but in that case all traffic will be behind an IP
masquerade NAT, and will not be reachable from the rest of the network. The
Undercloud is also a single point of failure for the overcloud default route.
If there is an external gateway on a router device on the Provisioning network,
the Undercloud Neutron DHCP server can offer that instead::
neutron subnet-show # Copy the UUID from the provisioning subnet
neutron subnet-update <UUID> --gateway_ip <IP_ADDRESS>
Often, the number of VLANs will exceed the number of physical Ethernet ports,
so some VLANs are delivered with VLAN tagging to separate the traffic. On an
Ethernet bond, typically all VLANs are trunked, and there is no traffic on the
native VLAN (native VLANs on bonds are supported, but will require customizing
the NIC templates).
The networks are connected to the roles as follows:
Controller:
* Provisioning
* Internal API
* Storage
* Storage Management
* External
Compute:
* Provisioning
* Internal API
* Storage
* Tenant
Ceph Storage:
* Provisioning
* Storage
* Storage Management
Cinder Storage:
* Provisioning
* Internal API
* Storage
* Storage Management
Swift Storage:
* Provisioning
* Internal API
* Storage
* Storage Management
Workflow
--------
The procedure for enabling network isolation is this:
1. Create network environment file (e.g. /home/stack/network-environment.yaml)
2. Edit IP subnets and VLANs in the environment file to match local environment
3. Make a copy of the appropriate sample network interface configurations
4. Edit the network interface configurations to match local environment
5. Deploy overcloud with the proper parameters to include network isolation
The next section will walk through the elements that need to be added to
the network-environment.yaml to enable network isolation. The sections
after that deal with configuring the network interface templates. The final step
will deploy the overcloud with network isolation and a custom environment.
Create Network Environment File
-------------------------------
The environment file will describe the network environment and will point to
the network interface configuration files to use for the overcloud nodes.
The subnets that will be used for the isolated networks need to be defined,
along with the IP address ranges that should be used for IP assignment. These
values must be customized for the local environment.
It is important for the ExternalInterfaceDefaultRoute to be reachable on the
subnet that is used for ExternalNetCidr. This will allow the OpenStack Public
APIs and the Horizon Dashboard to be reachable. Without a valid default route,
the post-deployment steps cannot be performed.
.. note::
The ``resource_registry`` section of the network-environment.yaml contains
pointers to the network interface configurations for the deployed roles.
These files must exist at the path referenced here, and will be copied
later in this guide.
Example::
resource_registry:
OS::TripleO::BlockStorage::Net::SoftwareConfig: /home/stack/nic-configs/cinder-storage.yaml
OS::TripleO::Compute::Net::SoftwareConfig: /home/stack/nic-configs/compute.yaml
OS::TripleO::Controller::Net::SoftwareConfig: /home/stack/nic-configs/controller.yaml
OS::TripleO::ObjectStorage::Net::SoftwareConfig: /home/stack/nic-configs/swift-storage.yaml
OS::TripleO::CephStorage::Net::SoftwareConfig: /home/stack/nic-configs/ceph-storage.yaml
parameters:
# Set to "br-ex" if using floating IPs on native VLAN on bridge br-ex
Controller-1::NeutronExternalNetworkBridge: "''"
parameter_defaults:
# Customize all these values to match the local environment
InternalApiNetCidr: 172.17.0.0/24
StorageNetCidr: 172.18.0.0/24
StorageMgmtNetCidr: 172.19.0.0/24
TenantNetCidr: 172.16.0.0/24
ExternalNetCidr: 10.1.2.0/24
InternalApiAllocationPools: [{'start': '172.17.0.10', 'end': '172.17.0.200'}]
StorageAllocationPools: [{'start': '172.18.0.10', 'end': '172.18.0.200'}]
StorageMgmtAllocationPools: [{'start': '172.19.0.10', 'end': '172.19.0.200'}]
TenantAllocationPools: [{'start': '172.16.0.10', 'end': '172.16.0.200'}]
# Use an External allocation pool which will leave room for floating IPs
ExternalAllocationPools: [{'start': '10.1.2.10', 'end': '10.1.2.50'}]
InternalApiNetworkVlanID: 201
StorageNetworkVlanID: 202
StorageMgmtNetworkVlanID: 203
TenantNetworkVlanID: 204
ExternalNetworkVlanID: 100
# Set to the router gateway on the external network
ExternalInterfaceDefaultRoute: 10.1.2.1
# Set to "br-ex" if using floating IPs on native VLAN on bridge br-ex
NeutronExternalNetworkBridge: "''"
# Customize bonding options if required (will be ignored if bonds are not used)
BondInterfaceOvsOptions:
"bond_mode=balance-tcp lacp=active other-config:lacp-fallback-ab=true"
Creating Custom Interface Templates
-----------------------------------
In order to configure the network interfaces on each node, the network
interface templates may need to be customized.
Start by copying the configurations from one of the example directories. The
first example copies the templates which include network bonding. The second
example copies the templates which use a single network interface with
multiple VLANs (this configuration is mostly intended for testing).
To copy the bonded example interface configurations, run::
$ cp /usr/share/openstack-tripleo-heat-templates/network/config/bond-with-vlans/* ~/nic-configs
To copy the single NIC with VLANs example interface configurations, run::
$ cp /usr/share/openstack-tripleo-heat-templates/network/config/single-nic-vlans/* ~/nic-configs
Or, if you have custom NIC templates from another source, copy them to the location
referenced in the ``resource_registry`` section of the environment file.
Customizing the Interface Templates
-----------------------------------
The following example configures a bond on interfaces 3 and 4 of a system
with 4 interfaces. This example is based on the controller template from the
bond-with-vlans sample templates, but the bond has been placed on nic3 and nic4
instead of nic2 and nic3. The other roles will have a similar configuration,
but will have only a subset of the networks attached.
.. note::
The nic1, nic2... abstraction considers only network interfaces which are
connected to an Ethernet switch. If interfaces 1 and 4 are the only
interfaces which are plugged in, they will be referred to as nic1 and nic2.
Example::
heat_template_version: 2015-04-30
description: >
Software Config to drive os-net-config with 2 bonded nics on a bridge
with a VLANs attached for the controller role.
parameters:
ExternalIpSubnet:
default: ''
description: IP address/subnet on the external network
type: string
InternalApiIpSubnet:
default: ''
description: IP address/subnet on the internal API network
type: string
StorageIpSubnet:
default: ''
description: IP address/subnet on the storage network
type: string
StorageMgmtIpSubnet:
default: ''
description: IP address/subnet on the storage mgmt network
type: string
TenantIpSubnet:
default: ''
description: IP address/subnet on the tenant network
type: string
BondInterfaceOvsOptions:
default: ''
description: The ovs_options string for the bond interface. Set things like
lacp=active and/or bond_mode=balance-slb using this option.
type: string
ExternalNetworkVlanID:
default: 10
description: Vlan ID for the external network traffic.
type: number
InternalApiNetworkVlanID:
default: 20
description: Vlan ID for the internal_api network traffic.
type: number
StorageNetworkVlanID:
default: 30
description: Vlan ID for the storage network traffic.
type: number
StorageMgmtNetworkVlanID:
default: 40
description: Vlan ID for the storage mgmt network traffic.
type: number
TenantNetworkVlanID:
default: 50
description: Vlan ID for the tenant network traffic.
type: number
ExternalInterfaceDefaultRoute:
default: '10.0.0.1'
description: Default route for the external network.
type: string
resources:
OsNetConfigImpl:
type: OS::Heat::StructuredConfig
properties:
group: os-apply-config
config:
os_net_config:
network_config:
-
type: ovs_bridge
name: {get_input: bridge_name}
members:
-
type: ovs_bond
name: bond1
ovs_options: {get_param: BondInterfaceOvsOptions}
members:
-
type: interface
name: nic3
primary: true
-
type: interface
name: nic4
-
type: vlan
device: bond1
vlan_id: {get_param: ExternalNetworkVlanID}
addresses:
-
ip_netmask: {get_param: ExternalIpSubnet}
routes:
-
ip_netmask: 0.0.0.0/0
next_hop: {get_param: ExternalInterfaceDefaultRoute}
-
type: vlan
device: bond1
vlan_id: {get_param: InternalApiNetworkVlanID}
addresses:
-
ip_netmask: {get_param: InternalApiIpSubnet}
-
type: vlan
device: bond1
vlan_id: {get_param: StorageNetworkVlanID}
addresses:
-
ip_netmask: {get_param: StorageIpSubnet}
-
type: vlan
device: bond1
vlan_id: {get_param: StorageMgmtNetworkVlanID}
addresses:
-
ip_netmask: {get_param: StorageMgmtIpSubnet}
-
type: vlan
device: bond1
vlan_id: {get_param: TenantNetworkVlanID}
addresses:
-
ip_netmask: {get_param: TenantIpSubnet}
outputs:
OS::stack_id:
description: The OsNetConfigImpl resource.
value: {get_resource: OsNetConfigImpl}
Configuring Interfaces
----------------------
The individual interfaces may need to be modified. As an example, below are
the modifications that would be required to use the second NIC to connect to
an infrastructure network with DHCP addresses, and to use the third and fourth
NICs for the bond:
Example::
network_config:
# Add a DHCP infrastructure network to nic2
-
type: interface
name: nic2
use_dhcp: true
defroute: false
-
type: ovs_bridge
name: br-bond
members:
-
type: ovs_bond
name: bond1
ovs_options: {get_param: BondInterfaceOvsOptions}
members:
# Modify bond NICs to use nic3 and nic4
-
type: interface
name: nic3
primary: true
-
type: interface
name: nic4
When using numbered interfaces ("nic1", "nic2", etc.) instead of named
interfaces ("eth0", "eno2", etc.), the network interfaces of hosts within
a role do not have to be exactly the same. For instance, one host may have
interfaces em1 and em2, while another has eno1 and eno2, but both hosts' NICs
can be referred to as nic1 and nic2.
The numbered NIC scheme only takes into account the interfaces that are live
(have a cable attached to the switch). So if you have some hosts with 4
interfaces, and some with 6, you should use nic1-nic4 and only plug in 4
cables on each host.
Configuring Routes and Default Routes
-------------------------------------
There are two ways that a host may have its default routes set. If the interface
is using DHCP, and the DHCP server offers a gateway address, the system will
install a default route for that gateway. Otherwise, a default route may be set
manually on an interface with a static IP.
Although the Linux kernel supports multiple default gateways, it will only use
the one with the lowest metric. If there are multiple DHCP interfaces, this can
result in an unpredictable default gateway. In this case, it is recommended
that defroute=no be set for the interfaces other than the one where we want
the default route. In the example below, we want a DHCP interface (NIC 2) to
be the default route (rather than the Provisioning interface), so we disable
the default route on the Provisioning interface:
Example::
# No default route on the Provisioning network
-
type: interface
name: nic1
use_dhcp: true
defroute: no
# Instead use this DHCP infrastructure VLAN as the default route
-
type: interface
name: nic2
use_dhcp: true
To set a static route on an interface with a static IP, specify a route to the
subnet. For instance, here is a hypothetical route to the 10.1.2.0/24 subnet
via the gateway at 172.17.0.1 on the Internal API network:
Example::
-
type: vlan
device: bond1
vlan_id: {get_param: InternalApiNetworkVlanID}
addresses:
-
ip_netmask: {get_param: InternalApiIpSubnet}
routes:
-
ip_netmask: 10.1.2.0/24
next_hop: 172.17.0.1
Using a Dedicated Interface For Tenant VLANs
--------------------------------------------
When using a dedicated interface or bond for tenant VLANs, a bridge must be
created. Neutron will create OVS ports on that bridge with the VLAN tags for the
provider VLANs. For example, to use NIC 4 as a dedicated interface for tenant
VLANs, you would add the following to the Controller and Compute templates:
Example::
-
type: ovs_bridge
name: br-vlan
members:
-
type: interface
name: nic4
primary: true
A similar configuration may be used to define an interface or a bridge that
will be used for Provider VLANs. Provider VLANs are external networks which
are connected directly to the Compute hosts. VMs may be attached directly to
Provider networks to provide access to datacenter resources outside the cloud.
Using the Native VLAN for Floating IPs
--------------------------------------
By default, Neutron expects the floating IP network to be delivered
on a tagged VLAN. If the floating IP network will use the native VLAN, then we
need to tell Neutron to put the floating IPs directly on the ``br-ex`` bridge.
The value must be set in both of these parameters in the parameters section:
Example::
parameters:
# Set to "br-ex" when using floating IPs on the native VLAN
Controller-1::NeutronExternalNetworkBridge: "''"
parameter_defaults:
# Set to "br-ex" when using floating IPs on the native VLAN
NeutronExternalNetworkBridge: "''"
The next section contains the changes to the NIC config that need to happen
to put the External network on the native VLAN (the External network may be
used for floating IPs in addition to the Horizon dashboard and Public APIs).
Using the Native VLAN on a Trunked Interface
--------------------------------------------
If a trunked interface or bond has a network on the native VLAN, then the IP
address will be assigned directly to the bridge and there will be no VLAN
interface. If the native VLAN is used for the External network, make sure to
set the NeutronExternalNetworkBridge parameters to "br-ex" instead of "''"
in the ``network-environment.yaml``.
For example, if the external network is on the native VLAN, the bond
configuration would look like this:
Example::
network_config:
-
type: ovs_bridge
name: {get_input: bridge_name}
addresses:
-
ip_netmask: {get_param: ExternalIpSubnet}
routes:
-
ip_netmask: 0.0.0.0/0
next_hop: {get_param: ExternalInterfaceDefaultRoute}
members:
-
type: ovs_bond
name: bond1
ovs_options: {get_param: BondInterfaceOvsOptions}
members:
-
type: interface
name: nic3
primary: true
-
type: interface
name: nic4
.. note::
When moving the address (and possibly route) statements onto the bridge, be
sure to remove the corresponding VLAN interface from the bridge. Make sure to
make the changes to all applicable roles. The External network is only on the
controllers, so only the controller template needs to be changed. The Storage
network, on the other hand, is attached to all roles, so if the Storage network
were on the native VLAN, all roles would need to be edited.
Configuring Jumbo Frames
------------------------
The Maximum Transmission Unit (MTU) setting determines the maximum amount of
data that can be transmitted by a single Ethernet frame. Using a larger value
can result in less overhead, since each frame adds data in the form of a
header. The default value is 1500, and using a value higher than that will
require the switch port to be configured to support jumbo frames. Most switches
support an MTU of at least 9000, but many are configured for 1500 by default.
The MTU of a VLAN cannot exceed the MTU of the physical interface. Make sure to
include the MTU value on the bond and/or interface.
Storage, Storage Management, Internal API, and Tenant networking can all
benefit from jumbo frames. In testing, tenant networking throughput was
over 300% greater when using jumbo frames in conjunction with VXLAN tunnels.
.. note::
It is recommended that the Provisioning interface, External interface, and
any floating IP interfaces be left at the default MTU of 1500. Connectivity
problems are likely to occur otherwise.
Example::
-
type: ovs_bond
name: bond1
mtu: 9000
ovs_options: {get_param: BondInterfaceOvsOptions}
members:
-
type: interface
name: nic3
mtu: 9000
primary: true
-
type: interface
name: nic4
mtu: 9000
-
# The external interface should stay at default
type: vlan
device: bond1
vlan_id: {get_param: ExternalNetworkVlanID}
addresses:
-
ip_netmask: {get_param: ExternalIpSubnet}
routes:
-
ip_netmask: 0.0.0.0/0
next_hop: {get_param: ExternalInterfaceDefaultRoute}
-
# MTU 9000 for Internal API, Storage, and Storage Management
type: vlan
device: bond1
mtu: 9000
vlan_id: {get_param: InternalApiNetworkVlanID}
addresses:
-
ip_netmask: {get_param: InternalApiIpSubnet}
Assigning OpenStack Services to Isolated Networks
-------------------------------------------------
Each OpenStack service is assigned to a network using a default mapping. The
service will be bound to the host IP within the named network on each host.
.. note::
The services will be assigned to the networks according to the
``ServiceNetMap`` in ``overcloud-without-mergepy.yaml``. Unless these
defaults need to be overridden, the ServiceNetMap does not need to be defined
in the environment file.
A service can be assigned to an alternate network by overriding the service to
network map in an environment file. The defaults should generally work, but
can be overridden. To override these values, add the ServiceNetMap to the
``parameter_defaults`` section of the network environment.
Example::
parameter_defaults:
ServiceNetMap:
NeutronTenantNetwork: tenant
CeilometerApiNetwork: internal_api
MongoDbNetwork: internal_api
CinderApiNetwork: internal_api
CinderIscsiNetwork: storage
GlanceApiNetwork: storage
GlanceRegistryNetwork: internal_api
KeystoneAdminApiNetwork: internal_api
KeystonePublicApiNetwork: internal_api
NeutronApiNetwork: internal_api
HeatApiNetwork: internal_api
NovaApiNetwork: internal_api
NovaMetadataNetwork: internal_api
NovaVncProxyNetwork: internal_api
SwiftMgmtNetwork: storage_mgmt
SwiftProxyNetwork: storage
HorizonNetwork: internal_api
MemcachedNetwork: internal_api
RabbitMqNetwork: internal_api
RedisNetwork: internal_api
MysqlNetwork: internal_api
CephClusterNetwork: storage_mgmt
CephPublicNetwork: storage
# Define which network will be used for hostname resolution
ControllerHostnameResolveNetwork: internal_api
ComputeHostnameResolveNetwork: internal_api
BlockStorageHostnameResolveNetwork: internal_api
ObjectStorageHostnameResolveNetwork: internal_api
CephStorageHostnameResolveNetwork: storage
.. note::
If an entry in the ServiceNetMap points to a network which does not exist,
that service will be placed on the Provisioning network. To avoid that,
make sure that each entry points to a valid network.
Deploying the Overcloud With Network Isolation
----------------------------------------------
When deploying with network isolation, you should specify the NTP server for the
overcloud nodes. If the clocks are not synchronized, some OpenStack services may
be unable to start, especially when using HA. The NTP server should be reachable
from both the External and Provisioning subnets. The neutron network type should
be specified, along with the tunneling or VLAN parameters.
To deploy with network isolation and include the network environment file, use
the ``-e`` parameter with the ``openstack overcloud deploy`` command. For
instance, to deploy with VXLAN mode, the deployment command might be::
openstack overcloud deploy -e /home/stack/network-environment.yaml \
-e /usr/share/openstack-tripleo-heat-templates/environments/network-isolation.yaml \
--plan openstack --ntp-server pool.ntp.org --neutron-network-type vxlan \
--neutron-tunnel-types vxlan
To deploy with VLAN mode, you should specify the range of VLANs that will be
used for tenant networks::
openstack overcloud deploy -e /home/stack/network-environment.yaml \
-e /usr/share/openstack-tripleo-heat-templates/environments/network-isolation.yaml \
--plan openstack --ntp-server pool.ntp.org --neutron-network-type vlan \
--neutron-bridge-mappings datacentre:br-ex \
--neutron-network-vlan-ranges datacentre:30:100 \
--neutron-disable-tunneling
If a dedicated interface or bridge is used for tenant VLANs, it should be
included in the bridge mappings. For instance, if the tenant VLANs were on a
bridge named ``br-vlan``, then use these values in the deployment command
above::
--neutron-bridge-mappings datacentre:br-ex,tenant:br-vlan \
--neutron-network-vlan-ranges tenant:30:100
.. note::
You must also pass the environment files (again using the ``-e`` or
``--environment-file`` option) whenever you make subsequent changes to the
overcloud, such as :doc:`../post_deployment/scale_roles`,
:doc:`../post_deployment/delete_nodes` or
:doc:`../post_deployment/package_update`.


@ -0,0 +1,207 @@
Advanced Profile Matching
=========================
Here are additional setup steps to take advantage of the advanced profile
matching and the AHC features.
Enable advanced profile matching
--------------------------------
* Install the ahc-tools package::
sudo yum install -y ahc-tools
* Add the credentials for Ironic and Swift to the
**/etc/ahc-tools/ahc-tools.conf** file.
These will be the same credentials that ironic-discoverd uses,
and can be copied from **/etc/ironic-discoverd/discoverd.conf**::
$ sudo -i
# mkdir -p /etc/ahc-tools
# sed 's/\[discoverd/\[ironic/' /etc/ironic-discoverd/discoverd.conf > /etc/ahc-tools/ahc-tools.conf
# chmod 0600 /etc/ahc-tools/ahc-tools.conf
# exit
Example::
[ironic]
os_auth_url = http://192.0.2.1:5000/v2.0
os_username = ironic
os_password = <PASSWORD>
os_tenant_name = service
[swift]
os_auth_url = http://192.0.2.1:5000/v2.0
os_username = ironic
os_password = <PASSWORD>
os_tenant_name = service
Accessing additional introspection data
---------------------------------------
Every introspection run (as described in
:doc:`../basic_deployment/basic_deployment_cli`) collects a lot of additional
facts about the hardware and stores them as JSON in Swift. The Swift container
name is ``ironic-inspector`` and can be modified in
**/etc/ironic-discoverd/discoverd.conf**. The Swift object name is stored under
the ``hardware_swift_object`` key in the Ironic node's extra field.
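A sketch of fetching this data for one node (this assumes the swift CLI is
configured with the same credentials as in **/etc/ahc-tools/ahc-tools.conf**;
the object name is whatever the node's extra field contains)::

    # Look up the Swift object name stored on the node ...
    ironic node-show <UUID> | grep hardware_swift_object
    # ... then download the JSON facts from the inspector container
    swift download ironic-inspector <object-name>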
State file
----------
Configuration file **/etc/ahc-tools/edeploy/state** defines how many nodes of
each profile we want to match. This file contains a list of tuples, each with
a profile name and the number of nodes for that profile. The ``*`` symbol can
be used to match any number, but make sure that such a tuple goes last.
For example, to start with 1 control node and any number of compute nodes,
populate this file with the following contents::
[('control', '1'), ('compute', '*')]
Matching rules
--------------
These matching rules will determine what profile gets assigned to each node
and are stored in files named **/etc/ahc-tools/edeploy/PROFILE.specs** for
each profile defined in **/etc/ahc-tools/edeploy/state**.
Open the **/etc/ahc-tools/edeploy/control.specs** file.
This is a JSON-like file that might look like this::
[
('disk', '$disk', 'size', 'gt(4)'),
('network', '$eth', 'ipv4', 'network(192.0.2.0/24)'),
('memory', 'total', 'size', 'ge(4294967296)'),
]
These rules match on the data collected during introspection.
Note that disk size is in GiB, while memory size is in bytes (the
``ge(4294967296)`` rule above requires at least 4 GiB of memory).
There is a set of helper functions to make matching more flexible.
* network() : the network interface shall be in the specified network
* gt(), ge(), lt(), le() : greater than (or equal), lower than (or equal)
* in() : the item to match shall be in a specified set
* regexp() : match a regular expression
* or(), and(), not(): boolean functions. or() and and() take 2 parameters
and not() one parameter.
There are also placeholders: *$disk* and *$eth* in the above example.
These will store the matched value in that place for later use.
* For example, if we had a "fact" from discovery::
('disk', 'sda', 'size', '40')
This would match the first rule in the above control.specs file,
and we would store ``"disk": "sda"``.
Running advanced profile matching
---------------------------------
* After adjusting the matching rules, we are ready to do the matching::
sudo ahc-match
* This will attempt to match all of the available nodes to the roles
we have defined in the **/etc/ahc-tools/edeploy/state** file.
When a node matches a role, the role is added to the node in Ironic in
the form of a capability. We can check this with ``ironic node-show``::
[stack@instack ~]# ironic node-show b73fb5fa-1a2c-49c6-b38e-8de41e3c0532 | grep properties -A2
| properties | {u'memory_mb': u'4096', u'cpu_arch': u'x86_64', u'local_gb': u'40', |
| | u'cpus': u'1', u'capabilities': u'profile:control,boot_option:local'} |
| instance_uuid | None
* In the above output, we can see that the control profile is added
as a capability to the node. Next we will need to create flavors in Nova
that actually map to these profiles.
[Optional] Manually add the profiles to the nodes
-------------------------------------------------
In order to use the matching functionality without using the AHC tools, we
can instead add the profile "tags" manually. The example below will add the
"control" profile to a node::
ironic node-update <UUID> replace properties/capabilities='profile:control,boot_option:local'
.. note::
We cannot update only a single key of the capabilities dictionary, so we
need to specify both the profile and the boot_option above; otherwise, the
boot_option key will get removed. A safe pattern is sketched below.
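A sketch of an update that preserves the existing boot_option (the
``compute`` profile value is illustrative)::

    # Inspect the current capabilities first ...
    ironic node-show <UUID> | grep properties -A2
    # ... then include every key you want to keep in the new value
    ironic node-update <UUID> replace properties/capabilities='profile:compute,boot_option:local'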
Create flavors to use advanced matching
---------------------------------------
In order to use the profiles assigned to the Ironic nodes, Nova needs to have
flavors that have the property "capabilities:profile" set to the intended profile.
For example, with just the compute and control profiles:
* Create the flavors
::
openstack flavor create --id auto --ram 4096 --disk 40 --vcpus 1 control
openstack flavor create --id auto --ram 4096 --disk 40 --vcpus 1 compute
.. note::
The values for ram, disk, and vcpus should be set to a minimal lower bound,
as Nova will still check that the Ironic nodes have at least this much
even if we set lower properties in the **.specs** files.
* Assign the properties
::
openstack flavor set --property "cpu_arch"="x86_64" --property "capabilities:boot_option"="local" --property "capabilities:profile"="compute" compute
openstack flavor set --property "cpu_arch"="x86_64" --property "capabilities:boot_option"="local" --property "capabilities:profile"="control" control
Use the flavors to deploy
-------------------------
By default, all nodes are deployed to the **baremetal** flavor.
The |project| CLI has options to support more advanced role matching.
Continuing with the example with only a control and compute profile:
* Get the Tuskar plan name
::
openstack management plan list
* Deploy the overcloud
::
openstack overcloud deploy --control-flavor control --compute-flavor compute --plan <Name or UUID from above>
Use the flavors to scale
-------------------------
The process to scale an overcloud that uses our advanced profiles is the same
as the process used when we only have the **baremetal** flavor.
.. note::
The original overcloud must have been deployed as above in order to scale
using advanced profiles, as the flavor to role mapping happens then.
* Update the **/etc/ahc-tools/edeploy/state** file to match the number
of nodes we want to match to each role.
* Run `sudo ahc-match` to match available nodes to the defined roles.
* Scale the overcloud (example below adds two more nodes to the compute role)
::
openstack overcloud scale stack overcloud overcloud -r Compute-1 -n 2


@ -0,0 +1,109 @@
Ready-State (BIOS, RAID)
========================
Ready-state configuration can be used to prepare bare-metal resources for
deployment. It includes BIOS and RAID configuration based on a predefined
profile.
To define the target BIOS and RAID configuration for a deployment profile, you
need to create a JSON-like ``<profile>.cmdb`` file in
``/etc/ahc-tools/edeploy``. The configuration will be applied only to nodes
that match the ``<profile>.specs`` rules.
Define the target BIOS configuration
------------------------------------
To define a BIOS setting, list the name of the setting and its target
value::
[
{
'bios_settings': {'ProcVirtualization': 'Enabled'}
}
]
Define the target RAID configuration
------------------------------------
The RAID configuration can be defined in two ways: either by listing the IDs
of the physical disks, or by letting Ironic assign physical disks to the
RAID volume.
When providing a list of physical disk IDs, the following attributes are required:
``controller``, ``size_gb``, ``raid_level`` and the list of ``physical_disks``.
``controller`` should be the FQDD of the RAID controller assigned by the DRAC
card. Similarly, the list of ``physical_disks`` should be the FQDDs of physical
disks assigned by the DRAC card. An example::
[
{
'logical_disks': [
{'controller': 'RAID.Integrated.1-1',
'size_gb': 100,
'physical_disks': [
'Disk.Bay.0:Enclosure.Internal.0-1:RAID.Integrated.1-1',
'Disk.Bay.1:Enclosure.Internal.0-1:RAID.Integrated.1-1',
'Disk.Bay.2:Enclosure.Internal.0-1:RAID.Integrated.1-1'],
'raid_level': '5'},
]
}
]
When letting Ironic assign physical disks to the RAID volume, the following
attributes are required: ``controller``, ``size_gb``, ``raid_level`` and the
``number_of_physical_disks``. ``controller`` should be the FQDD of the RAID
controller assigned by the DRAC card. An example::
[
{
'logical_disks': [
{'controller': 'RAID.Integrated.1-1',
'size_gb': 50,
'raid_level': '1',
'number_of_physical_disks': 2},
]
}
]
Complete example for a ``control.cmdb``
---------------------------------------
::
[
{
'bios_settings': {'ProcVirtualization': 'Enabled'},
'logical_disks': [
{'controller': 'RAID.Integrated.1-1',
'size_gb': 50,
'raid_level': '1',
'number_of_physical_disks': 2,
'disk_type': 'hdd',
'interface_type': 'sas',
'volume_name': 'root_volume',
'is_root_volume': True},
{'controller': 'RAID.Integrated.1-1',
'size_gb': 100,
'physical_disks': [
'Disk.Bay.0:Enclosure.Internal.0-1:RAID.Integrated.1-1',
'Disk.Bay.1:Enclosure.Internal.0-1:RAID.Integrated.1-1',
'Disk.Bay.2:Enclosure.Internal.0-1:RAID.Integrated.1-1'],
'raid_level': '5',
'volume_name': 'data_volume1'}
]
}
]
Trigger the ready-state configuration
-------------------------------------
Continue with matching the nodes to profiles as described in
:doc:`profile_matching`.
Then trigger the BIOS and RAID configuration based on the matched deployment
profile::
instack-ironic-deployment --configure-nodes


@ -0,0 +1,87 @@
Deploying with Heat Templates
=============================
It is possible to deploy to heat directly, using a directory of templates,
i.e. without using any Tuskar plan. This is potentially a more convenient
approach if you are only deploying via the CLI (the UI requires Tuskar),
and/or if you are developing significant enhancements or site-specific
additions to the templates.
Deploying an Overcloud without Tuskar
-------------------------------------
You may use the ``--templates`` option to deploy using only heat
templates, i.e. with no Tuskar plan::
openstack overcloud deploy --templates
.. note::
The default location for the templates is
`/usr/share/openstack-tripleo-heat-templates`. Using this option
will not include any modifications which may have been made to the Tuskar
role templates; templates are always read from local files.
Overriding specific templates with local versions
-------------------------------------------------
You may use heat environment files (via the ``--environment-file`` or ``-e``
option), combined with the ``--templates`` option to override specific
templates, e.g. to test a bugfix outside of the location of the packaged
templates.
The mapping between heat resource types and the underlying templates can be
found in
`/usr/share/openstack-tripleo-heat-templates/overcloud-resource-registry-puppet.yaml`
Here is an example of copying a specific resource template and overriding
so the deployment uses the local version::
mkdir local_templates
cp /usr/share/openstack-tripleo-heat-templates/puppet/controller-puppet.yaml local_templates
cat > override_templates.yaml << EOF
resource_registry:
OS::TripleO::Controller: local_templates/controller-puppet.yaml
EOF
openstack overcloud deploy --templates --environment-file override_templates.yaml
.. note::
The ``--environment-file``/``-e`` option may be specified multiple times;
if duplicate keys are specified in the environment files, the last one
takes precedence, as shown below.
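For example, with two hypothetical environment files that map the same
resource type, the mapping from the file passed last wins::

    # If both files set OS::TripleO::Controller, second_env.yaml applies
    openstack overcloud deploy --templates \
      -e first_env.yaml -e second_env.yaml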
.. note::
You must also pass the environment files (again using the ``-e`` or
``--environment-file`` option) whenever you make subsequent changes to the
overcloud, such as :doc:`../post_deployment/scale_roles`,
:doc:`../post_deployment/delete_nodes` or
:doc:`../post_deployment/package_update`.
.. _custom-template-location:
Using a custom location for all templates
-----------------------------------------
You may specify a path to the ``--templates`` option, such that the packaged
tree may be copied to another location; this is useful, e.g., for developer
usage where you wish to check the templates into a revision control system.
.. note::
Use caution when using this approach as you will need to rebase any local
changes on updates to the openstack-tripleo-heat-templates package, and
care will be needed to avoid modifying anything in the tree which the CLI
tools rely on (such as top-level parameters). In many cases using the
:doc:`ExtraConfig <extra_config>` interfaces or specific template overrides
as outlined above may be preferable.
Here is an example of copying the entire tripleo-heat-templates tree to a
local directory and launching a deployment using the new location::
cp -r /usr/share/openstack-tripleo-heat-templates /home/stack/
openstack overcloud deploy --templates /home/stack/openstack-tripleo-heat-templates


@ -0,0 +1,408 @@
Basic Deployment (CLI)
======================
With the following few steps you will be able to simply deploy via |project|
to your environment using our defaults.
Prepare Your Environment
------------------------
#. Make sure you have your environment ready and undercloud running:
* :doc:`../environments/environments`
* :doc:`../installation/installing`
#. Log into your undercloud (instack) virtual machine as a non-root user::
ssh root@<undercloud-machine>
su - stack
#. In order to use CLI commands easily you need to source the needed
environment variables::
source stackrc
Get Images
----------
.. note::
If you already have images built, perhaps from a previous installation of
|project|, you can simply copy those image files into your regular user's
home directory and skip this section.
If you do this, be aware that sometimes newer versions of |project| do not
work with older images, so if the deployment fails it may be necessary to
delete the older images and restart the process from this step.
The image files required are::
deploy-ramdisk-ironic.initramfs
deploy-ramdisk-ironic.kernel
discovery-ramdisk.initramfs
discovery-ramdisk.kernel
overcloud-full.initrd
overcloud-full.qcow2
overcloud-full.vmlinuz
Images must be built prior to doing a deployment. A discovery ramdisk,
deployment ramdisk, and overcloud-full image can all be built using
instack-undercloud.
It's recommended to build images on the installed undercloud directly since all
the dependencies are already present.
The following steps can be used to build images. They should be run as the same
non-root user that was used to install the undercloud.
#. Choose image operating system:
The built images will automatically have the same base OS as the
running undercloud. To choose a different OS, use one of the following
commands (make sure you have your OS-specific content visible):
.. admonition:: CentOS
:class: centos
::
export NODE_DIST=centos7
.. admonition:: RHEL
:class: rhel
::
export NODE_DIST=rhel7
#. Build the required images:
.. admonition:: RHEL
:class: rhel
Download the RHEL 7.1 cloud image or copy it over from a different location,
for example:
https://access.redhat.com/downloads/content/69/ver=/rhel---7/7.1/x86_64/product-downloads,
and define the needed environment variables for RHEL 7.1 prior to running
``openstack overcloud image build --all``::
export DIB_LOCAL_IMAGE=rhel-guest-image-7.1-20150224.0.x86_64.qcow2
.. admonition:: RHEL Portal Registration
:class: portal
To register the image builds to the Red Hat Portal define the following variables::
export REG_METHOD=portal
export REG_USER="[your username]"
export REG_PASSWORD="[your password]"
# Find this with `sudo subscription-manager list --available`
export REG_POOL_ID="[pool id]"
export REG_REPOS="rhel-7-server-rpms rhel-7-server-extras-rpms rhel-ha-for-rhel-7-server-rpms \
rhel-7-server-optional-rpms rhel-7-server-openstack-6.0-rpms"
.. admonition:: RHEL Satellite Registration
:class: satellite
To register the image builds to a Satellite define the following
variables. Only using an activation key is supported when registering to
Satellite, username/password is not supported for security reasons. The
activation key must enable the repos shown::
export REG_METHOD=satellite
# REG_SAT_URL should be in the format of:
# http://<satellite-hostname>
export REG_SAT_URL="[satellite url]"
export REG_ORG="[satellite org]"
# Activation key must enable these repos:
# rhel-7-server-rpms
# rhel-7-server-optional-rpms
# rhel-7-server-extras-rpms
# rhel-7-server-openstack-6.0-rpms
export REG_ACTIVATION_KEY="[activation key]"
.. note::
By default, images are built with the latest delorean repository which has passed CI. If you need to manually test packages before CI has passed, you can use:
::
export DELOREAN_TRUNK_MGT_REPO="http://trunk-mgt.rdoproject.org/centos-kilo/current"
::
openstack overcloud image build --all
.. note::
This command will build **overcloud-full** images (\*.qcow2, \*.initrd,
\*.vmlinuz), **deploy-ramdisk-ironic** images (\*.initramfs, \*.kernel),
**discovery-ramdisk** images (\*.initramfs, \*.kernel) and **testing**
fedora-user.qcow2 (which is always Fedora based).
Upload Images
-------------
Load the images into the undercloud Glance::
openstack overcloud image upload
Register Nodes
--------------
Register nodes for your deployment with Ironic::
openstack baremetal import --json instackenv.json
.. note::
It's not recommended to delete nodes and/or rerun this command after
you have proceeded to the next steps. Particularly, if you start introspection
and then re-register nodes, you won't be able to retry introspection until
the previous one times out (1 hour by default). If you are having issues
with nodes after registration, please follow
:ref:`node_registration_problems`.
.. note::
By default Ironic will not sync the power state of the nodes,
because in our HA (high availability) model Pacemaker is the
one responsible for controlling the power state of the nodes
when fencing. If you are using a non-HA setup and want Ironic
to take care of the power state of the nodes please change the
value of the "force_power_state_during_sync" configuration option
in the /etc/ironic/ironic.conf file to "True" and restart the
openstack-ironic-conductor service.
Also, note that if "openstack undercloud install" is re-run the value
of the "force_power_state_during_sync" configuration option will be
set back to the default, which is "False".
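A sketch of that change (the use of ``crudini`` is an assumption; the file
can equally be edited by hand)::

    # Let Ironic enforce the power state of registered nodes
    sudo crudini --set /etc/ironic/ironic.conf conductor force_power_state_during_sync True
    sudo systemctl restart openstack-ironic-conductor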
Assign kernel and ramdisk to nodes::
openstack baremetal configure boot
Introspect Nodes
----------------
Introspect hardware attributes of nodes::
openstack baremetal introspection bulk start
.. note:: **Introspection has to finish without errors.**
The process can take up to 5 minutes for VM / 15 minutes for baremetal. If
the process takes longer, see :ref:`introspection_problems`.
Introspecting a single node
^^^^^^^^^^^^^^^^^^^^^^^^^^^
You can also introspect nodes one by one.
When doing so, you must take care to set the correct node states manually.
Use ``ironic node-show UUID`` command to figure out whether nodes are in
``manageable`` or ``available`` state. For all nodes in ``available`` state,
start with putting a node to ``manageable`` state::
ironic node-set-provision-state UUID manage
Then you can run introspection::
openstack baremetal introspection start UUID
This command won't poll for the introspection result, use the following command
to check the current introspection state::
openstack baremetal introspection status UUID
Repeat it for every node until you see ``True`` in the ``finished`` field.
The ``error`` field will contain an error message if introspection failed,
or ``None`` if introspection succeeded for this node.
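A minimal polling sketch (hypothetical; it greps the ``finished`` row of the
status table every 30 seconds)::

    until openstack baremetal introspection status UUID | grep finished | grep -q True; do
        sleep 30
    done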
Do not forget to make nodes available for deployment afterwards::
ironic node-set-provision-state UUID provide
Create Flavors
--------------
Create the necessary flavor::
openstack flavor create --id auto --ram 4096 --disk 40 --vcpus 1 baremetal
openstack flavor set --property "cpu_arch"="x86_64" --property "capabilities:boot_option"="local" baremetal
Configure a nameserver for the Overcloud
----------------------------------------
Overcloud nodes need to have a configured nameserver so that they can resolve
hostnames via DNS. The nameserver is defined in the undercloud's neutron
subnet. Define the nameserver to be used for the environment::
# List the available subnets
neutron subnet-list
neutron subnet-update <subnet-uuid> --dns-nameserver <nameserver-ip>
.. note::
A public DNS server, such as 8.8.8.8, can be used if there is no internal
DNS server; see the example below.
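For example, a sketch using a public nameserver (the subnet UUID is whatever
``neutron subnet-list`` returned above)::

    neutron subnet-update <subnet-uuid> --dns-nameserver 8.8.8.8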
.. admonition:: Virtual
:class: virtual
In virtual environments, the libvirt default network DHCP server address,
typically 192.168.122.1, can be used as the overcloud nameserver.
Deploy the Overcloud
--------------------
By default 1 compute and 1 control node will be deployed, with networking
configured for the virtual environment. To customize this, see the output of::
openstack help overcloud deploy
.. admonition:: Ceph
:class: ceph
When deploying Ceph it is necessary to use the regular Heat templates
instead of Tuskar, to specify the number of Ceph OSD nodes to be
deployed and to provide some additional parameters to enable usage
of Ceph for Glance, Cinder, Nova or all.
Make a copy of the file ``/usr/share/openstack-tripleo-heat-templates/environments/storage-environment.yaml``
and edit it as appropriate, then pass the following additional arguments::
--ceph-storage-scale <number of nodes> --templates -e /path/to/customized/storage-environment.yaml
to the deploy command below.
By default when Ceph is enabled the Cinder LVM back-end is disabled. This
behavior may be changed by also passing::
--cinder-lvm
.. admonition:: RHEL Satellite Registration
:class: satellite
To register the Overcloud nodes to a Satellite add the following flags
to the deploy command::
--rhel-reg --reg-method satellite --reg-org <ORG ID#> --reg-sat-url <satellite URL> --reg-activation-key <KEY>
.. note::
Only using an activation key is supported when registering to
Satellite, username/password is not supported for security reasons.
The activation key must enable the following repos:
rhel-7-server-rpms
rhel-7-server-optional-rpms
rhel-7-server-extras-rpms
rhel-7-server-openstack-6.0-rpms
::
openstack overcloud deploy --templates
.. note::
To deploy the overcloud with network isolation, bonds, and/or custom
network interface configurations, instead follow the workflow here to
deploy: :doc:`../advanced_deployment/network_isolation`
Post-Deployment
---------------
Access the Overcloud
^^^^^^^^^^^^^^^^^^^^
``openstack overcloud deploy`` generates an overcloudrc file in the current
user's home directory, appropriate for interacting with the deployed overcloud.
To use it, simply source the file::
source ~/overcloudrc
To return to working with the undercloud, source the stackrc file again::
source ~/stackrc
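As a quick sanity check that the overcloud credentials work, you can list the
overcloud's compute services before switching back (an optional, illustrative
check)::

    source ~/overcloudrc
    nova service-list
    source ~/stackrc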
Setup the Overcloud network
^^^^^^^^^^^^^^^^^^^^^^^^^^^
Initial networks in Neutron in the Overcloud need to be created for tenant
instances. The following are example commands to create the initial networks.
Edit the address ranges, or use the necessary neutron commands to match the
environment appropriately. This assumes a dedicated interface or native VLAN::
neutron net-create nova --router:external --provider:network_type flat \
--provider:physical_network datacentre
neutron subnet-create --name nova --disable-dhcp \
--allocation-pool start=172.16.23.140,end=172.16.23.240 \
--gateway 172.16.23.251 nova 172.16.23.128/25
The example shows naming the network "nova" because that will allow the
Tempest tests to pass, based on the default floating pool name set in nova.conf. You
can confirm that the network was created with::
neutron net-list
+--------------------------------------+-------------+-------------------------------------------------------+
| id | name | subnets |
+--------------------------------------+-------------+-------------------------------------------------------+
| d474fe1f-222d-4e32-802b-cde86e746a2a | nova | 01c5f621-1e0f-4b9d-9c30-7dc59592a52f 172.16.23.128/25 |
+--------------------------------------+-------------+-------------------------------------------------------+
To use a VLAN, the following example should work. Customize the address ranges
and VLAN id based on the environment::
neutron net-create nova --router:external --provider:network_type vlan \
--provider:physical_network datacentre --provider:segmentation_id 195
neutron subnet-create --name nova --disable-dhcp \
--allocation-pool start=172.16.23.140,end=172.16.23.240 \
--gateway 172.16.23.251 nova 172.16.23.128/25
Validate the Overcloud
^^^^^^^^^^^^^^^^^^^^^^
To verify the Overcloud by running Tempest::
openstack overcloud validate --overcloud-auth-url $OS_AUTH_URL \
--overcloud-admin-password $OS_PASSWORD
.. note:: The full Tempest test suite might take hours to run on a single CPU.
To run only a part of the Tempest test suite (eg. tests with ``smoke`` tag)::
openstack overcloud validate --overcloud-auth-url $OS_AUTH_URL \
--overcloud-admin-password $OS_PASSWORD \
--tempest-args smoke
Redeploy the Overcloud
^^^^^^^^^^^^^^^^^^^^^^
The overcloud can be redeployed when desired.
#. First, delete any existing Overcloud::
heat stack-delete overcloud
#. Confirm the Overcloud has been deleted. It may take a few minutes to delete::
# This command should show no stack once the Delete has completed
heat stack-list
#. Although not required, introspection can be rerun::
openstack baremetal introspection bulk start
#. Deploy the Overcloud again::
openstack overcloud deploy --templates
@ -0,0 +1,258 @@
Basic Deployment (GUI)
======================
Access the GUI
--------------
The Undercloud installation also includes Tuskar-UI, which you can use to drive
the deployment.
.. admonition:: Virtual
:class: virtual
In the case of a virtual deployment, Tuskar-UI runs on the instack virtual
machine at ``http://localhost/dashboard``. Considering this, and the fact that
the virt host is a remote machine, follow these steps to access the UI in a
browser:
#. On the host machine, create an ssh tunnel from the instack VM to the virt host::
ssh -g -N -L 8080:127.0.0.1:80 root@<undercloud_vm_ip>
#. On the instack VM, edit ``/etc/openstack-dashboard/local_settings`` and add the virt host ``hostname`` to the ``ALLOWED_HOSTS`` array
#. Restart Apache::
systemctl restart httpd
#. Navigate to ``http://<virt_host_hostname>:8080/dashboard`` in the browser
When logging into the dashboard the default user and password are found in the ``/root/stackrc`` file on the instack virtual machine, ``OS_USERNAME`` and ``OS_PASSWORD``.
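For example, one way to read them (a sketch)::

    sudo grep -E 'OS_USERNAME|OS_PASSWORD' /root/stackrc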
After logging into the dashboard, make sure that the project is set to *admin* (if it is not, change the project to *admin* using the Project switcher at the top bar).
Overview Page
-------------
When you log into the GUI, you will land on the *Overview* page. This page contains all the information about the
current state of the deployment. On the left side, there is a list of available deployment roles. On the right side,
there is a deployment checklist which indicates whether all the prerequisites for the deployment have been satisfied.
Get Images
----------
To perform a successful deployment, you will need the following images: the discovery ramdisk, the deployment ramdisk, and the
openstack-full image. To upload the images and load them into Glance, navigate to *Provisioning Images* and use the
*Create Image* button to create the necessary images. You will need the following image files handy::
overcloud-full.vmlinuz
overcloud-full.initrd
overcloud-full.qcow2
deploy-ramdisk-ironic.kernel
deploy-ramdisk-ironic.initramfs
discovery-ramdisk.kernel
discovery-ramdisk.initramfs
To create the 'overcloud-full-vmlinuz' image, fill the *Create Image* form like so::
Name: overcloud-full-vmlinuz
Image Source: Image File
Image File: overcloud-full.vmlinuz
Format: AKI
Public: checked
To create the 'overcloud-full-initrd' image, fill the *Create Image* form like so::
Name: overcloud-full-initrd
Image Source: Image File
Image File: overcloud-full.initrd
Format: ARI
Public: checked
To create the 'overcloud-full' image, fill the *Create Image* form like so::
Name: overcloud-full
Image Source: Image File
Image File: overcloud-full.qcow2
Kernel: overcloud-full-vmlinuz
Ramdisk: overcloud-full-initrd
Format: QCOW2
Public: checked
To create the 'bm-deploy-kernel' image, fill the *Create Image* form like so::
Name: bm-deploy-kernel
Image Source: Image File
Image File: deploy-ramdisk-ironic.kernel
Format: AKI
Public: checked
To create the 'bm-deploy-ramdisk' image, fill the *Create Image* form like so::
Name: bm-deploy-ramdisk
Image Source: Image File
Image File: deploy-ramdisk-ironic.initramfs
Format: ARI
Public: checked
You will also need to copy the discovery images to the /httpboot directory on the undercloud node. Assuming you have these
images handy in your home directory on the undercloud node, run the following commands to copy them to /httpboot::
sudo cp -f discovery-ramdisk.kernel /httpboot/discovery.kernel
sudo cp -f discovery-ramdisk.initramfs /httpboot/discovery.ramdisk
Register Nodes
--------------
To register nodes for your deployment, navigate to the *Nodes* page, and click the + sign to open the *Register Nodes* form.
This form gives you a choice of manually adding node data using the *Add* link, or supplying a CSV file with node data
using the *Upload* link. Choose one of these two options to register your nodes with Ironic. Make sure you provide, at
minimum, the *Power Management* properties, the *MAC address(es)* for the nodes and the *Deployment Images*. The rest of the
attributes are optional; if you don't supply them, they will be obtained by running introspection on the
nodes. When you have finished providing the node data, click the *Register Nodes* button to register your nodes.
.. note::
When using a CSV file to upload node data, make sure the fields are in this order::
driver,address,username,password/ssh key,mac addresses,cpu architecture,number of CPUs,available memory,available storage
As stated above, hardware properties (cpu architecture, number of CPUs, available memory, available storage)
are not mandatory; if they are not provided in the CSV file, they will be obtained via introspection.
Here are the contents of an example CSV file to register two virtual nodes::
pxe_ssh,192.168.122.1,root,"-----BEGIN RSA PRIVATE KEY-----
MIIEowIBAAKCAQEAw6J6supEV40toLTiH6Taj8k6bI2CSJSK31spMfLIKzPuvzCV
PGZdhKMx1o++u9TcFFh7U1caojg1Jj/XKdPcktGBQvAmiNa9nybmTjiOqq/b1svr
W1Yn23WvkEBL7peFUZVAgJDvxcf42LtA72RdgzegFdrP0y4z6UJlJRnxAckxfa/o
b05N3nrK2yteZQVuMBVB2P7QAgy62aIqJBacWrLplaZMJZZYQJ9ialXZXAMPIN3P
5l9emMOJjBbXv76G6B/Ik9J6Ryv8SFhQbLzWu1eXjT8g3d5KlD/lvh6fwN/HjYOb
6o5LvMD61vpOaR0B8Ta/+vu4R+GiLB+ArhS9WQIDAQABAoIBAAUWLGqKfMxp902+
ZkK8XpJugP5hj4mjkxjLnf6WeW3mI8cE1FyFrNjOBXW2txbbKf29bzdzhFXDcF5W
Opnz0EBhAiNjax0TuEpzEHnoLo1xlR24n534V4D1RmNRyKejeOvuHYc6PYG++VFp
TP7sdSH8SEVJMy4ifWcLOuYEDqglL1uSPQgO8HkmlvOvgI1LnLx9wjeNC1D7weZu
Eh75GTGRLL8i0X0bLmaNZ1Fs3Ge2tLNS0hfu6epCiT3ZAQTBVyFbVmN6btnQ/BHM
nDSZQ2JEDjQByCiBch9hTk/V2UNmn5dOUGPTwp2IP5Blpq2X7u1IoXQiAhI+zVcN
9mqbK6ECgYEA9hbQF7iEP4RhClNUVmQJd1zQjC2D5Vj0ik68MdgcT1QFrrCGaRPb
eayCxyDoNyUAWGtqToTQ5v3b1dxwsJryMekHD0OL75fl1KbD0bRiawVG81QbyC3U
I05Lr5LCdm80xdktC6caIkpoRF6e9xhAQduXDHZyQ6TdJtRHS6E3uPsCgYEAy4N6
xFml63vk2qFPyMkSfp82ey6YiGchXxZSDl/tDiDDHgDVRtYi5+7iCNVrjkwtUXI3
zK+G+m74AIx1C9ZSW81y5ymxKLGz1+OUy3Vtp0Zf5R1/Q+l9I4sl1dkB2wJcb2Ls
2A3yl9NEt5M3bHZUQk4ttmhfqOFiSmNd/uFbersCgYEAvlAYMDAPfnum/HBDKeiF
dZz+31mXxjeMLqYDXtzNz/+fwWBdIkgsFKX4IX1ueK8R3E990Clg0TMD3xlywPGj
WjvnrMNFJk6nfFRX3gaNkkxreWTTc3UVuRQk7iwmXadU/akd8AQT7u7yQeWXNGq5
zvS+lPHJHk0ShqPmWzPbvx8CgYBAiB9slXSsN+v4e4AeDcwkhH21D7BkSDdnvF8m
mbpEaZUVNXRrcsk8vB3GaU4in/sawVn6OIpXbMqM+fy/VSVmYL4XmLvJSJfbVBnB
binoCcOsle7d7PK2S5AiwB37gUMoOrkZRUrwY5h9kVvYs6jCIaITHgN/PIB7UAjl
IjZsswKBgC9AgnXvw4M1bcS1SK1WdJXACrmfX5tGMLzCEVJgmJtiWobvpNsfcZ/Q
EanIrYxnJ1zWZstefEuLWGzja+xwL/rsnTl77DPuvZRr/QxXMRaPFn5VTnH5kh0y
9FlihAmgG1n2k3CCFNtdxAKBqPGLz2wUuRDHYhF4WKYuvghIpQA7
-----END RSA PRIVATE KEY-----",00:d1:2c:a2:ed:58
pxe_ssh,192.168.122.1,root,"-----BEGIN RSA PRIVATE KEY-----
MIIEowIBAAKCAQEAw6J6supEV40toLTiH6Taj8k6bI2CSJSK31spMfLIKzPuvzCV
PGZdhKMx1o++u9TcFFh7U1caojg1Jj/XKdPcktGBQvAmiNa9nybmTjiOqq/b1svr
W1Yn23WvkEBL7peFUZVAgJDvxcf42LtA72RdgzegFdrP0y4z6UJlJRnxAckxfa/o
b05N3nrK2yteZQVuMBVB2P7QAgy62aIqJBacWrLplaZMJZZYQJ9ialXZXAMPIN3P
5l9emMOJjBbXv76G6B/Ik9J6Ryv8SFhQbLzWu1eXjT8g3d5KlD/lvh6fwN/HjYOb
6o5LvMD61vpOaR0B8Ta/+vu4R+GiLB+ArhS9WQIDAQABAoIBAAUWLGqKfMxp902+
ZkK8XpJugP5hj4mjkxjLnf6WeW3mI8cE1FyFrNjOBXW2txbbKf29bzdzhFXDcF5W
Opnz0EBhAiNjax0TuEpzEHnoLo1xlR24n534V4D1RmNRyKejeOvuHYc6PYG++VFp
TP7sdSH8SEVJMy4ifWcLOuYEDqglL1uSPQgO8HkmlvOvgI1LnLx9wjeNC1D7weZu
Eh75GTGRLL8i0X0bLmaNZ1Fs3Ge2tLNS0hfu6epCiT3ZAQTBVyFbVmN6btnQ/BHM
nDSZQ2JEDjQByCiBch9hTk/V2UNmn5dOUGPTwp2IP5Blpq2X7u1IoXQiAhI+zVcN
9mqbK6ECgYEA9hbQF7iEP4RhClNUVmQJd1zQjC2D5Vj0ik68MdgcT1QFrrCGaRPb
eayCxyDoNyUAWGtqToTQ5v3b1dxwsJryMekHD0OL75fl1KbD0bRiawVG81QbyC3U
I05Lr5LCdm80xdktC6caIkpoRF6e9xhAQduXDHZyQ6TdJtRHS6E3uPsCgYEAy4N6
xFml63vk2qFPyMkSfp82ey6YiGchXxZSDl/tDiDDHgDVRtYi5+7iCNVrjkwtUXI3
zK+G+m74AIx1C9ZSW81y5ymxKLGz1+OUy3Vtp0Zf5R1/Q+l9I4sl1dkB2wJcb2Ls
2A3yl9NEt5M3bHZUQk4ttmhfqOFiSmNd/uFbersCgYEAvlAYMDAPfnum/HBDKeiF
dZz+31mXxjeMLqYDXtzNz/+fwWBdIkgsFKX4IX1ueK8R3E990Clg0TMD3xlywPGj
WjvnrMNFJk6nfFRX3gaNkkxreWTTc3UVuRQk7iwmXadU/akd8AQT7u7yQeWXNGq5
zvS+lPHJHk0ShqPmWzPbvx8CgYBAiB9slXSsN+v4e4AeDcwkhH21D7BkSDdnvF8m
mbpEaZUVNXRrcsk8vB3GaU4in/sawVn6OIpXbMqM+fy/VSVmYL4XmLvJSJfbVBnB
binoCcOsle7d7PK2S5AiwB37gUMoOrkZRUrwY5h9kVvYs6jCIaITHgN/PIB7UAjl
IjZsswKBgC9AgnXvw4M1bcS1SK1WdJXACrmfX5tGMLzCEVJgmJtiWobvpNsfcZ/Q
EanIrYxnJ1zWZstefEuLWGzja+xwL/rsnTl77DPuvZRr/QxXMRaPFn5VTnH5kh0y
9FlihAmgG1n2k3CCFNtdxAKBqPGLz2wUuRDHYhF4WKYuvghIpQA7
-----END RSA PRIVATE KEY-----",<MAC address of the second node>
Introspect Nodes
----------------
When registering nodes as described above, if you leave out any of the hardware properties for any of the nodes,
introspection will be run on the nodes to obtain these missing properties, as soon as you click the *Register Nodes*
button. In this case, the nodes will be located in the *Maintenance* tab and will have the status *Discovering*. After
the introspection process has finished (this can take up to 5 minutes for VM / 15 minutes for baremetal), the hardware
properties will get populated and the nodes will have the status *Discovered*. At this point, you can move the nodes
to the *Free* tab, by selecting them using the checkbox on the left side and clicking the *Activate Nodes* button. Now
the nodes are ready and available for deployment.
Create Flavors
--------------
To create the necessary flavor, navigate to the *Flavors* page. One suggested flavor, matching the hardware properties
of the created nodes, will be available. To create it, open the dropdown menu under *Actions*, click *Edit before creating*,
change the name to 'baremetal' and click the *Create Flavors* button.
Configure Roles
---------------
To configure deployment roles, navigate to the *Deployment Roles* page. *Flavor* and *Image* need to be set for all the
deployment roles. For each of the deployment roles, click the *edit* button and set the *Flavor* to 'baremetal' and the
*Image* to 'overcloud-full'. Save the form.
Service Configuration
---------------------
To perform the necessary service configuration, navigate to the *Service Configuration* page and click the
*Simplified Configuration* button. In the *Service Configuration* form, make sure that the values of the *Deployment Type*
and *Public Interface* fields are correct. Also make sure you set the *SNMP Password* and the *Cloud name*.
Deploy the Overcloud
--------------------
To deploy the overcloud, navigate to the *Overview* page. The deployment plan validation will be performed and if the
plan is valid, the *Verify and Deploy* button will be enabled. Click this button to open the deployment confirmation
dialog. In case you want to enable network isolation, check the *Enable Network Isolation* box. Click *Deploy*.
This will trigger the creation of the overcloud heat stack. The page will reload and you will be able to monitor the
current status of the deployment. On the right side you will see the progress bar as well as the last event from
the Heat event list. If you want to see the full event list, you can navigate to the *Deployment Log* page.
Initialize the Overcloud
------------------------
Once the deployment has successfully completed, you need to perform the initialization of Keystone and Neutron in the
overcloud. To do this, click the *Initialize* button, fill out the form and click *Initialize*. Once the initialization has
completed, the page will reload and you will see deployment details on the *Overview* page. On the left side the
information about roles and node counts will be displayed, along with the system load charts for each deployment role.
On the right side, the access information for the overcloud Horizon will be displayed.
Post-Deployment
---------------
Access the Overcloud
^^^^^^^^^^^^^^^^^^^^
When the overcloud is deployed, the access information needed to log into the overcloud Horizon is located on
the *Overview* page.
Redeploy the Overcloud
^^^^^^^^^^^^^^^^^^^^^^
The overcloud can be redeployed when desired. First, you have to delete the existing overcloud by clicking the
*Undeploy* button on the *Overview* page. This will trigger the deletion of the Heat stack. After the overcloud has been
deleted, the *Overview* page will again display the deployment checklist along with the *Verify and Deploy* button. If you
wish to deploy the overcloud again, repeat the steps from the *Deploy the Overcloud* section on this page.
@ -1,49 +1,125 @@
# -*- coding: utf-8 -*-
#
# instack-undercloud documentation build configuration file, created by
# sphinx-quickstart on Wed Feb 25 10:56:57 2015.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
import sphinx_rtd_theme
sys.path.insert(0, os.path.abspath('../..'))
sys.path.insert(0, os.path.abspath('../'))
sys.path.insert(0, os.path.abspath('./'))
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'oslosphinx'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'TripleO'
copyright = u'2015, OpenStack Foundation'
bug_tracker = u'Launchpad'
bug_tracker_url = u'https://launchpad.net/tripleo'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '3.0.0'
# The full version, including alpha/beta/rc tags.
release = '3.0.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
html_static_path = ['_custom']
# html_style = 'custom.css'
templates_path = ['_templates']
# Output file base name for HTML help builder.
htmlhelp_basename = '%sdoc' % project
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index',
'%s.tex' % project,
u'%s Documentation' % project,
u'OpenStack Foundation', 'manual'),
]
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
rst_prolog = """
.. |project| replace:: %s
.. |bug_tracker| replace:: %s
.. |bug_tracker_url| replace:: %s
""" % (project, bug_tracker, bug_tracker_url)
@ -0,0 +1,64 @@
How to Contribute
=================
|project| source code is available. You can contribute code to individual
projects, improve the documentation, report bugs and vulnerabilities, and request features.
Contributing Code
-----------------
Since |project| is a set of integrated OpenStack projects, all
development happens upstream in the OpenStack community.
Learn `how to contribute to OpenStack upstream <https://wiki.openstack.org/
wiki/How_To_Contribute>`_.
See :doc:`../introduction/components` to find out how to contribute to
individual projects.
Contributing to this Documentation
-----------------------------------
|project| User Documentation lives on Github under |project|
organization.
Learn `how to contribute to |project| Docs
<https://git.openstack.org/openstack/tripleo-docs>`_.
Reporting Bugs
--------------
**OpenStack Upstream**: If you find bugs or vulnerabilities which affect
upstream projects, please follow OpenStack's process of filing bugs.
* Learn `how to create Bugs in OpenStack
<https://wiki.openstack.org/wiki/Bugs>`_.
* If you want to file a bug against an upstream project, you can find useful links
in our list of :doc:`../introduction/components`.
**|project|**: If the bug impacts the |project| project as a whole, you can file a
bug in |bug_tracker|:
#. Go to |bug_tracker_url|
#. Fill in the needed information (if you also filed an upstream bug, please provide
its URL in the advanced fields)
#. Submit the bug
Requesting Features
-------------------
**OpenStack Upstream**: Since the projects are developed in the OpenStack community,
all features are requested upstream via Blueprints.
* Learn `how to create Blueprints in OpenStack
<https://wiki.openstack.org/wiki/Blueprints>`_.
* If you want to request a feature in an upstream project, you can find useful links
in our list of :doc:`../introduction/components`.
@ -0,0 +1,200 @@
Baremetal Environment
=====================
|project| can be used in an all baremetal environment. One machine will be
used for the Undercloud, and the others will be used for your Overcloud.
Minimum System Requirements
---------------------------
To deploy a minimal TripleO cloud with |project| you need the following baremetal
machines:
* 1 Undercloud
* 1 Overcloud Controller
* 1 Overcloud Compute
For each additional Overcloud role, such as Block Storage or Object Storage,
you need an additional baremetal machine.
..
<REMOVE WHEN HA IS AVAILABLE>
For minimal **HA (high availability)** deployment you need at least 3 Overcloud
Controller machines and 2 Overcloud Compute machines.
The baremetal machines must meet the following minimum specifications:
* multi-core CPU
* 4 GB memory
* 60 GB free disk space
TripleO supports only the following operating systems:
* RHEL 7.1 x86_64 or
* CentOS 7 x86_64
Preparing the Baremetal Environment
-----------------------------------
Networking
^^^^^^^^^^
The overcloud nodes will be deployed from the undercloud machine and therefore the machines need to have their network settings modified to allow the overcloud nodes to be PXE booted using the undercloud machine. As such, the setup requires that:
* All overcloud machines in the setup must support IPMI
* A management provisioning network is set up for all of the overcloud machines.
One NIC from every machine needs to be in the same broadcast domain of the
provisioning network. In the tested environment, this required setting up a new
VLAN on the switch. Note that you should use the same NIC on each of the
overcloud machines ( for example: use the second NIC on each overcloud
machine). This is because during installation we will need to refer to that NIC
using a single name across all overcloud machines, e.g. em2.
* The provisioning network NIC should not be the same NIC that you are using
for remote connectivity to the undercloud machine. During the undercloud
installation, an Open vSwitch bridge will be created for Neutron and the
provisioning NIC will be bridged to the Open vSwitch bridge. As such,
connectivity would be lost if the provisioning NIC was also used for remote
connectivity to the undercloud machine.
* The overcloud machines can PXE boot off the NIC that is on the private VLAN.
In the tested environment, this required disabling network booting in the BIOS
for all NICs other than the one we wanted to boot and then ensuring that the
chosen NIC is at the top of the boot order (ahead of the local hard disk drive
and CD/DVD drives).
* For each overcloud machine you have: the MAC address of the NIC that will PXE
boot on the provisioning network, and the IPMI information for the machine (i.e. the IP
address of the IPMI NIC, and the IPMI username and password)
Refer to the following diagram for more information.
.. image:: ../_images/TripleO_Network_Diagram_.jpg
Setting Up The Undercloud Machine
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#. Select a machine within the baremetal environment on which to install the
undercloud.
#. Install RHEL 7.1 x86_64 or CentOS 7 x86_64 on this machine.
#. If needed, create a non-root user with sudo access to use for installing the
Undercloud::
sudo useradd stack
sudo passwd stack # specify a password
echo "stack ALL=(root) NOPASSWD:ALL" | sudo tee -a /etc/sudoers.d/stack
sudo chmod 0440 /etc/sudoers.d/stack
.. admonition:: RHEL
:class: rhel
If using RHEL, register the Undercloud for package installations/updates.
.. admonition:: RHEL Portal Registration
:class: portal
Register the host machine using Subscription Management::
sudo subscription-manager register --username="[your username]" --password="[your password]"
# Find this with `subscription-manager list --available`
sudo subscription-manager attach --pool="[pool id]"
# Verify repositories are available
sudo subscription-manager repos --list
# Enable repositories needed
sudo subscription-manager repos --enable=rhel-7-server-rpms \
--enable=rhel-7-server-optional-rpms --enable=rhel-7-server-extras-rpms \
--enable=rhel-7-server-openstack-6.0-rpms
.. admonition:: RHEL Satellite Registration
:class: satellite
To register the host machine to a Satellite, the following repos must
be synchronized on the Satellite and enabled for registered systems::
rhel-7-server-rpms
rhel-7-server-optional-rpms
rhel-7-server-extras-rpms
rhel-7-server-openstack-6.0-rpms
See the `Red Hat Satellite User Guide`_ for how to configure the system to
register with a Satellite server. It is suggested to use an activation
key that automatically enables the above repos for registered systems.
.. _Red Hat Satellite User Guide: https://access.redhat.com/documentation/en-US/Red_Hat_Satellite/
Configuration Files
-------------------
instackenv.json
^^^^^^^^^^^^^^^
Create a JSON file describing your baremetal nodes, call it
``instackenv.json`` and place it in your home directory. The file should contain
a JSON object with a single field, ``nodes``, containing a list of node
descriptions.
Each node description must contain the following required fields:
* ``pm_type`` - ``pxe_ipmitool`` for bare metal, ``pxe_ssh`` for a virtual
  environment
* ``pm_addr`` - node BMC IP address
* ``pm_user``, ``pm_password`` - node BMC credentials
Some fields are optional if you're going to use introspection later:
* ``mac`` - list of MAC addresses, optional for bare metal
* ``cpu`` - number of CPUs in the system
* ``arch`` - CPU architecture (common values are ``i386`` and ``x86_64``)
* ``memory`` - memory size in MiB
* ``disk`` - hard drive size in GiB
For example::
{
"nodes": [
{
"pm_type":"pxe_ipmitool",
"mac":[
"fa:16:3e:2a:0e:36"
],
"cpu":"2",
"memory":"4096",
"disk":"40",
"arch":"x86_64",
"pm_user":"admin",
"pm_password":"password",
"pm_addr":"10.0.0.8"
},
{
"pm_type":"pxe_ipmitool",
"mac":[
"fa:16:3e:da:39:c9"
],
"cpu":"2",
"memory":"4096",
"disk":"40",
"arch":"x86_64",
"pm_user":"admin",
"pm_password":"password",
"pm_addr":"10.0.0.15"
},
{
"pm_type":"pxe_ipmitool",
"mac":[
"fa:16:3e:51:9b:68"
],
"cpu":"2",
"memory":"4096",
"disk":"40",
"arch":"x86_64",
"pm_user":"admin",
"pm_password":"password",
"pm_addr":"10.0.0.16"
}
]
}
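Before registering the nodes, it can be worth confirming that the file is
well-formed JSON, for example (an optional check)::

    python -m json.tool instackenv.json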
@ -0,0 +1,10 @@
Environment Setup
=================
|project| can be used in baremetal as well as in virtual environments. This
section contains instructions on how to setup your environments properly.
.. toctree::
Virtual Environment <virtual>
Baremetal Environment <baremetal>
@ -0,0 +1,256 @@
Virtual Environment
===================
|project| can be used in a virtual environment using virtual machines instead
of actual baremetal. However, one baremetal machine is still
needed to act as the host for the virtual machines.
Minimum System Requirements
---------------------------
By default, this setup creates 3 virtual machines:
* 1 Undercloud
* 1 Overcloud Controller
* 1 Overcloud Compute
Each virtual machine must have at least 4 GB of memory and 40 GB of disk
space [#]_.
.. note::
The virtual machine disk files are thinly provisioned and will not take up
the full 40GB initially.
The baremetal machine must meet the following minimum system requirements:
* Virtualization hardware extensions enabled (nested KVM is **not** supported)
* 1 quad core CPU
* 12 GB free memory
* 120 GB disk space
..
<REMOVE WHEN HA IS AVAILABLE>
For minimal **HA (high availability)** deployment you need at least 3 Overcloud
Controllers and 2 Overcloud Computes which increases the minimum system
requirements up to:
* 24 GB free memory
* 240 GB disk space.
|project| currently supports the following operating systems:
* RHEL 7.1 x86_64 or
* CentOS 7 x86_64
.. _preparing_virtual_environment:
Preparing the Virtual Environment (Automated)
---------------------------------------------
#. Install RHEL 7.1 Server x86_64 or CentOS 7 x86_64 on your host machine.
.. admonition:: RHEL Portal Registration
:class: portal
Register the host machine using Subscription Management::
sudo subscription-manager register --username="[your username]" --password="[your password]"
# Find this with `subscription-manager list --available`
sudo subscription-manager attach --pool="[pool id]"
# Verify repositories are available
sudo subscription-manager repos --list
# Enable repositories needed
sudo subscription-manager repos --enable=rhel-7-server-rpms \
--enable=rhel-7-server-optional-rpms --enable=rhel-7-server-extras-rpms \
--enable=rhel-7-server-openstack-6.0-rpms
.. admonition:: RHEL Satellite Registration
:class: satellite
To register the host machine to a Satellite, the following repos must
be synchronized on the Satellite::
rhel-7-server-rpms
rhel-7-server-optional-rpms
rhel-7-server-extras-rpms
rhel-7-server-openstack-6.0-rpms
See the `Red Hat Satellite User Guide`_ for how to configure the system to
register with a Satellite server. It is suggested to use an activation
key that automatically enables the above repos for registered systems.
#. Make sure sshd service is installed and running.
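For example, on a systemd-based host (an illustrative sketch)::

    sudo yum install -y openssh-server
    sudo systemctl enable sshd
    sudo systemctl start sshd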
#. The user performing all of the installation steps on the virt host needs to
have sudo enabled. You can use an existing user or use the following commands
to create a new user called stack with password-less sudo enabled. Do not run
the rest of the steps in this guide as root.
* Example commands to create a user::
sudo useradd stack
sudo passwd stack # specify a password
echo "stack ALL=(root) NOPASSWD:ALL" | sudo tee -a /etc/sudoers.d/stack
sudo chmod 0440 /etc/sudoers.d/stack
#. Make sure you are logged in as the non-root user you intend to use.
* Example commands to log in as the non-root user::
su - stack
#. Enable needed repositories:
::
# Enable RDO Kilo
sudo yum install -y https://rdo.fedorapeople.org/openstack-kilo/rdo-release-kilo.rpm
# Enable RDO Trunk
sudo curl -o /etc/yum.repos.d/delorean.repo http://trunk.rdoproject.org/centos7/38/1c/381cac9139096bfef49952f3fd67e19451160b61_4bc2d731/delorean.repo
The above Delorean repository is updated after a successful CI run. The following repo can be used instead if the newest packages are needed before a CI run has passed.
::
sudo curl -o /etc/yum.repos.d/delorean.repo http://trunk.rdoproject.org/centos7/current/delorean.repo
#. Install instack-undercloud::
sudo yum install -y instack-undercloud
#. The virt setup automatically sets up a VM for the Undercloud, installed with
the same base OS as the host. See the note below to choose a different
OS.
.. note::
To setup the undercloud vm with a base OS different from the host,
set the ``$NODE_DIST`` environment variable prior to running
``instack-virt-setup``:
.. admonition:: CentOS
:class: centos
::
export NODE_DIST=centos7
.. admonition:: RHEL
:class: rhel
::
export NODE_DIST=rhel7
#. Run the script to setup your virtual environment:
.. admonition:: RHEL
:class: rhel
Download the RHEL 7.1 cloud image or copy it over from a different
location, for example: https://access.redhat.com/downloads/content/69/ver=/rhel---7/7.1/x86_64/product-downloads,
and define the needed environment variables for RHEL 7.1 prior to
running ``instack-virt-setup``::
export DIB_LOCAL_IMAGE=rhel-guest-image-7.1-20150224.0.x86_64.qcow2
.. admonition:: RHEL Portal Registration
:class: portal
To register the Undercloud vm to the Red Hat Portal define the following
variables::
export REG_METHOD=portal
export REG_USER="[your username]"
export REG_PASSWORD="[your password]"
# Find this with `sudo subscription-manager list --available`
export REG_POOL_ID="[pool id]"
export REG_REPOS="rhel-7-server-rpms rhel-7-server-extras-rpms rhel-ha-for-rhel-7-server-rpms \
rhel-7-server-optional-rpms rhel-7-server-openstack-6.0-rpms"
.. admonition:: RHEL Satellite Registration
:class: satellite
To register the Undercloud vm to a Satellite define the following
variables. Only using an activation key is supported when registering
to Satellite; username/password is not supported for security reasons.
The activation key must enable the repos shown::
export REG_METHOD=satellite
# REG_SAT_URL should be in the format of:
# http://<satellite-hostname>
export REG_SAT_URL="[satellite url]"
export REG_ORG="[satellite org]"
# Activation key must enable these repos:
# rhel-7-server-rpms
# rhel-7-server-optional-rpms
# rhel-7-server-extras-rpms
# rhel-7-server-openstack-6.0-rpms
export REG_ACTIVATION_KEY="[activation key]"
.. admonition:: Ceph
:class: ceph
To use Ceph you will need at least one additional virtual machine to be
provisioned as a Ceph OSD; set the ``NODE_COUNT`` variable to 3, from a
default of 2, so that the overcloud will have exactly one more node::
export NODE_COUNT=3
.. note::
The ``TESTENV_ARGS`` environment variable can be used to customize the
virtual environment configuration. For example, it could be used to
enable additional networks as follows::
export TESTENV_ARGS="--baremetal-bridge-names 'brbm brbm1 brbm2'"
::
instack-virt-setup
If the script encounters problems, see
:doc:`../troubleshooting/troubleshooting-virt-setup`.
When the script has completed successfully it will output the IP address of the
instack vm that has now been installed with a base OS.
Running ``sudo virsh list --all`` [#]_ will show that you now have one virtual
machine called *instack* and two called *baremetal[0-1]*.
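The output will look something like this (illustrative only; exact IDs, names
and states will vary)::

    sudo virsh list --all
     Id    Name                           State
    ----------------------------------------------------
     1     instack                        running
     -     baremetal_0                    shut off
     -     baremetal_1                    shut off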
You can ssh to the instack vm as the root user::
ssh root@<instack-vm-ip>
The vm contains a ``stack`` user to be used for installing the undercloud. You
can ``su - stack`` to switch to the stack user account.
Continue with :doc:`../installation/installing`.
.. rubric:: Footnotes
.. [#] Note that the default partitioning scheme will most likely not provide
enough space to the partition containing the default path for libvirt image
storage (/var/lib/libvirt/images). The easiest fix is to customize the
partition layout at the time of install to provide at least 200 GB of space for
that path.
.. [#] The libvirt virtual machines have been defined under the system
instance (qemu:///system). The user account executing these instructions
gets added to the libvirtd group which grants passwordless access to
the system instance. It does however require logging into a new shell (or
desktop environment session if wanting to use virt-manager) before this
change will be fully applied. To avoid having to re-login, you can use
``sudo virsh``.
.. _Red Hat Satellite User Guide: https://access.redhat.com/documentation/en-US/Red_Hat_Satellite/
@ -0,0 +1,5 @@
TripleO is a project aimed at installing, upgrading and operating OpenStack
clouds using OpenStack's own cloud facilities as the foundations - building on
Nova, Ironic, Neutron and Heat to automate cloud management at datacenter
scale.
@ -1,9 +1,65 @@
TripleO Documentation
=====================
Welcome to |project| documentation
====================================
.. include:: index-introduction.rst
Contents:
Getting Started
---------------
.. toctree::
:maxdepth: 1
:maxdepth: 2
introduction
Introduction <introduction/introduction>
Environment Setup <environments/environments>
Undercloud Installation <installation/installation>
Basic Deployment (CLI) <basic_deployment/basic_deployment_cli>
Basic Deployment (GUI) <basic_deployment/basic_deployment_gui>
Advanced Deployment <advanced_deployment/advanced_deployment>
Post Deployment <post_deployment/post_deployment>
Troubleshooting <troubleshooting/troubleshooting>
How to Contribute <contributions/contributions>
Documentation Conventions
=========================
Some steps in the following instructions only apply to certain environments,
such as deployments to real baremetal and deployments using RHEL. These
steps are marked as follows:
.. admonition:: RHEL
:class: rhel
Step that should only be run when using RHEL
.. admonition:: RHEL Portal Registration
:class: portal
Step that should only be run when using RHEL Portal Registration
.. admonition:: RHEL Satellite Registration
:class: satellite
Step that should only be run when using RHEL Satellite Registration
.. admonition:: CentOS
:class: centos
Step that should only be run when using CentOS
.. admonition:: Baremetal
:class: baremetal
Step that should only be run when deploying to baremetal
.. admonition:: Virtual
:class: virtual
Step that should only be run when deploying to virtual machines
.. admonition:: Ceph
:class: ceph
Step that should only be run when deploying Ceph for use by the Overcloud
Any such steps should *not* be run if the target environment does not match
the section marking.
@ -0,0 +1,30 @@
Accessing the GUI
=================
The Undercloud installation also includes Tuskar-UI, which you can use to drive
the deployment. It runs on the instack virtual machine at ``http://localhost/dashboard``.
Example of how to access Tuskar-UI:
-----------------------------------
Considering that Tuskar-UI runs in an instack VM and the virt host is a remote
machine, follow these steps to access the UI in a browser:
#. On the host machine, create an ssh tunnel from the instack VM to the virt host::
ssh -g -N -L 8080:127.0.0.1:80 root@<undercloud_vm_ip>
#. On the instack VM, edit ``/etc/openstack-dashboard/local_settings`` and add the virt host ``hostname`` to the ``ALLOWED_HOSTS`` array
#. Restart Apache::
systemctl restart httpd
#. Allow port ``8080`` on host machine::
sudo iptables -I INPUT -p tcp -m tcp --dport 8080 -j ACCEPT
#. Navigate to ``http://<virt_host_hostname>:8080/dashboard`` in the browser
When logging into the dashboard the default user and password are found in the ``/root/stackrc`` file on the instack virtual machine, ``OS_USERNAME`` and ``OS_PASSWORD``.
@ -0,0 +1,10 @@
Undercloud Installation
=======================
This section contains instructions on how to install the undercloud and how to
update components after installation.
.. toctree::
Installing the Undercloud <installing>
Updating Undercloud Components <updating>
@ -0,0 +1,99 @@
Installing the Undercloud
==========================
#. Log in to your machine (baremetal or VM) where you want to install the
undercloud as a non-root user (such as the stack user)::
ssh <non-root-user>@<undercloud-machine>
.. note::
If you don't have a non-root user created yet, log in as root and create
one with following commands::
sudo useradd stack
sudo passwd stack # specify a password
echo "stack ALL=(root) NOPASSWD:ALL" | sudo tee -a /etc/sudoers.d/stack
sudo chmod 0440 /etc/sudoers.d/stack
su - stack
.. note::
The undercloud is intended to work correctly with SELinux enforcing, and
cannot be installed to a system with SELinux disabled. If SELinux
enforcement must be turned off for some reason, it should instead be set
to permissive.
.. admonition:: Baremetal
:class: baremetal
Ensure that there is a FQDN hostname set and that the $HOSTNAME environment
variable matches that value.
Use ``hostnamectl`` to set a hostname if needed::
sudo hostnamectl set-hostname myhost.mydomain
sudo hostnamectl set-hostname --transient myhost.mydomain
An entry for the system's FQDN hostname is also needed in /etc/hosts. For
example, if the system is named *myhost.mydomain*, /etc/hosts should have
an entry like::
127.0.0.1 myhost.mydomain
#. Enable needed repositories:
.. admonition:: RHEL
:class: rhel
Enable optional repo::
sudo yum install -y yum-utils
sudo yum-config-manager --enable rhelosp-rhel-7-server-opt
::
# Enable RDO Kilo
sudo yum install -y https://rdo.fedorapeople.org/openstack-kilo/rdo-release-kilo.rpm
# Enable RDO Trunk
sudo curl -o /etc/yum.repos.d/delorean.repo http://trunk.rdoproject.org/centos7/38/1c/381cac9139096bfef49952f3fd67e19451160b61_4bc2d731/delorean.repo
The above Delorean repository is updated after a successful CI run. The following repo can be used instead if the newest packages are needed before a CI run has passed.
::
sudo curl -o /etc/yum.repos.d/rdo-management-trunk.repo http://trunk-mgt.rdoproject.org/centos-kilo/current/delorean-rdo-management.repo
#. Install the TripleO CLI, which will pull in all other necessary packages as dependencies::
sudo yum install -y python-rdomanager-oscplugin
#. Run the script to install the undercloud:
.. admonition:: Baremetal
:class: baremetal
Copy in the sample configuration file and edit it to reflect your environment::
cp /usr/share/instack-undercloud/undercloud.conf.sample ~/undercloud.conf
Install the undercloud::
openstack undercloud install
Once the install has completed, you should take note of the files ``stackrc`` and
``undercloud-passwords.conf``. You can source ``stackrc`` to interact with the
undercloud via the OpenStack command-line client. ``undercloud-passwords.conf``
contains the passwords used for each service in the undercloud. These passwords
will be automatically reused if the undercloud is reinstalled on the same system,
so it is not necessary to copy them to ``undercloud.conf``.
.. note::
Any passwords set in ``undercloud.conf`` will take precedence over the ones in
``undercloud-passwords.conf``.
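Once the undercloud is installed, a quick way to confirm that its services are
reachable is to list the service catalog (an optional check)::

    source ~/stackrc
    openstack catalog list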
@ -0,0 +1,19 @@
Updating Undercloud Components
------------------------------
You can upgrade any packages that are installed on the undercloud machine.
#. Update the Delorean Trunk repository::
# Remove old and enable new Delorean Trunk repository
sudo rm /etc/yum.repos.d/delorean.repo
sudo curl -o /etc/yum.repos.d/delorean.repo http://trunk.rdoproject.org/centos7/current-passed-ci/delorean.repo
#. Use yum to update all installed packages::
sudo yum update -y
# You can specify the package names to update as options in the yum update command.
You do not need to restart any services after you update.
@ -0,0 +1,422 @@
|project| Architecture
========================
This document lists the main components of |project|, and gives some
description of how each component is used. There are links to additional sources
of information throughout the document.
.. contents::
:depth: 3
:backlinks: none
Architecture Overview
---------------------
|project| is a community developed approach and set of tools for deploying
and managing an OpenStack cloud.
Some concepts, particularly the role matching and Automated Health checking,
are inspired by `SpinalStack
<http://spinal-stack.readthedocs.org/en/latest/>`_.
TripleO
^^^^^^^
TripleO is the friendly name for “OpenStack on OpenStack”. It is an official
OpenStack project with the goal of allowing you to deploy and manage a
production cloud onto bare metal hardware using a subset of existing OpenStack
components.
.. image:: ../_images/overview.png
With TripleO, you start by creating an “undercloud” (a deployment cloud)
that will contain the necessary OpenStack components to deploy and manage an
“overcloud” (a workload cloud). The overcloud is the deployed solution
and can represent a cloud for any purpose (e.g. production, staging, test, etc).
.. image:: ../_images/logical_view.png
TripleO leverages several existing core components of OpenStack including Nova,
Ironic, Neutron, Heat, Glance and Ceilometer to deploy OpenStack on baremetal
hardware. Nova and Ironic are used in the undercloud to manage baremetal
instances that comprise the infrastructure for the overcloud. Neutron is
utilized to provide a networking environment in which to deploy the overcloud,
machine images are stored in Glance, and Ceilometer collects metrics about your
overcloud.
The following diagram illustrates a physical view of how the undercloud may be
hosted on one physical server and the overcloud distributed across many physical
servers.
.. image:: ../_images/physical_view.png
SpinalStack's Inspiration
^^^^^^^^^^^^^^^^^^^^^^^^^
Some key aspects of SpinalStack workflow have been incorporated into
|project|, providing options to perform introspection, benchmarking and role
matching of your hardware prior to deploying OpenStack.
Hardware introspection features enable you to collect data about the properties
of your hardware prior to deployment, such that specific classes of hardware may
be matched to specific roles (e.g a special hardware configuration for Compute
or Storage roles). There is also the option to enable performance benchmarking
during this phase, such that outliers which do not match the expected
performance profile may be excluded from the deployment.
|project| also configures servers in a similar way to SpinalStack, using
stable community puppet implementations, applied in a series of steps, such
that granular control and validation of the deployment is possible.
Benefits
--------
Using |project|'s combination of OpenStack components, and their APIs, as the
infrastructure to deploy and operate OpenStack itself delivers several benefits:
* |project|'s APIs are the OpenStack APIs. They're well maintained, well
documented, and come with client libraries and command line tools. Users who
invest time in learning about |project|'s APIs are also learning about
OpenStack itself, and users who are already familiar with OpenStack will find
a great deal in |project| that they already understand.
* Using the OpenStack components allows more rapid feature development of
|project| than might otherwise be the case; |project| automatically
inherits all the new features which are added to Glance, Heat etc., even when
the developer of the new feature didn't explicitly have |project| in mind.
* The same applies to bug fixes and security updates. When OpenStack developers
fix bugs in the common components, those fixes are inherited by |project|.
* Users can invest time in integrating their own scripts and utilities with
|project|'s APIs with some confidence. Those APIs are cooperatively
maintained and developed by the OpenStack community. They're not at risk of
being suddenly changed or retired by a single controlling vendor.
* For developers, tight integration with the OpenStack APIs provides a solid
architecture, which has gone through extensive community review.
It should be noted that not everything in |project| is a reused OpenStack
element. The Tuskar API, for example (which lets users design the workload cloud
that they want to deploy), is found in |project| but not, so far at least, in
a typical OpenStack instance. The Tuskar API is described in more detail below.
Deployment Workflow Overview
----------------------------
#. Environment Preparation
* Prepare your environment (baremetal or virtual)
* Install undercloud
#. Undercloud Data Preparation
* Create images to establish the overcloud
* Register hardware nodes with undercloud
* Introspect hardware
* Create flavors (node profiles)
#. Deployment Planning
* Configure overcloud roles
* Assign flavor (node profile to match desired hardware specs)
* Assign image (provisioning image)
* Size the role (how many instances to deploy)
* Configure service parameters
* Create a Heat template describing the overcloud (auto-generated from above)
#. Deployment
* Use Heat to deploy your template
* Heat will use Nova to identify and reserve the appropriate nodes
* Nova will use Ironic to startup nodes and install the correct images
#. Per-node Setup
* When each node of the overcloud starts it will gather its configuration
metadata from Heat Template configuration files
* Hiera files are distributed across all nodes and Heat applies puppet
manifests to configure the services on the nodes
* Puppet runs in multiple steps, so that after each step tests can be
triggered to check progress of the deployment and allow easier debugging.
#. Overcloud Initialization
* Services on nodes of the overcloud are registered with Keystone
Deployment Workflow Detail
--------------------------
Environment Preparation
^^^^^^^^^^^^^^^^^^^^^^^
First of all, you need to check that your environment is ready.
|project| can deploy OpenStack into baremetal as well as virtual environments.
You need to make sure that your environment satisfies the minimum requirements
for the given environment type and that networking is correctly set up.
The next step is to install the undercloud. We install the undercloud using
`Instack <https://github.com/rdo-management/instack-undercloud>`_'s script, which calls
puppet scripts in the background. Upstream TripleO developers also use the
developer-based steps known as `devtest <http://docs.openstack.org/developer/
tripleo-incubator/devtest.html>`_.
Undercloud Data Preparation
^^^^^^^^^^^^^^^^^^^^^^^^^^^
Images
""""""
Before deploying the overcloud, you must first download or build images which
will be installed on each of the nodes of the overcloud. TripleO uses
`diskimage-builder <https://github.com/openstack/diskimage-builder>`_ for
building these so-called "Golden Images". The diskimage-builder tool takes a
base image e.g. `CentOS 7 <http://cloud.centos.org/centos/7/images/
CentOS-7-x86_64-GenericCloud.qcow2>`_ and then layers additional software via
configuration scripts (called elements) on top of that. The final result is a
qcow2 formatted image with software installed but not configured.
While the diskimage-builder repository provides operating-system specific
elements, ones specific to OpenStack, e.g. nova-api, are found in
`tripleo-image-elements <https://github.com/openstack/tripleo-image-elements>`_.
You can add different elements to an image to provide specific applications and
services. Once all the images required to deploy the overcloud are built, they
are stored in Glance running on the undercloud.
Nodes
"""""
Deploying the overcloud requires suitable hardware. The first task is to
register the available hardware with Ironic, OpenStack's equivalent of a
hypervisor for managing baremetal servers. Users can define the hardware
attributes (such as number of CPUs, RAM, and disk) manually, or leave the
fields out and run introspection of the nodes afterwards.
The sequence of events is pictured below:
.. image:: ../_images/discovery_diagram.png
* The user, via the GUI, the command-line tools, or through direct API calls,
registers the power management credentials for a node with Ironic.
* The user then instructs Ironic to reboot the node.
* Because the node is new, and not already fully registered, there are no
specific PXE-boot instructions for it. In that case, the default action is to
boot into a discovery ramdisk.
* The discovery ramdisk probes the hardware on the node and gathers facts,
including the number of CPU cores, the local disk size and the amount of RAM.
* The ramdisk posts the facts to the discoverd API.
* All facts are passed and stored in the Ironic database.
* Advanced role matching can be performed via the ``ahc-match`` tool,
which simply adds an additional role categorization to Ironic based on
discovered node facts and specified conditions.
Flavors
"""""""
When users are creating virtual machines (VMs) in an OpenStack cloud, the flavor
that they choose specifies the capacity of the VM which should be created. The
flavor defines the CPU count, the amount of RAM, the amount of disk space etc.
As long as the cloud has enough capacity to grant the user's wish, and the user
hasn't reached their quota limit, the flavor acts as a set of instructions on
exactly what kind of VM to create on the user's behalf.
In the undercloud, where the machines are usually physical rather than virtual
(or, at least, pre-existing, rather than created on demand), flavors have a
slightly different effect. Essentially, they act as a constraint. Of all of the
discovered hardware, only nodes which match a specified flavor are suitable for
a particular role. This can be used to ensure that the large machines with a
great deal of RAM and CPU capacity are used to run Nova in the overcloud, and
the smaller machines run less demanding services, such as Keystone.
The version of TripleO included in |project| is capable of handling flavors in
two different modes.
The simpler PoC (Proof of Concept) mode is intended to enable new users to
experiment, without worrying about matching hardware profiles. In this mode,
there's one single, global flavor, and any hardware can match it. That
effectively removes flavor matching. Users can use whatever hardware they wish.
For the second mode, named Scale because it is suited to larger scale overcloud
deployments, flavor matching is in full effect. A node will only be considered
suitable for a given role if the role is associated with a flavor which matches
the capacity of the node. Nodes without a matching flavor are effectively
unusable.
This second mode allows users to ensure that their different hardware types end
up running their intended role, though requires manual configuration of the role
definitions and role matching via the ahc-match tool (see
:doc:`../advanced_deployment/profile_matching`).
Deployment Planning
^^^^^^^^^^^^^^^^^^^
Planning your deployment is based on the concept of **overcloud
roles**.
Roles are stored in the Tuskar DB, and are used through interaction with the
Tuskar API. A role brings together the following things:
* An image; the software to be installed on a node
* A flavor; the size of node suited to the role
* A size; the number of instances which should be deployed for a given role
* A set of heat templates; instructions on how to configure the node for its
task
In the case of the “Compute” role:
* the image must contain all the required software to boot an OS and then run
the KVM hypervisor and the Nova compute service
* the flavor (at least for a deployment which isn't a simple proof of concept),
should specify that the machine has enough CPU capacity and RAM to host
several VMs concurrently
* the Heat templates will take care of ensuring that the Nova service is
correctly configured on each node when it first boots.
Currently, the roles in |project| are very prescriptive, and in particular
individual services cannot easily be scaled independently of the Controller role
(other than storage nodes). More flexibility in this regard is planned in a
future release.
Customizable things during deployment planning are:
* Number of nodes for each role
* Service parameters configuration
* Network configuration (NIC configuration options, isolated vs. single overlay)
* Ceph rbd backend options and defaults
* Ways to pass in extra configuration, e.g. site-specific customizations
Deployment
^^^^^^^^^^
Deployment to physical servers happens through a collaboration of Tuskar, Heat,
Nova, Neutron, Glance and Ironic.
To deploy the overcloud, Tuskar gathers all the plan information it keeps and
builds the Heat templates which describe the desired overcloud.
These templates are served to Heat, which will orchestrate the whole deployment
and create a stack. A stack is Heat's own term for the applications that
it creates. The overcloud, in Heat terms, is a particularly complex instance of
a stack.
In order for the stack to be deployed, Heat makes successive calls to Nova,
OpenStack's compute service controller. Nova depends upon Ironic, which, as
described above, has acquired an inventory of discovered hardware by this stage
in the process.
At this point, Nova flavors may act as a constraint, influencing the range of
machines which may be picked for deployment by the Nova scheduler. For each
request to deploy a new node with a specific role, Nova filters the list of
available nodes, ensuring that the selected nodes meet the hardware requirements.
Once the target node has been selected, Ironic does the actual provisioning of
the node: Ironic retrieves the OS image associated with the role from Glance,
causes the node to boot a deployment ramdisk and then, in the typical case,
exports the node's local disk over iSCSI so that the disk can be partitioned and
have the OS image written onto it by the Ironic Conductor.
See Ironic's `Understanding Baremetal Deployment <http://docs.openstack.org/
developer/ironic/deploy/user-guide.html#understanding-bare-metal-deployment>`_
for further details.
Per-node Setup
^^^^^^^^^^^^^^
TBD - Puppet
Overcloud Initialization
^^^^^^^^^^^^^^^^^^^^^^^^
After the overcloud has been deployed, the initialization of OpenStack services
(e.g. Keystone, Neutron, etc.) needs to occur. That is accomplished today by
scripts in the `tripleo-incubator <https://github.com/openstack/
tripleo-incubator>`_ source repository and it uses bits from `os-cloud-config
<https://github.com/openstack/os-cloud-config>`_ which contains common code,
the seed initialization logic, and the post heat completion initial
configuration of a cloud. There are three primary steps to completing the
initialization:
* Initializing Identity Services (Keystone)
* Registering service endpoints (e.g. Glance, Nova)
* Specifying a block of IP addresses for overcloud instances (Neutron)
The first step initializes Keystone for use with normal authentication by
creating the admin and service tenants, the admin and Member roles, and the
admin user, configuring certificates, and finally registering the initial
identity endpoint. The next step registers the image, orchestration, network and compute
services running on the default ports on the controlplane node. Finally, Neutron
is given a starting IP address, ending IP address, and a CIDR notation to
represent the subnet for the block of floating IP addresses that will be used
within the overcloud.
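A sketch of that final Neutron step using the standard client (the network
name and address ranges are illustrative)::
neutron net-create default-net --shared
neutron subnet-create default-net 10.0.0.0/24 --allocation-pool start=10.0.0.20,end=10.0.0.200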
High Availability (HA)
----------------------
|project| will use Pacemaker to achieve high availability.
Reference architecture document: https://github.com/beekhof/osp-ha-deploy
.. note:: **Current HA solution is being developed by our community.**
Managing the Deployment
-----------------------
After the overcloud deployment is completed, it will be possible to monitor it,
scale it out, or perform basic maintenance operations via the GUI or CLI.
Monitoring the Overcloud
^^^^^^^^^^^^^^^^^^^^^^^^
When the overcloud is deployed, Ceilometer can be configured to track a set of
OS metrics for each node (system load, CPU utilization, swap usage, etc.). These
metrics are graphed in the GUI, both for individual nodes, and for groups
of nodes, such as the collection of nodes which are all delivering a particular
role.
Additionally, Ironic exports IPMI metrics for nodes, which can also be stored in
Ceilometer. This enables checks on hardware state such as fan operation/failure
and internal chassis temperatures.
The metrics which Ceilometer gathers can be queried via Ceilometer's REST API,
or by using the command line client, as in the sketch below.
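For example (treat the meter name as illustrative; the meters actually
available depend on the configured pollsters)::
ceilometer meter-list
ceilometer statistics -m cpu_util -p 600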
.. Note::
There are plans to add more operational tooling in a future release.
Scaling-out the Overcloud
^^^^^^^^^^^^^^^^^^^^^^^^^
The process of scaling out the overcloud by adding new nodes involves these
stages:
* Making sure you have enough nodes to deploy on (or register new nodes as
described in the "Undercloud Data Preparation" section above).
* Updating the plan managed by Tuskar, as described in the “Deployment Planning”
section above.
* Calling Heat to update the stack which will apply the set of changes to the
overcloud.
@ -0,0 +1,461 @@
|project| Components
======================
.. contents::
:depth: 2
:backlinks: none
This section contains a list of components that |project| uses. The components
are organized in categories, and include a basic description, useful links, and
contribution information.
..
[Example Category Name]
-----------------------
[Example Component Name]
^^^^^^^^^^^^^^^^^^^^^^^^
This is a short description of what the project is about and how |project| uses
it. Three sentences max.
**How to contribute**
* Instructions to prepare a development environment. These should mostly point
to upstream docs. If upstream docs don't exist, please create them. Add tips on
how to test the feature in |project|, plus other useful information.
**Useful links**
* Upstream Project: `link <#>`_
* Bugs: `link <#>`_
* Blueprints: `link <#>`_
Shared Libraries
----------------
diskimage-builder
^^^^^^^^^^^^^^^^^
diskimage-builder is an image building tool. It is used by
``openstack overcloud image build --all``.
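That command wraps direct diskimage-builder invocations; a minimal standalone
sketch (the output name and element list are illustrative)::
disk-image-create -o my-image centos7 vm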
**How to contribute**
See the diskimage-builder `README.rst
<https://git.openstack.org/cgit/openstack/diskimage-builder/tree/README.rst>`_
for a further explanation of the tooling. Submit your changes via
OpenStack Gerrit (see `OpenStack Developer's Guide
<http://docs.openstack.org/infra/manual/developers.html>`_).
**Useful links**
* Upstream Project Documentation: http://docs.openstack.org/developer/diskimage-builder/
* Bugs: https://bugs.launchpad.net/diskimage-builder
* Git repository: https://git.openstack.org/cgit/openstack/diskimage-builder/
dib-utils
^^^^^^^^^
dib-utils contains tools that are used by diskimage-builder.
**How to contribute**
Submit your changes via OpenStack Gerrit (see `OpenStack Developer's Guide
<http://docs.openstack.org/infra/manual/developers.html>`_).
**Useful links**
* Bugs: https://bugs.launchpad.net/diskimage-builder
* Git repository: https://git.openstack.org/cgit/openstack/dib-utils/
os-\*-config
^^^^^^^^^^^^
The os-\*-config projects are a suite of tools used to configure instances
deployed via TripleO. They include:
* os-collect-config
* os-refresh-config
* os-apply-config
* os-net-config
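As an example of the suite in action, os-apply-config can read a single key
out of the metadata gathered by os-collect-config (the ``hosts`` key mirrors
its use in the quiescing guide later in these docs; treat the invocation as a
sketch)::
os-apply-config --key hosts --type raw --key-default ''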
**How to contribute**
Each tool uses `tox <https://tox.readthedocs.org/en/latest/>`_ to manage the
development environment. Submit your changes via OpenStack Gerrit (see
`OpenStack Developer's Guide
<http://docs.openstack.org/infra/manual/developers.html>`_).
**Useful links**
* Bugs:
* os-collect-config: https://bugs.launchpad.net/os-collect-config
* os-refresh-config: https://bugs.launchpad.net/os-refresh-config
* os-apply-config: https://bugs.launchpad.net/os-apply-config
* os-net-config: https://bugs.launchpad.net/os-net-config
* Git repositories:
* os-collect-config: https://git.openstack.org/cgit/openstack/os-collect-config
* os-refresh-config: https://git.openstack.org/cgit/openstack/os-refresh-config
* os-apply-config: https://git.openstack.org/cgit/openstack/os-apply-config
* os-net-config: https://git.openstack.org/cgit/openstack/os-net-config
tripleo-image-elements
^^^^^^^^^^^^^^^^^^^^^^
tripleo-image-elements is a repository of diskimage-builder style elements used
for installing various software components.
**How to contribute**
Submit your changes via OpenStack Gerrit (see
`OpenStack Developer's Guide
<http://docs.openstack.org/infra/manual/developers.html>`_).
**Useful links**
* Git repository: https://git.openstack.org/cgit/openstack/tripleo-image-elements
Installer
---------
instack
^^^^^^^
instack executes diskimage-builder style elements on the current system. This
enables a currently running system to have an element applied in the same way
that diskimage-builder applies the element to an image build.
instack, in its current form, should be considered low level tooling. It is
meant to be used by higher level scripting that understands what elements and
hook scripts need execution. Using instack requires rather in-depth knowledge
of the elements within diskimage-builder and tripleo-image-elements.
**How to contribute**
Submit patches via GerritHub: https://review.gerrithub.io/#/q/project:rdo-management/instack
**Useful links**
* Bugs: https://bugzilla.redhat.com/buglist.cgi?bug_status=NEW&bug_status=ASSIGNED&classification=Community&component=instack
instack-undercloud
^^^^^^^^^^^^^^^^^^
instack-undercloud is a TripleO style undercloud installer based around
instack.
**How to contribute**
Submit patches via GerritHub: https://review.gerrithub.io/#/q/project:rdo-management/instack-undercloud
**Useful links**
* Bugs: https://bugzilla.redhat.com/buglist.cgi?bug_status=NEW&bug_status=ASSIGNED&classification=Community&component=instack-undercloud
tripleo-incubator
^^^^^^^^^^^^^^^^^
tripleo-incubator contains various scripts to aid in deploying a TripleO cloud.
**How to contribute**
Submit your changes via OpenStack Gerrit (see
`OpenStack Developer's Guide
<http://docs.openstack.org/infra/manual/developers.html>`_).
**Useful links**
* Documentation: http://docs.openstack.org/developer/tripleo-incubator/index.html
* Git repository: https://git.openstack.org/cgit/openstack/tripleo-incubator
Node Management
---------------
ironic
^^^^^^
The Ironic project is responsible for provisioning and managing bare metal
instances.
For testing purposes, Ironic can also be used to provision and manage virtual
machines which act as bare metal nodes, via the special ``pxe_ssh`` driver.
**How to contribute**
Ironic uses `tox <https://tox.readthedocs.org/en/latest/>`_ to manage the
development environment, see `OpenStack's Documentation
<http://docs.openstack.org/developer/ironic/dev/contributing.html>`_,
`Ironic Developer Guidelines
<https://wiki.openstack.org/wiki/Ironic/Developer_guidelines>`_
and `OpenStack Developer's Guide`_ for details.
**Useful links**
* Upstream Project: http://docs.openstack.org/developer/ironic/index.html
* Bugs: https://bugs.launchpad.net/ironic
* Blueprints: https://blueprints.launchpad.net/ironic
* `Specs process <https://wiki.openstack.org/wiki/Ironic/Specs_Process>`_
should be followed for suggesting new features.
* Approved Specs: http://specs.openstack.org/openstack/ironic-specs/
ironic inspector (formerly ironic-discoverd)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Ironic Inspector project is responsible for inspection of hardware properties
for newly enrolled nodes (see also ironic_).
Ironic Inspector was called ironic-discoverd previously, and the RPM packages
are still based on the old version, so they are using the old name for now.
**How to contribute**
Ironic Inspector uses `tox <https://tox.readthedocs.org/en/latest/>`_ to manage
the development environment, see `upstream documentation
<https://github.com/openstack/ironic-inspector/blob/master/CONTRIBUTING.rst>`_
for details.
**Useful links**
* Upstream Project: https://github.com/openstack/ironic-inspector
* PyPI: https://pypi.python.org/pypi/ironic-inspector
Also https://pypi.python.org/pypi/ironic-discoverd for the old version which
|project| currently uses.
* Bugs: https://bugs.launchpad.net/ironic-inspector
* Blueprints: https://blueprints.launchpad.net/ironic-inspector
Deployment Planning
-------------------
Tuskar
^^^^^^
The Tuskar project is responsible for planning deployments through the use
of two main concepts: the Role (a unit of functionality, e.g. 'Compute') and the Plan.
A given Role is associated with a number of Heat templates and extra
data files and Tuskar allows the user to provide values for a Role's template
attributes.
Once a Plan is specified in terms of Roles (and any desired
template attributes have been set), Tuskar can assemble and generate the
corresponding Heat deployment files and return these to the caller
(ready to be passed to Heat).
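As a sketch of this flow, using the management CLI commands that appear later
in the scaling guide (the role name, version suffix, and count are
illustrative)::
openstack management plan set $PLAN_UUID -S Compute-1=3
openstack overcloud deploy --plan $PLAN_UUID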
**How to contribute**
The Tuskar project uses the usual OpenStack code review process with gerrit
reviews (see links below). Tuskar is a sub-project falling under TripleO,
and as such you can use the #tripleo IRC channel (Freenode) or the weekly
TripleO meeting to bring up issues about Tuskar, as well as the openstack-dev
mailing list.
**Useful links**
* Upstream Project: https://github.com/openstack/tuskar
* PyPI: https://pypi.python.org/pypi/tuskar
* Bugs: https://bugs.launchpad.net/tuskar
* Blueprints: https://blueprints.launchpad.net/tuskar
* REST API: http://specs.openstack.org/openstack/tripleo-specs/specs/juno/tripleo-juno-tuskar-rest-api.html
* Reviews: https://review.openstack.org/#/q/status:open+project:openstack/tuskar,n,z
Deployment & Orchestration
--------------------------
heat
^^^^
Heat is OpenStack's orchestration tool. It reads YAML files describing
the OpenStack deployment's resources (machines, their configurations
etc.) and gets those resources into the desired state, often by
talking to other components (e.g. Nova).
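A minimal sketch of the YAML Heat consumes (the resource name and property
values are illustrative, not taken from the TripleO templates)::
heat_template_version: 2014-10-16
description: Minimal example of a single-server stack
resources:
  my_server:
    type: OS::Nova::Server
    properties:
      image: overcloud-full
      flavor: baremetal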
**How to contribute**
* Use `devstack with Heat
<http://docs.openstack.org/developer/heat/getting_started/on_devstack.html>`_
to set up a development environment. Submit your changes via
OpenStack Gerrit (see `OpenStack Developer's Guide
<http://docs.openstack.org/infra/manual/developers.html>`_).
**Useful links**
* Upstream Project: https://wiki.openstack.org/wiki/Heat
* Bugs: https://bugs.launchpad.net/heat
* Blueprints: https://blueprints.launchpad.net/heat
heat-templates
^^^^^^^^^^^^^^
The heat-templates repository contains additional image elements for
producing disk images ready to be configured by Puppet via Heat.
**How to contribute**
* Use `devtest with Puppet
<http://docs.openstack.org/developer/tripleo-incubator/puppet.html>`_
to set up a development environment. Submit your changes via
OpenStack Gerrit (see `OpenStack Developer's Guide
<http://docs.openstack.org/infra/manual/developers.html>`_).
**Useful links**
* Upstream Project: https://git.openstack.org/cgit/openstack/heat-templates
* Bugs: https://bugs.launchpad.net/heat-templates
* Blueprints: https://blueprints.launchpad.net/heat-templates
tripleo-heat-templates
^^^^^^^^^^^^^^^^^^^^^^
The tripleo-heat-templates describe the OpenStack deployment in Heat
Orchestration Template YAML files and Puppet manifests. The templates
are processed through Tuskar and materialized into an actual
deployment via Heat.
**How to contribute**
* Use `devtest with Puppet
<http://docs.openstack.org/developer/tripleo-incubator/puppet.html>`_
to set up a development environment. Submit your changes via
OpenStack Gerrit (see `OpenStack Developer's Guide
<http://docs.openstack.org/infra/manual/developers.html>`_).
**Useful links**
* Upstream Project: https://git.openstack.org/cgit/openstack/tripleo-heat-templates
* Bugs: https://bugs.launchpad.net/tripleo
* Blueprints: https://blueprints.launchpad.net/tripleo
nova
^^^^
TBD
puppet-\*
^^^^^^^^^
The OpenStack Puppet modules are used to configure the OpenStack
deployment (write configuration, start services etc.). They are used
via the tripleo-heat-templates.
**How to contribute**
* Use `devtest with Puppet
<http://docs.openstack.org/developer/tripleo-incubator/puppet.html>`_
to set up a development environment. Submit your changes via
OpenStack Gerrit (see `OpenStack Developer's Guide
<http://docs.openstack.org/infra/manual/developers.html>`_).
**Useful links**
* Upstream Project: https://wiki.openstack.org/wiki/Puppet
tripleo-puppet-elements
^^^^^^^^^^^^^^^^^^^^^^^
The tripleo-puppet-elements describe the contents of disk images which
|project| uses to deploy OpenStack. They are the same kind of elements
as in tripleo-image-elements, but tripleo-puppet-elements are specific
to Puppet-enabled images.
**How to contribute**
* Use `devtest with Puppet
<http://docs.openstack.org/developer/tripleo-incubator/puppet.html>`_
to set up a development environment. Submit your changes via
OpenStack Gerrit (see `OpenStack Developer's Guide`_).
**Useful links**
* Upstream Project: https://git.openstack.org/cgit/openstack/tripleo-puppet-elements
* Bugs: https://bugs.launchpad.net/tripleo
* Blueprints: https://blueprints.launchpad.net/tripleo
User Interfaces
---------------
tuskar-ui
^^^^^^^^^
Tuskar-UI provides a GUI to install and manage OpenStack. It is implemented as
a plugin to Horizon.
**How to contribute**
* See `upstream documentation <http://tuskar-ui.readthedocs.org/en/latest/install.html>`_
for instructions on how to set up a development environment. Submit your
changes via OpenStack Gerrit (see `OpenStack Developer's Guide`_).
**Useful links**
* Upstream Project: https://git.openstack.org/cgit/openstack/tuskar-ui
* Documentation: http://tuskar-ui.readthedocs.org
* Bugs: https://bugs.launchpad.net/tuskar-ui
* Blueprints: https://blueprints.launchpad.net/tuskar-ui
tuskar-ui-extras
^^^^^^^^^^^^^^^^
Tuskar-UI extras provides GUI enhancements for Tuskar-UI. It is implemented as
a plugin to Horizon.
**How to contribute**
* See `upstream documentation <http://tuskar-ui-extras.readthedocs.org/en/latest/install.html#development-install-instructions>`_
for instructions on how to set up a development environment. Submit your
changes via `Gerrithub <https://review.gerrithub.io/#/q/project:rdo-management/tuskar-ui-extras>`_.
**Useful links**
* Project: https://github.com/rdo-management/tuskar-ui-extras
* Documentation: http://tuskar-ui-extras.readthedocs.org
python-openstackclient
^^^^^^^^^^^^^^^^^^^^^^
python-openstackclient is an upstream CLI tool which can manage multiple
OpenStack services. It wraps OpenStack clients like glance, nova, etc. and maps
them under intuitive names like ``openstack image`` and ``openstack compute``.
The main value is that all services can be controlled by a single
(``openstack``) command with consistent syntax and behaviour.
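For example, the following pairs were equivalent at the time of writing::
openstack image list    # wraps: glance image-list
openstack server list   # wraps: nova list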
**How to contribute**
* python-openstackclient uses `tox <https://tox.readthedocs.org/en/latest/>`_
to manage the development environment, see `upstream documentation
<https://github.com/openstack/python-openstackclient/blob/master/README.rst>`_
for details. Submit your changes via OpenStack Gerrit
(see `OpenStack Developer's Guide`_).
**Useful links**
* Upstream Project: http://git.openstack.org/cgit/openstack/python-openstackclient
* Bugs: https://bugs.launchpad.net/python-openstackclient
* Blueprints: https://blueprints.launchpad.net/python-openstackclient
* Human interface guide: http://docs.openstack.org/developer/python-openstackclient/humaninterfaceguide.html
python-rdomanager-oscplugin
^^^^^^^^^^^^^^^^^^^^^^^^^^^
The python-rdomanager-oscplugin is a CLI tool embedded into
python-openstackclient. It provides functions related to instack
installation and initial configuration like node discovery, overcloud image
building and uploading, etc.
**How to contribute**
* python-rdomanager-oscplugin uses `tox <https://tox.readthedocs.org/en/latest/>`_
to manage the development environment, see `documentation
<https://github.com/rdo-management/python-rdomanager-oscplugin/blob/master/CONTRIBUTING.rst>`_
for details. Submit your changes via
`Gerrithub <https://review.gerrithub.io/#/q/project:rdo-management/python-rdomanager-oscplugin>`_.
**Useful links**
* Project: https://github.com/rdo-management/python-rdomanager-oscplugin
..
<GLOBAL_LINKS>
.. _OpenStack Developer's Guide: http://docs.openstack.org/infra/manual/developers.html
@ -0,0 +1,32 @@
|project| Introduction
========================
|project| is an OpenStack Deployment & Management tool.
**Architecture**
With |project|, you start by creating an **undercloud** (an actual operator
facing deployment cloud) that will contain the necessary OpenStack components to
deploy and manage an **overcloud** (an actual tenant facing workload cloud). The
overcloud is the deployed solution and can represent a cloud for any purpose
(e.g. production, staging, test, etc.). The operator can choose any of the available
Overcloud Roles (controller, compute, etc.) they want to deploy to the environment.
Go to :doc:`architecture` to learn more.
|
**Components**
|project| is composed of a set of official OpenStack components, accompanied by a
few other open source plugins which increase |project|'s capabilities.
Go to :doc:`components` to learn more.
.. toctree::
:hidden:
Architecture <architecture>
Components <components>
@ -0,0 +1,44 @@
.. _delete_nodes:
Deleting Overcloud Nodes
========================
You can delete specific nodes from an overcloud with the command::
openstack overcloud node delete --stack $STACK_NAME --plan $PLAN_UUID <list of nova instance IDs>
This command updates the number of nodes in the Tuskar plan and then updates the
Heat stack with the updated numbers and a list of resource IDs (which represent
nodes) to be deleted.
.. note::
If you passed any extra environment files when you created the overcloud (for
instance, in order to configure :doc:`network isolation
<../advanced_deployment/network_isolation>`), you must pass them again here
using the ``-e`` or ``--environment-file`` option to avoid making undesired
changes to the overcloud.
.. note::
Before deleting a compute node, please make sure that the node is quiesced;
see :ref:`quiesce_compute`.
.. note::
A list of Nova instance IDs can be obtained with the command::
nova list
Deleting nodes without using Tuskar
-----------------------------------
If the overcloud was :doc:`deployed from heat templates directly
<../advanced_deployment/template_deploy>` then use the ``--templates``
parameter when deleting nodes::
openstack overcloud node delete --stack $STACK_NAME --templates [templates dir] <list of nova instance IDs>
If you passed any extra environment files when you created the overcloud (for
instance, in order to configure :doc:`network isolation
<../advanced_deployment/network_isolation>`), you must pass them again here
using the ``-e`` or ``--environment-file`` option to avoid making undesired
changes to the overcloud.
@ -0,0 +1,53 @@
Migrating Workloads from an existing OpenStack cloud
====================================================
|project| provides the ability to manage changes over time to a cloud that it
has deployed. However, it cannot automatically take over the management of
existing OpenStack clouds deployed with another installer. Since there can be
no one-size-fits-all procedure for upgrading an existing cloud to use
|project|, it is recommended that a new cloud be deployed with |project| and
any workloads running on an existing cloud be migrated off.
Migrating User Workloads
------------------------
Since the best way of avoiding or handling any downtime associated with moving
an application from one cloud to another is application-dependent, it is
preferable to have end users migrate their own applications at a time and in
the manner of their choosing. This can also help to spread out the network
bandwidth requirements, rather than copying a large number of snapshots in
bulk.
Ideally applications can be re-created from first principles (an Orchestration
tool such as Heat can help make this repeatable) and any data populated after
the fact. This allows the new VMs to be backed by a copy-on-write disk image
overlaid on the original base image. The alternative is to :doc:`export and
then import <./vm_snapshot>` snapshots of the VM images. This may require
considerably more disk space as each VM's base image becomes its snapshot,
where previously multiple VMs may have shared the same base image.
Reclaiming Excess Capacity
--------------------------
As workloads are migrated off the previous cloud, compute node hardware can be
freed up to reallocate to the new cloud. Since there is likely no guarantee as
to the order in which users will migrate, it will be necessary to consolidate
the remaining VMs onto a smaller number of machines as utilization drops. This
can be done by performing live migration within the old cloud.
Select a compute node to remove from service and follow the procedure for
:doc:`quiesce_compute`. Once this is done, the node can be removed from the old
cloud and the hardware reused, possibly by adding it to the new cloud.
Adding New Capacity
-------------------
As utilization of the new cloud increases and hardware becomes available from
the old cloud, additional compute nodes can be added to the new cloud with
|project|.
First, register and introspect the additional hardware with Ironic just as you
would have done when :doc:`initially deploying
<../basic_deployment/basic_deployment_cli>` the cloud with |project|. Then
:doc:`scale out <scale_roles>` the 'Compute' role in the new overcloud to start
making use of the additional capacity.
@ -0,0 +1,41 @@
Updating Packages on Overcloud Nodes
====================================
You can update packages on all overcloud nodes with the command::
openstack overcloud update stack --plan $PLAN_UUID -i overcloud
This command updates the UpdateIdentifier parameter in the overcloud Tuskar plan
and triggers a stack update operation. If this parameter is set, the 'yum update'
command is executed on each node. Because running the update on all nodes in
parallel might be unsafe (an update of a package might involve restarting
a service), the command above sets breakpoints on each overcloud node so nodes
are updated one by one. When the update is finished on a node, the command
will prompt for removing the breakpoint on the next one.
.. note::
Multiple breakpoints can be removed by specifying a list of nodes with a
regular expression.
.. note::
Make sure you use the `-i` parameter, otherwise the update runs in the
background and doesn't prompt for the removal of breakpoints.
.. note::
If the update command is aborted for some reason, you can always continue
the process by re-running the same command.
Updating Packages on Overcloud Nodes Without Using Tuskar
---------------------------------------------------------
If the overcloud was :doc:`deployed from heat templates directly
<../advanced_deployment/template_deploy>` then use the ``--templates``
parameter when updating packages::
openstack overcloud update stack --templates [templates dir] -i overcloud
If you passed any extra environment files when you created the overcloud (for
instance, in order to configure :doc:`network isolation
<../advanced_deployment/network_isolation>`), you must pass them again here
using the ``-e`` or ``--environment-file`` option to avoid making undesired
changes to the overcloud.
@ -0,0 +1,15 @@
Post Deployment
===============
This chapter covers advanced management of various |project| areas.
.. toctree::
Migrating Workloads from an Existing OpenStack Cloud <migration>
Scaling Overcloud Roles <scale_roles>
Deleting Overcloud Nodes <delete_nodes>
Quiescing a Compute node <quiesce_compute>
Replacing a Failed Controller Node <replace_controller>
Import/Export of VM Snapshots <vm_snapshot>
Updating Packages on Overcloud Nodes <package_update>
@ -0,0 +1,83 @@
.. _quiesce_compute:
Quiescing a Compute Node
========================
The process of quiescing a compute node means to migrate workload off the node
so that it can be shut down without affecting the availability of end-users'
VMs. You might want to perform this procedure when rebooting a compute node to
ensure that package updates are applied (e.g. after a kernel update); to
consolidate workload onto a smaller number of machines when scaling down an
overcloud; or when replacing the compute node hardware.
Setting up Keys
---------------
Assuming that the backing files for Nova VMs are not hosted on a shared storage
volume (with all compute nodes having access), the compute nodes will need to
be configured with ssh keys so that the `nova` user on each compute node has
ssh access to the corresponding account on the other compute nodes.
First, generate an ssh key::
ssh-keygen -t rsa -f nova_id_rsa
Then, on each compute node, run the following script to set up the keys::
NOVA_SSH=/var/lib/nova/.ssh
mkdir ${NOVA_SSH}
cp nova_id_rsa ${NOVA_SSH}/id_rsa
chmod 600 ${NOVA_SSH}/id_rsa
cp nova_id_rsa.pub ${NOVA_SSH}/id_rsa.pub
cp nova_id_rsa.pub ${NOVA_SSH}/authorized_keys
chown -R nova.nova ${NOVA_SSH}
# enable login for nova user on compute hosts:
usermod -s /bin/bash nova
# add ssh keys of overcloud nodes into known hosts:
ssh-keyscan -t rsa `os-apply-config --key hosts --type raw --key-default '' | awk '{print $1}'` >>/etc/ssh/ssh_known_hosts
Initiating Migration
--------------------
First, obtain a list of the current Nova services::
source ~stack/overcloudrc # admin credentials for the overcloud
nova service-list
Disable the `nova-compute` service on the node you wish to quiesce, to prevent
new VMs being scheduled on it::
nova service-disable <service-host> nova-compute
Begin the process of migrating VMs off the node::
nova host-servers-migrate <service-host>
Completing Migration
--------------------
The current status of the migration process can be retrieved with the command::
nova migration-list
When migration of each VM completes, its state in Nova will change to
`VERIFY_RESIZE`. This gives you an opportunity to confirm that the migration
completed successfully, or to roll it back. To confirm the migration, use the
command::
nova resize-confirm <server-name>
Finally, once all migrations are complete and confirmed, remove the service
running (but disabled) on the compute node from Nova altogether::
nova service-delete <service-id>
You are now free to reboot or shut down the node (using the Ironic API), or
even remove it from the overcloud altogether by scaling down the overcloud
deployment, see :ref:`delete_nodes`.
@ -0,0 +1,105 @@
.. _replace_controller:
Replacing a Controller Node
===========================
Replace Failed Node
-------------------
First, create a YAML file defining the index of the node to remove. The node
index reflects the suffix of the instance name in `nova list` output. In the
following text, the `overcloud-controller-1` node is replaced with `overcloud-controller-3`::
$ cat ~/remove.yaml
parameters:
  ControllerRemovalPolicies:
    [{'resource_list': ['1']}]
Then, re-deploy overcloud including the extra environment file::
openstack overcloud deploy --templates --control-scale 3 -e ~/remove.yaml
The old node will be removed and a new one will be added.
Because some Puppet modules don't support node replacement, the re-deployment
fails and a couple of manual changes are needed to fix the controller node setup.
Connect to one of the remaining controller nodes and delete the failed node
from Pacemaker/Corosync cluster::
crm_node -R overcloud-controller-1 --force
Delete the failed node from RabbitMQ cluster::
rabbitmqctl forget_cluster_node rabbit@overcloud-controller-1
Delete the failed node from MongoDB::
# connect to MongoDB on any of the remaining nodes:
mongo --host <node ip>
# check status of MongoDB cluster:
rs.status()
# remove the failed node:
rs.remove('<node_ip>:27017')
Update the list of nodes in the Galera cluster::
pcs resource update galera wsrep_cluster_address=gcomm://overcloud-controller-0,overcloud-controller-3,overcloud-controller-2
Start Pacemaker/Corosync on the new node::
pcs cluster node add overcloud-controller-3
pcs cluster start overcloud-controller-3
Enable the Keystone service on the new node::
copy /etc/keystone from a remaining node to the new node
set admin_bind_host and public_bind_host in /etc/keystone/keystone.conf to the node's IP
pcs resource cleanup openstack-keystone-clone overcloud-controller-3
Re-deploy overcloud again::
openstack overcloud deploy --templates --control-scale 3
.. note::
If the deployment fails with the error `Failed to call refresh: Could not restart Service[httpd]`,
then try re-deploying again.
Completing Update
-----------------
Delete the failed node from the `/etc/corosync/corosync.conf` file and restart
Corosync on each node, one by one::
systemctl restart corosync
When the re-deployment finishes, connect to one of the controller nodes and start
services on the new node::
pcs resource cleanup neutron-server-clone
pcs resource cleanup openstack-nova-api-clone
pcs resource cleanup openstack-nova-consoleauth-clone
pcs resource cleanup openstack-heat-engine-clone
pcs resource cleanup openstack-cinder-api-clone
pcs resource cleanup openstack-glance-registry-clone
pcs resource cleanup httpd-clone
Replacing Bootstrap Node
------------------------
If the node with index 0 is being replaced, it's necessary to edit the heat
templates and change the bootstrap node index before starting the replacement.
Open the `overcloud-without-mergepy.yaml` file in the root directory of the heat
templates and change the lines::
bootstrap_nodeid: {get_attr: [Controller, resource.0.hostname]}
bootstrap_nodeid_ip: {get_attr: [Controller, resource.0.ip_address]}
to::
bootstrap_nodeid: {get_attr: [Controller, resource.1.hostname]}
bootstrap_nodeid_ip: {get_attr: [Controller, resource.1.ip_address]}
Tuskar doesn't support template editing, so this change is possible only
if the overcloud is deployed from :doc:`templates directly <../advanced_deployment/template_deploy>`.
@ -0,0 +1,42 @@
Scaling overcloud roles
=======================
If you want to increase or decrease the resource capacity of a running overcloud,
you can start more servers of a selected role, or delete some servers if
capacity should be decreased. First, update the Overcloud plan with the new
number of nodes for the role::
openstack management plan set $PLAN_UUID -S Compute-1=5
.. note::
The role is specified including the role version. Both role names
and role versions can be listed with the command::
openstack role list
And then re-deploy the Overcloud with the updated plan::
openstack overcloud deploy --plan $PLAN_UUID
.. note::
Scaling out assumes that the newly added nodes have already been
registered in Ironic.
.. note::
When scaling down, random servers of the specified role will be deleted; how to
delete specific nodes is described in :ref:`delete_nodes`.
Scaling overcloud roles without using Tuskar
--------------------------------------------
If the overcloud was :doc:`deployed from heat templates directly
<../advanced_deployment/template_deploy>` then you can just re-deploy the
overcloud with ``--templates`` and ``--<role>-scale`` parameters::
openstack overcloud deploy --templates [templates dir] --compute-scale 5
If you passed any extra environment files when you created the overcloud (for
instance, in order to configure :doc:`network isolation
<../advanced_deployment/network_isolation>`), you must pass them again here
using the ``-e`` or ``--environment-file`` option to avoid making undesired
changes to the overcloud.
@ -0,0 +1,26 @@
Import/Export of VM Snapshots
=============================
Create a snapshot of a running server
-------------------------------------
Create a new image by taking a snapshot of a running server and download the
image.
::
nova image-create instance_name image_name
glance image-download image_name --file exported_vm.qcow2
Import an image into Overcloud and launch an instance
-----------------------------------------------------
Upload the exported image into Glance in the Overcloud and launch a new instance.
::
glance image-create --name imported_image --file exported_vm.qcow2 --disk-format qcow2 --container-format bare
nova boot --poll --key-name default --flavor m1.demo --image imported_image --nic net-id=net_id imported
.. note::
**Warning**: a disadvantage of using Glance images to export/import VMs is
that each VM disk has to be copied into and out of Glance in the source and
target clouds. Also, by making a snapshot, the qcow layering system is lost.
@ -0,0 +1,16 @@
Troubleshooting Image Build
===========================
Images fail to build
--------------------
More space needed
^^^^^^^^^^^^^^^^^
Images are built in tmpfs by default, to speed up the builds. In case
your machine doesn't have enough free RAM, the image building step
can fail with a message like "At least 174MB more space needed on
the / filesystem". If freeing up more RAM isn't a possibility,
images can be built on disk by exporting an environment variable::
export DIB_NO_TMPFS=1
@ -0,0 +1,91 @@
Troubleshooting Node Management Failures
========================================
Where Are the Logs?
-------------------
Some logs are stored in *journald*, but most are stored as text files in
``/var/log``. Ironic and ironic-discoverd logs are stored in journald. Note
that Ironic has 2 units: ``openstack-ironic-api`` and
``openstack-ironic-conductor``. Similarly, ironic-discoverd has
``openstack-ironic-discoverd`` and ``openstack-ironic-discoverd-dnsmasq``. So,
for example, to get all ironic-discoverd logs, use::
sudo journalctl -u openstack-ironic-discoverd -u openstack-ironic-discoverd-dnsmasq
If something fails during the discovery ramdisk run, ironic-discoverd
stores the ramdisk logs in ``/var/log/ironic-discoverd/ramdisk/`` as
gz-compressed tar files. File names contain the date, time and IPMI address of the
node if it was detected (only for bare metal).
.. _node_registration_problems:
Node Registration Problems
--------------------------
Any problems with node data registered into Ironic can be fixed using the
Ironic CLI.
For example, a wrong MAC can be fixed in two steps:
* Find out the assigned port UUID by running
::
ironic node-port-list <NODE UUID>
* Update the MAC address by running
::
ironic port-update <PORT UUID> replace address=<NEW MAC>
A wrong IPMI address can be fixed with the following command::
ironic node-update <NODE UUID> replace driver_info/ipmi_address=<NEW IPMI ADDRESS>
.. _introspection_problems:
Hardware Introspection Problems
--------------------------------
Discovery hangs and times out
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
ironic-discoverd times out the discovery process after some time (defaulting to 1
hour) if it never gets a response from the discovery ramdisk. This can be
a sign of a bug in the discovery ramdisk, but usually it happens due to
environment misconfiguration, particularly BIOS boot settings. Please refer to
`ironic-discoverd troubleshooting documentation`_ for information on how to
detect and fix such problems.
Refusing to introspect node with provision state "available"
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
If you're running discovery directly using the ironic-discoverd CLI (or in case of
bugs in our scripts), a node can be in the "AVAILABLE" state, which is meant for
deployment, not for discovery. You should advance the node to the "MANAGEABLE" state
before discovery and move it back before deployment, as sketched below.
Please refer to `upstream node states documentation
<https://github.com/openstack/ironic-inspector#node-states>`_ for information
on how to fix it.
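A sketch of moving a node between these states with the Ironic CLI (the
``manage`` and ``provide`` verbs assume a Kilo-era state machine; check your
client version)::
ironic node-set-provision-state <NODE UUID> manage
# run discovery, then make the node available for deployment again:
ironic node-set-provision-state <NODE UUID> provide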
How can discovery be stopped?
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Currently ironic-discoverd does not provide a means of stopping discovery. The
recommended path is to wait until it times out. Changing the ``timeout`` setting
in ``/etc/ironic-discoverd/discoverd.conf`` may be used to reduce this timeout
from 1 hour (which is usually too much, especially in a virtual environment).
If you do need to stop discovery **for all nodes** right now, do the
following for each node::
ironic node-set-power-state UUID off
then remove the ironic-discoverd cache and restart the service::
rm /var/lib/ironic-discoverd/discoverd.sqlite
sudo systemctl restart openstack-ironic-discoverd
.. _ironic-discoverd troubleshooting documentation: https://github.com/openstack/ironic-inspector#troubleshooting
@ -0,0 +1,288 @@
Troubleshooting a Failed Overcloud Deployment
=============================================
If an Overcloud deployment has failed, the OpenStack clients and service log
files can be used to troubleshoot the failed deployment. The following commands
are all run on the Undercloud and assume a stackrc file has been sourced.
Identifying Failed Component
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
In most cases, Heat will show the failed overcloud stack when a deployment
has failed::
$ heat stack-list
+--------------------------------------+------------+--------------------+----------------------+
| id | stack_name | stack_status | creation_time |
+--------------------------------------+------------+--------------------+----------------------+
| 7e88af95-535c-4a55-b78d-2c3d9850d854 | overcloud | CREATE_FAILED | 2015-04-06T17:57:16Z |
+--------------------------------------+------------+--------------------+----------------------+
Occasionally, Heat is not even able to create the stack, so the ``heat
stack-list`` output will be empty. If this is the case, observe the message
that was printed to the terminal when ``instack-deploy-overcloud`` or ``heat
stack-create`` was run.
Next, there are a few layers on which the deployment can fail:
* Orchestration (Heat and Nova services)
* Bare metal provisioning (Ironic service)
* Post-deploy configuration (Puppet)
As the Ironic service is in the middle layer, you can use its shell to determine the
failed layer. Issue the ``ironic node-list`` command to see all registered nodes
and their current status; you will see something like::
+--------------------------------------+------+---------------+-------------+-----------------+-------------+
| UUID | Name | Instance UUID | Power State | Provision State | Maintenance |
+--------------------------------------+------+---------------+-------------+-----------------+-------------+
| f1e26112-5fbd-4fc4-9612-ecce7a1d86aa | None | None | power off | available | False |
| f0b8c105-f1d7-4059-a9a3-b050c3340340 | None | None | power off | available | False |
+--------------------------------------+------+---------------+-------------+-----------------+-------------+
Pay close attention to **Provision State** and **Maintenance** columns
in the resulting table.
* If the command shows an empty table or fewer nodes than you expect, or
**Maintenance** is ``True``, or **Provision State** is ``manageable``,
there was a problem during node enrollment and introspection.
Please go back to these steps.
For example, **Maintenance** goes to ``True`` automatically if wrong power
credentials are provided.
* If **Provision State** is ``available`` then the problem occurred before
the bare metal deployment even started. Proceed with `Debugging Using Heat`_.
* If **Provision State** is ``active`` and **Power State** is ``power on``,
then the bare metal deployment finished successfully, and the problem happened
during the post-deployment configuration step. Again, refer to `Debugging
Using Heat`_.
* If **Provision State** is ``wait call-back``, then the bare metal deployment is
not finished for this node yet. You may want to wait until the status
changes.
* If **Provision State** is ``error`` or ``deploy failed``, then the bare metal
deployment has failed for this node. Issue
::
ironic node-show <UUID>
and look for the **last_error** field. It will contain the error description.
If the error message is vague, you can use logs to clarify it::
sudo journalctl -u openstack-ironic-conductor -u openstack-ironic-api
If you see a wait timeout error, and the node's **Power State** is ``power on``,
then try to connect to the virtual console of the failed machine. Use
the ``virt-manager`` tool for virtual machines and a vendor-specific virtual
console (e.g. iDRAC for Dell) for bare metal machines.
Debugging Using Heat
~~~~~~~~~~~~~~~~~~~~
* Identifying the failed Heat resource
List all the stack resources to see which one failed.
::
$ heat resource-list overcloud
+-----------------------------------+-----------------------------------------------+---------------------------------------------------+-----------------+----------------------+
| resource_name | physical_resource_id | resource_type | resource_status | updated_time |
+-----------------------------------+-----------------------------------------------+---------------------------------------------------+-----------------+----------------------+
| BlockStorage | 9e40a1ee-96d3-4920-868d-683d3788e129 | OS::Heat::ResourceGroup | CREATE_COMPLETE | 2015-04-06T21:15:20Z |
| BlockStorageAllNodesDeployment | 2c453f6b-7378-44c8-a0ad-57de57d9c57f | OS::Heat::StructuredDeployments | CREATE_COMPLETE | 2015-04-06T21:15:20Z |
| BlockStorageNodesPostDeployment | | OS::TripleO::BlockStoragePostDeployment | INIT_COMPLETE | 2015-04-06T21:15:20Z |
| CephClusterConfig | 1684e7a3-0e42-44fe-9db4-7543b742fbfc | OS::TripleO::CephClusterConfig::SoftwareConfig | CREATE_COMPLETE | 2015-04-06T21:15:20Z |
| CephStorage | 48b3460c-bf9a-4663-99fc-2b4fa01b8dc1 | OS::Heat::ResourceGroup | CREATE_COMPLETE | 2015-04-06T21:15:20Z |
| CephStorageAllNodesDeployment | 76beb3a9-8327-4d2e-a206-efe12f1613fb | OS::Heat::StructuredDeployments | CREATE_COMPLETE | 2015-04-06T21:15:20Z |
| CephStorageCephDeployment | af8fb02a-5bc6-468c-8fac-fbe7e5b2c689 | OS::Heat::StructuredDeployments | CREATE_COMPLETE | 2015-04-06T21:15:20Z |
| CephStorageNodesPostDeployment | | OS::TripleO::CephStoragePostDeployment | INIT_COMPLETE | 2015-04-06T21:15:20Z |
| Compute | e5e6ec84-197f-4bf6-b8ac-eb11fe494cdf | OS::Heat::ResourceGroup | CREATE_COMPLETE | 2015-04-06T21:15:20Z |
| ComputeAllNodesDeployment | e6d44fbf-9683-4765-acbb-4a3d31c8fd48 | OS::Heat::StructuredDeployments | CREATE_COMPLETE | 2015-04-06T21:15:20Z |
| ControllerNodesPostDeployment | e551e472-f2db-4468-b586-0374678d71a3 | OS::TripleO::ControllerPostDeployment | CREATE_FAILED | 2015-04-06T21:15:20Z |
| ComputeCephDeployment | 673608d5-70d7-453a-ac78-7987bc2c0158 | OS::Heat::StructuredDeployments | CREATE_COMPLETE | 2015-04-06T21:15:20Z |
| ComputeNodesPostDeployment | 1078e3e3-9f6f-48b9-8961-a30f44098856 | OS::TripleO::ComputePostDeployment | CREATE_COMPLETE | 2015-04-06T21:15:20Z |
| ControlVirtualIP | 6402b396-84aa-4cf6-9849-305205755604 | OS::Neutron::Port | CREATE_COMPLETE | 2015-04-06T21:15:20Z |
| Controller | ffc45352-9708-486d-81ac-3b60efa8e8b8 | OS::Heat::ResourceGroup | CREATE_COMPLETE | 2015-04-06T21:15:20Z |
| ControllerAllNodesDeployment | f73c6e33-3dd2-46f1-9eca-0d2981a4a986 | OS::Heat::StructuredDeployments | CREATE_COMPLETE | 2015-04-06T21:15:20Z |
| ControllerBootstrapNodeConfig | 01ce5b6a-794a-4828-bad9-49d5fbfd55bf | OS::TripleO::BootstrapNode::SoftwareConfig | CREATE_COMPLETE | 2015-04-06T21:15:20Z |
| ControllerBootstrapNodeDeployment | c963d53d-879b-4a41-a10a-9000ac9f02a1 | OS::Heat::StructuredDeployments | CREATE_COMPLETE | 2015-04-06T21:15:20Z |
| ControllerCephDeployment | 2d4281df-31ea-4433-820d-984a6dca6eb1 | OS::Heat::StructuredDeployments | CREATE_COMPLETE | 2015-04-06T21:15:20Z |
| ControllerClusterConfig | 719c0d30-a4b8-4f77-9ab6-b3c9759abeb3 | OS::Heat::StructuredConfig | CREATE_COMPLETE | 2015-04-06T21:15:20Z |
| ControllerClusterDeployment | d929aa40-1b73-429e-81d5-aaf966fa6756 | OS::Heat::StructuredDeployments | CREATE_COMPLETE | 2015-04-06T21:15:20Z |
| ControllerSwiftDeployment | cf28f9fe-025d-4eed-b3e5-3a5284a2aa60 | OS::Heat::StructuredDeployments | CREATE_COMPLETE | 2015-04-06T21:15:20Z |
| HeatAuthEncryptionKey | overcloud-HeatAuthEncryptionKey-5uw6wo7kavnq | OS::Heat::RandomString | CREATE_COMPLETE | 2015-04-06T21:15:20Z |
| MysqlClusterUniquePart | overcloud-MysqlClusterUniquePart-vazyj2s4n2o5 | OS::Heat::RandomString | CREATE_COMPLETE | 2015-04-06T21:15:20Z |
| MysqlRootPassword | overcloud-MysqlRootPassword-nek2iky7zfdm | OS::Heat::RandomString | CREATE_COMPLETE | 2015-04-06T21:15:20Z |
| ObjectStorage | 47327c98-533e-4cc2-b1f3-d8d0eedba822 | OS::Heat::ResourceGroup | CREATE_COMPLETE | 2015-04-06T21:15:20Z |
| ObjectStorageAllNodesDeployment | 7bb691aa-fa93-4f10-833e-6edeccc61408 | OS::Heat::StructuredDeployments | CREATE_COMPLETE | 2015-04-06T21:15:20Z |
| ObjectStorageNodesPostDeployment | d4d16f39-384a-4d6a-9719-1dd9b2d4ff09 | OS::TripleO::ObjectStoragePostDeployment | CREATE_COMPLETE | 2015-04-06T21:15:20Z |
| ObjectStorageSwiftDeployment | afc87385-8b40-4097-b529-2a5bc81c94c8 | OS::Heat::StructuredDeployments | CREATE_COMPLETE | 2015-04-06T21:15:20Z |
| PublicVirtualIP | 4dd92878-8f29-49d8-9d3d-bc0cd44d26a9 | OS::Neutron::Port | CREATE_COMPLETE | 2015-04-06T21:15:20Z |
| RabbitCookie | overcloud-RabbitCookie-uthzbos3l66v | OS::Heat::RandomString | CREATE_COMPLETE | 2015-04-06T21:15:20Z |
| SwiftDevicesAndProxyConfig | e2141170-bb77-4509-b8bd-58447b2cd15f | OS::TripleO::SwiftDevicesAndProxy::SoftwareConfig | CREATE_COMPLETE | 2015-04-06T21:15:20Z |
| allNodesConfig | cbd42692-fffa-4527-a519-bd4014ebf0fb | OS::TripleO::AllNodes::SoftwareConfig | CREATE_COMPLETE | 2015-04-06T21:15:20Z |
+-----------------------------------+-----------------------------------------------+---------------------------------------------------+-----------------+----------------------+
In this example, notice how the **ControllerNodesPostDeployment** resource
has failed. The **\*PostDeployment** resources are the configuration that is
applied to the deployed Overcloud nodes. When these resources have failed it
indicates that something went wrong during the Overcloud node configuration,
perhaps when Puppet was run.
* Show the failed resource
::
$ heat resource-show overcloud ControllerNodesPostDeployment
+------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| Property | Value |
+------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| attributes | {} |
| description | |
| links | http://192.0.2.1:8004/v1/cea2a0c78d2447bc9a0f7caa35c9224c/stacks/overcloud/ec3e3251-f949-4df9-92be-dbd37c6992a1/resources/ControllerNodesPostDeployment (self) |
| | http://192.0.2.1:8004/v1/cea2a0c78d2447bc9a0f7caa35c9224c/stacks/overcloud/ec3e3251-f949-4df9-92be-dbd37c6992a1 (stack) |
| | http://192.0.2.1:8004/v1/cea2a0c78d2447bc9a0f7caa35c9224c/stacks/overcloud-ControllerNodesPostDeployment-6kcqm5zuymqu/e551e472-f2db-4468-b586-0374678d71a3 (nested) |
| logical_resource_id | ControllerNodesPostDeployment |
| physical_resource_id | e551e472-f2db-4468-b586-0374678d71a3 |
| required_by | BlockStorageNodesPostDeployment |
| | CephStorageNodesPostDeployment |
| resource_name | ControllerNodesPostDeployment |
| resource_status | CREATE_FAILED |
| resource_status_reason | ResourceUnknownStatus: Resource failed - Unknown status FAILED due to "None" |
| resource_type | OS::TripleO::ControllerPostDeployment |
| updated_time | 2015-04-06T21:15:20Z |
+------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------+
The ``resource-show`` output doesn't always show a clear reason why the resource
failed. In these cases, logging into the Overcloud node is required to
troubleshoot the issue further.
* Logging into Overcloud nodes
Use the nova client to see the IP addresses of the Overcloud nodes.
::
$ nova list
+--------------------------------------+-------------------------------------------------------+--------+------------+-------------+---------------------+
| ID | Name | Status | Task State | Power State | Networks |
+--------------------------------------+-------------------------------------------------------+--------+------------+-------------+---------------------+
| 18014b02-b143-4ca2-aeb9-5553bec93cff | ov-4tvbtgpv7w-0-soqocxy2w4fr-NovaCompute-nlrxd3lgmmlt | ACTIVE | - | Running | ctlplane=192.0.2.13 |
| 96a57a46-1e48-4c66-adaa-342ee4e98972 | ov-rf4hby6sblk-0-iso3zlqmyzfe-Controller-xm2imjkzalhi | ACTIVE | - | Running | ctlplane=192.0.2.14 |
+--------------------------------------+-------------------------------------------------------+--------+------------+-------------+---------------------+
Log in as the ``heat-admin`` user to one of the deployed nodes. In this
example, since the **ControllerNodesPostDeployment** resource failed, log in
to the controller node. The ``heat-admin`` user has sudo access.
::
$ ssh heat-admin@192.0.2.14
While logged in to the controller node, examine the ``os-collect-config`` log
for a possible reason for the failure.
::
$ sudo journalctl -u os-collect-config
* Failed Nova Server ResourceGroup Deployments
In some cases, Nova fails to deploy the node entirely. This situation
is indicated by a failed ``OS::Heat::ResourceGroup`` for one of the
Overcloud role types such as Control or Compute.
Use nova to see the failure in this case.
::
$ nova list
$ nova show <server-id>
The most common error will reference the message ``No valid host
was found``. Refer to `No Valid Host Found Error`_ below.
In other cases, look at the following log files for further troubleshooting::
/var/log/nova/*
/var/log/heat/*
/var/log/ironic/*
* Using SOS
SOS is a set of tools that gathers information about system hardware and
configuration. The information can then be used for diagnostic purposes and
debugging. SOS is commonly used to help support technicians and developers.
SOS is useful on both the undercloud and overcloud. Install the ``sos``
package and then generate a report::
$ sudo sosreport --all-logs
No Valid Host Found Error
~~~~~~~~~~~~~~~~~~~~~~~~~
Sometimes ``/var/log/nova/nova-conductor.log`` contains the following error::
NoValidHost: No valid host was found. There are not enough hosts available.
"No valid host was found" means that the Nova Scheduler could not find a bare
metal node suitable for booting the new instance.
This in turn usually means some mismatch between resources that Nova expects
to find and resources that Ironic advertised to Nova.
A few things should be checked in this case:
#. Introspection should have succeeded for you before, or you should have
entered the required Ironic node properties manually.
For each node in ``ironic node-list`` use
::
ironic node-show <IRONIC-NODE-UUID>
and make sure that the ``properties`` JSON field has valid values for the keys
``cpus``, ``cpu_arch``, ``memory_mb`` and ``local_gb``.
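A sketch of entering these properties manually, reusing the ``node-update``
pattern from the registration section above (the values are illustrative)::
ironic node-update <IRONIC-NODE-UUID> replace properties/cpus=4 properties/memory_mb=8192 properties/local_gb=100 properties/cpu_arch=x86_64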
#. Make sure the Nova flavor that you are using does not exceed the Ironic node
properties above for the required number of nodes. Use
::
nova flavor-show <FLAVOR NAME>
to compare.
#. Make sure that enough nodes are in ``available`` state according to
``ironic node-list``. Nodes in ``manageable`` state usually mean they
have failed introspection.
#. Make sure the nodes you're going to deploy to are not in maintenance mode.
Again, use ``ironic node-list`` to check. A node automatically going to
maintenance mode usually means wrong power credentials for that node. Check
them and then remove maintenance mode::
ironic node-set-maintenance <IRONIC-NODE-UUID> off
#. If you're using advanced profile matching with multiple flavors, make sure
you have enough nodes corresponding to each flavor/profile. Check the
``capabilities`` key in the ``properties`` field of ``ironic node-show`` output.
It should contain e.g. ``profile:compute``.
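A sketch of assigning a profile manually, again via ``node-update`` (the
profile value is illustrative)::
ironic node-update <IRONIC-NODE-UUID> replace properties/capabilities=profile:compute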
#. It takes some time for node information to propagate from Ironic to Nova
after introspection. Our tooling usually accounts for it, but if you did
some steps manually, there may be a period of time when nodes are not
available to Nova yet. Check that
::
nova hypervisor-stats
correctly shows the total amount of resources in your system.
@ -0,0 +1,5 @@
Debugging TripleO Heat Templates
================================
Please follow this flow:
http://hardysteven.blogspot.co.uk/2015/04/debugging-tripleo-heat-templates.html
@ -0,0 +1,22 @@
Troubleshooting instack-virt-setup Failures
===========================================
* Due to a `bug in libvirt`_, it is possible for instack-virt-setup to fail
with an error such as the following::
libvirt: QEMU Driver error : unsupported configuration: This QEMU doesn't support virtio scsi controller
Traceback (most recent call last):
File "/usr/libexec/openstack-tripleo/configure-vm", line 133, in <module>
main()
File "/usr/libexec/openstack-tripleo/configure-vm", line 129, in main
a = conn.defineXML(libvirt_template)
File "/usr/lib64/python2.7/site-packages/libvirt.py", line 3445, in defineXML
if ret is None:raise libvirtError('virDomainDefineXML() failed', conn=self)
libvirt.libvirtError: unsupported configuration: This QEMU doesn't support virtio scsi controller
The workaround is to delete the libvirt capabilities cache and restart the service::
rm -Rf /var/cache/libvirt/qemu/capabilities/
systemctl restart libvirtd
.. _bug in libvirt: https://bugzilla.redhat.com/show_bug.cgi?id=1195882
@ -0,0 +1,14 @@
Troubleshooting
===============
In this chapter you will find answers to frequently asked questions and
help with troubleshooting when using |project|.
.. toctree::
Virtual Setup Problems <troubleshooting-virt-setup>
Image Building Problems <troubleshooting-image-build>
Node Management Problems <troubleshooting-nodes>
Failed Overcloud Deployment <troubleshooting-overcloud>
TripleO Heat Templates <troubleshooting-tripleo-heat-templates>
requirements.txt Normal file
@ -0,0 +1,3 @@
six>=1.9.0
python-novaclient
oslo.config
@ -1,30 +1,20 @@
[metadata]
name = tripleo-docs
author = OpenStack
author-email = openstack-dev@lists.openstack.org
summary = Docs for TripleO
description-file =
README.rst
home-page = http://git.openstack.org/cgit/openstack/tripleo-docs
classifier =
Environment :: OpenStack
Intended Audience :: Developers
Intended Audience :: Information Technology
License :: OSI Approved :: Apache Software License
Operating System :: OS Independent
[build_sphinx]
all_files = 1
build-dir = doc/build
source-dir = doc/source
[egg_info]
tag_build =
tag_date = 0
tag_svn_revision = 0
[wheel]
universal = 1
[pbr]
warnerrors = True
summary = tripleo-docs
description-file =
README.md
author = James Slagle
author-email = jslagle@redhat.com
home-page = http://docs.openstack.org/tripleo-docs
classifier =
Environment :: OpenStack
Intended Audience :: Information Technology
Intended Audience :: System Administrators
License :: OSI Approved :: Apache Software License
Operating System :: POSIX :: Linux
Programming Language :: Python
Programming Language :: Python :: 2
Programming Language :: Python :: 2.7
Programming Language :: Python :: 2.6
Programming Language :: Python :: 3
Programming Language :: Python :: 3.3
@ -1,4 +1,5 @@
#!/usr/bin/env python
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

oslosphinx
sphinx>=1.1.2,<1.2
# Doc requirements
sphinx>=1.1.2,!=1.2.0,!=1.3b1,<1.3
oslosphinx>=2.2.0 # Apache-2.0
sphinx_rtd_theme==0.1.7
hacking>=0.10.0,<0.11
discover
fixtures>=0.3.14
python-subunit>=0.0.18
testrepository>=0.0.18
testscenarios>=0.4
testtools>=0.9.36,!=1.2.0
mock>=1.0
oslotest>=1.5.1 # Apache-2.0
bashate
tox.ini
@ -1,11 +1,32 @@
[tox]
envlist = docs
minversion = 1.6
skipsdist = True
envlist = py34,py27,pep8
[testenv]
usedevelop = True
install_command = pip install {opts} {packages}
setenv = VIRTUAL_ENV={envdir}
deps = -r{toxinidir}/test-requirements.txt
-r{toxinidir}/requirements.txt
commands = python setup.py testr --slowest --testr-args='{posargs}'
[testenv:venv]
commands = {posargs}
[testenv:docs]
commands = python setup.py build_sphinx
commands = sphinx-build -a doc/source/ build/sphinx/html
[testenv:pep8]
whitelist_externals = bash
# We fail pretty horribly on bashate right now
#commands = bash -c "find scripts -type f | xargs bashate -v"
# flake8
commands = flake8
[flake8]
ignore = H803
show-source = True
exclude = .tox,dist,doc,*.egg,build
[testenv:genconfig]
commands = oslo-config-generator --config-file config-generator/undercloud.conf